mirror of https://github.com/kubernetes/kops.git
make gomod crds
This commit is contained in:
parent
82e25599f7
commit
3a616b6721
1
go.sum
1
go.sum
|
|
@ -1212,6 +1212,7 @@ k8s.io/cloud-provider v0.20.0/go.mod h1:Lz/luSVD5BrHDDhtVdjFh0C2qQCRYdf0b9BHQ9L+
|
|||
k8s.io/cloud-provider-openstack v1.19.2 h1:u0bvV8gnro4kJkwpQZTO3sQWbrocQMggdXn95jQecjA=
|
||||
k8s.io/cloud-provider-openstack v1.19.2/go.mod h1:J/X/tgKwUDtsrfdJ4aL0iEQLi/8chdbs4rAiYznYwPQ=
|
||||
k8s.io/cluster-bootstrap v0.20.0/go.mod h1:6WZaNIBvcvL7MkPzSRKrZDIr4u+ePW2oIWoRsEFMjmE=
|
||||
k8s.io/code-generator v0.20.0 h1:c8JaABvEEZPDE8MICTOtveHX2axchl+EptM+o4OGvbg=
|
||||
k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
|
||||
k8s.io/component-base v0.20.0 h1:BXGL8iitIQD+0NgW49UsM7MraNUUGDU3FBmrfUAtmVQ=
|
||||
k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA=
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: (devel)
|
||||
controller-gen.kubebuilder.io/version: v0.2.8
|
||||
creationTimestamp: null
|
||||
name: instancegroups.kops.k8s.io
|
||||
spec:
|
||||
|
|
@ -42,13 +42,18 @@ spec:
|
|||
name: v1alpha2
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: InstanceGroup represents a group of instances (either nodes or masters) with the same configuration
|
||||
description: InstanceGroup represents a group of instances (either nodes or
|
||||
masters) with the same configuration
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
|
|
@ -56,12 +61,14 @@ spec:
|
|||
description: InstanceGroupSpec is the specification for an instanceGroup
|
||||
properties:
|
||||
additionalSecurityGroups:
|
||||
description: AdditionalSecurityGroups attaches additional security groups (e.g. i-123456)
|
||||
description: AdditionalSecurityGroups attaches additional security
|
||||
groups (e.g. i-123456)
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
additionalUserData:
|
||||
description: AdditionalUserData is any additional user-data to be passed to the host
|
||||
description: AdditionalUserData is any additional user-data to be
|
||||
passed to the host
|
||||
items:
|
||||
description: UserData defines a user-data section
|
||||
properties:
|
||||
|
|
@ -77,34 +84,42 @@ spec:
|
|||
type: object
|
||||
type: array
|
||||
associatePublicIp:
|
||||
description: AssociatePublicIP is true if we want instances to have a public IP
|
||||
description: AssociatePublicIP is true if we want instances to have
|
||||
a public IP
|
||||
type: boolean
|
||||
cloudLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: CloudLabels indicates the labels for instances in this group, at the AWS level
|
||||
description: CloudLabels indicates the labels for instances in this
|
||||
group, at the AWS level
|
||||
type: object
|
||||
compressUserData:
|
||||
description: CompressUserData compresses parts of the user data to save space
|
||||
description: CompressUserData compresses parts of the user data to
|
||||
save space
|
||||
type: boolean
|
||||
detailedInstanceMonitoring:
|
||||
description: DetailedInstanceMonitoring defines if detailed-monitoring is enabled (AWS only)
|
||||
description: DetailedInstanceMonitoring defines if detailed-monitoring
|
||||
is enabled (AWS only)
|
||||
type: boolean
|
||||
externalLoadBalancers:
|
||||
description: ExternalLoadBalancers define loadbalancers that should be attached to the instancegroup
|
||||
description: ExternalLoadBalancers define loadbalancers that should
|
||||
be attached to the instancegroup
|
||||
items:
|
||||
description: LoadBalancer defines a load balancer
|
||||
properties:
|
||||
loadBalancerName:
|
||||
description: LoadBalancerName to associate with this instance group (AWS ELB)
|
||||
description: LoadBalancerName to associate with this instance
|
||||
group (AWS ELB)
|
||||
type: string
|
||||
targetGroupArn:
|
||||
description: TargetGroupARN to associate with this instance group (AWS ALB/NLB)
|
||||
description: TargetGroupARN to associate with this instance
|
||||
group (AWS ALB/NLB)
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
fileAssets:
|
||||
description: FileAssets is a collection of file assets for this instance group
|
||||
description: FileAssets is a collection of file assets for this instance
|
||||
group
|
||||
items:
|
||||
description: FileAssetSpec defines the structure for a file asset
|
||||
properties:
|
||||
|
|
@ -121,38 +136,45 @@ spec:
|
|||
description: Path is the location this file should reside
|
||||
type: string
|
||||
roles:
|
||||
description: Roles is a list of roles the file asset should be applied, defaults to all
|
||||
description: Roles is a list of roles the file asset should
|
||||
be applied, defaults to all
|
||||
items:
|
||||
description: InstanceGroupRole string describes the roles of the nodes in this InstanceGroup (master or nodes)
|
||||
description: InstanceGroupRole string describes the roles
|
||||
of the nodes in this InstanceGroup (master or nodes)
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
hooks:
|
||||
description: 'Hooks is a list of hooks for this instanceGroup, note: these can override the cluster wide ones if required'
|
||||
description: 'Hooks is a list of hooks for this instanceGroup, note:
|
||||
these can override the cluster wide ones if required'
|
||||
items:
|
||||
description: HookSpec is a definition hook
|
||||
properties:
|
||||
before:
|
||||
description: Before is a series of systemd units which this hook must run before
|
||||
description: Before is a series of systemd units which this
|
||||
hook must run before
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
disabled:
|
||||
description: Disabled indicates if you want the unit switched off
|
||||
description: Disabled indicates if you want the unit switched
|
||||
off
|
||||
type: boolean
|
||||
execContainer:
|
||||
description: ExecContainer is the image itself
|
||||
properties:
|
||||
command:
|
||||
description: Command is the command supplied to the above image
|
||||
description: Command is the command supplied to the above
|
||||
image
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
environment:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Environment is a map of environment variables added to the hook
|
||||
description: Environment is a map of environment variables
|
||||
added to the hook
|
||||
type: object
|
||||
image:
|
||||
description: Image is the docker image
|
||||
|
|
@ -162,88 +184,117 @@ spec:
|
|||
description: Manifest is a raw systemd unit file
|
||||
type: string
|
||||
name:
|
||||
description: Name is an optional name for the hook, otherwise the name is kops-hook-<index>
|
||||
description: Name is an optional name for the hook, otherwise
|
||||
the name is kops-hook-<index>
|
||||
type: string
|
||||
requires:
|
||||
description: Requires is a series of systemd units the action requires
|
||||
description: Requires is a series of systemd units the action
|
||||
requires
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
roles:
|
||||
description: Roles is an optional list of roles the hook should be rolled out to, defaults to all
|
||||
description: Roles is an optional list of roles the hook should
|
||||
be rolled out to, defaults to all
|
||||
items:
|
||||
description: InstanceGroupRole string describes the roles of the nodes in this InstanceGroup (master or nodes)
|
||||
description: InstanceGroupRole string describes the roles
|
||||
of the nodes in this InstanceGroup (master or nodes)
|
||||
type: string
|
||||
type: array
|
||||
useRawManifest:
|
||||
description: UseRawManifest indicates that the contents of Manifest should be used as the contents of the systemd unit, unmodified. Before and Requires are ignored when used together with this value (and validation shouldn't allow them to be set)
|
||||
description: UseRawManifest indicates that the contents of Manifest
|
||||
should be used as the contents of the systemd unit, unmodified.
|
||||
Before and Requires are ignored when used together with this
|
||||
value (and validation shouldn't allow them to be set)
|
||||
type: boolean
|
||||
type: object
|
||||
type: array
|
||||
iam:
|
||||
description: IAMProfileSpec defines the identity of the cloud group IAM profile (AWS only).
|
||||
description: IAMProfileSpec defines the identity of the cloud group
|
||||
IAM profile (AWS only).
|
||||
properties:
|
||||
profile:
|
||||
description: Profile of the cloud group IAM profile. In aws this is the arn for the iam instance profile
|
||||
description: Profile of the cloud group IAM profile. In aws this
|
||||
is the arn for the iam instance profile
|
||||
type: string
|
||||
type: object
|
||||
image:
|
||||
description: Image is the instance (ami etc) we should use
|
||||
type: string
|
||||
instanceInterruptionBehavior:
|
||||
description: InstanceInterruptionBehavior defines if a spot instance should be terminated, hibernated, or stopped after interruption
|
||||
description: InstanceInterruptionBehavior defines if a spot instance
|
||||
should be terminated, hibernated, or stopped after interruption
|
||||
type: string
|
||||
instanceMetadata:
|
||||
description: InstanceMetadata defines the EC2 instance metadata service options (AWS Only)
|
||||
description: InstanceMetadata defines the EC2 instance metadata service
|
||||
options (AWS Only)
|
||||
properties:
|
||||
httpPutResponseHopLimit:
|
||||
description: HTTPPutResponseHopLimit is the desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. The default value is 1.
|
||||
description: HTTPPutResponseHopLimit is the desired HTTP PUT response
|
||||
hop limit for instance metadata requests. The larger the number,
|
||||
the further instance metadata requests can travel. The default
|
||||
value is 1.
|
||||
format: int64
|
||||
type: integer
|
||||
httpTokens:
|
||||
description: HTTPTokens is the state of token usage for the instance metadata requests. If the parameter is not specified in the request, the default state is "optional".
|
||||
description: HTTPTokens is the state of token usage for the instance
|
||||
metadata requests. If the parameter is not specified in the
|
||||
request, the default state is "optional".
|
||||
type: string
|
||||
type: object
|
||||
instanceProtection:
|
||||
description: InstanceProtection makes new instances in an autoscaling group protected from scale in
|
||||
description: InstanceProtection makes new instances in an autoscaling
|
||||
group protected from scale in
|
||||
type: boolean
|
||||
kubelet:
|
||||
description: Kubelet overrides kubelet config from the ClusterSpec
|
||||
properties:
|
||||
allowPrivileged:
|
||||
description: AllowPrivileged enables containers to request privileged mode (defaults to false)
|
||||
description: AllowPrivileged enables containers to request privileged
|
||||
mode (defaults to false)
|
||||
type: boolean
|
||||
allowedUnsafeSysctls:
|
||||
description: AllowedUnsafeSysctls are passed to the kubelet config to whitelist allowable sysctls
|
||||
description: AllowedUnsafeSysctls are passed to the kubelet config
|
||||
to whitelist allowable sysctls
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
anonymousAuth:
|
||||
description: AnonymousAuth permits you to control auth to the kubelet api
|
||||
description: AnonymousAuth permits you to control auth to the
|
||||
kubelet api
|
||||
type: boolean
|
||||
apiServers:
|
||||
description: APIServers is not used for clusters version 1.6 and later - flag removed
|
||||
description: APIServers is not used for clusters version 1.6 and
|
||||
later - flag removed
|
||||
type: string
|
||||
authenticationTokenWebhook:
|
||||
description: AuthenticationTokenWebhook uses the TokenReview API to determine authentication for bearer tokens.
|
||||
description: AuthenticationTokenWebhook uses the TokenReview API
|
||||
to determine authentication for bearer tokens.
|
||||
type: boolean
|
||||
authenticationTokenWebhookCacheTtl:
|
||||
description: AuthenticationTokenWebhook sets the duration to cache responses from the webhook token authenticator. Default is 2m. (default 2m0s)
|
||||
description: AuthenticationTokenWebhook sets the duration to cache
|
||||
responses from the webhook token authenticator. Default is 2m.
|
||||
(default 2m0s)
|
||||
type: string
|
||||
authorizationMode:
|
||||
description: AuthorizationMode is the authorization mode the kubelet is running in
|
||||
description: AuthorizationMode is the authorization mode the kubelet
|
||||
is running in
|
||||
type: string
|
||||
babysitDaemons:
|
||||
description: The node has babysitter process monitoring docker and kubelet. Removed as of 1.7
|
||||
description: The node has babysitter process monitoring docker
|
||||
and kubelet. Removed as of 1.7
|
||||
type: boolean
|
||||
bootstrapKubeconfig:
|
||||
description: BootstrapKubeconfig is the path to a kubeconfig file that will be used to get client certificate for kubelet
|
||||
description: BootstrapKubeconfig is the path to a kubeconfig file
|
||||
that will be used to get client certificate for kubelet
|
||||
type: string
|
||||
cgroupDriver:
|
||||
description: CgroupDriver allows the explicit setting of the kubelet cgroup driver. If omitted, defaults to cgroupfs.
|
||||
description: CgroupDriver allows the explicit setting of the kubelet
|
||||
cgroup driver. If omitted, defaults to cgroupfs.
|
||||
type: string
|
||||
cgroupRoot:
|
||||
description: cgroupRoot is the root cgroup to use for pods. This is handled by the container runtime on a best effort basis.
|
||||
description: cgroupRoot is the root cgroup to use for pods. This
|
||||
is handled by the container runtime on a best effort basis.
|
||||
type: string
|
||||
clientCaFile:
|
||||
description: ClientCAFile is the path to a CA certificate
|
||||
|
|
@ -258,192 +309,270 @@ spec:
|
|||
description: ClusterDomain is the DNS domain for this cluster
|
||||
type: string
|
||||
configureCbr0:
|
||||
description: configureCBR0 enables the kubelet to configure cbr0 based on Node.Spec.PodCIDR.
|
||||
description: configureCBR0 enables the kubelet to configure cbr0
|
||||
based on Node.Spec.PodCIDR.
|
||||
type: boolean
|
||||
cpuCFSQuota:
|
||||
description: CPUCFSQuota enables CPU CFS quota enforcement for containers that specify CPU limits
|
||||
description: CPUCFSQuota enables CPU CFS quota enforcement for
|
||||
containers that specify CPU limits
|
||||
type: boolean
|
||||
cpuCFSQuotaPeriod:
|
||||
description: CPUCFSQuotaPeriod sets CPU CFS quota period value, cpu.cfs_period_us, defaults to Linux Kernel default
|
||||
description: CPUCFSQuotaPeriod sets CPU CFS quota period value,
|
||||
cpu.cfs_period_us, defaults to Linux Kernel default
|
||||
type: string
|
||||
cpuManagerPolicy:
|
||||
description: CpuManagerPolicy allows for changing the default policy of None to static
|
||||
description: CpuManagerPolicy allows for changing the default
|
||||
policy of None to static
|
||||
type: string
|
||||
dockerDisableSharedPID:
|
||||
description: DockerDisableSharedPID uses a shared PID namespace for containers in a pod.
|
||||
description: DockerDisableSharedPID uses a shared PID namespace
|
||||
for containers in a pod.
|
||||
type: boolean
|
||||
enableCustomMetrics:
|
||||
description: Enable gathering custom metrics.
|
||||
type: boolean
|
||||
enableDebuggingHandlers:
|
||||
description: EnableDebuggingHandlers enables server endpoints for log collection and local running of containers and commands
|
||||
description: EnableDebuggingHandlers enables server endpoints
|
||||
for log collection and local running of containers and commands
|
||||
type: boolean
|
||||
enforceNodeAllocatable:
|
||||
description: Enforce Allocatable across pods whenever the overall usage across all pods exceeds Allocatable.
|
||||
description: Enforce Allocatable across pods whenever the overall
|
||||
usage across all pods exceeds Allocatable.
|
||||
type: string
|
||||
evictionHard:
|
||||
description: Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'.
|
||||
description: Comma-delimited list of hard eviction expressions. For
|
||||
example, 'memory.available<300Mi'.
|
||||
type: string
|
||||
evictionMaxPodGracePeriod:
|
||||
description: Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
|
||||
description: Maximum allowed grace period (in seconds) to use
|
||||
when terminating pods in response to a soft eviction threshold
|
||||
being met.
|
||||
format: int32
|
||||
type: integer
|
||||
evictionMinimumReclaim:
|
||||
description: Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure.
|
||||
description: Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi)
|
||||
that describes the minimum amount of resource the kubelet will
|
||||
reclaim when performing a pod eviction if that resource is under
|
||||
pressure.
|
||||
type: string
|
||||
evictionPressureTransitionPeriod:
|
||||
description: Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
|
||||
description: Duration for which the kubelet has to wait before
|
||||
transitioning out of an eviction pressure condition.
|
||||
type: string
|
||||
evictionSoft:
|
||||
description: Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'.
|
||||
description: Comma-delimited list of soft eviction expressions. For
|
||||
example, 'memory.available<300Mi'.
|
||||
type: string
|
||||
evictionSoftGracePeriod:
|
||||
description: Comma-delimited list of grace periods for each soft eviction signal. For example, 'memory.available=30s'.
|
||||
description: Comma-delimited list of grace periods for each soft
|
||||
eviction signal. For example, 'memory.available=30s'.
|
||||
type: string
|
||||
experimentalAllowedUnsafeSysctls:
|
||||
description: ExperimentalAllowedUnsafeSysctls are passed to the kubelet config to whitelist allowable sysctls Was promoted to beta and renamed. https://github.com/kubernetes/kubernetes/pull/63717
|
||||
description: ExperimentalAllowedUnsafeSysctls are passed to the
|
||||
kubelet config to whitelist allowable sysctls Was promoted to
|
||||
beta and renamed. https://github.com/kubernetes/kubernetes/pull/63717
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
failSwapOn:
|
||||
description: Tells the Kubelet to fail to start if swap is enabled on the node.
|
||||
description: Tells the Kubelet to fail to start if swap is enabled
|
||||
on the node.
|
||||
type: boolean
|
||||
featureGates:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
|
||||
description: FeatureGates is set of key=value pairs that describe
|
||||
feature gates for alpha/experimental features.
|
||||
type: object
|
||||
hairpinMode:
|
||||
description: 'How should the kubelet configure the container bridge for hairpin packets. Setting this flag allows endpoints in a Service to loadbalance back to themselves if they should try to access their own Service. Values: "promiscuous-bridge": make the container bridge promiscuous. "hairpin-veth": set the hairpin flag on container veth interfaces. "none": do nothing. Setting --configure-cbr0 to false implies that to achieve hairpin NAT one must set --hairpin-mode=veth-flag, because bridge assumes the existence of a container bridge named cbr0.'
|
||||
description: 'How should the kubelet configure the container bridge
|
||||
for hairpin packets. Setting this flag allows endpoints in a
|
||||
Service to loadbalance back to themselves if they should try
|
||||
to access their own Service. Values: "promiscuous-bridge":
|
||||
make the container bridge promiscuous. "hairpin-veth": set
|
||||
the hairpin flag on container veth interfaces. "none": do
|
||||
nothing. Setting --configure-cbr0 to false implies that to achieve
|
||||
hairpin NAT one must set --hairpin-mode=veth-flag, because bridge
|
||||
assumes the existence of a container bridge named cbr0.'
|
||||
type: string
|
||||
hostnameOverride:
|
||||
description: HostnameOverride is the hostname used to identify the kubelet instead of the actual hostname.
|
||||
description: HostnameOverride is the hostname used to identify
|
||||
the kubelet instead of the actual hostname.
|
||||
type: string
|
||||
housekeepingInterval:
|
||||
description: HousekeepingInterval allows to specify interval between container housekeepings.
|
||||
description: HousekeepingInterval allows to specify interval between
|
||||
container housekeepings.
|
||||
type: string
|
||||
imageGCHighThresholdPercent:
|
||||
description: ImageGCHighThresholdPercent is the percent of disk usage after which image garbage collection is always run.
|
||||
description: ImageGCHighThresholdPercent is the percent of disk
|
||||
usage after which image garbage collection is always run.
|
||||
format: int32
|
||||
type: integer
|
||||
imageGCLowThresholdPercent:
|
||||
description: ImageGCLowThresholdPercent is the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
|
||||
description: ImageGCLowThresholdPercent is the percent of disk
|
||||
usage before which image garbage collection is never run. Lowest
|
||||
disk usage to garbage collect to.
|
||||
format: int32
|
||||
type: integer
|
||||
imagePullProgressDeadline:
|
||||
description: ImagePullProgressDeadline is the timeout for image pulls If no pulling progress is made before this deadline, the image pulling will be cancelled. (default 1m0s)
|
||||
description: ImagePullProgressDeadline is the timeout for image
|
||||
pulls If no pulling progress is made before this deadline, the
|
||||
image pulling will be cancelled. (default 1m0s)
|
||||
type: string
|
||||
kubeReserved:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Resource reservation for kubernetes system daemons like the kubelet, container runtime, node problem detector, etc.
|
||||
description: Resource reservation for kubernetes system daemons
|
||||
like the kubelet, container runtime, node problem detector,
|
||||
etc.
|
||||
type: object
|
||||
kubeReservedCgroup:
|
||||
description: Control group for kube daemons.
|
||||
type: string
|
||||
kubeconfigPath:
|
||||
description: KubeconfigPath is the path of kubeconfig for the kubelet
|
||||
description: KubeconfigPath is the path of kubeconfig for the
|
||||
kubelet
|
||||
type: string
|
||||
kubeletCgroups:
|
||||
description: KubeletCgroups is the absolute name of cgroups to isolate the kubelet in.
|
||||
description: KubeletCgroups is the absolute name of cgroups to
|
||||
isolate the kubelet in.
|
||||
type: string
|
||||
logLevel:
|
||||
description: LogLevel is the logging level of the kubelet
|
||||
format: int32
|
||||
type: integer
|
||||
maxPods:
|
||||
description: MaxPods is the number of pods that can run on this Kubelet.
|
||||
description: MaxPods is the number of pods that can run on this
|
||||
Kubelet.
|
||||
format: int32
|
||||
type: integer
|
||||
networkPluginMTU:
|
||||
description: NetworkPluginMTU is the MTU to be passed to the network plugin, and overrides the default MTU for cases where it cannot be automatically computed (such as IPSEC).
|
||||
description: NetworkPluginMTU is the MTU to be passed to the network
|
||||
plugin, and overrides the default MTU for cases where it cannot
|
||||
be automatically computed (such as IPSEC).
|
||||
format: int32
|
||||
type: integer
|
||||
networkPluginName:
|
||||
description: NetworkPluginName is the name of the network plugin to be invoked for various events in kubelet/pod lifecycle
|
||||
description: NetworkPluginName is the name of the network plugin
|
||||
to be invoked for various events in kubelet/pod lifecycle
|
||||
type: string
|
||||
nodeLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: NodeLabels to add when registering the node in the cluster.
|
||||
description: NodeLabels to add when registering the node in the
|
||||
cluster.
|
||||
type: object
|
||||
nodeStatusUpdateFrequency:
|
||||
description: NodeStatusUpdateFrequency Specifies how often kubelet posts node status to master (default 10s) must work with nodeMonitorGracePeriod in KubeControllerManagerConfig.
|
||||
description: NodeStatusUpdateFrequency Specifies how often kubelet
|
||||
posts node status to master (default 10s) must work with nodeMonitorGracePeriod
|
||||
in KubeControllerManagerConfig.
|
||||
type: string
|
||||
nonMasqueradeCIDR:
|
||||
description: 'NonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade.'
|
||||
description: 'NonMasqueradeCIDR configures masquerading: traffic
|
||||
to IPs outside this range will use IP masquerade.'
|
||||
type: string
|
||||
nvidiaGPUs:
|
||||
description: NvidiaGPUs is the number of NVIDIA GPU devices on this node.
|
||||
description: NvidiaGPUs is the number of NVIDIA GPU devices on
|
||||
this node.
|
||||
format: int32
|
||||
type: integer
|
||||
podCIDR:
|
||||
description: PodCIDR is the CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.
|
||||
description: PodCIDR is the CIDR to use for pod IP addresses,
|
||||
only used in standalone mode. In cluster mode, this is obtained
|
||||
from the master.
|
||||
type: string
|
||||
podInfraContainerImage:
|
||||
description: PodInfraContainerImage is the image whose network/ipc containers in each pod will use.
|
||||
description: PodInfraContainerImage is the image whose network/ipc
|
||||
containers in each pod will use.
|
||||
type: string
|
||||
podManifestPath:
|
||||
description: config is the path to the config file or directory of files
|
||||
description: config is the path to the config file or directory
|
||||
of files
|
||||
type: string
|
||||
protectKernelDefaults:
|
||||
description: 'Default kubelet behaviour for kernel tuning. If set, kubelet errors if any of kernel tunables is different than kubelet defaults. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet''s --config flag.'
|
||||
description: 'Default kubelet behaviour for kernel tuning. If
|
||||
set, kubelet errors if any of kernel tunables is different than
|
||||
kubelet defaults. (DEPRECATED: This parameter should be set
|
||||
via the config file specified by the Kubelet''s --config flag.'
|
||||
type: boolean
|
||||
readOnlyPort:
|
||||
description: ReadOnlyPort is the port used by the kubelet api for read-only access (default 10255)
|
||||
description: ReadOnlyPort is the port used by the kubelet api
|
||||
for read-only access (default 10255)
|
||||
format: int32
|
||||
type: integer
|
||||
reconcileCIDR:
|
||||
description: ReconcileCIDR is Reconcile node CIDR with the CIDR specified by the API server. No-op if register-node or configure-cbr0 is false.
|
||||
description: ReconcileCIDR is Reconcile node CIDR with the CIDR
|
||||
specified by the API server. No-op if register-node or configure-cbr0
|
||||
is false.
|
||||
type: boolean
|
||||
registerNode:
|
||||
description: RegisterNode enables automatic registration with the apiserver.
|
||||
description: RegisterNode enables automatic registration with
|
||||
the apiserver.
|
||||
type: boolean
|
||||
registerSchedulable:
|
||||
description: registerSchedulable tells the kubelet to register the node as schedulable. No-op if register-node is false.
|
||||
description: registerSchedulable tells the kubelet to register
|
||||
the node as schedulable. No-op if register-node is false.
|
||||
type: boolean
|
||||
registryBurst:
|
||||
description: RegistryBurst Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0 (default 10)
|
||||
description: RegistryBurst Maximum size of a bursty pulls, temporarily
|
||||
allows pulls to burst to this number, while still not exceeding
|
||||
registry-qps. Only used if --registry-qps > 0 (default 10)
|
||||
format: int32
|
||||
type: integer
|
||||
registryPullQPS:
|
||||
description: RegistryPullQPS if > 0, limit registry pull QPS to this value. If 0, unlimited. (default 5)
|
||||
description: RegistryPullQPS if > 0, limit registry pull QPS to
|
||||
this value. If 0, unlimited. (default 5)
|
||||
format: int32
|
||||
type: integer
|
||||
requireKubeconfig:
|
||||
description: RequireKubeconfig indicates a kubeconfig is required
|
||||
type: boolean
|
||||
resolvConf:
|
||||
description: ResolverConfig is the resolver configuration file used as the basis for the container DNS resolution configuration."), []
|
||||
description: ResolverConfig is the resolver configuration file
|
||||
used as the basis for the container DNS resolution configuration."),
|
||||
[]
|
||||
type: string
|
||||
rootDir:
|
||||
description: RootDir is the directory path for managing kubelet files (volume mounts,etc)
|
||||
description: RootDir is the directory path for managing kubelet
|
||||
files (volume mounts,etc)
|
||||
type: string
|
||||
rotateCertificates:
|
||||
description: rotateCertificates enables client certificate rotation.
|
||||
type: boolean
|
||||
runtimeCgroups:
|
||||
description: Cgroups that container runtime is expected to be isolated in.
|
||||
description: Cgroups that container runtime is expected to be
|
||||
isolated in.
|
||||
type: string
|
||||
runtimeRequestTimeout:
|
||||
description: RuntimeRequestTimeout is timeout for runtime requests on - pull, logs, exec and attach
|
||||
description: RuntimeRequestTimeout is timeout for runtime requests
|
||||
on - pull, logs, exec and attach
|
||||
type: string
|
||||
seccompProfileRoot:
|
||||
description: SeccompProfileRoot is the directory path for seccomp profiles.
|
||||
description: SeccompProfileRoot is the directory path for seccomp
|
||||
profiles.
|
||||
type: string
|
||||
serializeImagePulls:
|
||||
description: '// SerializeImagePulls when enabled, tells the Kubelet to pull images one // at a time. We recommend *not* changing the default value on nodes that // run docker daemon with version < 1.9 or an Aufs storage backend. // Issue #10959 has more details.'
|
||||
description: '// SerializeImagePulls when enabled, tells the Kubelet
|
||||
to pull images one // at a time. We recommend *not* changing
|
||||
the default value on nodes that // run docker daemon with version <
|
||||
1.9 or an Aufs storage backend. // Issue #10959 has more details.'
|
||||
type: boolean
|
||||
streamingConnectionIdleTimeout:
|
||||
description: StreamingConnectionIdleTimeout is the maximum time a streaming connection can be idle before the connection is automatically closed
|
||||
description: StreamingConnectionIdleTimeout is the maximum time
|
||||
a streaming connection can be idle before the connection is
|
||||
automatically closed
|
||||
type: string
|
||||
systemCgroups:
|
||||
description: SystemCgroups is absolute name of cgroups in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot.
|
||||
description: SystemCgroups is absolute name of cgroups in which
|
||||
to place all non-kernel processes that are not already in a
|
||||
container. Empty for no container. Rolling back the flag requires
|
||||
a reboot.
|
||||
type: string
|
||||
systemReserved:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Capture resource reservation for OS system daemons like sshd, udev, etc.
|
||||
description: Capture resource reservation for OS system daemons
|
||||
like sshd, udev, etc.
|
||||
type: object
|
||||
systemReservedCgroup:
|
||||
description: Parent control group for OS system daemons.
|
||||
|
|
@ -457,7 +586,8 @@ spec:
|
|||
description: 'TODO: Remove unused TLSCertFile'
|
||||
type: string
|
||||
tlsCipherSuites:
|
||||
description: TLSCipherSuites indicates the allowed TLS cipher suite
|
||||
description: TLSCipherSuites indicates the allowed TLS cipher
|
||||
suite
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
|
|
@ -468,20 +598,26 @@ spec:
|
|||
description: 'TODO: Remove unused TLSPrivateKeyFile'
|
||||
type: string
|
||||
topologyManagerPolicy:
|
||||
description: TopologyManagerPolicy determines the allocation policy for the topology manager.
|
||||
description: TopologyManagerPolicy determines the allocation policy
|
||||
for the topology manager.
|
||||
type: string
|
||||
volumePluginDirectory:
|
||||
description: The full path of the directory in which to search for additional third party volume plugins (this path must be writeable, dependent on your choice of OS)
|
||||
description: The full path of the directory in which to search
|
||||
for additional third party volume plugins (this path must be
|
||||
writeable, dependent on your choice of OS)
|
||||
type: string
|
||||
volumeStatsAggPeriod:
|
||||
description: VolumeStatsAggPeriod is the interval for kubelet to calculate and cache the volume disk usage for all pods and volumes
|
||||
description: VolumeStatsAggPeriod is the interval for kubelet
|
||||
to calculate and cache the volume disk usage for all pods and
|
||||
volumes
|
||||
type: string
|
||||
type: object
|
||||
machineType:
|
||||
description: MachineType is the instance class
|
||||
type: string
|
||||
maxPrice:
|
||||
description: MaxPrice indicates this is a spot-pricing group, with the specified value as our max-price bid
|
||||
description: MaxPrice indicates this is a spot-pricing group, with
|
||||
the specified value as our max-price bid
|
||||
type: string
|
||||
maxSize:
|
||||
description: MaxSize is the maximum size of the pool
|
||||
|
|
@ -492,91 +628,140 @@ spec:
|
|||
format: int32
|
||||
type: integer
|
||||
mixedInstancesPolicy:
|
||||
description: MixedInstancesPolicy defined a optional backing of an AWS ASG by a EC2 Fleet (AWS Only)
|
||||
description: MixedInstancesPolicy defined a optional backing of an
|
||||
AWS ASG by a EC2 Fleet (AWS Only)
|
||||
properties:
|
||||
instances:
|
||||
description: Instances is a list of instance types which we are willing to run in the EC2 fleet
|
||||
description: Instances is a list of instance types which we are
|
||||
willing to run in the EC2 fleet
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
onDemandAboveBase:
|
||||
description: OnDemandAboveBase controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBase. The range is 0–100. The default value is 100. If you leave this parameter set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.
|
||||
description: OnDemandAboveBase controls the percentages of On-Demand
|
||||
Instances and Spot Instances for your additional capacity beyond
|
||||
OnDemandBase. The range is 0–100. The default value is 100.
|
||||
If you leave this parameter set to 100, the percentages are
|
||||
100% for On-Demand Instances and 0% for Spot Instances.
|
||||
format: int64
|
||||
type: integer
|
||||
onDemandAllocationStrategy:
|
||||
description: OnDemandAllocationStrategy indicates how to allocate instance types to fulfill On-Demand capacity
|
||||
description: OnDemandAllocationStrategy indicates how to allocate
|
||||
instance types to fulfill On-Demand capacity
|
||||
type: string
|
||||
onDemandBase:
|
||||
description: OnDemandBase is the minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.
|
||||
description: OnDemandBase is the minimum amount of the Auto Scaling
|
||||
group's capacity that must be fulfilled by On-Demand Instances.
|
||||
This base portion is provisioned first as your group scales.
|
||||
format: int64
|
||||
type: integer
|
||||
spotAllocationStrategy:
|
||||
description: SpotAllocationStrategy diversifies your Spot capacity across multiple instance types to find the best pricing. Higher Spot availability may result from a larger number of instance types to choose from.
|
||||
description: SpotAllocationStrategy diversifies your Spot capacity
|
||||
across multiple instance types to find the best pricing. Higher
|
||||
Spot availability may result from a larger number of instance
|
||||
types to choose from.
|
||||
type: string
|
||||
spotInstancePools:
|
||||
description: SpotInstancePools is the number of Spot pools to use to allocate your Spot capacity (defaults to 2) pools are determined from the different instance types in the Overrides array of LaunchTemplate
|
||||
description: SpotInstancePools is the number of Spot pools to
|
||||
use to allocate your Spot capacity (defaults to 2) pools are
|
||||
determined from the different instance types in the Overrides
|
||||
array of LaunchTemplate
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
nodeLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: NodeLabels indicates the kubernetes labels for nodes in this group
|
||||
description: NodeLabels indicates the kubernetes labels for nodes
|
||||
in this group
|
||||
type: object
|
||||
role:
|
||||
description: 'Type determines the role of instances in this group: masters or nodes'
|
||||
description: 'Type determines the role of instances in this group:
|
||||
masters or nodes'
|
||||
type: string
|
||||
rollingUpdate:
|
||||
description: RollingUpdate defines the rolling-update behavior
|
||||
properties:
|
||||
drainAndTerminate:
|
||||
description: DrainAndTerminate enables draining and terminating nodes during rolling updates. Defaults to true.
|
||||
description: DrainAndTerminate enables draining and terminating
|
||||
nodes during rolling updates. Defaults to true.
|
||||
type: boolean
|
||||
maxSurge:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: 'MaxSurge is the maximum number of extra nodes that can be created during the update. The value can be an absolute number (for example 5) or a percentage of desired machines (for example 10%). The absolute number is calculated from a percentage by rounding up. Has no effect on instance groups with role "Master". Defaults to 1 on AWS, 0 otherwise. Example: when this is set to 30%, the InstanceGroup can be scaled up immediately when the rolling update starts, such that the total number of old and new nodes do not exceed 130% of desired nodes.'
|
||||
description: 'MaxSurge is the maximum number of extra nodes that
|
||||
can be created during the update. The value can be an absolute
|
||||
number (for example 5) or a percentage of desired machines (for
|
||||
example 10%). The absolute number is calculated from a percentage
|
||||
by rounding up. Has no effect on instance groups with role "Master".
|
||||
Defaults to 1 on AWS, 0 otherwise. Example: when this is set
|
||||
to 30%, the InstanceGroup can be scaled up immediately when
|
||||
the rolling update starts, such that the total number of old
|
||||
and new nodes do not exceed 130% of desired nodes.'
|
||||
x-kubernetes-int-or-string: true
|
||||
maxUnavailable:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: 'MaxUnavailable is the maximum number of nodes that can be unavailable during the update. The value can be an absolute number (for example 5) or a percentage of desired nodes (for example 10%). The absolute number is calculated from a percentage by rounding down. Defaults to 1 if MaxSurge is 0, otherwise defaults to 0. Example: when this is set to 30%, the InstanceGroup can be scaled down to 70% of desired nodes immediately when the rolling update starts. Once new nodes are ready, more old nodes can be drained, ensuring that the total number of nodes available at all times during the update is at least 70% of desired nodes.'
|
||||
description: 'MaxUnavailable is the maximum number of nodes that
|
||||
can be unavailable during the update. The value can be an absolute
|
||||
number (for example 5) or a percentage of desired nodes (for
|
||||
example 10%). The absolute number is calculated from a percentage
|
||||
by rounding down. Defaults to 1 if MaxSurge is 0, otherwise
|
||||
defaults to 0. Example: when this is set to 30%, the InstanceGroup
|
||||
can be scaled down to 70% of desired nodes immediately when
|
||||
the rolling update starts. Once new nodes are ready, more old
|
||||
nodes can be drained, ensuring that the total number of nodes
|
||||
available at all times during the update is at least 70% of
|
||||
desired nodes.'
|
||||
x-kubernetes-int-or-string: true
|
||||
type: object
|
||||
rootVolumeDeleteOnTermination:
|
||||
description: 'RootVolumeDeleteOnTermination configures root volume retention policy upon instance termination. The root volume is deleted by default. Cluster deletion does not remove retained root volumes. NOTE: This setting applies only to the Launch Configuration and does not affect Launch Templates.'
|
||||
description: 'RootVolumeDeleteOnTermination configures root volume
|
||||
retention policy upon instance termination. The root volume is deleted
|
||||
by default. Cluster deletion does not remove retained root volumes.
|
||||
NOTE: This setting applies only to the Launch Configuration and
|
||||
does not affect Launch Templates.'
|
||||
type: boolean
|
||||
rootVolumeEncryption:
|
||||
description: RootVolumeEncryption enables EBS root volume encryption for an instance
|
||||
description: RootVolumeEncryption enables EBS root volume encryption
|
||||
for an instance
|
||||
type: boolean
|
||||
rootVolumeEncryptionKey:
|
||||
description: RootVolumeEncryptionKey provides the key identifier for root volume encryption
|
||||
description: RootVolumeEncryptionKey provides the key identifier for
|
||||
root volume encryption
|
||||
type: string
|
||||
rootVolumeIops:
|
||||
description: If volume type is io1, then we need to specify the number of Iops.
|
||||
description: If volume type is io1, then we need to specify the number
|
||||
of Iops.
|
||||
format: int32
|
||||
type: integer
|
||||
rootVolumeOptimization:
|
||||
description: RootVolumeOptimization enables EBS optimization for an instance
|
||||
description: RootVolumeOptimization enables EBS optimization for an
|
||||
instance
|
||||
type: boolean
|
||||
rootVolumeSize:
|
||||
description: RootVolumeSize is the size of the EBS root volume to use, in GB
|
||||
description: RootVolumeSize is the size of the EBS root volume to
|
||||
use, in GB
|
||||
format: int32
|
||||
type: integer
|
||||
rootVolumeType:
|
||||
description: RootVolumeType is the type of the EBS root volume to use (e.g. gp2)
|
||||
description: RootVolumeType is the type of the EBS root volume to
|
||||
use (e.g. gp2)
|
||||
type: string
|
||||
securityGroupOverride:
|
||||
description: SecurityGroupOverride overrides the default security group created by Kops for this IG (AWS only).
|
||||
description: SecurityGroupOverride overrides the default security
|
||||
group created by Kops for this IG (AWS only).
|
||||
type: string
|
||||
spotDurationInMinutes:
|
||||
description: SpotDurationInMinutes indicates this is a spot-block group, with the specified value as the spot reservation time
|
||||
description: SpotDurationInMinutes indicates this is a spot-block
|
||||
group, with the specified value as the spot reservation time
|
||||
format: int64
|
||||
type: integer
|
||||
subnets:
|
||||
description: Subnets is the names of the Subnets (as specified in the Cluster) where machines in this instance group should be placed
|
||||
description: Subnets is the names of the Subnets (as specified in
|
||||
the Cluster) where machines in this instance group should be placed
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
|
|
@ -586,22 +771,27 @@ spec:
|
|||
type: string
|
||||
type: array
|
||||
sysctlParameters:
|
||||
description: SysctlParameters will configure kernel parameters using sysctl(8). When specified, each parameter must follow the form variable=value, the way it would appear in sysctl.conf.
|
||||
description: SysctlParameters will configure kernel parameters using
|
||||
sysctl(8). When specified, each parameter must follow the form variable=value,
|
||||
the way it would appear in sysctl.conf.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
taints:
|
||||
description: Taints indicates the kubernetes taints for nodes in this group
|
||||
description: Taints indicates the kubernetes taints for nodes in this
|
||||
group
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
tenancy:
|
||||
description: Describes the tenancy of the instance group. Can be either default or dedicated. Currently only applies to AWS.
|
||||
description: Describes the tenancy of the instance group. Can be either
|
||||
default or dedicated. Currently only applies to AWS.
|
||||
type: string
|
||||
volumeMounts:
|
||||
description: VolumeMounts a collection of volume mounts
|
||||
items:
|
||||
description: VolumeMountSpec defines the specification for mounting a device
|
||||
description: VolumeMountSpec defines the specification for mounting
|
||||
a device
|
||||
properties:
|
||||
device:
|
||||
description: Device is the device name to provision and mount
|
||||
|
|
@ -610,7 +800,8 @@ spec:
|
|||
description: Filesystem is the filesystem to mount
|
||||
type: string
|
||||
formatOptions:
|
||||
description: FormatOptions is a collection of options passed when formatting the device
|
||||
description: FormatOptions is a collection of options passed
|
||||
when formatting the device
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
|
|
@ -625,21 +816,29 @@ spec:
|
|||
type: object
|
||||
type: array
|
||||
volumes:
|
||||
description: Volumes is a collection of additional volumes to create for instances within this InstanceGroup
|
||||
description: Volumes is a collection of additional volumes to create
|
||||
for instances within this InstanceGroup
|
||||
items:
|
||||
description: VolumeSpec defined the spec for an additional volume attached to the instance group
|
||||
description: VolumeSpec defined the spec for an additional volume
|
||||
attached to the instance group
|
||||
properties:
|
||||
deleteOnTermination:
|
||||
description: 'DeleteOnTermination configures volume retention policy upon instance termination. The volume is deleted by default. Cluster deletion does not remove retained volumes. NOTE: This setting applies only to the Launch Configuration and does not affect Launch Templates.'
|
||||
description: 'DeleteOnTermination configures volume retention
|
||||
policy upon instance termination. The volume is deleted by
|
||||
default. Cluster deletion does not remove retained volumes.
|
||||
NOTE: This setting applies only to the Launch Configuration
|
||||
and does not affect Launch Templates.'
|
||||
type: boolean
|
||||
device:
|
||||
description: Device is an optional device name of the block device
|
||||
description: Device is an optional device name of the block
|
||||
device
|
||||
type: string
|
||||
encrypted:
|
||||
description: Encrypted indicates you want to encrypt the volume
|
||||
type: boolean
|
||||
iops:
|
||||
description: Iops is the provision iops for this iops (think io1 in aws)
|
||||
description: Iops is the provision iops for this iops (think
|
||||
io1 in aws)
|
||||
format: int64
|
||||
type: integer
|
||||
key:
|
||||
|
|
@ -650,12 +849,15 @@ spec:
|
|||
format: int64
|
||||
type: integer
|
||||
type:
|
||||
description: Type is the type of volume to create and is cloud specific
|
||||
description: Type is the type of volume to create and is cloud
|
||||
specific
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
zones:
|
||||
description: Zones is the names of the Zones where machines in this instance group should be placed This is needed for regional subnets (e.g. GCE), to restrict placement to particular zones
|
||||
description: Zones is the names of the Zones where machines in this
|
||||
instance group should be placed This is needed for regional subnets
|
||||
(e.g. GCE), to restrict placement to particular zones
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: (devel)
|
||||
controller-gen.kubebuilder.io/version: v0.2.8
|
||||
creationTimestamp: null
|
||||
name: keysets.kops.k8s.io
|
||||
spec:
|
||||
|
|
@ -19,13 +19,18 @@ spec:
|
|||
- name: v1alpha2
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: Keyset is a set of system keypairs, or other secret material. It is a set to support credential rotation etc.
|
||||
description: Keyset is a set of system keypairs, or other secret material.
|
||||
It is a set to support credential rotation etc.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
|
|
@ -35,23 +40,28 @@ spec:
|
|||
keys:
|
||||
description: Keys is the set of keys that make up the keyset
|
||||
items:
|
||||
description: KeysetItem is an item (keypair or other secret material) in a Keyset
|
||||
description: KeysetItem is an item (keypair or other secret material)
|
||||
in a Keyset
|
||||
properties:
|
||||
id:
|
||||
description: Id is the unique identifier for this key in the keyset
|
||||
description: Id is the unique identifier for this key in the
|
||||
keyset
|
||||
type: string
|
||||
privateMaterial:
|
||||
description: PrivateMaterial holds secret material (e.g. a private key, or symmetric token)
|
||||
description: PrivateMaterial holds secret material (e.g. a private
|
||||
key, or symmetric token)
|
||||
format: byte
|
||||
type: string
|
||||
publicMaterial:
|
||||
description: PublicMaterial holds non-secret material (e.g. a certificate)
|
||||
description: PublicMaterial holds non-secret material (e.g.
|
||||
a certificate)
|
||||
format: byte
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
type:
|
||||
description: Type is the type of the Keyset (PKI keypair, or secret token)
|
||||
description: Type is the type of the Keyset (PKI keypair, or secret
|
||||
token)
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: (devel)
|
||||
controller-gen.kubebuilder.io/version: v0.2.8
|
||||
creationTimestamp: null
|
||||
name: sshcredentials.kops.k8s.io
|
||||
spec:
|
||||
|
|
@ -22,10 +22,14 @@ spec:
|
|||
description: SSHCredential represent a set of kops secrets
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
|
|
|
|||
|
|
@ -1,5 +0,0 @@
|
|||
TAGS
|
||||
tags
|
||||
.*.swp
|
||||
tomlcheck/tomlcheck
|
||||
toml.test
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.1
|
||||
- 1.2
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- tip
|
||||
install:
|
||||
- go install ./...
|
||||
- go get github.com/BurntSushi/toml-test
|
||||
script:
|
||||
- export PATH="$PATH:$HOME/gopath/bin"
|
||||
- make test
|
||||
|
|
@ -1,20 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"decode.go",
|
||||
"decode_meta.go",
|
||||
"doc.go",
|
||||
"encode.go",
|
||||
"encoding_types.go",
|
||||
"encoding_types_1.1.go",
|
||||
"lex.go",
|
||||
"parse.go",
|
||||
"type_check.go",
|
||||
"type_fields.go",
|
||||
],
|
||||
importmap = "k8s.io/kops/vendor/github.com/BurntSushi/toml",
|
||||
importpath = "github.com/BurntSushi/toml",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
Compatible with TOML version
|
||||
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
|
||||
|
||||
|
|
@ -1,21 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 TOML authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
install:
|
||||
go install ./...
|
||||
|
||||
test: install
|
||||
go test -v
|
||||
toml-test toml-test-decoder
|
||||
toml-test -encoder toml-test-encoder
|
||||
|
||||
fmt:
|
||||
gofmt -w *.go */*.go
|
||||
colcheck *.go */*.go
|
||||
|
||||
tags:
|
||||
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
|
||||
|
||||
push:
|
||||
git push origin master
|
||||
git push github master
|
||||
|
||||
|
|
@ -1,218 +0,0 @@
|
|||
## TOML parser and encoder for Go with reflection
|
||||
|
||||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
||||
reflection interface similar to Go's standard library `json` and `xml`
|
||||
packages. This package also supports the `encoding.TextUnmarshaler` and
|
||||
`encoding.TextMarshaler` interfaces so that you can define custom data
|
||||
representations. (There is an example of this below.)
|
||||
|
||||
Spec: https://github.com/toml-lang/toml
|
||||
|
||||
Compatible with TOML version
|
||||
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
|
||||
|
||||
Documentation: https://godoc.org/github.com/BurntSushi/toml
|
||||
|
||||
Installation:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml
|
||||
```
|
||||
|
||||
Try the toml validator:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml/cmd/tomlv
|
||||
tomlv some-toml-file.toml
|
||||
```
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml)
|
||||
|
||||
### Testing
|
||||
|
||||
This package passes all tests in
|
||||
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
|
||||
and the encoder.
|
||||
|
||||
### Examples
|
||||
|
||||
This package works similarly to how the Go standard library handles `XML`
|
||||
and `JSON`. Namely, data is loaded into Go values via reflection.
|
||||
|
||||
For the simplest example, consider some TOML file as just a list of keys
|
||||
and values:
|
||||
|
||||
```toml
|
||||
Age = 25
|
||||
Cats = [ "Cauchy", "Plato" ]
|
||||
Pi = 3.14
|
||||
Perfection = [ 6, 28, 496, 8128 ]
|
||||
DOB = 1987-07-05T05:45:00Z
|
||||
```
|
||||
|
||||
Which could be defined in Go as:
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
Age int
|
||||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time // requires `import time`
|
||||
}
|
||||
```
|
||||
|
||||
And then decoded with:
|
||||
|
||||
```go
|
||||
var conf Config
|
||||
if _, err := toml.Decode(tomlData, &conf); err != nil {
|
||||
// handle error
|
||||
}
|
||||
```
|
||||
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
||||
key value directly:
|
||||
|
||||
```toml
|
||||
some_key_NAME = "wat"
|
||||
```
|
||||
|
||||
```go
|
||||
type TOML struct {
|
||||
ObscureKey string `toml:"some_key_NAME"`
|
||||
}
|
||||
```
|
||||
|
||||
### Using the `encoding.TextUnmarshaler` interface
|
||||
|
||||
Here's an example that automatically parses duration strings into
|
||||
`time.Duration` values:
|
||||
|
||||
```toml
|
||||
[[song]]
|
||||
name = "Thunder Road"
|
||||
duration = "4m49s"
|
||||
|
||||
[[song]]
|
||||
name = "Stairway to Heaven"
|
||||
duration = "8m03s"
|
||||
```
|
||||
|
||||
Which can be decoded with:
|
||||
|
||||
```go
|
||||
type song struct {
|
||||
Name string
|
||||
Duration duration
|
||||
}
|
||||
type songs struct {
|
||||
Song []song
|
||||
}
|
||||
var favorites songs
|
||||
if _, err := toml.Decode(blob, &favorites); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, s := range favorites.Song {
|
||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
||||
}
|
||||
```
|
||||
|
||||
And you'll also need a `duration` type that satisfies the
|
||||
`encoding.TextUnmarshaler` interface:
|
||||
|
||||
```go
|
||||
type duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *duration) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
d.Duration, err = time.ParseDuration(string(text))
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
### More complex usage
|
||||
|
||||
Here's an example of how to load the example from the official spec page:
|
||||
|
||||
```toml
|
||||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||
|
||||
# Line breaks are OK when inside arrays
|
||||
hosts = [
|
||||
"alpha",
|
||||
"omega"
|
||||
]
|
||||
```
|
||||
|
||||
And the corresponding Go types are:
|
||||
|
||||
```go
|
||||
type tomlConfig struct {
|
||||
Title string
|
||||
Owner ownerInfo
|
||||
DB database `toml:"database"`
|
||||
Servers map[string]server
|
||||
Clients clients
|
||||
}
|
||||
|
||||
type ownerInfo struct {
|
||||
Name string
|
||||
Org string `toml:"organization"`
|
||||
Bio string
|
||||
DOB time.Time
|
||||
}
|
||||
|
||||
type database struct {
|
||||
Server string
|
||||
Ports []int
|
||||
ConnMax int `toml:"connection_max"`
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
type server struct {
|
||||
IP string
|
||||
DC string
|
||||
}
|
||||
|
||||
type clients struct {
|
||||
Data [][]interface{}
|
||||
Hosts []string
|
||||
}
|
||||
```
|
||||
|
||||
Note that a case insensitive match will be tried if an exact match can't be
|
||||
found.
|
||||
|
||||
A working example of the above can be found in `_examples/example.{go,toml}`.
|
||||
|
|
@ -1,509 +0,0 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func e(format string, args ...interface{}) error {
|
||||
return fmt.Errorf("toml: "+format, args...)
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
// TOML description of themselves.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
||||
func Unmarshal(p []byte, v interface{}) error {
|
||||
_, err := Decode(string(p), v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
// When using the various `Decode*` functions, the type `Primitive` may
|
||||
// be given to any value, and its decoding will be delayed.
|
||||
//
|
||||
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
|
||||
//
|
||||
// The underlying representation of a `Primitive` value is subject to change.
|
||||
// Do not rely on it.
|
||||
//
|
||||
// N.B. Primitive values are still parsed, so using them will only avoid
|
||||
// the overhead of reflection. They can be useful when you don't know the
|
||||
// exact type of TOML data until run time.
|
||||
type Primitive struct {
|
||||
undecoded interface{}
|
||||
context Key
|
||||
}
|
||||
|
||||
// DEPRECATED!
|
||||
//
|
||||
// Use MetaData.PrimitiveDecode instead.
|
||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md := MetaData{decoded: make(map[string]bool)}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
	// Restore the key context saved when the Primitive was captured so the
	// decoded-key bookkeeping lands under the right path; always clear it
	// again on exit.
	md.context = primValue.context
	defer func() { md.context = nil }()
	return md.unify(primValue.undecoded, rvalue(v))
}
|
||||
|
||||
// Decode will decode the contents of `data` in TOML format into a pointer
|
||||
// `v`.
|
||||
//
|
||||
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
|
||||
// used interchangeably.)
|
||||
//
|
||||
// TOML arrays of tables correspond to either a slice of structs or a slice
|
||||
// of maps.
|
||||
//
|
||||
// TOML datetimes correspond to Go `time.Time` values.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond
|
||||
// to the obvious Go types.
|
||||
//
|
||||
// An exception to the above rules is if a type implements the
|
||||
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
|
||||
// (floats, strings, integers, booleans and datetimes) will be converted to
|
||||
// a byte string and given to the value's UnmarshalText method. See the
|
||||
// Unmarshaler example for a demonstration with time duration strings.
|
||||
//
|
||||
// Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go
|
||||
// struct. The special `toml` struct tag may be used to map TOML keys to
|
||||
// struct fields that don't match the key name exactly. (See the example.)
|
||||
// A case insensitive match to struct names will be tried if an exact match
|
||||
// can't be found.
|
||||
//
|
||||
// The mapping between TOML values and Go values is loose. That is, there
|
||||
// may exist TOML values that cannot be placed into your representation, and
|
||||
// there may be parts of your representation that do not correspond to
|
||||
// TOML values. This loose mapping can be made stricter by using the IsDefined
|
||||
// and/or Undecoded methods on the MetaData returned.
|
||||
//
|
||||
// This decoder will not handle cyclic types. If a cyclic type is passed,
|
||||
// `Decode` will not terminate.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
|
||||
}
|
||||
if rv.IsNil() {
|
||||
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
|
||||
}
|
||||
p, err := parse(data)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
md := MetaData{
|
||||
p.mapping, p.types, p.ordered,
|
||||
make(map[string]bool, len(p.ordered)), nil,
|
||||
}
|
||||
return md, md.unify(p.mapping, indirect(rv))
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at `fpath` and decode it for you.
|
||||
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// DecodeReader is just like Decode, except it will consume all bytes
|
||||
// from the reader and decode it for you.
|
||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {

	// Special case. Look for a `Primitive` value.
	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
		// Save the undecoded data and the key context into the primitive
		// value. The context is copied because md.context is mutated as
		// decoding walks the document.
		context := make(Key, len(md.context))
		copy(context, md.context)
		rv.Set(reflect.ValueOf(Primitive{
			undecoded: data,
			context:   context,
		}))
		return nil
	}

	// Special case. Unmarshaler Interface support.
	if rv.CanAddr() {
		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
			return v.UnmarshalTOML(data)
		}
	}

	// Special case. Handle time.Time values specifically.
	// TODO: Remove this code when we decide to drop support for Go 1.1.
	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
	// interfaces.
	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
		return md.unifyDatetime(data, rv)
	}

	// Special case. Look for a value satisfying the TextUnmarshaler interface.
	if v, ok := rv.Interface().(TextUnmarshaler); ok {
		return md.unifyText(data, v)
	}
	// BUG(burntsushi)
	// The behavior here is incorrect whenever a Go type satisfies the
	// encoding.TextUnmarshaler interface but also corresponds to a TOML
	// hash or array. In particular, the unmarshaler should only be applied
	// to primitive TOML values. But at this point, it will be applied to
	// all kinds of values and produce an incorrect error whenever those values
	// are hashes or arrays (including arrays of tables).

	k := rv.Kind()

	// laziness: all integer kinds (Int..Uint64 are contiguous in the
	// reflect.Kind enumeration) are routed through unifyInt.
	if k >= reflect.Int && k <= reflect.Uint64 {
		return md.unifyInt(data, rv)
	}
	switch k {
	case reflect.Ptr:
		// Allocate a fresh element, decode into it, then point rv at it.
		elem := reflect.New(rv.Type().Elem())
		err := md.unify(data, reflect.Indirect(elem))
		if err != nil {
			return err
		}
		rv.Set(elem)
		return nil
	case reflect.Struct:
		return md.unifyStruct(data, rv)
	case reflect.Map:
		return md.unifyMap(data, rv)
	case reflect.Array:
		return md.unifyArray(data, rv)
	case reflect.Slice:
		return md.unifySlice(data, rv)
	case reflect.String:
		return md.unifyString(data, rv)
	case reflect.Bool:
		return md.unifyBool(data, rv)
	case reflect.Interface:
		// we only support empty interfaces.
		if rv.NumMethod() > 0 {
			return e("unsupported type %s", rv.Type())
		}
		return md.unifyAnything(data, rv)
	case reflect.Float32:
		fallthrough
	case reflect.Float64:
		return md.unifyFloat64(data, rv)
	}
	return e("unsupported type %s", rv.Kind())
}
|
||||
|
||||
// unifyStruct decodes a TOML table (map[string]interface{}) into the Go
// struct `rv`, matching keys to fields exactly first and then by a
// case-insensitive fallback. A nil mapping is a no-op; a non-table value
// is a type-mismatch error.
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
	tmap, ok := mapping.(map[string]interface{})
	if !ok {
		if mapping == nil {
			return nil
		}
		return e("type mismatch for %s: expected table but found %T",
			rv.Type().String(), mapping)
	}

	for key, datum := range tmap {
		var f *field
		// cachedTypeFields flattens the struct's (possibly embedded)
		// fields; an exact name match wins over a case-insensitive one.
		fields := cachedTypeFields(rv.Type())
		for i := range fields {
			ff := &fields[i]
			if ff.name == key {
				f = ff
				break
			}
			if f == nil && strings.EqualFold(ff.name, key) {
				f = ff
			}
		}
		if f != nil {
			subv := rv
			// Walk the field index path (handles embedded structs),
			// allocating through nil pointers as we go.
			for _, i := range f.index {
				subv = indirect(subv.Field(i))
			}
			if isUnifiable(subv) {
				md.decoded[md.context.add(key).String()] = true
				md.context = append(md.context, key)
				if err := md.unify(datum, subv); err != nil {
					return err
				}
				md.context = md.context[0 : len(md.context)-1]
			} else if f.name != "" {
				// Bad user! No soup for you!
				return e("cannot write unexported field %s.%s",
					rv.Type().String(), f.name)
			}
		}
	}
	return nil
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if tmap == nil {
|
||||
return nil
|
||||
}
|
||||
return badtype("map", mapping)
|
||||
}
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeMap(rv.Type()))
|
||||
}
|
||||
for k, v := range tmap {
|
||||
md.decoded[md.context.add(k).String()] = true
|
||||
md.context = append(md.context, k)
|
||||
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
||||
if err := md.unify(v, rvval); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
|
||||
rvkey.SetString(k)
|
||||
rv.SetMapIndex(rvkey, rvval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
sliceLen := datav.Len()
|
||||
if sliceLen != rv.Len() {
|
||||
return e("expected array length %d; got TOML array of length %d",
|
||||
rv.Len(), sliceLen)
|
||||
}
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
n := datav.Len()
|
||||
if rv.IsNil() || rv.Cap() < n {
|
||||
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
|
||||
}
|
||||
rv.SetLen(n)
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
||||
sliceLen := data.Len()
|
||||
for i := 0; i < sliceLen; i++ {
|
||||
v := data.Index(i).Interface()
|
||||
sliceval := indirect(rv.Index(i))
|
||||
if err := md.unify(v, sliceval); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
||||
if _, ok := data.(time.Time); ok {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
return badtype("time.Time", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
if s, ok := data.(string); ok {
|
||||
rv.SetString(s)
|
||||
return nil
|
||||
}
|
||||
return badtype("string", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(float64); ok {
|
||||
switch rv.Kind() {
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
rv.SetFloat(num)
|
||||
default:
|
||||
panic("bug")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("float", data)
|
||||
}
|
||||
|
||||
// unifyInt stores a parsed TOML integer (always an int64 from the
// parser) into any Go integer kind, with bounds checks for the narrower
// signed and unsigned kinds.
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
	if num, ok := data.(int64); ok {
		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
			switch rv.Kind() {
			case reflect.Int, reflect.Int64:
				// No bounds checking necessary.
			case reflect.Int8:
				if num < math.MinInt8 || num > math.MaxInt8 {
					return e("value %d is out of range for int8", num)
				}
			case reflect.Int16:
				if num < math.MinInt16 || num > math.MaxInt16 {
					return e("value %d is out of range for int16", num)
				}
			case reflect.Int32:
				if num < math.MinInt32 || num > math.MaxInt32 {
					return e("value %d is out of range for int32", num)
				}
			}
			rv.SetInt(num)
		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
			// NOTE(review): for Uint and Uint64 there is no `num < 0`
			// guard, so a negative TOML integer wraps around here rather
			// than erroring — confirm whether that is intended.
			unum := uint64(num)
			switch rv.Kind() {
			case reflect.Uint, reflect.Uint64:
				// No bounds checking necessary.
			case reflect.Uint8:
				if num < 0 || unum > math.MaxUint8 {
					return e("value %d is out of range for uint8", num)
				}
			case reflect.Uint16:
				if num < 0 || unum > math.MaxUint16 {
					return e("value %d is out of range for uint16", num)
				}
			case reflect.Uint32:
				if num < 0 || unum > math.MaxUint32 {
					return e("value %d is out of range for uint32", num)
				}
			}
			rv.SetUint(unum)
		} else {
			// unify only routes integer kinds here, so any other kind
			// is a programmer error.
			panic("unreachable")
		}
		return nil
	}
	return badtype("integer", data)
}
|
||||
|
||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
if b, ok := data.(bool); ok {
|
||||
rv.SetBool(b)
|
||||
return nil
|
||||
}
|
||||
return badtype("boolean", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
// unifyText converts a primitive parsed TOML value to its textual form
// and hands it to v's UnmarshalText. Non-primitive values (tables,
// arrays) are rejected with a badtype error.
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
	var s string
	switch sdata := data.(type) {
	case TextMarshaler:
		text, err := sdata.MarshalText()
		if err != nil {
			return err
		}
		s = string(text)
	case fmt.Stringer:
		s = sdata.String()
	case string:
		s = sdata
	case bool:
		s = fmt.Sprintf("%v", sdata)
	case int64:
		s = fmt.Sprintf("%d", sdata)
	case float64:
		s = fmt.Sprintf("%f", sdata)
	default:
		return badtype("primitive (string-like)", data)
	}
	if err := v.UnmarshalText([]byte(s)); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||
func rvalue(v interface{}) reflect.Value {
|
||||
return indirect(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func (indirect is recursive — see below) 
|
||||
|
||||
func isUnifiable(rv reflect.Value) bool {
|
||||
if rv.CanSet() {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func badtype(expected string, data interface{}) error {
|
||||
return e("cannot load TOML value of type %T into a Go %s", data, expected)
|
||||
}
|
||||
|
|
@ -1,121 +0,0 @@
|
|||
package toml
|
||||
|
||||
import "strings"
|
||||
|
||||
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
	// mapping is the parsed document: nested map[string]interface{}.
	mapping map[string]interface{}
	// types records the TOML type of each dotted key path.
	types map[string]tomlType
	// keys lists every key in document order.
	keys []Key
	// decoded marks dotted key paths that have been unified into Go values.
	decoded map[string]bool
	context Key // Used only during decoding.
}
|
||||
|
||||
// IsDefined returns true if the key given exists in the TOML data. The key
|
||||
// should be specified hierarchially. e.g.,
|
||||
//
|
||||
// // access the TOML key 'a.b.c'
|
||||
// IsDefined("a", "b", "c")
|
||||
//
|
||||
// IsDefined will return false if an empty key given. Keys are case sensitive.
|
||||
func (md *MetaData) IsDefined(key ...string) bool {
|
||||
if len(key) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var hash map[string]interface{}
|
||||
var ok bool
|
||||
var hashOrVal interface{} = md.mapping
|
||||
for _, k := range key {
|
||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
||||
return false
|
||||
}
|
||||
if hashOrVal, ok = hash[k]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Type returns a string representation of the type of the key specified.
|
||||
//
|
||||
// Type will return the empty string if given an empty key or a key that
|
||||
// does not exist. Keys are case sensitive.
|
||||
func (md *MetaData) Type(key ...string) string {
|
||||
fullkey := strings.Join(key, ".")
|
||||
if typ, ok := md.types[fullkey]; ok {
|
||||
return typ.typeString()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string

// String joins the key's segments with dots, e.g. Key{"a","b"} -> "a.b".
func (k Key) String() string {
	return strings.Join([]string(k), ".")
}
|
||||
|
||||
func (k Key) maybeQuotedAll() string {
|
||||
var ss []string
|
||||
for i := range k {
|
||||
ss = append(ss, k.maybeQuoted(i))
|
||||
}
|
||||
return strings.Join(ss, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuoted(i int) string {
|
||||
quote := false
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c) {
|
||||
quote = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if quote {
|
||||
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
|
||||
}
|
||||
return k[i]
|
||||
}
|
||||
|
||||
func (k Key) add(piece string) Key {
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
|
||||
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
	return md.keys
}
|
||||
|
||||
// Undecoded returns all keys that have not been decoded in the order in which
|
||||
// they appear in the original TOML document.
|
||||
//
|
||||
// This includes keys that haven't been decoded because of a Primitive value.
|
||||
// Once the Primitive value is decoded, the keys will be considered decoded.
|
||||
//
|
||||
// Also note that decoding into an empty interface will result in no decoding,
|
||||
// and so no keys will be considered decoded.
|
||||
//
|
||||
// In this sense, the Undecoded keys correspond to keys in the TOML document
|
||||
// that do not have a concrete type in your representation.
|
||||
func (md *MetaData) Undecoded() []Key {
|
||||
undecoded := make([]Key, 0, len(md.keys))
|
||||
for _, key := range md.keys {
|
||||
if !md.decoded[key.String()] {
|
||||
undecoded = append(undecoded, key)
|
||||
}
|
||||
}
|
||||
return undecoded
|
||||
}
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
/*
|
||||
Package toml provides facilities for decoding and encoding TOML configuration
|
||||
files via reflection. There is also support for delaying decoding with
|
||||
the Primitive type, and querying the set of keys in a TOML document with the
|
||||
MetaData type.
|
||||
|
||||
The specification implemented: https://github.com/toml-lang/toml
|
||||
|
||||
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
|
||||
whether a file is a valid TOML document. It can also be used to print the
|
||||
type of each key in a TOML document.
|
||||
|
||||
Testing
|
||||
|
||||
There are two important types of tests used for this package. The first is
|
||||
contained inside '*_test.go' files and uses the standard Go unit testing
|
||||
framework. These tests are primarily devoted to holistically testing the
|
||||
decoder and encoder.
|
||||
|
||||
The second type of testing is used to verify the implementation's adherence
|
||||
to the TOML specification. These tests have been factored into their own
|
||||
project: https://github.com/BurntSushi/toml-test
|
||||
|
||||
The reason the tests are in a separate project is so that they can be used by
|
||||
any implementation of TOML. Namely, it is language agnostic.
|
||||
*/
|
||||
package toml
|
||||
|
|
@ -1,568 +0,0 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// tomlEncodeError wraps an error so that safeEncode's recover can tell
// encoder-raised panics apart from genuine runtime panics.
type tomlEncodeError struct{ error }

// Sentinel errors raised (via encPanic) for Go values that have no valid
// TOML representation.
var (
	errArrayMixedElementTypes = errors.New(
		"toml: cannot encode array with mixed element types")
	errArrayNilElement = errors.New(
		"toml: cannot encode array with nil element")
	errNonString = errors.New(
		"toml: cannot encode a map with non-string key type")
	errAnonNonStruct = errors.New(
		"toml: cannot encode an anonymous field that is not a struct")
	errArrayNoTable = errors.New(
		"toml: TOML array element cannot contain a table")
	errNoKey = errors.New(
		"toml: top-level values must be Go maps or structs")
	errAnything = errors.New("") // used in testing
)
|
||||
|
||||
// quotedReplacer escapes the characters that must be backslash-escaped
// inside a basic (double-quoted) TOML string.
var quotedReplacer = strings.NewReplacer(
	"\t", "\\t",
	"\n", "\\n",
	"\r", "\\r",
	"\"", "\\\"",
	"\\", "\\\\",
)
|
||||
|
||||
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
	// A single indentation level. By default it is two spaces.
	Indent string

	// hasWritten is whether we have written any output to w yet.
	hasWritten bool
	// w buffers all output to the underlying io.Writer; Encode flushes it.
	w *bufio.Writer
}
|
||||
|
||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
||||
// given. By default, a single indentation level is 2 spaces.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: bufio.NewWriter(w),
|
||||
Indent: " ",
|
||||
}
|
||||
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
	// Resolve pointers/interfaces first, encode from the empty key (the
	// document root), then flush the buffered writer.
	rv := eindirect(reflect.ValueOf(v))
	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
		return err
	}
	return enc.w.Flush()
}
|
||||
|
||||
// safeEncode runs enc.encode and converts any panic carrying a
// tomlEncodeError (raised via encPanic deeper in the encoder) back into
// an ordinary returned error. Any other panic is re-raised untouched.
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if terr, ok := r.(tomlEncodeError); ok {
				err = terr.error
				return
			}
			panic(r)
		}
	}()
	enc.encode(key, rv)
	return nil
}
|
||||
|
||||
// encode dispatches on rv's dynamic/static type: primitives and plain
// arrays become `key = value` lines, maps/structs become tables, slices
// of tables become [[key]] arrays-of-tables, and nil pointers, maps and
// interfaces are silently skipped.
func (enc *Encoder) encode(key Key, rv reflect.Value) {
	// Special case. Time needs to be in ISO8601 format.
	// Special case. If we can marshal the type to text, then we used that.
	// Basically, this prevents the encoder for handling these types as
	// generic structs (or whatever the underlying type of a TextMarshaler is).
	switch rv.Interface().(type) {
	case time.Time, TextMarshaler:
		enc.keyEqElement(key, rv)
		return
	}

	k := rv.Kind()
	switch k {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64,
		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
		enc.keyEqElement(key, rv)
	case reflect.Array, reflect.Slice:
		// A slice whose TOML type is "array of tables" gets [[key]]
		// headers; any other array is an inline `key = [...]`.
		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
			enc.eArrayOfTables(key, rv)
		} else {
			enc.keyEqElement(key, rv)
		}
	case reflect.Interface:
		if rv.IsNil() {
			return
		}
		enc.encode(key, rv.Elem())
	case reflect.Map:
		if rv.IsNil() {
			return
		}
		enc.eTable(key, rv)
	case reflect.Ptr:
		if rv.IsNil() {
			return
		}
		enc.encode(key, rv.Elem())
	case reflect.Struct:
		enc.eTable(key, rv)
	default:
		panic(e("unsupported type for key '%s': %s", key, k))
	}
}
|
||||
|
||||
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
	switch v := rv.Interface().(type) {
	case time.Time:
		// Special case time.Time as a primitive. Has to come before
		// TextMarshaler below because time.Time implements
		// encoding.TextMarshaler, but we need to always use UTC.
		// Note the format string drops any sub-second precision and
		// always emits a literal "Z" suffix.
		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
		return
	case TextMarshaler:
		// Special case. Use text marshaler if it's available for this value.
		if s, err := v.MarshalText(); err != nil {
			encPanic(err)
		} else {
			enc.writeQuoted(string(s))
		}
		return
	}
	switch rv.Kind() {
	case reflect.Bool:
		enc.wf(strconv.FormatBool(rv.Bool()))
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64:
		enc.wf(strconv.FormatInt(rv.Int(), 10))
	case reflect.Uint, reflect.Uint8, reflect.Uint16,
		reflect.Uint32, reflect.Uint64:
		enc.wf(strconv.FormatUint(rv.Uint(), 10))
	case reflect.Float32:
		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
	case reflect.Float64:
		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
	case reflect.Array, reflect.Slice:
		enc.eArrayOrSliceElement(rv)
	case reflect.Interface:
		// Unwrap the interface and encode its dynamic value.
		enc.eElement(rv.Elem())
	case reflect.String:
		enc.writeQuoted(rv.String())
	default:
		panic(e("unexpected primitive type: %s", rv.Kind()))
	}
}
|
||||
|
||||
// floatAddDecimal appends ".0" to a formatted float that has no decimal
// point: by the TOML spec, all floats must have a decimal with at least
// one digit on either side.
func floatAddDecimal(fstr string) string {
	if strings.Contains(fstr, ".") {
		return fstr
	}
	return fstr + ".0"
}
|
||||
|
||||
func (enc *Encoder) writeQuoted(s string) {
|
||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
}
|
||||
enc.wf("]")
|
||||
}
|
||||
|
||||
// eArrayOfTables writes `key` as a TOML array of tables: one [[key]]
// header per non-nil element of the slice/array rv. An empty key is an
// error because an array of tables cannot appear at the document root.
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	for i := 0; i < rv.Len(); i++ {
		trv := rv.Index(i)
		if isNil(trv) {
			// Nil elements are skipped rather than encoded.
			continue
		}
		panicIfInvalidKey(key)
		enc.newline()
		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
		enc.eMapOrStruct(key, trv)
	}
}
|
||||
|
||||
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
||||
panicIfInvalidKey(key)
|
||||
if len(key) == 1 {
|
||||
// Output an extra newline between top-level tables.
|
||||
// (The newline isn't written if nothing else has been written though.)
|
||||
enc.newline()
|
||||
}
|
||||
if len(key) > 0 {
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.newline()
|
||||
}
|
||||
enc.eMapOrStruct(key, rv)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv)
|
||||
case reflect.Struct:
|
||||
enc.eStruct(key, rv)
|
||||
default:
|
||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
// eMap encodes a Go map (string keys only) as a TOML table. Keys whose
// values are themselves tables are written after all directly-held keys,
// and each group is sorted for deterministic output.
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
	rt := rv.Type()
	if rt.Key().Kind() != reflect.String {
		encPanic(errNonString)
	}

	// Sort keys so that we have deterministic output. And write keys directly
	// underneath this key first, before writing sub-structs or sub-maps.
	var mapKeysDirect, mapKeysSub []string
	for _, mapKey := range rv.MapKeys() {
		k := mapKey.String()
		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
			mapKeysSub = append(mapKeysSub, k)
		} else {
			mapKeysDirect = append(mapKeysDirect, k)
		}
	}

	var writeMapKeys = func(mapKeys []string) {
		sort.Strings(mapKeys)
		for _, mapKey := range mapKeys {
			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
			if isNil(mrv) {
				// Don't write anything for nil fields.
				continue
			}
			enc.encode(key.add(mapKey), mrv)
		}
	}
	writeMapKeys(mapKeysDirect)
	writeMapKeys(mapKeysSub)
}
|
||||
|
||||
// eStruct encodes a Go struct as a TOML table. Exported fields are
// partitioned into "direct" (primitive/array) and "sub" (table-valued)
// fields; direct fields are written first so they land in this table
// rather than in a sub-table opened later. Anonymous (embedded) structs
// without a tag name are flattened, like encoding/json.
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
	// Write keys for fields directly under this key first, because if we write
	// a field that creates a new table, then all keys under it will be in that
	// table (not the one we're writing here).
	rt := rv.Type()
	var fieldsDirect, fieldsSub [][]int
	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
		for i := 0; i < rt.NumField(); i++ {
			f := rt.Field(i)
			// skip unexported fields
			if f.PkgPath != "" && !f.Anonymous {
				continue
			}
			frv := rv.Field(i)
			if f.Anonymous {
				t := f.Type
				switch t.Kind() {
				case reflect.Struct:
					// Treat anonymous struct fields with
					// tag names as though they are not
					// anonymous, like encoding/json does.
					if getOptions(f.Tag).name == "" {
						addFields(t, frv, f.Index)
						continue
					}
				case reflect.Ptr:
					if t.Elem().Kind() == reflect.Struct &&
						getOptions(f.Tag).name == "" {
						// Nil embedded struct pointers contribute no
						// fields at all.
						if !frv.IsNil() {
							addFields(t.Elem(), frv.Elem(), f.Index)
						}
						continue
					}
					// Fall through to the normal field encoding logic below
					// for non-struct anonymous fields.
				}
			}

			if typeIsHash(tomlTypeOfGo(frv)) {
				fieldsSub = append(fieldsSub, append(start, f.Index...))
			} else {
				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
			}
		}
	}
	addFields(rt, rv, nil)

	var writeFields = func(fields [][]int) {
		for _, fieldIndex := range fields {
			sft := rt.FieldByIndex(fieldIndex)
			sf := rv.FieldByIndex(fieldIndex)
			if isNil(sf) {
				// Don't write anything for nil fields.
				continue
			}

			// Honor the `toml` struct tag: skip, rename, omitempty,
			// omitzero.
			opts := getOptions(sft.Tag)
			if opts.skip {
				continue
			}
			keyName := sft.Name
			if opts.name != "" {
				keyName = opts.name
			}
			if opts.omitempty && isEmpty(sf) {
				continue
			}
			if opts.omitzero && isZero(sf) {
				continue
			}

			enc.encode(key.add(keyName), sf)
		}
	}
	writeFields(fieldsDirect)
	writeFields(fieldsSub)
}
|
||||
|
||||
// tomlTypeOfGo returns the TOML type of a Go value. The returned type may
// be nil, which means no concrete TOML type could be found (the value is
// nil or invalid); callers such as tomlArrayType use a nil result to reject
// illegal array elements.
func tomlTypeOfGo(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() {
		return nil
	}
	switch rv.Kind() {
	case reflect.Bool:
		return tomlBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64:
		return tomlInteger
	case reflect.Float32, reflect.Float64:
		return tomlFloat
	case reflect.Array, reflect.Slice:
		// A slice/array whose elements are tables is an "array of tables".
		if typeEqual(tomlHash, tomlArrayType(rv)) {
			return tomlArrayHash
		}
		return tomlArray
	case reflect.Ptr, reflect.Interface:
		// Classify the pointed-to / underlying value instead.
		return tomlTypeOfGo(rv.Elem())
	case reflect.String:
		return tomlString
	case reflect.Map:
		return tomlHash
	case reflect.Struct:
		switch rv.Interface().(type) {
		case time.Time:
			return tomlDatetime
		case TextMarshaler:
			// TextMarshaler implementations encode as TOML strings.
			return tomlString
		default:
			return tomlHash
		}
	default:
		panic("unexpected reflect.Kind: " + rv.Kind().String())
	}
}
|
||||
|
||||
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic (via encPanic) if it finds a type
// that cannot be expressed in TOML (such as nil elements, heterogeneous
// arrays or directly nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
		return nil
	}
	firstType := tomlTypeOfGo(rv.Index(0))
	if firstType == nil {
		// nil elements are not representable in a TOML array.
		encPanic(errArrayNilElement)
	}

	rvlen := rv.Len()
	for i := 1; i < rvlen; i++ {
		elem := rv.Index(i)
		switch elemType := tomlTypeOfGo(elem); {
		case elemType == nil:
			encPanic(errArrayNilElement)
		case !typeEqual(firstType, elemType):
			// TOML arrays must be homogeneous.
			encPanic(errArrayMixedElementTypes)
		}
	}
	// If we have a nested array, then we must make sure that the nested
	// array contains ONLY primitives.
	// This checks arbitrarily nested arrays.
	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
		nest := tomlArrayType(eindirect(rv.Index(0)))
		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
			encPanic(errArrayNoTable)
		}
	}
	return firstType
}
|
||||
|
||||
type tagOptions struct {
|
||||
skip bool // "-"
|
||||
name string
|
||||
omitempty bool
|
||||
omitzero bool
|
||||
}
|
||||
|
||||
func getOptions(tag reflect.StructTag) tagOptions {
|
||||
t := tag.Get("toml")
|
||||
if t == "-" {
|
||||
return tagOptions{skip: true}
|
||||
}
|
||||
var opts tagOptions
|
||||
parts := strings.Split(t, ",")
|
||||
opts.name = parts[0]
|
||||
for _, s := range parts[1:] {
|
||||
switch s {
|
||||
case "omitempty":
|
||||
opts.omitempty = true
|
||||
case "omitzero":
|
||||
opts.omitzero = true
|
||||
}
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
func isZero(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return rv.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return rv.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return rv.Float() == 0.0
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isEmpty(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||
return rv.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !rv.Bool()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newline writes a single newline, but only after something has already
// been written, so the output never begins with a blank line.
func (enc *Encoder) newline() {
	if enc.hasWritten {
		enc.wf("\n")
	}
}

// keyEqElement writes one "key = value" line for the last component of key,
// indented by the key's depth. Empty keys abort encoding via encPanic.
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	panicIfInvalidKey(key)
	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
	enc.eElement(val)
	enc.newline()
}

// wf writes a formatted string to the underlying writer, converting any
// write error into an encoder panic (recovered at the public API boundary).
func (enc *Encoder) wf(format string, v ...interface{}) {
	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
		encPanic(err)
	}
	enc.hasWritten = true
}

// indentStr returns the indentation prefix for a key of the given depth
// (one copy of enc.Indent per nesting level above the first).
func (enc *Encoder) indentStr(key Key) string {
	return strings.Repeat(enc.Indent, len(key)-1)
}

// encPanic aborts encoding with err; the value is wrapped in
// tomlEncodeError so the top-level API can tell it apart from real bugs.
func encPanic(err error) {
	panic(tomlEncodeError{err})
}
|
||||
|
||||
func eindirect(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return eindirect(v.Elem())
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
func isNil(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return rv.IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// panicIfInvalidKey aborts encoding (via encPanic) when any component of
// key is the empty string; empty key parts cannot be represented in TOML.
func panicIfInvalidKey(key Key) {
	for _, k := range key {
		if len(k) == 0 {
			encPanic(e("Key '%s' is not a valid table name. Key names "+
				"cannot be empty.", key.maybeQuotedAll()))
		}
	}
}

// isValidKeyName reports whether s may be used as a key name: any
// non-empty string is acceptable (quoting is handled elsewhere).
func isValidKeyName(s string) bool {
	return len(s) != 0
}
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
// +build go1.2

package toml

// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.

import (
	"encoding"
)

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
// +build !go1.2

package toml

// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
	MarshalText() (text []byte, err error)
}

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
	UnmarshalText(text []byte) error
}
|
||||
|
|
@ -1,953 +0,0 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// itemType identifies the kind of token emitted by the lexer.
type itemType int

const (
	itemError itemType = iota
	itemNIL   // used in the parser to indicate no type
	itemEOF
	itemText
	itemString
	itemRawString
	itemMultilineString
	itemRawMultilineString
	itemBool
	itemInteger
	itemFloat
	itemDatetime
	itemArray // the start of an array
	itemArrayEnd
	itemTableStart
	itemTableEnd
	itemArrayTableStart
	itemArrayTableEnd
	itemKeyStart
	itemCommentStart
	itemInlineTableStart
	itemInlineTableEnd
)

// Significant characters in TOML input. eof (0) is a sentinel returned by
// lexer.next when the input is exhausted; it never appears in valid input.
const (
	eof              = 0
	comma            = ','
	tableStart       = '['
	tableEnd         = ']'
	arrayTableStart  = '['
	arrayTableEnd    = ']'
	tableSep         = '.'
	keySep           = '='
	arrayStart       = '['
	arrayEnd         = ']'
	commentStart     = '#'
	stringStart      = '"'
	stringEnd        = '"'
	rawStringStart   = '\''
	rawStringEnd     = '\''
	inlineTableStart = '{'
	inlineTableEnd   = '}'
)
|
||||
|
||||
// stateFn is one state of the lexer's state machine: it consumes some input
// and returns the next state. A nil return terminates the machine (after an
// error or EOF item has been emitted).
type stateFn func(lx *lexer) stateFn

// lexer holds the state of a single lexing pass over a TOML document.
type lexer struct {
	input string
	start int // start of the token currently being built
	pos   int // current read position in input
	line  int // 1-based line number, for error reporting
	state stateFn
	items chan item

	// Allow for backing up up to three runes.
	// This is necessary because TOML contains 3-rune tokens (""" and ''').
	prevWidths [3]int
	nprev      int // how many of prevWidths are in use
	// If we emit an eof, we can still back up, but it is not OK to call
	// next again.
	atEOF bool

	// A stack of state functions used to maintain context.
	// The idea is to reuse parts of the state machine in various places.
	// For example, values can appear at the top level or within arbitrarily
	// nested arrays. The last state on the stack is used after a value has
	// been lexed. Similarly for comments.
	stack []stateFn
}

// item is one token produced by the lexer.
// NOTE: fields are positional — item{typ, val, line} literals are used by
// emit/emitTrim/errorf, so the field order must not change.
type item struct {
	typ  itemType
	val  string
	line int
}
|
||||
|
||||
// nextItem returns the next token, stepping the state machine synchronously
// until one is buffered on the items channel. Despite the channel, lexing
// is single-goroutine: the default branch advances the machine in place.
func (lx *lexer) nextItem() item {
	for {
		select {
		case item := <-lx.items:
			return item
		default:
			lx.state = lx.state(lx)
		}
	}
}

// lex creates a lexer over input, starting in the top-level state.
func lex(input string) *lexer {
	lx := &lexer{
		input: input,
		state: lexTop,
		line:  1,
		items: make(chan item, 10),
		stack: make([]stateFn, 0, 10),
	}
	return lx
}
|
||||
|
||||
// push saves state on the return stack; pop resumes it after a nested
// construct (value, comment, ...) has been lexed.
func (lx *lexer) push(state stateFn) {
	lx.stack = append(lx.stack, state)
}

// pop removes and returns the most recently pushed state. Popping an empty
// stack is a lexer bug and produces an error state.
func (lx *lexer) pop() stateFn {
	if len(lx.stack) == 0 {
		return lx.errorf("BUG in lexer: no states to pop")
	}
	last := lx.stack[len(lx.stack)-1]
	lx.stack = lx.stack[0 : len(lx.stack)-1]
	return last
}

// current returns the text of the token being built (input[start:pos]).
func (lx *lexer) current() string {
	return lx.input[lx.start:lx.pos]
}

// emit sends the current token on the items channel and starts a new token
// at the current position.
func (lx *lexer) emit(typ itemType) {
	lx.items <- item{typ, lx.current(), lx.line}
	lx.start = lx.pos
}

// emitTrim is emit with surrounding whitespace removed from the value.
func (lx *lexer) emitTrim(typ itemType) {
	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
	lx.start = lx.pos
}

// next consumes and returns the next rune, maintaining the line counter and
// the widths of the last three runes (so backup can undo them). At end of
// input it sets atEOF and returns eof; calling next again after that panics.
func (lx *lexer) next() (r rune) {
	if lx.atEOF {
		panic("next called after EOF")
	}
	if lx.pos >= len(lx.input) {
		lx.atEOF = true
		return eof
	}

	if lx.input[lx.pos] == '\n' {
		lx.line++
	}
	lx.prevWidths[2] = lx.prevWidths[1]
	lx.prevWidths[1] = lx.prevWidths[0]
	if lx.nprev < 3 {
		lx.nprev++
	}
	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
	lx.prevWidths[0] = w
	lx.pos += w
	return r
}

// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
	lx.start = lx.pos
}

// backup steps back one rune. prevWidths records the last three rune
// widths, so backup may be called at most three times between calls to
// next; beyond that it panics. Backing up over an eof just clears atEOF.
func (lx *lexer) backup() {
	if lx.atEOF {
		lx.atEOF = false
		return
	}
	if lx.nprev < 1 {
		panic("backed up too far")
	}
	w := lx.prevWidths[0]
	lx.prevWidths[0] = lx.prevWidths[1]
	lx.prevWidths[1] = lx.prevWidths[2]
	lx.nprev--
	lx.pos -= w
	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
		lx.line--
	}
}

// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
	if lx.next() == valid {
		return true
	}
	lx.backup()
	return false
}

// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
	r := lx.next()
	lx.backup()
	return r
}

// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
	for {
		r := lx.next()
		if pred(r) {
			continue
		}
		lx.backup()
		lx.ignore()
		return
	}
}

// errorf stops all lexing by emitting an error item and returning `nil`.
// %q in the format escapes special characters (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
	lx.items <- item{
		itemError,
		fmt.Sprintf(format, values...),
		lx.line,
	}
	return nil
}
|
||||
|
||||
// lexTop consumes elements at the top level of TOML data: whitespace,
// comments, table headers, and key/value pairs.
func lexTop(lx *lexer) stateFn {
	r := lx.next()
	if isWhitespace(r) || isNL(r) {
		return lexSkip(lx, lexTop)
	}
	switch r {
	case commentStart:
		lx.push(lexTop)
		return lexCommentStart
	case tableStart:
		return lexTableStart
	case eof:
		// EOF mid-token is an error; a clean EOF emits itemEOF and stops.
		if lx.pos > lx.start {
			return lx.errorf("unexpected EOF")
		}
		lx.emit(itemEOF)
		return nil
	}

	// At this point, the only valid item can be a key, so we back up
	// and let the key lexer do the rest.
	lx.backup()
	lx.push(lexTopEnd)
	return lexKeyStart
}

// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == commentStart:
		// a comment will read to a newline for us.
		lx.push(lexTop)
		return lexCommentStart
	case isWhitespace(r):
		return lexTopEnd
	case isNL(r):
		lx.ignore()
		return lexTop
	case r == eof:
		lx.emit(itemEOF)
		return nil
	}
	return lx.errorf("expected a top-level item to end with a newline, "+
		"comment, or EOF, but got %q instead", r)
}
|
||||
|
||||
// lexTable lexes the beginning of a table. Namely, it makes sure that
|
||||
// it starts with a character other than '.' and ']'.
|
||||
// It assumes that '[' has already been consumed.
|
||||
// It also handles the case that this is an item in an array of tables.
|
||||
// e.g., '[[name]]'.
|
||||
func lexTableStart(lx *lexer) stateFn {
|
||||
if lx.peek() == arrayTableStart {
|
||||
lx.next()
|
||||
lx.emit(itemArrayTableStart)
|
||||
lx.push(lexArrayTableEnd)
|
||||
} else {
|
||||
lx.emit(itemTableStart)
|
||||
lx.push(lexTableEnd)
|
||||
}
|
||||
return lexTableNameStart
|
||||
}
|
||||
|
||||
func lexTableEnd(lx *lexer) stateFn {
|
||||
lx.emit(itemTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexArrayTableEnd(lx *lexer) stateFn {
|
||||
if r := lx.next(); r != arrayTableEnd {
|
||||
return lx.errorf("expected end of table array name delimiter %q, "+
|
||||
"but got %q instead", arrayTableEnd, r)
|
||||
}
|
||||
lx.emit(itemArrayTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexTableNameStart(lx *lexer) stateFn {
|
||||
lx.skip(isWhitespace)
|
||||
switch r := lx.peek(); {
|
||||
case r == tableEnd || r == eof:
|
||||
return lx.errorf("unexpected end of table name " +
|
||||
"(table names cannot be empty)")
|
||||
case r == tableSep:
|
||||
return lx.errorf("unexpected table separator " +
|
||||
"(table names cannot be empty)")
|
||||
case r == stringStart || r == rawStringStart:
|
||||
lx.ignore()
|
||||
lx.push(lexTableNameEnd)
|
||||
return lexValue // reuse string lexing
|
||||
default:
|
||||
return lexBareTableName
|
||||
}
|
||||
}
|
||||
|
||||
// lexBareTableName lexes the name of a table. It assumes that at least one
|
||||
// valid character for the table has already been read.
|
||||
func lexBareTableName(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isBareKeyChar(r) {
|
||||
return lexBareTableName
|
||||
}
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lexTableNameEnd
|
||||
}
|
||||
|
||||
// lexTableNameEnd reads the end of a piece of a table name, optionally
|
||||
// consuming whitespace.
|
||||
func lexTableNameEnd(lx *lexer) stateFn {
|
||||
lx.skip(isWhitespace)
|
||||
switch r := lx.next(); {
|
||||
case isWhitespace(r):
|
||||
return lexTableNameEnd
|
||||
case r == tableSep:
|
||||
lx.ignore()
|
||||
return lexTableNameStart
|
||||
case r == tableEnd:
|
||||
return lx.pop()
|
||||
default:
|
||||
return lx.errorf("expected '.' or ']' to end table name, "+
|
||||
"but got %q instead", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexKeyStart consumes a key name up until the first non-whitespace
// character. lexKeyStart will ignore whitespace. It emits itemKeyStart and
// hands off to the bare-key or quoted-string lexer.
func lexKeyStart(lx *lexer) stateFn {
	r := lx.peek()
	switch {
	case r == keySep:
		return lx.errorf("unexpected key separator %q", keySep)
	case isWhitespace(r) || isNL(r):
		lx.next()
		return lexSkip(lx, lexKeyStart)
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.emit(itemKeyStart)
		lx.push(lexKeyEnd)
		return lexValue // reuse string lexing
	default:
		lx.ignore()
		lx.emit(itemKeyStart)
		return lexBareKey
	}
}

// lexBareKey consumes the text of a bare key. Assumes that the first
// character (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
	switch r := lx.next(); {
	case isBareKeyChar(r):
		return lexBareKey
	case isWhitespace(r):
		lx.backup()
		lx.emit(itemText)
		return lexKeyEnd
	case r == keySep:
		lx.backup()
		lx.emit(itemText)
		return lexKeyEnd
	default:
		return lx.errorf("bare keys cannot contain %q", r)
	}
}

// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
	switch r := lx.next(); {
	case r == keySep:
		return lexSkip(lx, lexValue)
	case isWhitespace(r):
		return lexSkip(lx, lexKeyEnd)
	default:
		return lx.errorf("expected key separator %q, but got %q instead",
			keySep, r)
	}
}
|
||||
|
||||
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
	// We allow whitespace to precede a value, but NOT newlines.
	// In array syntax, the array states are responsible for ignoring newlines.
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexValue)
	case isDigit(r):
		lx.backup() // avoid an extra state and use the same as above
		return lexNumberOrDateStart
	}
	switch r {
	case arrayStart:
		lx.ignore()
		lx.emit(itemArray)
		return lexArrayValue
	case inlineTableStart:
		lx.ignore()
		lx.emit(itemInlineTableStart)
		return lexInlineTableValue
	case stringStart:
		// Two more '"' in a row means a multiline string opener.
		if lx.accept(stringStart) {
			if lx.accept(stringStart) {
				lx.ignore() // Ignore """
				return lexMultilineString
			}
			lx.backup()
		}
		lx.ignore() // ignore the '"'
		return lexString
	case rawStringStart:
		// Two more "'" in a row means a multiline raw string opener.
		if lx.accept(rawStringStart) {
			if lx.accept(rawStringStart) {
				lx.ignore() // Ignore '''
				return lexMultilineRawString
			}
			lx.backup()
		}
		lx.ignore() // ignore the "'"
		return lexRawString
	case '+', '-':
		return lexNumberStart
	case '.': // special error case, be kind to users
		return lx.errorf("floats must start with a digit, not '.'")
	}
	if unicode.IsLetter(r) {
		// Be permissive here; lexBool will give a nice error if the
		// user wrote something like
		//     x = foo
		// (i.e. not 'true' or 'false' but is something else word-like.)
		lx.backup()
		return lexBool
	}
	return lx.errorf("expected value but found %q instead", r)
}
|
||||
|
||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValue)
	case r == commentStart:
		lx.push(lexArrayValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == arrayEnd:
		// NOTE(caleb): The spec isn't clear about whether you can have
		// a trailing comma or not, so we'll allow it.
		return lexArrayEnd
	}

	lx.backup()
	lx.push(lexArrayValueEnd)
	return lexValue
}

// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and
// newlines and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValueEnd)
	case r == commentStart:
		lx.push(lexArrayValueEnd)
		return lexCommentStart
	case r == comma:
		lx.ignore()
		return lexArrayValue // move on to the next value
	case r == arrayEnd:
		return lexArrayEnd
	}
	return lx.errorf(
		"expected a comma or array terminator %q, but got %q instead",
		arrayEnd, r,
	)
}

// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemArrayEnd)
	return lx.pop()
}
|
||||
|
||||
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is
// ignored; newlines are an error inside inline tables.
func lexInlineTableValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValue)
	case isNL(r):
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	lx.backup()
	lx.push(lexInlineTableValueEnd)
	return lexKeyStart
}

// lexInlineTableValueEnd consumes everything between the end of an inline
// table key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValueEnd)
	case isNL(r):
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValueEnd)
		return lexCommentStart
	case r == comma:
		lx.ignore()
		return lexInlineTableValue
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	return lx.errorf("expected a comma or an inline table terminator %q, "+
		"but got %q instead", inlineTableEnd, r)
}

// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemInlineTableEnd)
	return lx.pop()
}
|
||||
|
||||
// lexString consumes the inner contents of a basic string. It assumes that
// the beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		return lx.errorf("strings cannot contain newlines")
	case r == '\\':
		lx.push(lexString)
		return lexStringEscape
	case r == stringEnd:
		// Emit the contents (excluding the closing quote), then consume
		// and discard the quote itself.
		lx.backup()
		lx.emit(itemString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexString
}

// lexMultilineString consumes the inner contents of a multiline string. It
// assumes that the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case '\\':
		return lexMultilineStringEscape
	case stringEnd:
		if lx.accept(stringEnd) {
			if lx.accept(stringEnd) {
				// Saw the full closing """: step back over all three
				// quotes, emit the contents, then discard the delimiter.
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup()
		}
	}
	return lexMultilineString
}

// lexRawString consumes a raw string. Nothing can be escaped in such a
// string. It assumes that the beginning "'" has already been consumed and
// ignored.
func lexRawString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		return lx.errorf("strings cannot contain newlines")
	case r == rawStringEnd:
		lx.backup()
		lx.emit(itemRawString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexRawString
}

// lexMultilineRawString consumes a raw multiline string. Nothing can be
// escaped in such a string. It assumes that the beginning "'''" has already
// been consumed and ignored.
func lexMultilineRawString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case rawStringEnd:
		if lx.accept(rawStringEnd) {
			if lx.accept(rawStringEnd) {
				// Saw the full closing ''': step back over all three
				// quotes, emit the contents, then discard the delimiter.
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemRawMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup()
		}
	}
	return lexMultilineRawString
}
|
||||
|
||||
// lexMultilineStringEscape consumes an escaped character. It assumes that the
|
||||
// preceding '\\' has already been consumed.
|
||||
func lexMultilineStringEscape(lx *lexer) stateFn {
|
||||
// Handle the special case first:
|
||||
if isNL(lx.next()) {
|
||||
return lexMultilineString
|
||||
}
|
||||
lx.backup()
|
||||
lx.push(lexMultilineString)
|
||||
return lexStringEscape(lx)
|
||||
}
|
||||
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'b':
|
||||
fallthrough
|
||||
case 't':
|
||||
fallthrough
|
||||
case 'n':
|
||||
fallthrough
|
||||
case 'f':
|
||||
fallthrough
|
||||
case 'r':
|
||||
fallthrough
|
||||
case '"':
|
||||
fallthrough
|
||||
case '\\':
|
||||
return lx.pop()
|
||||
case 'u':
|
||||
return lexShortUnicodeEscape
|
||||
case 'U':
|
||||
return lexLongUnicodeEscape
|
||||
}
|
||||
return lx.errorf("invalid escape character %q; only the following "+
|
||||
"escape characters are allowed: "+
|
||||
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
|
||||
}
|
||||
|
||||
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 4; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(`expected four hexadecimal digits after '\u', `+
|
||||
"but got %q instead", lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
func lexLongUnicodeEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 8; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
|
||||
"but got %q instead", lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexNumberOrDateStart consumes either an integer, a float, or a datetime.
// It is entered after lexValue backed up over the first digit.
func lexNumberOrDateStart(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '_':
		return lexNumber
	case 'e', 'E':
		return lexFloat
	case '.':
		return lx.errorf("floats must start with a digit, not '.'")
	}
	return lx.errorf("expected a digit but got %q", r)
}

// lexNumberOrDate consumes either an integer, float or datetime; a '-'
// commits to a datetime, '_' to an integer, '.'/'e'/'E' to a float.
func lexNumberOrDate(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '-':
		return lexDatetime
	case '_':
		return lexNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}

// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexDatetime
	}
	switch r {
	case '-', 'T', ':', '.', 'Z', '+':
		return lexDatetime
	}

	lx.backup()
	lx.emit(itemDatetime)
	return lx.pop()
}

// lexNumberStart consumes either an integer or a float. It assumes that a
// sign has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
	// We MUST see a digit. Even floats have to start with a digit.
	r := lx.next()
	if !isDigit(r) {
		if r == '.' {
			return lx.errorf("floats must start with a digit, not '.'")
		}
		return lx.errorf("expected a digit but got %q", r)
	}
	return lexNumber
}

// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumber
	}
	switch r {
	case '_':
		return lexNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}

// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexFloat
	}
	switch r {
	case '_', '.', '-', '+', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemFloat)
	return lx.pop()
}
|
||||
|
||||
// lexBool consumes a bool string: 'true' or 'false.
|
||||
func lexBool(lx *lexer) stateFn {
|
||||
var rs []rune
|
||||
for {
|
||||
r := lx.next()
|
||||
if !unicode.IsLetter(r) {
|
||||
lx.backup()
|
||||
break
|
||||
}
|
||||
rs = append(rs, r)
|
||||
}
|
||||
s := string(rs)
|
||||
switch s {
|
||||
case "true", "false":
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
return lx.errorf("expected value but found %q instead", s)
|
||||
}
|
||||
|
||||
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
	// Drop the '#' itself; only the comment body is emitted as text.
	lx.ignore()
	lx.emit(itemCommentStart)
	return lexComment
}
|
||||
|
||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
|
||||
// It will consume *up to* the first newline character, and pass control
|
||||
// back to the last state on the stack.
|
||||
func lexComment(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if isNL(r) || r == eof {
|
||||
lx.emit(itemText)
|
||||
return lx.pop()
|
||||
}
|
||||
lx.next()
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexSkip ignores all slurped input and moves on to the next state.
|
||||
func lexSkip(lx *lexer, nextState stateFn) stateFn {
|
||||
return func(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
return nextState
|
||||
}
|
||||
}
|
||||
|
||||
// isWhitespace reports whether r is a whitespace character (tab or space)
// according to the spec.
func isWhitespace(r rune) bool {
	return r == ' ' || r == '\t'
}

// isNL reports whether r is a newline character (LF or CR).
func isNL(r rune) bool {
	return r == '\n' || r == '\r'
}

// isDigit reports whether r is an ASCII decimal digit.
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}

// isHexadecimal reports whether r is an ASCII hexadecimal digit.
func isHexadecimal(r rune) bool {
	return ('0' <= r && r <= '9') ||
		('a' <= r && r <= 'f') ||
		('A' <= r && r <= 'F')
}

// isBareKeyChar reports whether r may appear in a bare (unquoted) key:
// ASCII letters, digits, underscore, or dash.
func isBareKeyChar(r rune) bool {
	switch {
	case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', '0' <= r && r <= '9':
		return true
	case r == '_', r == '-':
		return true
	}
	return false
}
|
||||
|
||||
func (itype itemType) String() string {
|
||||
switch itype {
|
||||
case itemError:
|
||||
return "Error"
|
||||
case itemNIL:
|
||||
return "NIL"
|
||||
case itemEOF:
|
||||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
case itemInteger:
|
||||
return "Integer"
|
||||
case itemFloat:
|
||||
return "Float"
|
||||
case itemDatetime:
|
||||
return "DateTime"
|
||||
case itemTableStart:
|
||||
return "TableStart"
|
||||
case itemTableEnd:
|
||||
return "TableEnd"
|
||||
case itemKeyStart:
|
||||
return "KeyStart"
|
||||
case itemArray:
|
||||
return "Array"
|
||||
case itemArrayEnd:
|
||||
return "ArrayEnd"
|
||||
case itemCommentStart:
|
||||
return "CommentStart"
|
||||
}
|
||||
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
|
||||
}
|
||||
|
||||
func (item item) String() string {
|
||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
||||
}
|
||||
|
|
@ -1,592 +0,0 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// parser turns the lexer's item stream into nested Go values (maps,
// slices, primitives) plus per-key TOML type metadata.
type parser struct {
	mapping map[string]interface{} // decoded top-level hash
	types   map[string]tomlType    // TOML type recorded for each key path
	lx      *lexer                 // token source

	// A list of keys in the order that they appear in the TOML data.
	ordered []Key

	// the full key for the current hash in scope
	context Key

	// the base key name for everything except hashes
	currentKey string

	// rough approximation of line number
	approxLine int

	// A map of 'key.group.names' to whether they were created implicitly.
	implicits map[string]bool
}
|
||||
|
||||
// parseError is the panic payload used for user-facing parse failures;
// parse recovers it and returns it as a normal error.
type parseError string

// Error implements the error interface.
func (pe parseError) Error() string {
	return string(pe)
}
|
||||
|
||||
// parse lexes and parses the given TOML document. Parser-internal errors
// are raised via panic(parseError); the deferred recover converts them
// into the returned err. Any other panic is a bug and is re-raised.
func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(parseError); ok {
				return
			}
			panic(r) // not one of ours: propagate
		}
	}()

	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]bool),
	}
	// Consume top-level items until the lexer reports end of input.
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}

	return p, nil
}
|
||||
|
||||
// panicf aborts parsing with a parseError carrying positional context;
// parse's deferred recover turns it into a returned error.
func (p *parser) panicf(format string, v ...interface{}) {
	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
		p.approxLine, p.current(), fmt.Sprintf(format, v...))
	panic(parseError(msg))
}

// next fetches the next lexer item, converting lexer errors into panics.
func (p *parser) next() item {
	it := p.lx.nextItem()
	if it.typ == itemError {
		p.panicf("%s", it.val)
	}
	return it
}

// bug panics with a plain string (not a parseError), so parse re-raises
// it: these indicate internal inconsistencies, not bad user input.
func (p *parser) bug(format string, v ...interface{}) {
	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}

// expect consumes the next item and asserts it has the given type.
func (p *parser) expect(typ itemType) item {
	it := p.next()
	p.assertEqual(typ, it.typ)
	return it
}

// assertEqual reports an internal bug if the two item types differ.
func (p *parser) assertEqual(expected, got itemType) {
	if expected != got {
		p.bug("Expected '%s' but got '%s'.", expected, got)
	}
}
|
||||
|
||||
// topLevel dispatches one top-level item: a comment, a [table] header,
// an [[array-of-tables]] header, or a key = value pair.
func (p *parser) topLevel(item item) {
	switch item.typ {
	case itemCommentStart:
		p.approxLine = item.line
		p.expect(itemText) // the comment body, discarded
	case itemTableStart:
		kg := p.next()
		p.approxLine = kg.line

		// Collect each dotted component of the table name.
		var key Key
		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemTableEnd, kg.typ)

		p.establishContext(key, false)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart:
		kg := p.next()
		p.approxLine = kg.line

		// Collect each dotted component of the array-table name.
		var key Key
		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemArrayTableEnd, kg.typ)

		p.establishContext(key, true)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart:
		kname := p.next()
		p.approxLine = kname.line
		p.currentKey = p.keyString(kname)

		// Decode the value and record it (plus its type) in the
		// current context.
		val, typ := p.value(p.next())
		p.setValue(p.currentKey, val)
		p.setType(p.currentKey, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		p.currentKey = ""
	default:
		p.bug("Unexpected type at top level: %s", item.typ)
	}
}
|
||||
|
||||
// Gets a string for a key (or part of a key in a table name).
|
||||
func (p *parser) keyString(it item) string {
|
||||
switch it.typ {
|
||||
case itemText:
|
||||
return it.val
|
||||
case itemString, itemMultilineString,
|
||||
itemRawString, itemRawMultilineString:
|
||||
s, _ := p.value(it)
|
||||
return s.(string)
|
||||
default:
|
||||
p.bug("Unexpected key type: %s", it.typ)
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface, together with its TOML type.
func (p *parser) value(it item) (interface{}, tomlType) {
	switch it.typ {
	case itemString:
		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
	case itemMultilineString:
		// Trim the newline after the opening delimiter and collapse
		// backslash-newline continuations before expanding escapes.
		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
	case itemRawString:
		return it.val, p.typeOfPrimitive(it)
	case itemRawMultilineString:
		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
	case itemBool:
		switch it.val {
		case "true":
			return true, p.typeOfPrimitive(it)
		case "false":
			return false, p.typeOfPrimitive(it)
		}
		p.bug("Expected boolean value, but got '%s'.", it.val)
	case itemInteger:
		if !numUnderscoresOK(it.val) {
			p.panicf("Invalid integer %q: underscores must be surrounded by digits",
				it.val)
		}
		val := strings.Replace(it.val, "_", "", -1)
		num, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			// Distinguish integer values. Normally, it'd be a bug if the lexer
			// provides an invalid integer, but it's possible that the number is
			// out of range of valid values (which the lexer cannot determine).
			// So mark the former as a bug but the latter as a legitimate user
			// error.
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Integer '%s' is out of the range of 64-bit "+
					"signed integers.", it.val)
			} else {
				p.bug("Expected integer value, but got '%s'.", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemFloat:
		// Check underscore placement in each digit run between the
		// '.', 'e', 'E' separators.
		parts := strings.FieldsFunc(it.val, func(r rune) bool {
			switch r {
			case '.', 'e', 'E':
				return true
			}
			return false
		})
		for _, part := range parts {
			if !numUnderscoresOK(part) {
				p.panicf("Invalid float %q: underscores must be "+
					"surrounded by digits", it.val)
			}
		}
		if !numPeriodsOK(it.val) {
			// As a special case, numbers like '123.' or '1.e2',
			// which are valid as far as Go/strconv are concerned,
			// must be rejected because TOML says that a fractional
			// part consists of '.' followed by 1+ digits.
			p.panicf("Invalid float %q: '.' must be followed "+
				"by one or more digits", it.val)
		}
		val := strings.Replace(it.val, "_", "", -1)
		num, err := strconv.ParseFloat(val, 64)
		if err != nil {
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Float '%s' is out of the range of 64-bit "+
					"IEEE-754 floating-point numbers.", it.val)
			} else {
				p.panicf("Invalid float value: %q", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemDatetime:
		// Try the accepted layouts from most to least specific.
		var t time.Time
		var ok bool
		var err error
		for _, format := range []string{
			"2006-01-02T15:04:05Z07:00",
			"2006-01-02T15:04:05",
			"2006-01-02",
		} {
			t, err = time.ParseInLocation(format, it.val, time.Local)
			if err == nil {
				ok = true
				break
			}
		}
		if !ok {
			p.panicf("Invalid TOML Datetime: %q.", it.val)
		}
		return t, p.typeOfPrimitive(it)
	case itemArray:
		array := make([]interface{}, 0)
		types := make([]tomlType, 0)

		// Comments inside the array are consumed and discarded.
		for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
			if it.typ == itemCommentStart {
				p.expect(itemText)
				continue
			}

			val, typ := p.value(it)
			array = append(array, val)
			types = append(types, typ)
		}
		return array, p.typeOfArray(types)
	case itemInlineTableStart:
		var (
			hash         = make(map[string]interface{})
			outerContext = p.context
			outerKey     = p.currentKey
		)

		// Temporarily extend the context so nested keys are recorded
		// under the inline table's key; restored before returning.
		p.context = append(p.context, p.currentKey)
		p.currentKey = ""
		for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
			if it.typ != itemKeyStart {
				p.bug("Expected key start but instead found %q, around line %d",
					it.val, p.approxLine)
			}
			// NOTE(review): this branch looks unreachable — the
			// itemKeyStart guard above already rejects comment items.
			// Confirm whether comments were intended to be allowed here.
			if it.typ == itemCommentStart {
				p.expect(itemText)
				continue
			}

			// retrieve key
			k := p.next()
			p.approxLine = k.line
			kname := p.keyString(k)

			// retrieve value
			p.currentKey = kname
			val, typ := p.value(p.next())
			// make sure we keep metadata up to date
			p.setType(kname, typ)
			p.ordered = append(p.ordered, p.context.add(p.currentKey))
			hash[kname] = val
		}
		p.context = outerContext
		p.currentKey = outerKey
		return hash, tomlHash
	}
	p.bug("Unexpected value type: %s", it.typ)
	panic("unreachable")
}
|
||||
|
||||
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores (so s may not start or end with one,
// and no two may be adjacent). The empty string is rejected.
func numUnderscoresOK(s string) bool {
	prevOK := false // was the previous rune a valid neighbour for '_'?
	for _, r := range s {
		if r != '_' {
			prevOK = true
			continue
		}
		if !prevOK {
			return false
		}
		prevOK = false
	}
	return prevOK
}
|
||||
|
||||
// numPeriodsOK checks whether every period in s is followed by a digit.
|
||||
func numPeriodsOK(s string) bool {
|
||||
period := false
|
||||
for _, r := range s {
|
||||
if period && !isDigit(r) {
|
||||
return false
|
||||
}
|
||||
period = r == '.'
|
||||
}
|
||||
return !period
|
||||
}
|
||||
|
||||
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
	var ok bool

	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)

	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)

		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}

		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}

	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 5)
		}

		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as "+
				"an array.", keyContext)
		}
	} else {
		// A plain table: create (or re-establish) its hash via setValue,
		// which also enforces the duplicate-key rules.
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	p.context = append(p.context, key[len(key)-1])
}
|
||||
|
||||
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
	var tmpHash interface{}
	var ok bool

	// Walk from the root down to the hash for the current context.
	hash := p.mapping
	keyContext := make(Key, 0)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			p.bug("Expected hash to have type 'map[string]interface{}', but "+
				"it has '%T' instead.", tmpHash)
		}
	}
	keyContext = append(keyContext, key)

	if _, ok := hash[key]; ok {
		// Typically, if the given key has already been set, then we have
		// to raise an error since duplicate keys are disallowed. However,
		// it's possible that a key was previously defined implicitly. In this
		// case, it is allowed to be redefined concretely. (See the
		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
		//
		// But we have to make sure to stop marking it as an implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}

		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}
	hash[key] = value
}
|
||||
|
||||
// setType sets the type of a particular value at a given key.
|
||||
// It should be called immediately AFTER setValue.
|
||||
//
|
||||
// Note that if `key` is empty, then the type given will be applied to the
|
||||
// current context (which is either a table or an array of tables).
|
||||
func (p *parser) setType(key string, typ tomlType) {
|
||||
keyContext := make(Key, 0, len(p.context)+1)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
}
|
||||
if len(key) > 0 { // allow type setting for hashes
|
||||
keyContext = append(keyContext, key)
|
||||
}
|
||||
p.types[keyContext.String()] = typ
|
||||
}
|
||||
|
||||
// addImplicit sets the given Key as having been created implicitly
// (as an intermediate table, not by an explicit [table] header).
func (p *parser) addImplicit(key Key) {
	p.implicits[key.String()] = true
}

// removeImplicit stops tagging the given key as having been implicitly
// created, so that a further redefinition becomes an error.
func (p *parser) removeImplicit(key Key) {
	p.implicits[key.String()] = false
}

// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
	return p.implicits[key.String()]
}

// current returns the full key name of the current context, used in
// error messages.
func (p *parser) current() string {
	if len(p.currentKey) == 0 {
		return p.context.String()
	}
	if len(p.context) == 0 {
		return p.currentKey
	}
	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}
|
||||
|
||||
// stripFirstNewline removes the single newline that may immediately follow
// the opening delimiter of a multiline string, as the TOML spec requires.
// Both "\n" and "\r\n" line endings are handled; previously only a bare
// "\n" was stripped, so CRLF input kept a stray "\r\n" at the start of
// the value.
func stripFirstNewline(s string) string {
	if strings.HasPrefix(s, "\n") {
		return s[1:]
	}
	if strings.HasPrefix(s, "\r\n") {
		return s[2:]
	}
	return s
}
|
||||
|
||||
// stripEscapedWhitespace implements the "line-ending backslash" of basic
// multiline strings: after each backslash-newline, all following
// whitespace (including further newlines) is discarded.
func stripEscapedWhitespace(s string) string {
	parts := strings.Split(s, "\\\n")
	for i := 1; i < len(parts); i++ {
		parts[i] = strings.TrimLeftFunc(parts[i], unicode.IsSpace)
	}
	return strings.Join(parts, "")
}
|
||||
|
||||
// replaceEscapes expands backslash escape sequences in a basic-string
// body. Invalid escapes are reported as bugs, because the lexer has
// already validated the string.
func (p *parser) replaceEscapes(str string) string {
	var replaced []rune
	s := []byte(str)
	r := 0
	for r < len(s) {
		if s[r] != '\\' {
			// Ordinary character: copy one full rune.
			c, size := utf8.DecodeRune(s[r:])
			r += size
			replaced = append(replaced, c)
			continue
		}
		r += 1
		if r >= len(s) {
			p.bug("Escape sequence at end of string.")
			return ""
		}
		switch s[r] {
		default:
			p.bug("Expected valid escape code after \\, but got %q.", s[r])
			return ""
		case 'b':
			replaced = append(replaced, rune(0x0008))
			r += 1
		case 't':
			replaced = append(replaced, rune(0x0009))
			r += 1
		case 'n':
			replaced = append(replaced, rune(0x000A))
			r += 1
		case 'f':
			replaced = append(replaced, rune(0x000C))
			r += 1
		case 'r':
			replaced = append(replaced, rune(0x000D))
			r += 1
		case '"':
			replaced = append(replaced, rune(0x0022))
			r += 1
		case '\\':
			replaced = append(replaced, rune(0x005C))
			r += 1
		case 'u':
			// At this point, we know we have a Unicode escape of the form
			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
			replaced = append(replaced, escaped)
			r += 5
		case 'U':
			// At this point, we know we have a Unicode escape of the form
			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
			replaced = append(replaced, escaped)
			r += 9
		}
	}
	return string(replaced)
}
|
||||
|
||||
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
|
||||
s := string(bs)
|
||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
||||
if err != nil {
|
||||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
|
||||
"lexer claims it's OK: %s", s, err)
|
||||
}
|
||||
if !utf8.ValidRune(rune(hex)) {
|
||||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
|
||||
}
|
||||
return rune(hex)
|
||||
}
|
||||
|
||||
func isStringType(ty itemType) bool {
|
||||
return ty == itemString || ty == itemMultilineString ||
|
||||
ty == itemRawString || ty == itemRawMultilineString
|
||||
}
|
||||
|
|
@ -1 +0,0 @@
|
|||
au BufWritePost *.go silent!make tags > /dev/null 2>&1
|
||||
|
|
@ -1,91 +0,0 @@
|
|||
package toml
|
||||
|
||||
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
	// typeString returns the canonical name of the TOML type
	// (e.g. "Integer", "Hash"); equality of types is defined on it.
	typeString() string
}
|
||||
|
||||
// typeEqual accepts any two types and returns true if they are equal.
|
||||
func typeEqual(t1, t2 tomlType) bool {
|
||||
if t1 == nil || t2 == nil {
|
||||
return false
|
||||
}
|
||||
return t1.typeString() == t2.typeString()
|
||||
}
|
||||
|
||||
func typeIsHash(t tomlType) bool {
|
||||
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
|
||||
}
|
||||
|
||||
// tomlBaseType is a string-backed tomlType implementation used for all of
// the base TOML kinds declared below.
type tomlBaseType string

func (bt tomlBaseType) typeString() string {
	return string(bt)
}

func (bt tomlBaseType) String() string {
	return bt.typeString()
}

// The full set of TOML types this decoder distinguishes.
var (
	tomlInteger   tomlBaseType = "Integer"
	tomlFloat     tomlBaseType = "Float"
	tomlDatetime  tomlBaseType = "Datetime"
	tomlString    tomlBaseType = "String"
	tomlBool      tomlBaseType = "Bool"
	tomlArray     tomlBaseType = "Array"
	tomlHash      tomlBaseType = "Hash"
	tomlArrayHash tomlBaseType = "ArrayHash"
)
|
||||
|
||||
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
|
||||
// Primitive values are: Integer, Float, Datetime, String and Bool.
|
||||
//
|
||||
// Passing a lexer item other than the following will cause a BUG message
|
||||
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
|
||||
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
||||
switch lexItem.typ {
|
||||
case itemInteger:
|
||||
return tomlInteger
|
||||
case itemFloat:
|
||||
return tomlFloat
|
||||
case itemDatetime:
|
||||
return tomlDatetime
|
||||
case itemString:
|
||||
return tomlString
|
||||
case itemMultilineString:
|
||||
return tomlString
|
||||
case itemRawString:
|
||||
return tomlString
|
||||
case itemRawMultilineString:
|
||||
return tomlString
|
||||
case itemBool:
|
||||
return tomlBool
|
||||
}
|
||||
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// typeOfArray returns a tomlType for an array given a list of types of its
|
||||
// values.
|
||||
//
|
||||
// In the current spec, if an array is homogeneous, then its type is always
|
||||
// "Array". If the array is not homogeneous, an error is generated.
|
||||
func (p *parser) typeOfArray(types []tomlType) tomlType {
|
||||
// Empty arrays are cool.
|
||||
if len(types) == 0 {
|
||||
return tomlArray
|
||||
}
|
||||
|
||||
theType := types[0]
|
||||
for _, t := range types[1:] {
|
||||
if !typeEqual(theType, t) {
|
||||
p.panicf("Array contains values of type '%s' and '%s', but "+
|
||||
"arrays must be homogeneous.", theType, t)
|
||||
}
|
||||
}
|
||||
return tomlArray
|
||||
}
|
||||
|
|
@ -1,242 +0,0 @@
|
|||
package toml
|
||||
|
||||
// Struct field handling is adapted from code in encoding/json:
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the Go distribution.
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A field represents a single field found in a struct.
type field struct {
	name  string       // the name of the field (`toml` tag included)
	tag   bool         // whether field has a `toml` tag
	index []int        // represents the depth of an anonymous field
	typ   reflect.Type // the type of the field
}

// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	if x[i].name != x[j].name {
		return x[i].name < x[j].name
	}
	// Shallower (less deeply embedded) fields sort first.
	if len(x[i].index) != len(x[j].index) {
		return len(x[i].index) < len(x[j].index)
	}
	// At equal depth, tagged fields sort before untagged ones.
	if x[i].tag != x[j].tag {
		return x[i].tag
	}
	return byIndex(x).Less(i, j)
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	for k, xik := range x[i].index {
		if k >= len(x[j].index) {
			return false
		}
		if xik != x[j].index[k] {
			return xik < x[j].index[k]
		}
	}
	// Equal prefix: the shorter index sequence sorts first.
	return len(x[i].index) < len(x[j].index)
}
|
||||
|
||||
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
// Tag parsing is delegated to getOptions (defined elsewhere in this
// package).
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" && !sf.Anonymous { // unexported
					continue
				}
				opts := getOptions(sf.Tag)
				if opts.skip {
					continue
				}
				// Extend the parent's index path with this field's slot.
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := opts.name != ""
					name := opts.name
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					f := field{name: ft.Name(), index: index, typ: ft}
					next = append(next, f)
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with TOML tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}
|
||||
|
||||
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}
|
||||
|
||||
var fieldCache struct {
|
||||
sync.RWMutex
|
||||
m map[reflect.Type][]field
|
||||
}
|
||||
|
||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
||||
func cachedTypeFields(t reflect.Type) []field {
|
||||
fieldCache.RLock()
|
||||
f := fieldCache.m[t]
|
||||
fieldCache.RUnlock()
|
||||
if f != nil {
|
||||
return f
|
||||
}
|
||||
|
||||
// Compute fields without lock.
|
||||
// Might duplicate effort but won't hold other computations back.
|
||||
f = typeFields(t)
|
||||
if f == nil {
|
||||
f = []field{}
|
||||
}
|
||||
|
||||
fieldCache.Lock()
|
||||
if fieldCache.m == nil {
|
||||
fieldCache.m = map[reflect.Type][]field{}
|
||||
}
|
||||
fieldCache.m[t] = f
|
||||
fieldCache.Unlock()
|
||||
return f
|
||||
}
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
# This is the official list of authors for copyright purposes.
|
||||
# Names should be added to this file as:
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
Andy Hochhaus <hochhaus@users.noreply.github.com>
|
||||
Antoine Pelisse <apelisse@gmail.com>
|
||||
GinFungYJF <645116215@qq.com>
|
||||
Google Inc.
|
||||
Improbable Worlds Ltd
|
||||
Jeff Hodges <jeff@somethingsimilar.com>
|
||||
John Millikin <jmillikin@gmail.com>
|
||||
Melinda Lu <melinda@vsco.co>
|
||||
Peter McAlpine <peter@aoeu.ca>
|
||||
RS <sayrer@gmail.com>
|
||||
Rodrigo Queiro <overdrigzed@gmail.com>
|
||||
Tom Payne <twpayne@gmail.com>
|
||||
Yuki Yugui Sonoda <yugui@yugui.jp>
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
# People who have agreed to one of the CLAs and can contribute patches.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# https://developers.google.com/open-source/cla/individual
|
||||
# https://developers.google.com/open-source/cla/corporate
|
||||
#
|
||||
# Names should be added to this file as:
|
||||
# Name <email address>
|
||||
|
||||
Ainsley Escorce-Jones <ains@users.noreply.github.com>
|
||||
Andy Hochhaus <hochhaus@users.noreply.github.com>
|
||||
Antoine Pelisse <apelisse@gmail.com>
|
||||
GinFungYJF <645116215@qq.com>
|
||||
Ian Cottrell <ian.the.hat@gmail.com>
|
||||
Jay Conrod <jayconrod@gmail.com>
|
||||
Jeff Grafton <ixdy@users.noreply.github.com>
|
||||
Jeff Hodges <jeff@somethingsimilar.com>
|
||||
John Millikin <jmillikin@gmail.com>
|
||||
Kristina <k.chodorow@gmail.com>
|
||||
Melinda Lu <melinda@vsco.co>
|
||||
Paul Bethe <pbethe@google.com>
|
||||
Peter McAlpine <peter@aoeu.ca>
|
||||
Rodrigo Queiro <overdrigzed@gmail.com>
|
||||
RS <sayrer@gmail.com>
|
||||
Stefan Sakalik <stefan@improbable.io>
|
||||
Tom Payne <twpayne@gmail.com>
|
||||
Yuki Yugui Sonoda <yugui@yugui.jp>
|
||||
|
|
@ -1,202 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@ -1,40 +0,0 @@
|
|||
# Build rules for the vendored bazel-gazelle command-line tool.
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "diff.go",
        "fix.go",
        "fix-update.go",
        "gazelle.go",
        "langs.go",
        "metaresolver.go",
        "print.go",
        "update-repos.go",
        "version.go",
    ],
    importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/cmd/gazelle",
    importpath = "github.com/bazelbuild/bazel-gazelle/cmd/gazelle",
    visibility = ["//visibility:private"],
    deps = [
        "//vendor/github.com/bazelbuild/bazel-gazelle/config:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/flag:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/internal/version:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/label:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/language:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/language/go:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/language/proto:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/merger:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/repo:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/resolve:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/rule:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/walk:go_default_library",
        "//vendor/github.com/pmezard/go-difflib/difflib:go_default_library",
    ],
)

# The gazelle executable, built from the library above.
go_binary(
    name = "gazelle",
    embed = [":go_default_library"],
    visibility = ["//visibility:public"],
)
|
||||
|
|
@ -1,89 +0,0 @@
|
|||
/* Copyright 2016 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
"github.com/pmezard/go-difflib/difflib"
|
||||
)
|
||||
|
||||
var exitError = fmt.Errorf("encountered changes while running diff")
|
||||
|
||||
func diffFile(c *config.Config, f *rule.File) error {
|
||||
rel, err := filepath.Rel(c.RepoRoot, f.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting old path for file %q: %v", f.Path, err)
|
||||
}
|
||||
rel = filepath.ToSlash(rel)
|
||||
|
||||
date := "1970-01-01 00:00:00.000000000 +0000"
|
||||
diff := difflib.UnifiedDiff{
|
||||
Context: 3,
|
||||
FromDate: date,
|
||||
ToDate: date,
|
||||
}
|
||||
|
||||
if oldContent, err := ioutil.ReadFile(f.Path); err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("error reading original file: %v", err)
|
||||
} else if err != nil {
|
||||
diff.FromFile = "/dev/null"
|
||||
} else if err == nil {
|
||||
diff.A = difflib.SplitLines(string(oldContent))
|
||||
if c.ReadBuildFilesDir == "" {
|
||||
path, err := filepath.Rel(c.RepoRoot, f.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting old path for file %q: %v", f.Path, err)
|
||||
}
|
||||
diff.FromFile = filepath.ToSlash(path)
|
||||
} else {
|
||||
diff.FromFile = f.Path
|
||||
}
|
||||
}
|
||||
|
||||
newContent := f.Format()
|
||||
diff.B = difflib.SplitLines(string(newContent))
|
||||
outPath := findOutputPath(c, f)
|
||||
if c.WriteBuildFilesDir == "" {
|
||||
path, err := filepath.Rel(c.RepoRoot, f.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting new path for file %q: %v", f.Path, err)
|
||||
}
|
||||
diff.ToFile = filepath.ToSlash(path)
|
||||
} else {
|
||||
diff.ToFile = outPath
|
||||
}
|
||||
|
||||
uc := getUpdateConfig(c)
|
||||
var out io.Writer = os.Stdout
|
||||
if uc.patchPath != "" {
|
||||
out = &uc.patchBuffer
|
||||
}
|
||||
if err := difflib.WriteUnifiedDiff(out, diff); err != nil {
|
||||
return fmt.Errorf("error diffing %s: %v", f.Path, err)
|
||||
}
|
||||
if ds, _ := difflib.GetUnifiedDiffString(diff); ds != "" {
|
||||
return exitError
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,592 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
gzflag "github.com/bazelbuild/bazel-gazelle/flag"
|
||||
"github.com/bazelbuild/bazel-gazelle/label"
|
||||
"github.com/bazelbuild/bazel-gazelle/language"
|
||||
"github.com/bazelbuild/bazel-gazelle/merger"
|
||||
"github.com/bazelbuild/bazel-gazelle/repo"
|
||||
"github.com/bazelbuild/bazel-gazelle/resolve"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
"github.com/bazelbuild/bazel-gazelle/walk"
|
||||
)
|
||||
|
||||
// updateConfig holds configuration information needed to run the fix and
// update commands. This includes everything in config.Config, but it also
// includes some additional fields that aren't relevant to other packages.
type updateConfig struct {
	dirs           []string     // absolute directories to visit (from command-line args)
	emit           emitFunc     // output strategy selected by the -mode flag
	repos          []repo.Repo  // known external repos, for dependency resolution
	workspaceFiles []*rule.File // WORKSPACE plus any declared macro files
	walkMode       walk.Mode    // how aggressively to descend into subdirectories
	patchPath      string       // if set with -mode=diff, write output here
	patchBuffer    bytes.Buffer // accumulates diff output when patchPath is set
}

// emitFunc writes or prints a merged build file, according to -mode.
type emitFunc func(c *config.Config, f *rule.File) error

// modeFromName maps each valid -mode flag value to its output behavior.
var modeFromName = map[string]emitFunc{
	"print": printFile,
	"fix":   fixFile,
	"diff":  diffFile,
}

// updateName is the config.Config.Exts key under which the updateConfig
// extension is stored by RegisterFlags.
const updateName = "_update"

// getUpdateConfig retrieves the updateConfig previously stored on c.
// It panics if RegisterFlags was never called.
func getUpdateConfig(c *config.Config) *updateConfig {
	return c.Exts[updateName].(*updateConfig)
}
|
||||
|
||||
// updateConfigurer registers and validates the command-line flags that
// are specific to the fix and update commands.
type updateConfigurer struct {
	mode           string   // -mode: "print", "fix", or "diff"
	recursive      bool     // -r: visit subdirectories recursively
	knownImports   []string // -known_import: import paths to skip during external resolution
	repoConfigPath string   // -repo_config: file listing external repositories
}

// RegisterFlags implements config.Configurer. It installs the updateConfig
// extension on c and declares the fix/update flags on fs.
func (ucr *updateConfigurer) RegisterFlags(fs *flag.FlagSet, cmd string, c *config.Config) {
	uc := &updateConfig{}
	c.Exts[updateName] = uc

	// "fix" enables destructive rewrites of build files; "update" does not.
	c.ShouldFix = cmd == "fix"

	fs.StringVar(&ucr.mode, "mode", "fix", "print: prints all of the updated BUILD files\n\tfix: rewrites all of the BUILD files in place\n\tdiff: computes the rewrite but then just does a diff")
	fs.BoolVar(&ucr.recursive, "r", true, "when true, gazelle will update subdirectories recursively")
	fs.StringVar(&uc.patchPath, "patch", "", "when set with -mode=diff, gazelle will write to a file instead of stdout")
	fs.Var(&gzflag.MultiFlag{Values: &ucr.knownImports}, "known_import", "import path for which external resolution is skipped (can specify multiple times)")
	fs.StringVar(&ucr.repoConfigPath, "repo_config", "", "file where Gazelle should load repository configuration. Defaults to WORKSPACE.")
}
|
||||
|
||||
// CheckFlags implements config.Configurer. It validates the fix/update
// flags, resolves the target directories to absolute, symlink-free paths
// inside the repository, selects the walk mode, and loads repository
// declarations from the repo config file (WORKSPACE by default).
func (ucr *updateConfigurer) CheckFlags(fs *flag.FlagSet, c *config.Config) error {
	uc := getUpdateConfig(c)

	var ok bool
	uc.emit, ok = modeFromName[ucr.mode]
	if !ok {
		return fmt.Errorf("unrecognized emit mode: %q", ucr.mode)
	}
	// -patch only makes sense when diff output is being produced.
	if uc.patchPath != "" && ucr.mode != "diff" {
		return fmt.Errorf("-patch set but -mode is %s, not diff", ucr.mode)
	}

	// Positional arguments are the directories to update; default to ".".
	dirs := fs.Args()
	if len(dirs) == 0 {
		dirs = []string{"."}
	}
	uc.dirs = make([]string, len(dirs))
	for i := range dirs {
		dir, err := filepath.Abs(dirs[i])
		if err != nil {
			return fmt.Errorf("%s: failed to find absolute path: %v", dirs[i], err)
		}
		dir, err = filepath.EvalSymlinks(dir)
		if err != nil {
			return fmt.Errorf("%s: failed to resolve symlinks: %v", dirs[i], err)
		}
		if !isDescendingDir(dir, c.RepoRoot) {
			return fmt.Errorf("dir %q is not a subdirectory of repo root %q", dir, c.RepoRoot)
		}
		uc.dirs[i] = dir
	}

	// Choose how walk.Walk descends: -r forces full recursion; otherwise
	// indexing still requires visiting (but not updating) every directory.
	if ucr.recursive {
		uc.walkMode = walk.VisitAllUpdateSubdirsMode
	} else if c.IndexLibraries {
		uc.walkMode = walk.VisitAllUpdateDirsMode
	} else {
		uc.walkMode = walk.UpdateDirsMode
	}

	// Load the repo configuration file (WORKSPACE by default) to find out
	// names and prefixes of other go_repositories. This affects external
	// dependency resolution for Go.
	// TODO(jayconrod): Go-specific code should be moved to language/go.
	if ucr.repoConfigPath == "" {
		ucr.repoConfigPath = filepath.Join(c.RepoRoot, "WORKSPACE")
	}
	// A missing repo config file is not an error; Gazelle just proceeds
	// without externally declared repositories.
	repoConfigFile, err := rule.LoadWorkspaceFile(ucr.repoConfigPath, "")
	if err != nil && !os.IsNotExist(err) {
		return err
	} else if err == nil {
		c.Repos, _, err = repo.ListRepositories(repoConfigFile)
		if err != nil {
			return err
		}
	}
	// -known_import values become synthetic repos so resolution skips them.
	for _, imp := range ucr.knownImports {
		uc.repos = append(uc.repos, repo.Repo{
			Name:     label.ImportPathToBazelRepoName(imp),
			GoPrefix: imp,
		})
	}
	for _, r := range c.Repos {
		if r.Kind() == "go_repository" {
			uc.repos = append(uc.repos, repo.Repo{
				Name:     r.Name(),
				GoPrefix: r.AttrString("importpath"),
			})
		}
	}

	// If the repo configuration file is not WORKSPACE, also load WORKSPACE
	// and any declared macro files so we can apply fixes.
	workspacePath := filepath.Join(c.RepoRoot, "WORKSPACE")
	var workspace *rule.File
	if ucr.repoConfigPath == workspacePath {
		workspace = repoConfigFile
	} else {
		workspace, err = rule.LoadWorkspaceFile(workspacePath, "")
		if err != nil && !os.IsNotExist(err) {
			return err
		}
	}
	if workspace != nil {
		c.RepoName = findWorkspaceName(workspace)
		_, repoFileMap, err := repo.ListRepositories(workspace)
		if err != nil {
			return err
		}
		// Deduplicate the macro files (several repos may share one) and
		// sort by path for deterministic processing order.
		seen := make(map[*rule.File]bool)
		for _, f := range repoFileMap {
			if !seen[f] {
				uc.workspaceFiles = append(uc.workspaceFiles, f)
				seen[f] = true
			}
		}
		sort.Slice(uc.workspaceFiles, func(i, j int) bool {
			return uc.workspaceFiles[i].Path < uc.workspaceFiles[j].Path
		})
	}

	return nil
}
|
||||
|
||||
// KnownDirectives implements config.Configurer; fix/update declares no
// build-file directives of its own.
func (ucr *updateConfigurer) KnownDirectives() []string { return nil }

// Configure implements config.Configurer; fix/update needs no
// per-directory configuration.
func (ucr *updateConfigurer) Configure(c *config.Config, rel string, f *rule.File) {}
|
||||
// visitRecord stores information about a directory visited with
// packages.Walk.
type visitRecord struct {
	// pkgRel is the slash-separated path to the visited directory, relative to
	// the repository root. "" for the repository root itself.
	pkgRel string

	// c is the configuration for the directory with directives applied.
	c *config.Config

	// rules is a list of generated Go rules.
	rules []*rule.Rule

	// imports contains opaque import information for each rule in rules.
	imports []interface{}

	// empty is a list of empty Go rules that may be deleted.
	empty []*rule.Rule

	// file is the build file being processed.
	file *rule.File

	// mappedKinds are mapped kinds used during this visit.
	mappedKinds    []config.MappedKind
	mappedKindInfo map[string]rule.KindInfo
}

// byPkgRel sorts visitRecords by repository-relative package path.
type byPkgRel []visitRecord

func (vs byPkgRel) Len() int           { return len(vs) }
func (vs byPkgRel) Less(i, j int) bool { return vs[i].pkgRel < vs[j].pkgRel }
func (vs byPkgRel) Swap(i, j int)      { vs[i], vs[j] = vs[j], vs[i] }

// genericLoads declares the load statement needed for the gazelle rule
// itself, independent of any language extension.
var genericLoads = []rule.LoadInfo{
	{
		Name:    "@bazel_gazelle//:def.bzl",
		Symbols: []string{"gazelle"},
	},
}
|
||||
|
||||
// runFixUpdate is the entry point for both the "fix" and "update"
// commands. It assembles the configurers and language extensions, walks
// the repository generating and merging rules, resolves dependencies
// using the rule index, and finally emits each merged build file via the
// emit function chosen by -mode. The named return lets the deferred
// remote-cache cleanup surface its error.
func runFixUpdate(cmd command, args []string) (err error) {
	// Core configurers plus one per language extension.
	cexts := make([]config.Configurer, 0, len(languages)+3)
	cexts = append(cexts,
		&config.CommonConfigurer{},
		&updateConfigurer{},
		&walk.Configurer{},
		&resolve.Configurer{})
	mrslv := newMetaResolver()
	kinds := make(map[string]rule.KindInfo)
	loads := genericLoads
	for _, lang := range languages {
		cexts = append(cexts, lang)
		for kind, info := range lang.Kinds() {
			mrslv.AddBuiltin(kind, lang)
			kinds[kind] = info
		}
		loads = append(loads, lang.Loads()...)
	}
	ruleIndex := resolve.NewRuleIndex(mrslv.Resolver)

	c, err := newFixUpdateConfiguration(cmd, args, cexts)
	if err != nil {
		return err
	}

	if err := fixRepoFiles(c, loads); err != nil {
		return err
	}

	if cmd == fixCmd {
		// Only check the version when "fix" is run. Generated build files
		// frequently work with older version of rules_go, and we don't want to
		// nag too much since there's no way to disable this warning.
		checkRulesGoVersion(c.RepoRoot)
	}

	// Visit all directories in the repository.
	var visits []visitRecord
	uc := getUpdateConfig(c)
	walk.Walk(c, cexts, uc.dirs, uc.walkMode, func(dir, rel string, c *config.Config, update bool, f *rule.File, subdirs, regularFiles, genFiles []string) {
		// If this file is ignored or if Gazelle was not asked to update this
		// directory, just index the build file and move on.
		if !update {
			if c.IndexLibraries && f != nil {
				for _, r := range f.Rules {
					ruleIndex.AddRule(c, r, f)
				}
			}
			return
		}

		// Fix any problems in the file.
		if f != nil {
			for _, l := range languages {
				l.Fix(c, f)
			}
		}

		// Generate rules. Each language sees what earlier languages produced
		// via OtherEmpty/OtherGen, and must return one import per rule.
		var empty, gen []*rule.Rule
		var imports []interface{}
		for _, l := range languages {
			res := l.GenerateRules(language.GenerateArgs{
				Config:       c,
				Dir:          dir,
				Rel:          rel,
				File:         f,
				Subdirs:      subdirs,
				RegularFiles: regularFiles,
				GenFiles:     genFiles,
				OtherEmpty:   empty,
				OtherGen:     gen})
			if len(res.Gen) != len(res.Imports) {
				log.Panicf("%s: language %s generated %d rules but returned %d imports", rel, l.Name(), len(res.Gen), len(res.Imports))
			}
			empty = append(empty, res.Empty...)
			gen = append(gen, res.Gen...)
			imports = append(imports, res.Imports...)
		}
		if f == nil && len(gen) == 0 {
			return
		}

		// Apply and record relevant kind mappings.
		var (
			mappedKinds    []config.MappedKind
			mappedKindInfo = make(map[string]rule.KindInfo)
		)
		for _, r := range gen {
			if repl, ok := c.KindMap[r.Kind()]; ok {
				mappedKindInfo[repl.KindName] = kinds[r.Kind()]
				mappedKinds = append(mappedKinds, repl)
				mrslv.MappedKind(rel, repl)
				r.SetKind(repl.KindName)
			}
		}

		// Insert or merge rules into the build file.
		if f == nil {
			f = rule.EmptyFile(filepath.Join(dir, c.DefaultBuildFileName()), rel)
			for _, r := range gen {
				r.Insert(f)
			}
		} else {
			merger.MergeFile(f, empty, gen, merger.PreResolve,
				unionKindInfoMaps(kinds, mappedKindInfo))
		}
		visits = append(visits, visitRecord{
			pkgRel:         rel,
			c:              c,
			rules:          gen,
			imports:        imports,
			empty:          empty,
			file:           f,
			mappedKinds:    mappedKinds,
			mappedKindInfo: mappedKindInfo,
		})

		// Add library rules to the dependency resolution table.
		if c.IndexLibraries {
			for _, r := range f.Rules {
				ruleIndex.AddRule(c, r, f)
			}
		}
	})

	// Finish building the index for dependency resolution.
	ruleIndex.Finish()

	// Resolve dependencies. The deferred cleanup reports its error through
	// the named return, but only if no earlier error occurred.
	rc, cleanupRc := repo.NewRemoteCache(uc.repos)
	defer func() {
		if cerr := cleanupRc(); err == nil && cerr != nil {
			err = cerr
		}
	}()
	for _, v := range visits {
		for i, r := range v.rules {
			from := label.New(c.RepoName, v.pkgRel, r.Name())
			mrslv.Resolver(r, v.pkgRel).Resolve(v.c, ruleIndex, rc, r, v.imports[i], from)
		}
		merger.MergeFile(v.file, v.empty, v.rules, merger.PostResolve,
			unionKindInfoMaps(kinds, v.mappedKindInfo))
	}

	// Emit merged files. exitError (from -mode=diff) is remembered rather
	// than aborting so every file is still emitted; other emit errors are
	// logged and processing continues.
	var exit error
	for _, v := range visits {
		merger.FixLoads(v.file, applyKindMappings(v.mappedKinds, loads))
		if err := uc.emit(v.c, v.file); err != nil {
			if err == exitError {
				exit = err
			} else {
				log.Print(err)
			}
		}
	}
	if uc.patchPath != "" {
		if err := ioutil.WriteFile(uc.patchPath, uc.patchBuffer.Bytes(), 0666); err != nil {
			return err
		}
	}

	return exit
}
|
||||
|
||||
func newFixUpdateConfiguration(cmd command, args []string, cexts []config.Configurer) (*config.Config, error) {
|
||||
c := config.New()
|
||||
|
||||
fs := flag.NewFlagSet("gazelle", flag.ContinueOnError)
|
||||
// Flag will call this on any parse error. Don't print usage unless
|
||||
// -h or -help were passed explicitly.
|
||||
fs.Usage = func() {}
|
||||
|
||||
for _, cext := range cexts {
|
||||
cext.RegisterFlags(fs, cmd.String(), c)
|
||||
}
|
||||
|
||||
if err := fs.Parse(args); err != nil {
|
||||
if err == flag.ErrHelp {
|
||||
fixUpdateUsage(fs)
|
||||
return nil, err
|
||||
}
|
||||
// flag already prints the error; don't print it again.
|
||||
log.Fatal("Try -help for more information.")
|
||||
}
|
||||
|
||||
for _, cext := range cexts {
|
||||
if err := cext.CheckFlags(fs, c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// fixUpdateUsage prints help text for the fix and update commands to stderr,
// followed by the defaults of all registered flags.
func fixUpdateUsage(fs *flag.FlagSet) {
	fmt.Fprint(os.Stderr, `usage: gazelle [fix|update] [flags...] [package-dirs...]

The update command creates new build files and update existing BUILD files
when needed.

The fix command also creates and updates build files, and in addition, it may
make potentially breaking updates to usage of rules. For example, it may
delete obsolete rules or rename existing rules.

There are several output modes which can be selected with the -mode flag. The
output mode determines what Gazelle does with updated BUILD files.

fix (default) - write updated BUILD files back to disk.
print - print updated BUILD files to stdout.
diff - diff updated BUILD files against existing files in unified format.

Gazelle accepts a list of paths to Go package directories to process (defaults
to the working directory if none are given). It recursively traverses
subdirectories. All directories must be under the directory specified by
-repo_root; if -repo_root is not given, this is the directory containing the
WORKSPACE file.

FLAGS:

`)
	fs.PrintDefaults()
}
|
||||
|
||||
func fixRepoFiles(c *config.Config, loads []rule.LoadInfo) error {
|
||||
uc := getUpdateConfig(c)
|
||||
if !c.ShouldFix {
|
||||
return nil
|
||||
}
|
||||
shouldFix := false
|
||||
for _, d := range uc.dirs {
|
||||
if d == c.RepoRoot {
|
||||
shouldFix = true
|
||||
}
|
||||
}
|
||||
if !shouldFix {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, f := range uc.workspaceFiles {
|
||||
merger.FixLoads(f, loads)
|
||||
if f.Path == filepath.Join(c.RepoRoot, "WORKSPACE") {
|
||||
removeLegacyGoRepository(f)
|
||||
if err := merger.CheckGazelleLoaded(f); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := uc.emit(c, f); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeLegacyGoRepository removes loads of go_repository from
|
||||
// @io_bazel_rules_go. FixLoads should be called after this; it will load from
|
||||
// @bazel_gazelle.
|
||||
func removeLegacyGoRepository(f *rule.File) {
|
||||
for _, l := range f.Loads {
|
||||
if l.Name() == "@io_bazel_rules_go//go:def.bzl" {
|
||||
l.Remove("go_repository")
|
||||
if l.IsEmpty() {
|
||||
l.Delete()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func findWorkspaceName(f *rule.File) string {
|
||||
for _, r := range f.Rules {
|
||||
if r.Kind() == "workspace" {
|
||||
return r.Name()
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// isDescendingDir reports whether dir is root itself or a directory somewhere
// below root.
func isDescendingDir(dir, root string) bool {
	rel, err := filepath.Rel(root, dir)
	switch {
	case err != nil:
		return false
	case rel == ".":
		return true
	default:
		// A relative path that climbs out of root starts with "..".
		return !strings.HasPrefix(rel, "..")
	}
}
|
||||
|
||||
func findOutputPath(c *config.Config, f *rule.File) string {
|
||||
if c.ReadBuildFilesDir == "" && c.WriteBuildFilesDir == "" {
|
||||
return f.Path
|
||||
}
|
||||
baseDir := c.WriteBuildFilesDir
|
||||
if c.WriteBuildFilesDir == "" {
|
||||
baseDir = c.RepoRoot
|
||||
}
|
||||
outputDir := filepath.Join(baseDir, filepath.FromSlash(f.Pkg))
|
||||
defaultOutputPath := filepath.Join(outputDir, c.DefaultBuildFileName())
|
||||
files, err := ioutil.ReadDir(outputDir)
|
||||
if err != nil {
|
||||
// Ignore error. Directory probably doesn't exist.
|
||||
return defaultOutputPath
|
||||
}
|
||||
outputPath := rule.MatchBuildFileName(outputDir, c.ValidBuildFileNames, files)
|
||||
if outputPath == "" {
|
||||
return defaultOutputPath
|
||||
}
|
||||
return outputPath
|
||||
}
|
||||
|
||||
func unionKindInfoMaps(a, b map[string]rule.KindInfo) map[string]rule.KindInfo {
|
||||
if len(a) == 0 {
|
||||
return b
|
||||
}
|
||||
if len(b) == 0 {
|
||||
return a
|
||||
}
|
||||
result := make(map[string]rule.KindInfo, len(a)+len(b))
|
||||
for _, m := range []map[string]rule.KindInfo{a, b} {
|
||||
for k, v := range m {
|
||||
result[k] = v
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// applyKindMappings returns a copy of LoadInfo that includes c.KindMap.
|
||||
func applyKindMappings(mappedKinds []config.MappedKind, loads []rule.LoadInfo) []rule.LoadInfo {
|
||||
if len(mappedKinds) == 0 {
|
||||
return loads
|
||||
}
|
||||
|
||||
// Add new RuleInfos or replace existing ones with merged ones.
|
||||
mappedLoads := make([]rule.LoadInfo, len(loads))
|
||||
copy(mappedLoads, loads)
|
||||
for _, mappedKind := range mappedKinds {
|
||||
mappedLoads = appendOrMergeKindMapping(mappedLoads, mappedKind)
|
||||
}
|
||||
return mappedLoads
|
||||
}
|
||||
|
||||
// appendOrMergeKindMapping adds LoadInfo for the given replacement.
|
||||
func appendOrMergeKindMapping(mappedLoads []rule.LoadInfo, mappedKind config.MappedKind) []rule.LoadInfo {
|
||||
// If mappedKind.KindLoad already exists in the list, create a merged copy.
|
||||
for i, load := range mappedLoads {
|
||||
if load.Name == mappedKind.KindLoad {
|
||||
mappedLoads[i].Symbols = append(load.Symbols, mappedKind.KindName)
|
||||
return mappedLoads
|
||||
}
|
||||
}
|
||||
|
||||
// Add a new LoadInfo.
|
||||
return append(mappedLoads, rule.LoadInfo{
|
||||
Name: mappedKind.KindLoad,
|
||||
Symbols: []string{mappedKind.KindName},
|
||||
})
|
||||
}
|
||||
|
|
@ -1,33 +0,0 @@
|
|||
/* Copyright 2016 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
func fixFile(c *config.Config, f *rule.File) error {
|
||||
outPath := findOutputPath(c, f)
|
||||
if err := os.MkdirAll(filepath.Dir(outPath), 0777); err != nil {
|
||||
return err
|
||||
}
|
||||
return ioutil.WriteFile(outPath, f.Format(), 0666)
|
||||
}
|
||||
|
|
@ -1,124 +0,0 @@
|
|||
/* Copyright 2016 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Command gazelle is a BUILD file generator for Go projects.
|
||||
// See "gazelle --help" for more details.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// command identifies one of Gazelle's subcommands.
type command int

const (
	updateCmd command = iota
	fixCmd
	updateReposCmd
	helpCmd
)

// commandFromName maps command-line argument names to command values.
var commandFromName = map[string]command{
	"fix":          fixCmd,
	"help":         helpCmd,
	"update":       updateCmd,
	"update-repos": updateReposCmd,
}

// nameFromCommand gives the printable name for each command value, indexed
// by the command constant.
var nameFromCommand = []string{
	// keep in sync with definition above
	"update",
	"fix",
	"update-repos",
	"help",
}

// String returns the command's name as written on the command line.
func (cmd command) String() string {
	return nameFromCommand[cmd]
}
|
||||
|
||||
func main() {
|
||||
log.SetPrefix("gazelle: ")
|
||||
log.SetFlags(0) // don't print timestamps
|
||||
|
||||
if err := run(os.Args[1:]); err != nil && err != flag.ErrHelp {
|
||||
if err == exitError {
|
||||
os.Exit(1)
|
||||
} else {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func run(args []string) error {
|
||||
cmd := updateCmd
|
||||
if len(args) == 1 && (args[0] == "-h" || args[0] == "-help" || args[0] == "--help") {
|
||||
cmd = helpCmd
|
||||
} else if len(args) > 0 {
|
||||
c, ok := commandFromName[args[0]]
|
||||
if ok {
|
||||
cmd = c
|
||||
args = args[1:]
|
||||
}
|
||||
}
|
||||
|
||||
switch cmd {
|
||||
case fixCmd, updateCmd:
|
||||
return runFixUpdate(cmd, args)
|
||||
case helpCmd:
|
||||
return help()
|
||||
case updateReposCmd:
|
||||
return updateRepos(args)
|
||||
default:
|
||||
log.Panicf("unknown command: %v", cmd)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// help prints the top-level usage message to stderr and returns flag.ErrHelp
// so the caller treats the run as a help request rather than a failure.
func help() error {
	fmt.Fprint(os.Stderr, `usage: gazelle <command> [args...]

Gazelle is a BUILD file generator for Go projects. It can create new BUILD files
for a project that follows "go build" conventions, and it can update BUILD files
if they already exist. It can be invoked directly in a project workspace, or
it can be run on an external dependency during the build as part of the
go_repository rule.

Gazelle may be run with one of the commands below. If no command is given,
Gazelle defaults to "update".

update - Gazelle will create new BUILD files or update existing BUILD files
if needed.
fix - in addition to the changes made in update, Gazelle will make potentially
breaking changes. For example, it may delete obsolete rules or rename
existing rules.
update-repos - updates repository rules in the WORKSPACE file. Run with
-h for details.
help - show this message.

For usage information for a specific command, run the command with the -h flag.
For example:

gazelle update -h

Gazelle is under active development, and its interface may change
without notice.

`)
	return flag.ErrHelp
}
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/bazelbuild/bazel-gazelle/language"
|
||||
"github.com/bazelbuild/bazel-gazelle/language/go"
|
||||
"github.com/bazelbuild/bazel-gazelle/language/proto"
|
||||
)
|
||||
|
||||
// languages lists the language extensions compiled into this Gazelle binary.
// NOTE(review): proto is listed before go; the ordering looks deliberate —
// confirm before reordering.
var languages = []language.Language{
	proto.NewLanguage(),
	golang.NewLanguage(),
}
|
||||
|
|
@ -1,61 +0,0 @@
|
|||
/* Copyright 2019 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/resolve"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// metaResolver provides a rule.Resolver for any rule.Rule.
type metaResolver struct {
	// builtins maps rule kind names to the resolver that handles them.
	builtins map[string]resolve.Resolver

	// mappedKinds records, keyed by package rel path (pkgRel), the kind
	// replacements that were applied while generating rules in that package.
	mappedKinds map[string][]config.MappedKind
}
|
||||
|
||||
func newMetaResolver() *metaResolver {
|
||||
return &metaResolver{
|
||||
builtins: make(map[string]resolve.Resolver),
|
||||
mappedKinds: make(map[string][]config.MappedKind),
|
||||
}
|
||||
}
|
||||
|
||||
// AddBuiltin registers the resolver that handles the given builtin rule kind.
func (mr *metaResolver) AddBuiltin(kindName string, resolver resolve.Resolver) {
	mr.builtins[kindName] = resolver
}
|
||||
|
||||
// MappedKind records the fact that the given kind mapping was applied while
// processing the package identified by pkgRel.
func (mr *metaResolver) MappedKind(pkgRel string, kind config.MappedKind) {
	mr.mappedKinds[pkgRel] = append(mr.mappedKinds[pkgRel], kind)
}
|
||||
|
||||
// Resolver returns a resolver for the given rule and package, and a bool
|
||||
// indicating whether one was found. Empty string may be passed for pkgRel,
|
||||
// which results in consulting the builtin kinds only.
|
||||
func (mr metaResolver) Resolver(r *rule.Rule, pkgRel string) resolve.Resolver {
|
||||
for _, mappedKind := range mr.mappedKinds[pkgRel] {
|
||||
if mappedKind.KindName == r.Kind() {
|
||||
return mr.builtins[mappedKind.FromKind]
|
||||
}
|
||||
}
|
||||
return mr.builtins[r.Kind()]
|
||||
}
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
/* Copyright 2016 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
func printFile(c *config.Config, f *rule.File) error {
|
||||
content := f.Format()
|
||||
_, err := os.Stdout.Write(content)
|
||||
return err
|
||||
}
|
||||
|
|
@ -1,368 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/language"
|
||||
"github.com/bazelbuild/bazel-gazelle/merger"
|
||||
"github.com/bazelbuild/bazel-gazelle/repo"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// updateReposConfig holds the parsed options and loaded workspace state for
// the update-repos command.
type updateReposConfig struct {
	repoFilePath  string                // -from_file: lock/config file to import repos from
	importPaths   []string              // positional import paths to add or update
	macroFileName string                // -to_macro: macro file name (set together with macroDefName)
	macroDefName  string                // -to_macro: macro function name inside the file
	pruneRules    bool                  // -prune: delete rules with no equivalent in the imported file
	workspace     *rule.File            // parsed WORKSPACE file
	repoFileMap   map[string]*rule.File // repository rule name -> file that defines it
}
|
||||
|
||||
// updateReposName is the key under which updateReposConfig is stored in
// config.Config.Exts.
const updateReposName = "_update-repos"

// getUpdateReposConfig retrieves the updateReposConfig stored by
// RegisterFlags; it panics if RegisterFlags has not run yet.
func getUpdateReposConfig(c *config.Config) *updateReposConfig {
	return c.Exts[updateReposName].(*updateReposConfig)
}
|
||||
|
||||
// updateReposConfigurer registers and validates the update-repos command's
// flags (config.Configurer implementation).
type updateReposConfigurer struct{}

// macroFlag is a flag.Value that parses the -to_macro argument
// ("macroFile%defName") into the two destination strings it points at.
type macroFlag struct {
	macroFileName *string
	macroDefName  *string
}
|
||||
|
||||
func (f macroFlag) Set(value string) error {
|
||||
args := strings.Split(value, "%")
|
||||
if len(args) != 2 {
|
||||
return fmt.Errorf("Failure parsing to_macro: %s, expected format is macroFile%%defName", value)
|
||||
}
|
||||
if strings.HasPrefix(args[0], "..") {
|
||||
return fmt.Errorf("Failure parsing to_macro: %s, macro file path %s should not start with \"..\"", value, args[0])
|
||||
}
|
||||
*f.macroFileName = args[0]
|
||||
*f.macroDefName = args[1]
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns the flag's default textual representation; always empty.
func (f macroFlag) String() string {
	return ""
}
|
||||
|
||||
func (*updateReposConfigurer) RegisterFlags(fs *flag.FlagSet, cmd string, c *config.Config) {
|
||||
uc := &updateReposConfig{}
|
||||
c.Exts[updateReposName] = uc
|
||||
fs.StringVar(&uc.repoFilePath, "from_file", "", "Gazelle will translate repositories listed in this file into repository rules in WORKSPACE or a .bzl macro function. Gopkg.lock and go.mod files are supported")
|
||||
fs.Var(macroFlag{macroFileName: &uc.macroFileName, macroDefName: &uc.macroDefName}, "to_macro", "Tells Gazelle to write repository rules into a .bzl macro function rather than the WORKSPACE file. . The expected format is: macroFile%defName")
|
||||
fs.BoolVar(&uc.pruneRules, "prune", false, "When enabled, Gazelle will remove rules that no longer have equivalent repos in the Gopkg.lock/go.mod file. Can only used with -from_file.")
|
||||
}
|
||||
|
||||
// CheckFlags validates the update-repos flag combination after parsing, then
// loads the WORKSPACE file and the repository rules it declares into the
// command configuration.
func (*updateReposConfigurer) CheckFlags(fs *flag.FlagSet, c *config.Config) error {
	uc := getUpdateReposConfig(c)
	switch {
	case uc.repoFilePath != "":
		// -from_file mode: repos come from the file, so positional arguments
		// are not allowed.
		if len(fs.Args()) != 0 {
			return fmt.Errorf("got %d positional arguments with -from_file; wanted 0.\nTry -help for more information.", len(fs.Args()))
		}

	default:
		// Import-path mode: at least one positional argument is required,
		// and -prune only makes sense together with -from_file.
		if len(fs.Args()) == 0 {
			return fmt.Errorf("no repositories specified\nTry -help for more information.")
		}
		if uc.pruneRules {
			return fmt.Errorf("the -prune option can only be used with -from_file")
		}
		uc.importPaths = fs.Args()
	}

	var err error
	workspacePath := filepath.Join(c.RepoRoot, "WORKSPACE")
	uc.workspace, err = rule.LoadWorkspaceFile(workspacePath, "")
	if err != nil {
		return fmt.Errorf("loading WORKSPACE file: %v", err)
	}
	// Collect the existing repository rules and remember which file each one
	// came from, so updates can be written back to the right place.
	c.Repos, uc.repoFileMap, err = repo.ListRepositories(uc.workspace)
	if err != nil {
		return fmt.Errorf("loading WORKSPACE file: %v", err)
	}

	return nil
}
|
||||
|
||||
// KnownDirectives returns nil: update-repos defines no build-file directives.
func (*updateReposConfigurer) KnownDirectives() []string { return nil }

// Configure is a no-op: update-repos has no per-directory configuration.
func (*updateReposConfigurer) Configure(c *config.Config, rel string, f *rule.File) {}
|
||||
|
||||
// updateRepos implements the update-repos command: it generates or imports
// repository rules, merges them into the files they belong to (WORKSPACE,
// the files that already declare them, or a -to_macro file), fixes load
// statements, and saves the results. The named err return lets the deferred
// remote-cache cleanup surface its error when nothing else failed.
func updateRepos(args []string) (err error) {
	// Build configuration with all languages.
	cexts := make([]config.Configurer, 0, len(languages)+2)
	cexts = append(cexts, &config.CommonConfigurer{}, &updateReposConfigurer{})
	kinds := make(map[string]rule.KindInfo)
	loads := []rule.LoadInfo{}
	for _, lang := range languages {
		cexts = append(cexts, lang)
		loads = append(loads, lang.Loads()...)
		for kind, info := range lang.Kinds() {
			kinds[kind] = info
		}
	}
	c, err := newUpdateReposConfiguration(args, cexts)
	if err != nil {
		return err
	}
	uc := getUpdateReposConfig(c)

	// TODO(jayconrod): move Go-specific RemoteCache logic to language/go.
	var knownRepos []repo.Repo
	for _, r := range c.Repos {
		if r.Kind() == "go_repository" {
			knownRepos = append(knownRepos, repo.Repo{
				Name:     r.Name(),
				GoPrefix: r.AttrString("importpath"),
				Remote:   r.AttrString("remote"),
				VCS:      r.AttrString("vcs"),
			})
		}
	}
	rc, cleanup := repo.NewRemoteCache(knownRepos)
	// Propagate the cleanup error only when the command itself succeeded.
	defer func() {
		if cerr := cleanup(); err == nil && cerr != nil {
			err = cerr
		}
	}()

	// Fix the workspace file with each language.
	for _, lang := range languages {
		lang.Fix(c, uc.workspace)
	}

	// Generate rules from command language arguments or by importing a file.
	var gen, empty []*rule.Rule
	if uc.repoFilePath == "" {
		gen, err = updateRepoImports(c, rc)
	} else {
		gen, empty, err = importRepos(c, rc)
	}
	if err != nil {
		return err
	}

	// Organize generated and empty rules by file. A rule should go into the file
	// it came from (by name). New rules should go into WORKSPACE or the file
	// specified with -to_macro.
	var newGen []*rule.Rule
	genForFiles := make(map[*rule.File][]*rule.Rule)
	emptyForFiles := make(map[*rule.File][]*rule.Rule)
	for _, r := range gen {
		f := uc.repoFileMap[r.Name()]
		if f != nil {
			genForFiles[f] = append(genForFiles[f], r)
		} else {
			newGen = append(newGen, r)
		}
	}
	for _, r := range empty {
		f := uc.repoFileMap[r.Name()]
		if f == nil {
			// Empty (deletion) rules are only produced for existing repos, so
			// each must have a known source file.
			panic(fmt.Sprintf("empty rule %q for deletion that was not found", r.Name()))
		}
		emptyForFiles[f] = append(emptyForFiles[f], r)
	}

	// Pick (or create) the file that receives brand-new rules: WORKSPACE by
	// default, or the -to_macro file when one was given.
	var newGenFile *rule.File
	var macroPath string
	if uc.macroFileName != "" {
		macroPath = filepath.Join(c.RepoRoot, filepath.Clean(uc.macroFileName))
	}
	for f := range genForFiles {
		if macroPath == "" && filepath.Base(f.Path) == "WORKSPACE" ||
			macroPath != "" && f.Path == macroPath && f.DefName == uc.macroDefName {
			newGenFile = f
			break
		}
	}
	if newGenFile == nil {
		if uc.macroFileName == "" {
			newGenFile = uc.workspace
		} else {
			var err error
			newGenFile, err = rule.LoadMacroFile(macroPath, "", uc.macroDefName)
			if os.IsNotExist(err) {
				newGenFile, err = rule.EmptyMacroFile(macroPath, "", uc.macroDefName)
				if err != nil {
					return fmt.Errorf("error creating %q: %v", macroPath, err)
				}
			} else if err != nil {
				return fmt.Errorf("error loading %q: %v", macroPath, err)
			}
		}
	}
	genForFiles[newGenFile] = append(genForFiles[newGenFile], newGen...)

	// Merge rules and fix loads in each file.
	seenFile := make(map[*rule.File]bool)
	sortedFiles := make([]*rule.File, 0, len(genForFiles))
	for f := range genForFiles {
		if !seenFile[f] {
			seenFile[f] = true
			sortedFiles = append(sortedFiles, f)
		}
	}
	for f := range emptyForFiles {
		if !seenFile[f] {
			seenFile[f] = true
			sortedFiles = append(sortedFiles, f)
		}
	}
	// Sort by path then macro name so output is deterministic.
	sort.Slice(sortedFiles, func(i, j int) bool {
		if cmp := strings.Compare(sortedFiles[i].Path, sortedFiles[j].Path); cmp != 0 {
			return cmp < 0
		}
		return sortedFiles[i].DefName < sortedFiles[j].DefName
	})

	// Several *rule.File values may refer to the same on-disk file (multiple
	// macros in one .bzl); sync them into one representative per path before
	// saving.
	updatedFiles := make(map[string]*rule.File)
	for _, f := range sortedFiles {
		merger.MergeFile(f, emptyForFiles[f], genForFiles[f], merger.PreResolve, kinds)
		merger.FixLoads(f, loads)
		if f == uc.workspace {
			if err := merger.CheckGazelleLoaded(f); err != nil {
				return err
			}
		}
		f.Sync()
		if uf, ok := updatedFiles[f.Path]; ok {
			uf.SyncMacroFile(f)
		} else {
			updatedFiles[f.Path] = f
		}
	}
	for _, f := range sortedFiles {
		if uf := updatedFiles[f.Path]; uf != nil {
			if err := uf.Save(uf.Path); err != nil {
				return err
			}
			delete(updatedFiles, f.Path)
		}
	}

	return nil
}
|
||||
|
||||
func newUpdateReposConfiguration(args []string, cexts []config.Configurer) (*config.Config, error) {
|
||||
c := config.New()
|
||||
fs := flag.NewFlagSet("gazelle", flag.ContinueOnError)
|
||||
// Flag will call this on any parse error. Don't print usage unless
|
||||
// -h or -help were passed explicitly.
|
||||
fs.Usage = func() {}
|
||||
for _, cext := range cexts {
|
||||
cext.RegisterFlags(fs, "update-repos", c)
|
||||
}
|
||||
if err := fs.Parse(args); err != nil {
|
||||
if err == flag.ErrHelp {
|
||||
updateReposUsage(fs)
|
||||
return nil, err
|
||||
}
|
||||
// flag already prints the error; don't print it again.
|
||||
return nil, errors.New("Try -help for more information")
|
||||
}
|
||||
for _, cext := range cexts {
|
||||
if err := cext.CheckFlags(fs, c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// updateReposUsage prints help text for the update-repos command to stderr,
// followed by the defaults of all registered flags. (The supported-formats
// sentence is aligned with the -from_file flag help, which states both
// Gopkg.lock and go.mod are supported.)
func updateReposUsage(fs *flag.FlagSet) {
	fmt.Fprint(os.Stderr, `usage:

# Add/update repositories by import path
gazelle update-repos example.com/repo1 example.com/repo2

# Import repositories from lock file
gazelle update-repos -from_file=file

The update-repos command updates repository rules in the WORKSPACE file.
update-repos can add or update repositories explicitly by import path.
update-repos can also import repository rules from a vendoring tool's lock
file (Gopkg.lock and go.mod files are supported).

FLAGS:

`)
	fs.PrintDefaults()
}
|
||||
|
||||
func updateRepoImports(c *config.Config, rc *repo.RemoteCache) (gen []*rule.Rule, err error) {
|
||||
// TODO(jayconrod): let the user pick the language with a command line flag.
|
||||
// For now, only use the first language that implements the interface.
|
||||
uc := getUpdateReposConfig(c)
|
||||
var updater language.RepoUpdater
|
||||
for _, lang := range languages {
|
||||
if u, ok := lang.(language.RepoUpdater); ok {
|
||||
updater = u
|
||||
break
|
||||
}
|
||||
}
|
||||
if updater == nil {
|
||||
return nil, fmt.Errorf("no languages can update repositories")
|
||||
}
|
||||
res := updater.UpdateRepos(language.UpdateReposArgs{
|
||||
Config: c,
|
||||
Imports: uc.importPaths,
|
||||
Cache: rc,
|
||||
})
|
||||
return res.Gen, res.Error
|
||||
}
|
||||
|
||||
func importRepos(c *config.Config, rc *repo.RemoteCache) (gen, empty []*rule.Rule, err error) {
|
||||
uc := getUpdateReposConfig(c)
|
||||
importSupported := false
|
||||
var importer language.RepoImporter
|
||||
for _, lang := range languages {
|
||||
if i, ok := lang.(language.RepoImporter); ok {
|
||||
importSupported = true
|
||||
if i.CanImport(uc.repoFilePath) {
|
||||
importer = i
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if importer == nil {
|
||||
if importSupported {
|
||||
return nil, nil, fmt.Errorf("unknown file format: %s", uc.repoFilePath)
|
||||
} else {
|
||||
return nil, nil, fmt.Errorf("no supported languages can import configuration files")
|
||||
}
|
||||
}
|
||||
res := importer.ImportRepos(language.ImportReposArgs{
|
||||
Config: c,
|
||||
Path: uc.repoFilePath,
|
||||
Prune: uc.pruneRules,
|
||||
Cache: rc,
|
||||
})
|
||||
return res.Gen, res.Empty, res.Error
|
||||
}
|
||||
|
|
@ -1,65 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/internal/version"
|
||||
"github.com/bazelbuild/bazel-gazelle/repo"
|
||||
)
|
||||
|
||||
var minimumRulesGoVersion = version.Version{0, 19, 0}
|
||||
|
||||
// checkRulesGoVersion checks whether a compatible version of rules_go is
|
||||
// being used in the workspace. A message will be logged if an incompatible
|
||||
// version is found.
|
||||
//
|
||||
// Note that we can't always determine the version of rules_go in use. Also,
|
||||
// if we find an incompatible version, we shouldn't bail out since the
|
||||
// incompatibility may not matter in the current workspace.
|
||||
func checkRulesGoVersion(repoRoot string) {
|
||||
const message = `Gazelle may not be compatible with this version of rules_go.
|
||||
Update io_bazel_rules_go to a newer version in your WORKSPACE file.`
|
||||
|
||||
rulesGoPath, err := repo.FindExternalRepo(repoRoot, config.RulesGoRepoName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defBzlPath := filepath.Join(rulesGoPath, "go", "def.bzl")
|
||||
defBzlContent, err := ioutil.ReadFile(defBzlPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
versionRe := regexp.MustCompile(`(?m)^RULES_GO_VERSION = ['"]([0-9.]*)['"]`)
|
||||
match := versionRe.FindSubmatch(defBzlContent)
|
||||
if match == nil {
|
||||
log.Printf("RULES_GO_VERSION not found in @%s//go:def.bzl.\n%s", config.RulesGoRepoName, message)
|
||||
return
|
||||
}
|
||||
vstr := string(match[1])
|
||||
v, err := version.ParseVersion(vstr)
|
||||
if err != nil {
|
||||
log.Printf("RULES_GO_VERSION %q could not be parsed in @%s//go:def.bzl.\n%s", vstr, config.RulesGoRepoName, message)
|
||||
}
|
||||
if v.Compare(minimumRulesGoVersion) < 0 {
|
||||
log.Printf("Found RULES_GO_VERSION %s. Minimum compatible version is %s.\n%s", v, minimumRulesGoVersion, message)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,16 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"config.go",
|
||||
"constants.go",
|
||||
],
|
||||
importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/config",
|
||||
importpath = "github.com/bazelbuild/bazel-gazelle/config",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/internal/wspace:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/rule:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
@ -1,252 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package config provides extensible configuration for Gazelle libraries.
|
||||
//
|
||||
// Packages may define Configurers which add support for new command-line
|
||||
// options and directive comments in build files. Note that the
|
||||
// language.Language interface embeds Configurer, so each language extension
|
||||
// has the opportunity
|
||||
//
|
||||
// When Gazelle walks the directory trees in a repository, it calls the
|
||||
// Configure method of each Configurer to produce a Config object.
|
||||
// Config objects are passed as arguments to most functions in Gazelle, so
|
||||
// this mechanism may be used to control many aspects of Gazelle's behavior.
|
||||
package config
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/internal/wspace"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// Config holds information about how Gazelle should run. This is based on
|
||||
// command line arguments, directives, other hints in build files.
|
||||
//
|
||||
// A Config applies to a single directory. A Config is created for the
|
||||
// repository root directory, then copied and modified for each subdirectory.
|
||||
//
|
||||
// Config itself contains only general information. Most configuration
|
||||
// information is language-specific and is stored in Exts. This information
|
||||
// is modified by extensions that implement Configurer.
|
||||
type Config struct {
|
||||
// RepoRoot is the absolute, canonical path to the root directory of the
|
||||
// repository with all symlinks resolved.
|
||||
RepoRoot string
|
||||
|
||||
// RepoName is the name of the repository.
|
||||
RepoName string
|
||||
|
||||
// ReadBuildFilesDir is the absolute path to a directory where
|
||||
// build files should be read from instead of RepoRoot.
|
||||
ReadBuildFilesDir string
|
||||
|
||||
// WriteBuildFilesDir is the absolute path to a directory where
|
||||
// build files should be written to instead of RepoRoot.
|
||||
WriteBuildFilesDir string
|
||||
|
||||
// ValidBuildFileNames is a list of base names that are considered valid
|
||||
// build files. Some repositories may have files named "BUILD" that are not
|
||||
// used by Bazel and should be ignored. Must contain at least one string.
|
||||
ValidBuildFileNames []string
|
||||
|
||||
// ShouldFix determines whether Gazelle attempts to remove and replace
|
||||
// usage of deprecated rules.
|
||||
ShouldFix bool
|
||||
|
||||
// IndexLibraries determines whether Gazelle should build an index of
|
||||
// libraries in the workspace for dependency resolution
|
||||
IndexLibraries bool
|
||||
|
||||
// KindMap maps from a kind name to its replacement. It provides a way for
|
||||
// users to customize the kind of rules created by Gazelle, via
|
||||
// # gazelle:map_kind.
|
||||
KindMap map[string]MappedKind
|
||||
|
||||
// Repos is a list of repository rules declared in the main WORKSPACE file
|
||||
// or in macros called by the main WORKSPACE file. This may affect rule
|
||||
// generation and dependency resolution.
|
||||
Repos []*rule.Rule
|
||||
|
||||
// Exts is a set of configurable extensions. Generally, each language
|
||||
// has its own set of extensions, but other modules may provide their own
|
||||
// extensions as well. Values in here may be populated by command line
|
||||
// arguments, directives in build files, or other mechanisms.
|
||||
Exts map[string]interface{}
|
||||
}
|
||||
|
||||
// MappedKind describes a replacement to use for a built-in kind.
|
||||
type MappedKind struct {
|
||||
FromKind, KindName, KindLoad string
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
return &Config{
|
||||
ValidBuildFileNames: DefaultValidBuildFileNames,
|
||||
Exts: make(map[string]interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Clone creates a copy of the configuration for use in a subdirectory.
|
||||
// Note that the Exts map is copied, but its contents are not.
|
||||
// Configurer.Configure should do this, if needed.
|
||||
func (c *Config) Clone() *Config {
|
||||
cc := *c
|
||||
cc.Exts = make(map[string]interface{})
|
||||
for k, v := range c.Exts {
|
||||
cc.Exts[k] = v
|
||||
}
|
||||
cc.KindMap = make(map[string]MappedKind)
|
||||
for k, v := range c.KindMap {
|
||||
cc.KindMap[k] = v
|
||||
}
|
||||
return &cc
|
||||
}
|
||||
|
||||
var DefaultValidBuildFileNames = []string{"BUILD.bazel", "BUILD"}
|
||||
|
||||
// IsValidBuildFileName returns true if a file with the given base name
|
||||
// should be treated as a build file.
|
||||
func (c *Config) IsValidBuildFileName(name string) bool {
|
||||
for _, n := range c.ValidBuildFileNames {
|
||||
if name == n {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// DefaultBuildFileName returns the base name used to create new build files.
|
||||
func (c *Config) DefaultBuildFileName() string {
|
||||
return c.ValidBuildFileNames[0]
|
||||
}
|
||||
|
||||
// Configurer is the interface for language or library-specific configuration
|
||||
// extensions. Most (ideally all) modifications to Config should happen
|
||||
// via this interface.
|
||||
type Configurer interface {
|
||||
// RegisterFlags registers command-line flags used by the extension. This
|
||||
// method is called once with the root configuration when Gazelle
|
||||
// starts. RegisterFlags may set an initial values in Config.Exts. When flags
|
||||
// are set, they should modify these values.
|
||||
RegisterFlags(fs *flag.FlagSet, cmd string, c *Config)
|
||||
|
||||
// CheckFlags validates the configuration after command line flags are parsed.
|
||||
// This is called once with the root configuration when Gazelle starts.
|
||||
// CheckFlags may set default values in flags or make implied changes.
|
||||
CheckFlags(fs *flag.FlagSet, c *Config) error
|
||||
|
||||
// KnownDirectives returns a list of directive keys that this Configurer can
|
||||
// interpret. Gazelle prints errors for directives that are not recoginized by
|
||||
// any Configurer.
|
||||
KnownDirectives() []string
|
||||
|
||||
// Configure modifies the configuration using directives and other information
|
||||
// extracted from a build file. Configure is called in each directory.
|
||||
//
|
||||
// c is the configuration for the current directory. It starts out as a copy
|
||||
// of the configuration for the parent directory.
|
||||
//
|
||||
// rel is the slash-separated relative path from the repository root to
|
||||
// the current directory. It is "" for the root directory itself.
|
||||
//
|
||||
// f is the build file for the current directory or nil if there is no
|
||||
// existing build file.
|
||||
Configure(c *Config, rel string, f *rule.File)
|
||||
}
|
||||
|
||||
// CommonConfigurer handles language-agnostic command-line flags and directives,
|
||||
// i.e., those that apply to Config itself and not to Config.Exts.
|
||||
type CommonConfigurer struct {
|
||||
repoRoot, buildFileNames, readBuildFilesDir, writeBuildFilesDir string
|
||||
indexLibraries bool
|
||||
}
|
||||
|
||||
func (cc *CommonConfigurer) RegisterFlags(fs *flag.FlagSet, cmd string, c *Config) {
|
||||
fs.StringVar(&cc.repoRoot, "repo_root", "", "path to a directory which corresponds to go_prefix, otherwise gazelle searches for it.")
|
||||
fs.StringVar(&cc.buildFileNames, "build_file_name", strings.Join(DefaultValidBuildFileNames, ","), "comma-separated list of valid build file names.\nThe first element of the list is the name of output build files to generate.")
|
||||
fs.BoolVar(&cc.indexLibraries, "index", true, "when true, gazelle will build an index of libraries in the workspace for dependency resolution")
|
||||
fs.StringVar(&cc.readBuildFilesDir, "experimental_read_build_files_dir", "", "path to a directory where build files should be read from (instead of -repo_root)")
|
||||
fs.StringVar(&cc.writeBuildFilesDir, "experimental_write_build_files_dir", "", "path to a directory where build files should be written to (instead of -repo_root)")
|
||||
}
|
||||
|
||||
func (cc *CommonConfigurer) CheckFlags(fs *flag.FlagSet, c *Config) error {
|
||||
var err error
|
||||
if cc.repoRoot == "" {
|
||||
cc.repoRoot, err = wspace.Find(".")
|
||||
if err != nil {
|
||||
return fmt.Errorf("-repo_root not specified, and WORKSPACE cannot be found: %v", err)
|
||||
}
|
||||
}
|
||||
c.RepoRoot, err = filepath.Abs(cc.repoRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: failed to find absolute path of repo root: %v", cc.repoRoot, err)
|
||||
}
|
||||
c.RepoRoot, err = filepath.EvalSymlinks(c.RepoRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: failed to resolve symlinks: %v", cc.repoRoot, err)
|
||||
}
|
||||
c.ValidBuildFileNames = strings.Split(cc.buildFileNames, ",")
|
||||
if cc.readBuildFilesDir != "" {
|
||||
c.ReadBuildFilesDir, err = filepath.Abs(cc.readBuildFilesDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: failed to find absolute path of -read_build_files_dir: %v", cc.readBuildFilesDir, err)
|
||||
}
|
||||
}
|
||||
if cc.writeBuildFilesDir != "" {
|
||||
c.WriteBuildFilesDir, err = filepath.Abs(cc.writeBuildFilesDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: failed to find absolute path of -write_build_files_dir: %v", cc.writeBuildFilesDir, err)
|
||||
}
|
||||
}
|
||||
c.IndexLibraries = cc.indexLibraries
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cc *CommonConfigurer) KnownDirectives() []string {
|
||||
return []string{"build_file_name", "map_kind"}
|
||||
}
|
||||
|
||||
func (cc *CommonConfigurer) Configure(c *Config, rel string, f *rule.File) {
|
||||
if f == nil {
|
||||
return
|
||||
}
|
||||
for _, d := range f.Directives {
|
||||
switch d.Key {
|
||||
case "build_file_name":
|
||||
c.ValidBuildFileNames = strings.Split(d.Value, ",")
|
||||
|
||||
case "map_kind":
|
||||
vals := strings.Fields(d.Value)
|
||||
if len(vals) != 3 {
|
||||
log.Printf("expected three arguments (gazelle:map_kind from_kind to_kind load_file), got %v", vals)
|
||||
continue
|
||||
}
|
||||
if c.KindMap == nil {
|
||||
c.KindMap = make(map[string]MappedKind)
|
||||
}
|
||||
c.KindMap[vals[0]] = MappedKind{
|
||||
FromKind: vals[0],
|
||||
KindName: vals[1],
|
||||
KindLoad: vals[2],
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
const (
|
||||
// RulesGoRepoName is the canonical name of the rules_go repository. It must
|
||||
// match the workspace name in WORKSPACE.
|
||||
// TODO(jayconrod): move to language/go.
|
||||
RulesGoRepoName = "io_bazel_rules_go"
|
||||
|
||||
// GazelleImportsKey is an internal attribute that lists imported packages
|
||||
// on generated rules. It is replaced with "deps" during import resolution.
|
||||
GazelleImportsKey = "_gazelle_imports"
|
||||
)
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["flag.go"],
|
||||
importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/flag",
|
||||
importpath = "github.com/bazelbuild/bazel-gazelle/flag",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
|
@ -1,92 +0,0 @@
|
|||
// Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package flag provides some general-purpose types which satisfy the
|
||||
// flag.Value interface.
|
||||
package flag
|
||||
|
||||
import (
|
||||
stdflag "flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MultiFlag collects repeated string flags into a slice.
|
||||
type MultiFlag struct {
|
||||
IsSet *bool
|
||||
Values *[]string
|
||||
}
|
||||
|
||||
var _ stdflag.Value = (*MultiFlag)(nil)
|
||||
|
||||
func (m *MultiFlag) Set(v string) error {
|
||||
if m.IsSet != nil && !*m.IsSet {
|
||||
*m.IsSet = true
|
||||
*m.Values = nil // clear any default values
|
||||
}
|
||||
*m.Values = append(*m.Values, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MultiFlag) String() string {
|
||||
if m == nil || m.Values == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(*m.Values, ",")
|
||||
}
|
||||
|
||||
// ExplicitFlag is a string flag that tracks whether it was set.
|
||||
type ExplicitFlag struct {
|
||||
IsSet *bool
|
||||
Value *string
|
||||
}
|
||||
|
||||
var _ stdflag.Value = (*ExplicitFlag)(nil)
|
||||
|
||||
func (f *ExplicitFlag) Set(value string) error {
|
||||
*f.IsSet = true
|
||||
*f.Value = value
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *ExplicitFlag) String() string {
|
||||
if f == nil || f.Value == nil {
|
||||
return ""
|
||||
}
|
||||
return *f.Value
|
||||
}
|
||||
|
||||
var _ stdflag.Value = (*AllowedStringFlag)(nil)
|
||||
|
||||
type AllowedStringFlag struct {
|
||||
Allowed []string
|
||||
Value *string
|
||||
}
|
||||
|
||||
func (f *AllowedStringFlag) Set(v string) error {
|
||||
for _, a := range f.Allowed {
|
||||
if v == a {
|
||||
*f.Value = v
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("Invalid argument %q. Possible values are: %s", v, strings.Join(f.Allowed, ", "))
|
||||
}
|
||||
|
||||
func (f *AllowedStringFlag) String() string {
|
||||
if f == nil || f.Value == nil {
|
||||
return ""
|
||||
}
|
||||
return *f.Value
|
||||
}
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["version.go"],
|
||||
importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/internal/version",
|
||||
importpath = "github.com/bazelbuild/bazel-gazelle/internal/version",
|
||||
visibility = ["//vendor/github.com/bazelbuild/bazel-gazelle:__subpackages__"],
|
||||
)
|
||||
|
|
@ -1,72 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package version
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Version is a tuple of non-negative integers that represents the version of
|
||||
// a software package.
|
||||
type Version []int
|
||||
|
||||
func (v Version) String() string {
|
||||
cstrs := make([]string, len(v))
|
||||
for i, cn := range v {
|
||||
cstrs[i] = strconv.Itoa(cn)
|
||||
}
|
||||
return strings.Join(cstrs, ".")
|
||||
}
|
||||
|
||||
// Compare returns an integer comparing two versions lexicographically.
|
||||
func (x Version) Compare(y Version) int {
|
||||
n := len(x)
|
||||
if len(y) < n {
|
||||
n = len(y)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
cmp := x[i] - y[i]
|
||||
if cmp != 0 {
|
||||
return cmp
|
||||
}
|
||||
}
|
||||
return len(x) - len(y)
|
||||
}
|
||||
|
||||
// ParseVersion parses a version of the form "12.34.56-abcd". Non-negative
|
||||
// integer components are separated by dots. An arbitrary suffix may appear
|
||||
// after '-', which is ignored.
|
||||
func ParseVersion(vs string) (Version, error) {
|
||||
i := strings.IndexByte(vs, '-')
|
||||
if i >= 0 {
|
||||
vs = vs[:i]
|
||||
}
|
||||
cstrs := strings.Split(vs, ".")
|
||||
v := make(Version, len(cstrs))
|
||||
for i, cstr := range cstrs {
|
||||
cn, err := strconv.Atoi(cstr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse version string: %q is not an integer", cstr)
|
||||
}
|
||||
if cn < 0 {
|
||||
return nil, fmt.Errorf("could not parse version string: %q is negative", cstr)
|
||||
}
|
||||
v[i] = cn
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["finder.go"],
|
||||
importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/internal/wspace",
|
||||
importpath = "github.com/bazelbuild/bazel-gazelle/internal/wspace",
|
||||
visibility = ["//vendor/github.com/bazelbuild/bazel-gazelle:__subpackages__"],
|
||||
)
|
||||
|
|
@ -1,45 +0,0 @@
|
|||
/* Copyright 2016 The Bazel Authors. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package wspace provides functions to locate and modify a bazel WORKSPACE file.
|
||||
package wspace
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const workspaceFile = "WORKSPACE"
|
||||
|
||||
// Find searches from the given dir and up for the WORKSPACE file
|
||||
// returning the directory containing it, or an error if none found in the tree.
|
||||
func Find(dir string) (string, error) {
|
||||
dir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for {
|
||||
_, err = os.Stat(filepath.Join(dir, workspaceFile))
|
||||
if err == nil {
|
||||
return dir, nil
|
||||
}
|
||||
if !os.IsNotExist(err) {
|
||||
return "", err
|
||||
}
|
||||
if strings.HasSuffix(dir, string(os.PathSeparator)) { // stop at root dir
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
dir = filepath.Dir(dir)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["label.go"],
|
||||
importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/label",
|
||||
importpath = "github.com/bazelbuild/bazel-gazelle/label",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["//vendor/github.com/bazelbuild/bazel-gazelle/pathtools:go_default_library"],
|
||||
)
|
||||
|
|
@ -1,201 +0,0 @@
|
|||
/* Copyright 2016 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package label provides utilities for parsing and manipulating
|
||||
// Bazel labels. See
|
||||
// https://docs.bazel.build/versions/master/build-ref.html#labels
|
||||
// for more information.
|
||||
package label
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/pathtools"
|
||||
)
|
||||
|
||||
// A Label represents a label of a build target in Bazel. Labels have three
|
||||
// parts: a repository name, a package name, and a target name, formatted
|
||||
// as @repo//pkg:target.
|
||||
type Label struct {
|
||||
// Repo is the repository name. If omitted, the label refers to a target
|
||||
// in the current repository.
|
||||
Repo string
|
||||
|
||||
// Pkg is the package name, which is usually the directory that contains
|
||||
// the target. If both Repo and Pkg are omitted, the label is relative.
|
||||
Pkg string
|
||||
|
||||
// Name is the name of the target the label refers to. If omitted, Name
|
||||
// is assumed to be the same as Pkg.
|
||||
Name string
|
||||
|
||||
// Relative indicates whether the label refers to a target in the current
|
||||
// package. Relative is true if and only if Repo and Pkg are both omitted.
|
||||
Relative bool
|
||||
}
|
||||
|
||||
// New constructs a new label from components.
|
||||
func New(repo, pkg, name string) Label {
|
||||
return Label{Repo: repo, Pkg: pkg, Name: name}
|
||||
}
|
||||
|
||||
// NoLabel is the zero value of Label. It is not a valid label and may be
|
||||
// returned when an error occurs.
|
||||
var NoLabel = Label{}
|
||||
|
||||
var (
|
||||
labelRepoRegexp = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`)
|
||||
labelPkgRegexp = regexp.MustCompile(`^[A-Za-z0-9/._-]*$`)
|
||||
labelNameRegexp = regexp.MustCompile(`^[A-Za-z0-9_/.+=,@~-]*$`)
|
||||
)
|
||||
|
||||
// Parse reads a label from a string.
|
||||
// See https://docs.bazel.build/versions/master/build-ref.html#lexi.
|
||||
func Parse(s string) (Label, error) {
|
||||
origStr := s
|
||||
|
||||
relative := true
|
||||
var repo string
|
||||
if strings.HasPrefix(s, "@") {
|
||||
relative = false
|
||||
endRepo := strings.Index(s, "//")
|
||||
if endRepo < 0 {
|
||||
return NoLabel, fmt.Errorf("label parse error: repository does not end with '//': %q", origStr)
|
||||
}
|
||||
repo = s[len("@"):endRepo]
|
||||
if !labelRepoRegexp.MatchString(repo) {
|
||||
return NoLabel, fmt.Errorf("label parse error: repository has invalid characters: %q", origStr)
|
||||
}
|
||||
s = s[endRepo:]
|
||||
}
|
||||
|
||||
var pkg string
|
||||
if strings.HasPrefix(s, "//") {
|
||||
relative = false
|
||||
endPkg := strings.Index(s, ":")
|
||||
if endPkg < 0 {
|
||||
pkg = s[len("//"):]
|
||||
s = ""
|
||||
} else {
|
||||
pkg = s[len("//"):endPkg]
|
||||
s = s[endPkg:]
|
||||
}
|
||||
if !labelPkgRegexp.MatchString(pkg) {
|
||||
return NoLabel, fmt.Errorf("label parse error: package has invalid characters: %q", origStr)
|
||||
}
|
||||
}
|
||||
|
||||
if s == ":" {
|
||||
return NoLabel, fmt.Errorf("label parse error: empty name: %q", origStr)
|
||||
}
|
||||
name := strings.TrimPrefix(s, ":")
|
||||
if !labelNameRegexp.MatchString(name) {
|
||||
return NoLabel, fmt.Errorf("label parse error: name has invalid characters: %q", origStr)
|
||||
}
|
||||
|
||||
if pkg == "" && name == "" {
|
||||
return NoLabel, fmt.Errorf("label parse error: empty package and name: %q", origStr)
|
||||
}
|
||||
if name == "" {
|
||||
name = path.Base(pkg)
|
||||
}
|
||||
|
||||
return Label{
|
||||
Repo: repo,
|
||||
Pkg: pkg,
|
||||
Name: name,
|
||||
Relative: relative,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (l Label) String() string {
|
||||
if l.Relative {
|
||||
return fmt.Sprintf(":%s", l.Name)
|
||||
}
|
||||
|
||||
var repo string
|
||||
if l.Repo != "" {
|
||||
repo = fmt.Sprintf("@%s", l.Repo)
|
||||
}
|
||||
|
||||
if path.Base(l.Pkg) == l.Name {
|
||||
return fmt.Sprintf("%s//%s", repo, l.Pkg)
|
||||
}
|
||||
return fmt.Sprintf("%s//%s:%s", repo, l.Pkg, l.Name)
|
||||
}
|
||||
|
||||
// Abs computes an absolute label (one with a repository and package name)
|
||||
// from this label. If this label is already absolute, it is returned
|
||||
// unchanged.
|
||||
func (l Label) Abs(repo, pkg string) Label {
|
||||
if !l.Relative {
|
||||
return l
|
||||
}
|
||||
return Label{Repo: repo, Pkg: pkg, Name: l.Name}
|
||||
}
|
||||
|
||||
// Rel attempts to compute a relative label from this label. If this label
|
||||
// is already relative or is in a different package, this label may be
|
||||
// returned unchanged.
|
||||
func (l Label) Rel(repo, pkg string) Label {
|
||||
if l.Relative || l.Repo != repo {
|
||||
return l
|
||||
}
|
||||
if l.Pkg == pkg {
|
||||
return Label{Name: l.Name, Relative: true}
|
||||
}
|
||||
return Label{Pkg: l.Pkg, Name: l.Name}
|
||||
}
|
||||
|
||||
// Equal returns whether two labels are exactly the same. It does not return
|
||||
// true for different labels that refer to the same target.
|
||||
func (l Label) Equal(other Label) bool {
|
||||
return l.Repo == other.Repo &&
|
||||
l.Pkg == other.Pkg &&
|
||||
l.Name == other.Name &&
|
||||
l.Relative == other.Relative
|
||||
}
|
||||
|
||||
// Contains returns whether other is contained by the package of l or a
|
||||
// sub-package. Neither label may be relative.
|
||||
func (l Label) Contains(other Label) bool {
|
||||
if l.Relative {
|
||||
log.Panicf("l must not be relative: %s", l)
|
||||
}
|
||||
if other.Relative {
|
||||
log.Panicf("other must not be relative: %s", other)
|
||||
}
|
||||
result := l.Repo == other.Repo && pathtools.HasPrefix(other.Pkg, l.Pkg)
|
||||
return result
|
||||
}
|
||||
|
||||
// ImportPathToBazelRepoName converts a Go import path into a bazel repo name
|
||||
// following the guidelines in http://bazel.io/docs/be/functions.html#workspace
|
||||
func ImportPathToBazelRepoName(importpath string) string {
|
||||
importpath = strings.ToLower(importpath)
|
||||
components := strings.Split(importpath, "/")
|
||||
labels := strings.Split(components[0], ".")
|
||||
var reversed []string
|
||||
for i := range labels {
|
||||
l := labels[len(labels)-i-1]
|
||||
reversed = append(reversed, l)
|
||||
}
|
||||
repo := strings.Join(append(reversed, components[1:]...), "_")
|
||||
return strings.NewReplacer("-", "_", ".", "_").Replace(repo)
|
||||
}
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"lang.go",
|
||||
"update.go",
|
||||
],
|
||||
importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/language",
|
||||
importpath = "github.com/bazelbuild/bazel-gazelle/language",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/config:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/repo:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/resolve:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/rule:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
@ -1,40 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"config.go",
|
||||
"constants.go",
|
||||
"dep.go",
|
||||
"fileinfo.go",
|
||||
"fix.go",
|
||||
"generate.go",
|
||||
"godep.go",
|
||||
"kinds.go",
|
||||
"known_go_imports.go",
|
||||
"known_proto_imports.go",
|
||||
"lang.go",
|
||||
"modules.go",
|
||||
"package.go",
|
||||
"resolve.go",
|
||||
"std_package_list.go",
|
||||
"update.go",
|
||||
],
|
||||
importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/language/go",
|
||||
importpath = "github.com/bazelbuild/bazel-gazelle/language/go",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/config:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/flag:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/label:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/language:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/language/proto:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/pathtools:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/repo:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/resolve:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/rule:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/buildtools/build:go_default_library",
|
||||
"//vendor/github.com/pelletier/go-toml:go_default_library",
|
||||
"//vendor/golang.org/x/sync/errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
@ -1,447 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
gzflag "github.com/bazelbuild/bazel-gazelle/flag"
|
||||
"github.com/bazelbuild/bazel-gazelle/language/proto"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
bzl "github.com/bazelbuild/buildtools/build"
|
||||
)
|
||||
|
||||
// goConfig contains configuration values related to Go rules.
|
||||
type goConfig struct {
|
||||
// genericTags is a set of tags that Gazelle considers to be true. Set with
|
||||
// -build_tags or # gazelle:build_tags. Some tags, like gc, are always on.
|
||||
genericTags map[string]bool
|
||||
|
||||
// prefix is a prefix of an import path, used to generate importpath
|
||||
// attributes. Set with -go_prefix or # gazelle:prefix.
|
||||
prefix string
|
||||
|
||||
// prefixRel is the package name of the directory where the prefix was set
|
||||
// ("" for the root directory).
|
||||
prefixRel string
|
||||
|
||||
// prefixSet indicates whether the prefix was set explicitly. It is an error
|
||||
// to infer an importpath for a rule without setting the prefix.
|
||||
prefixSet bool
|
||||
|
||||
// importMapPrefix is a prefix of a package path, used to generate importmap
|
||||
// attributes. Set with # gazelle:importmap_prefix.
|
||||
importMapPrefix string
|
||||
|
||||
// importMapPrefixRel is the package name of the directory where importMapPrefix
|
||||
// was set ("" for the root directory).
|
||||
importMapPrefixRel string
|
||||
|
||||
// depMode determines how imports that are not standard, indexed, or local
|
||||
// (under the current prefix) should be resolved.
|
||||
depMode dependencyMode
|
||||
|
||||
// goProtoCompilers is the protocol buffers compiler(s) to use for go code.
|
||||
goProtoCompilers []string
|
||||
|
||||
// goProtoCompilersSet indicates whether goProtoCompiler was set explicitly.
|
||||
goProtoCompilersSet bool
|
||||
|
||||
// goGrpcCompilers is the gRPC compiler(s) to use for go code.
|
||||
goGrpcCompilers []string
|
||||
|
||||
// goGrpcCompilersSet indicates whether goGrpcCompiler was set explicitly.
|
||||
goGrpcCompilersSet bool
|
||||
|
||||
// goRepositoryMode is true if Gazelle was invoked by a go_repository rule.
|
||||
// In this mode, we won't go out to the network to resolve external deps.
|
||||
goRepositoryMode bool
|
||||
|
||||
// By default, internal packages are only visible to its siblings.
|
||||
// goVisibility adds a list of packages the internal packages should be
|
||||
// visible to
|
||||
goVisibility []string
|
||||
|
||||
// moduleMode is true if the current directory is intended to be built
|
||||
// as part of a module. Minimal module compatibility won't be supported
|
||||
// if this is true in the root directory. External dependencies may be
|
||||
// resolved differently (also depending on goRepositoryMode).
|
||||
moduleMode bool
|
||||
|
||||
// submodules is a list of modules which have the current module's path
|
||||
// as a prefix of their own path. This affects visibility attributes
|
||||
// in internal packages.
|
||||
submodules []moduleRepo
|
||||
|
||||
// buildExternalAttr, buildFileNamesAttr, buildFileGenerationAttr,
|
||||
// buildTagsAttr, buildFileProtoModeAttr, and buildExtraArgsAttr are
|
||||
// attributes for go_repository rules, set on the command line.
|
||||
buildExternalAttr, buildFileNamesAttr, buildFileGenerationAttr, buildTagsAttr, buildFileProtoModeAttr, buildExtraArgsAttr string
|
||||
}
|
||||
|
||||
var (
|
||||
defaultGoProtoCompilers = []string{"@io_bazel_rules_go//proto:go_proto"}
|
||||
defaultGoGrpcCompilers = []string{"@io_bazel_rules_go//proto:go_grpc"}
|
||||
)
|
||||
|
||||
func newGoConfig() *goConfig {
|
||||
gc := &goConfig{
|
||||
goProtoCompilers: defaultGoProtoCompilers,
|
||||
goGrpcCompilers: defaultGoGrpcCompilers,
|
||||
}
|
||||
gc.preprocessTags()
|
||||
return gc
|
||||
}
|
||||
|
||||
func getGoConfig(c *config.Config) *goConfig {
|
||||
return c.Exts[goName].(*goConfig)
|
||||
}
|
||||
|
||||
func (gc *goConfig) clone() *goConfig {
|
||||
gcCopy := *gc
|
||||
gcCopy.genericTags = make(map[string]bool)
|
||||
for k, v := range gc.genericTags {
|
||||
gcCopy.genericTags[k] = v
|
||||
}
|
||||
gcCopy.goProtoCompilers = gc.goProtoCompilers[:len(gc.goProtoCompilers):len(gc.goProtoCompilers)]
|
||||
gcCopy.goGrpcCompilers = gc.goGrpcCompilers[:len(gc.goGrpcCompilers):len(gc.goGrpcCompilers)]
|
||||
gcCopy.submodules = gc.submodules[:len(gc.submodules):len(gc.submodules)]
|
||||
return &gcCopy
|
||||
}
|
||||
|
||||
// preprocessTags adds some tags which are on by default before they are
|
||||
// used to match files.
|
||||
func (gc *goConfig) preprocessTags() {
|
||||
if gc.genericTags == nil {
|
||||
gc.genericTags = make(map[string]bool)
|
||||
}
|
||||
gc.genericTags["gc"] = true
|
||||
}
|
||||
|
||||
// setBuildTags sets genericTags by parsing as a comma separated list. An
|
||||
// error will be returned for tags that wouldn't be recognized by "go build".
|
||||
// preprocessTags should be called before this.
|
||||
func (gc *goConfig) setBuildTags(tags string) error {
|
||||
if tags == "" {
|
||||
return nil
|
||||
}
|
||||
for _, t := range strings.Split(tags, ",") {
|
||||
if strings.HasPrefix(t, "!") {
|
||||
return fmt.Errorf("build tags can't be negated: %s", t)
|
||||
}
|
||||
gc.genericTags[t] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getProtoMode(c *config.Config) proto.Mode {
|
||||
if pc := proto.GetProtoConfig(c); pc != nil {
|
||||
return pc.Mode
|
||||
} else {
|
||||
return proto.DisableGlobalMode
|
||||
}
|
||||
}
|
||||
|
||||
// dependencyMode determines how imports of packages outside of the prefix
|
||||
// are resolved.
|
||||
type dependencyMode int
|
||||
|
||||
const (
|
||||
// externalMode indicates imports should be resolved to external dependencies
|
||||
// (declared in WORKSPACE).
|
||||
externalMode dependencyMode = iota
|
||||
|
||||
// vendorMode indicates imports should be resolved to libraries in the
|
||||
// vendor directory.
|
||||
vendorMode
|
||||
)
|
||||
|
||||
func (m dependencyMode) String() string {
|
||||
if m == externalMode {
|
||||
return "external"
|
||||
} else {
|
||||
return "vendored"
|
||||
}
|
||||
}
|
||||
|
||||
type externalFlag struct {
|
||||
depMode *dependencyMode
|
||||
}
|
||||
|
||||
func (f *externalFlag) Set(value string) error {
|
||||
switch value {
|
||||
case "external":
|
||||
*f.depMode = externalMode
|
||||
case "vendored":
|
||||
*f.depMode = vendorMode
|
||||
default:
|
||||
return fmt.Errorf("unrecognized dependency mode: %q", value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *externalFlag) String() string {
|
||||
if f == nil || f.depMode == nil {
|
||||
return "external"
|
||||
}
|
||||
return f.depMode.String()
|
||||
}
|
||||
|
||||
type tagsFlag func(string) error
|
||||
|
||||
func (f tagsFlag) Set(value string) error {
|
||||
return f(value)
|
||||
}
|
||||
|
||||
func (f tagsFlag) String() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
type moduleRepo struct {
|
||||
repoName, modulePath string
|
||||
}
|
||||
|
||||
var validBuildExternalAttr = []string{"external", "vendored"}
|
||||
var validBuildFileGenerationAttr = []string{"auto", "on", "off"}
|
||||
var validBuildFileProtoModeAttr = []string{"default", "legacy", "disable", "disable_global", "package"}
|
||||
|
||||
func (*goLang) KnownDirectives() []string {
|
||||
return []string{
|
||||
"build_tags",
|
||||
"go_grpc_compilers",
|
||||
"go_proto_compilers",
|
||||
"go_visibility",
|
||||
"importmap_prefix",
|
||||
"prefix",
|
||||
}
|
||||
}
|
||||
|
||||
func (*goLang) RegisterFlags(fs *flag.FlagSet, cmd string, c *config.Config) {
|
||||
gc := newGoConfig()
|
||||
switch cmd {
|
||||
case "fix", "update":
|
||||
fs.Var(
|
||||
tagsFlag(gc.setBuildTags),
|
||||
"build_tags",
|
||||
"comma-separated list of build tags. If not specified, Gazelle will not\n\tfilter sources with build constraints.")
|
||||
fs.Var(
|
||||
&gzflag.ExplicitFlag{Value: &gc.prefix, IsSet: &gc.prefixSet},
|
||||
"go_prefix",
|
||||
"prefix of import paths in the current workspace")
|
||||
fs.Var(
|
||||
&externalFlag{&gc.depMode},
|
||||
"external",
|
||||
"external: resolve external packages with go_repository\n\tvendored: resolve external packages as packages in vendor/")
|
||||
fs.Var(
|
||||
&gzflag.MultiFlag{Values: &gc.goProtoCompilers, IsSet: &gc.goProtoCompilersSet},
|
||||
"go_proto_compiler",
|
||||
"go_proto_library compiler to use (may be repeated)")
|
||||
fs.Var(
|
||||
&gzflag.MultiFlag{Values: &gc.goGrpcCompilers, IsSet: &gc.goGrpcCompilersSet},
|
||||
"go_grpc_compiler",
|
||||
"go_proto_library compiler to use for gRPC (may be repeated)")
|
||||
fs.BoolVar(
|
||||
&gc.goRepositoryMode,
|
||||
"go_repository_mode",
|
||||
false,
|
||||
"set when gazelle is invoked by go_repository")
|
||||
fs.BoolVar(
|
||||
&gc.moduleMode,
|
||||
"go_repository_module_mode",
|
||||
false,
|
||||
"set when gazelle is invoked by go_repository in module mode")
|
||||
|
||||
case "update-repos":
|
||||
fs.Var(&gzflag.AllowedStringFlag{Value: &gc.buildExternalAttr, Allowed: validBuildExternalAttr},
|
||||
"build_external",
|
||||
"Sets the build_external attribute for the generated go_repository rule(s).")
|
||||
fs.StringVar(&gc.buildExtraArgsAttr,
|
||||
"build_extra_args",
|
||||
"",
|
||||
"Sets the build_extra_args attribute for the generated go_repository rule(s).")
|
||||
fs.Var(&gzflag.AllowedStringFlag{Value: &gc.buildFileGenerationAttr, Allowed: validBuildFileGenerationAttr},
|
||||
"build_file_generation",
|
||||
"Sets the build_file_generation attribute for the generated go_repository rule(s).")
|
||||
fs.StringVar(&gc.buildFileNamesAttr,
|
||||
"build_file_names",
|
||||
"",
|
||||
"Sets the build_file_name attribute for the generated go_repository rule(s).")
|
||||
fs.Var(&gzflag.AllowedStringFlag{Value: &gc.buildFileProtoModeAttr, Allowed: validBuildFileProtoModeAttr},
|
||||
"build_file_proto_mode",
|
||||
"Sets the build_file_proto_mode attribute for the generated go_repository rule(s).")
|
||||
fs.StringVar(&gc.buildTagsAttr,
|
||||
"build_tags",
|
||||
"",
|
||||
"Sets the build_tags attribute for the generated go_repository rule(s).")
|
||||
}
|
||||
c.Exts[goName] = gc
|
||||
}
|
||||
|
||||
func (*goLang) CheckFlags(fs *flag.FlagSet, c *config.Config) error {
|
||||
// The base of the -go_prefix flag may be used to generate proto_library
|
||||
// rule names when there are no .proto sources (empty rules to be deleted)
|
||||
// or when the package name can't be determined.
|
||||
// TODO(jayconrod): deprecate and remove this behavior.
|
||||
gc := getGoConfig(c)
|
||||
if pc := proto.GetProtoConfig(c); pc != nil {
|
||||
pc.GoPrefix = gc.prefix
|
||||
}
|
||||
|
||||
// List modules that may refer to internal packages in this module.
|
||||
for _, r := range c.Repos {
|
||||
if r.Kind() != "go_repository" {
|
||||
continue
|
||||
}
|
||||
modulePath := r.AttrString("importpath")
|
||||
if !strings.HasPrefix(modulePath, gc.prefix+"/") {
|
||||
continue
|
||||
}
|
||||
m := moduleRepo{
|
||||
repoName: r.Name(),
|
||||
modulePath: modulePath,
|
||||
}
|
||||
gc.submodules = append(gc.submodules, m)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*goLang) Configure(c *config.Config, rel string, f *rule.File) {
|
||||
var gc *goConfig
|
||||
if raw, ok := c.Exts[goName]; !ok {
|
||||
gc = newGoConfig()
|
||||
} else {
|
||||
gc = raw.(*goConfig).clone()
|
||||
}
|
||||
c.Exts[goName] = gc
|
||||
|
||||
if !gc.moduleMode {
|
||||
st, err := os.Stat(filepath.Join(c.RepoRoot, filepath.FromSlash(rel), "go.mod"))
|
||||
if err == nil && !st.IsDir() {
|
||||
gc.moduleMode = true
|
||||
}
|
||||
}
|
||||
|
||||
if path.Base(rel) == "vendor" {
|
||||
gc.importMapPrefix = inferImportPath(gc, rel)
|
||||
gc.importMapPrefixRel = rel
|
||||
gc.prefix = ""
|
||||
gc.prefixRel = rel
|
||||
}
|
||||
|
||||
if f != nil {
|
||||
setPrefix := func(prefix string) {
|
||||
if err := checkPrefix(prefix); err != nil {
|
||||
log.Print(err)
|
||||
return
|
||||
}
|
||||
gc.prefix = prefix
|
||||
gc.prefixSet = true
|
||||
gc.prefixRel = rel
|
||||
}
|
||||
for _, d := range f.Directives {
|
||||
switch d.Key {
|
||||
case "build_tags":
|
||||
if err := gc.setBuildTags(d.Value); err != nil {
|
||||
log.Print(err)
|
||||
continue
|
||||
}
|
||||
gc.preprocessTags()
|
||||
gc.setBuildTags(d.Value)
|
||||
|
||||
case "go_grpc_compilers":
|
||||
// Special syntax (empty value) to reset directive.
|
||||
if d.Value == "" {
|
||||
gc.goGrpcCompilersSet = false
|
||||
gc.goGrpcCompilers = defaultGoGrpcCompilers
|
||||
} else {
|
||||
gc.goGrpcCompilersSet = true
|
||||
gc.goGrpcCompilers = splitValue(d.Value)
|
||||
}
|
||||
|
||||
case "go_proto_compilers":
|
||||
// Special syntax (empty value) to reset directive.
|
||||
if d.Value == "" {
|
||||
gc.goProtoCompilersSet = false
|
||||
gc.goProtoCompilers = defaultGoProtoCompilers
|
||||
} else {
|
||||
gc.goProtoCompilersSet = true
|
||||
gc.goProtoCompilers = splitValue(d.Value)
|
||||
}
|
||||
|
||||
case "go_visibility":
|
||||
gc.goVisibility = append(gc.goVisibility, strings.TrimSpace(d.Value))
|
||||
|
||||
case "importmap_prefix":
|
||||
gc.importMapPrefix = d.Value
|
||||
gc.importMapPrefixRel = rel
|
||||
|
||||
case "prefix":
|
||||
setPrefix(d.Value)
|
||||
}
|
||||
}
|
||||
if !gc.prefixSet {
|
||||
for _, r := range f.Rules {
|
||||
switch r.Kind() {
|
||||
case "go_prefix":
|
||||
args := r.Args()
|
||||
if len(args) != 1 {
|
||||
continue
|
||||
}
|
||||
s, ok := args[0].(*bzl.StringExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
setPrefix(s.Value)
|
||||
|
||||
case "gazelle":
|
||||
if prefix := r.AttrString("prefix"); prefix != "" {
|
||||
setPrefix(prefix)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkPrefix checks that a string may be used as a prefix. We forbid local
|
||||
// (relative) imports and those beginning with "/". We allow the empty string,
|
||||
// but generated rules must not have an empty importpath.
|
||||
func checkPrefix(prefix string) error {
|
||||
if strings.HasPrefix(prefix, "/") || build.IsLocalImport(prefix) {
|
||||
return fmt.Errorf("invalid prefix: %q", prefix)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// splitDirective splits a comma-separated directive value into its component
|
||||
// parts, trimming each of any whitespace characters.
|
||||
func splitValue(value string) []string {
|
||||
parts := strings.Split(value, ",")
|
||||
values := make([]string, 0, len(parts))
|
||||
for _, part := range parts {
|
||||
values = append(values, strings.TrimSpace(part))
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
|
@ -1,44 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
const (
|
||||
// defaultLibName is the name of the default go_library rule in a Go
|
||||
// package directory. This name was originally chosen so that rules_go
|
||||
// could translate between Bazel labels and Go import paths using go_prefix.
|
||||
// It is no longer needed since go_prefix was deleted.
|
||||
defaultLibName = "go_default_library"
|
||||
|
||||
// defaultTestName is a name of an internal test corresponding to
|
||||
// defaultLibName. It does not need to be consistent to something but it
|
||||
// just needs to be unique in the Bazel package
|
||||
defaultTestName = "go_default_test"
|
||||
|
||||
// legacyProtoFilegroupName is the anme of a filegroup created in legacy
|
||||
// mode for libraries that contained .pb.go files and .proto files.
|
||||
legacyProtoFilegroupName = "go_default_library_protos"
|
||||
|
||||
// grpcCompilerLabel is the label for the gRPC compiler plugin, used in the
|
||||
// "compilers" attribute of go_proto_library rules.
|
||||
grpcCompilerLabel = "@io_bazel_rules_go//proto:go_grpc"
|
||||
|
||||
// wellKnownTypesGoPrefix is the import path for the Go repository containing
|
||||
// pre-generated code for the Well Known Types.
|
||||
wellKnownTypesGoPrefix = "github.com/golang/protobuf"
|
||||
|
||||
// wellKnownTypesPkg is the package name for the predefined WKTs in rules_go.
|
||||
wellKnownTypesPkg = "proto/wkt"
|
||||
)
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_context", "go_rule")
|
||||
|
||||
def _std_package_list_impl(ctx):
|
||||
go = go_context(ctx)
|
||||
args = ctx.actions.args()
|
||||
args.add_all([go.package_list, ctx.outputs.out])
|
||||
ctx.actions.run(
|
||||
inputs = [go.package_list],
|
||||
outputs = [ctx.outputs.out],
|
||||
executable = ctx.executable._gen_std_package_list,
|
||||
arguments = [args],
|
||||
mnemonic = "GoStdPackageList",
|
||||
)
|
||||
return [DefaultInfo(files = depset([ctx.outputs.out]))]
|
||||
|
||||
std_package_list = go_rule(
|
||||
_std_package_list_impl,
|
||||
attrs = {
|
||||
"out": attr.output(mandatory = True),
|
||||
"_gen_std_package_list": attr.label(
|
||||
default = "//language/go/gen_std_package_list",
|
||||
cfg = "host",
|
||||
executable = True,
|
||||
),
|
||||
},
|
||||
)
|
||||
|
|
@ -1,68 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/label"
|
||||
"github.com/bazelbuild/bazel-gazelle/language"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
toml "github.com/pelletier/go-toml"
|
||||
)
|
||||
|
||||
type depLockFile struct {
|
||||
Projects []depProject `toml:"projects"`
|
||||
}
|
||||
|
||||
type depProject struct {
|
||||
Name string `toml:"name"`
|
||||
Revision string `toml:"revision"`
|
||||
Source string `toml:"source"`
|
||||
}
|
||||
|
||||
func importReposFromDep(args language.ImportReposArgs) language.ImportReposResult {
|
||||
data, err := ioutil.ReadFile(args.Path)
|
||||
if err != nil {
|
||||
return language.ImportReposResult{Error: err}
|
||||
}
|
||||
var file depLockFile
|
||||
if err := toml.Unmarshal(data, &file); err != nil {
|
||||
return language.ImportReposResult{Error: err}
|
||||
}
|
||||
|
||||
gen := make([]*rule.Rule, len(file.Projects))
|
||||
for i, p := range file.Projects {
|
||||
gen[i] = rule.NewRule("go_repository", label.ImportPathToBazelRepoName(p.Name))
|
||||
gen[i].SetAttr("importpath", p.Name)
|
||||
gen[i].SetAttr("commit", p.Revision)
|
||||
if p.Source != "" {
|
||||
// TODO(#411): Handle source directives correctly. It may be an import
|
||||
// path, or a URL. In the case of an import path, we should resolve it
|
||||
// to the correct remote and vcs. In the case of a URL, we should
|
||||
// correctly determine what VCS to use (the URL will usually start
|
||||
// with "https://", which is used by multiple VCSs).
|
||||
gen[i].SetAttr("remote", p.Source)
|
||||
gen[i].SetAttr("vcs", "git")
|
||||
}
|
||||
}
|
||||
sort.SliceStable(gen, func(i, j int) bool {
|
||||
return gen[i].Name() < gen[j].Name()
|
||||
})
|
||||
|
||||
return language.ImportReposResult{Gen: gen}
|
||||
}
|
||||
|
|
@ -1,695 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/language/proto"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// fileInfo holds information used to decide how to build a file. This
|
||||
// information comes from the file's name, from package and import declarations
|
||||
// (in .go files), and from +build and cgo comments.
|
||||
type fileInfo struct {
|
||||
path string
|
||||
name string
|
||||
|
||||
// ext is the type of file, based on extension.
|
||||
ext ext
|
||||
|
||||
// packageName is the Go package name of a .go file, without the
|
||||
// "_test" suffix if it was present. It is empty for non-Go files.
|
||||
packageName string
|
||||
|
||||
// importPath is the canonical import path for this file's package.
|
||||
// This may be read from a package comment (in Go) or a go_package
|
||||
// option (in proto). This field is empty for files that don't specify
|
||||
// an import path.
|
||||
importPath string
|
||||
|
||||
// isTest is true if the file stem (the part before the extension)
|
||||
// ends with "_test.go". This is never true for non-Go files.
|
||||
isTest bool
|
||||
|
||||
// imports is a list of packages imported by a file. It does not include
|
||||
// "C" or anything from the standard library.
|
||||
imports []string
|
||||
|
||||
// isCgo is true for .go files that import "C".
|
||||
isCgo bool
|
||||
|
||||
// goos and goarch contain the OS and architecture suffixes in the filename,
|
||||
// if they were present.
|
||||
goos, goarch string
|
||||
|
||||
// tags is a list of build tag lines. Each entry is the trimmed text of
|
||||
// a line after a "+build" prefix.
|
||||
tags []tagLine
|
||||
|
||||
// copts and clinkopts contain flags that are part of CFLAGS, CPPFLAGS,
|
||||
// CXXFLAGS, and LDFLAGS directives in cgo comments.
|
||||
copts, clinkopts []taggedOpts
|
||||
|
||||
// hasServices indicates whether a .proto file has service definitions.
|
||||
hasServices bool
|
||||
}
|
||||
|
||||
// tagLine represents the space-separated disjunction of build tag groups
|
||||
// in a line comment.
|
||||
type tagLine []tagGroup
|
||||
|
||||
// check returns true if at least one of the tag groups is satisfied.
|
||||
func (l tagLine) check(c *config.Config, os, arch string) bool {
|
||||
if len(l) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, g := range l {
|
||||
if g.check(c, os, arch) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// tagGroup represents a comma-separated conjuction of build tags.
|
||||
type tagGroup []string
|
||||
|
||||
// check returns true if all of the tags are true. Tags that start with
|
||||
// "!" are negated (but "!!") is not allowed. Go release tags (e.g., "go1.8")
|
||||
// are ignored. If the group contains an os or arch tag, but the os or arch
|
||||
// parameters are empty, check returns false even if the tag is negated.
|
||||
func (g tagGroup) check(c *config.Config, os, arch string) bool {
|
||||
goConf := getGoConfig(c)
|
||||
for _, t := range g {
|
||||
if strings.HasPrefix(t, "!!") { // bad syntax, reject always
|
||||
return false
|
||||
}
|
||||
not := strings.HasPrefix(t, "!")
|
||||
if not {
|
||||
t = t[1:]
|
||||
}
|
||||
if isIgnoredTag(t) {
|
||||
// Release tags are treated as "unknown" and are considered true,
|
||||
// whether or not they are negated.
|
||||
continue
|
||||
}
|
||||
var match bool
|
||||
if _, ok := rule.KnownOSSet[t]; ok {
|
||||
if os == "" {
|
||||
return false
|
||||
}
|
||||
match = matchesOS(os, t)
|
||||
} else if _, ok := rule.KnownArchSet[t]; ok {
|
||||
if arch == "" {
|
||||
return false
|
||||
}
|
||||
match = arch == t
|
||||
} else {
|
||||
match = goConf.genericTags[t]
|
||||
}
|
||||
if not {
|
||||
match = !match
|
||||
}
|
||||
if !match {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// taggedOpts a list of compile or link options which should only be applied
|
||||
// if the given set of build tags are satisfied. These options have already
|
||||
// been tokenized using the same algorithm that "go build" uses, then joined
|
||||
// with OptSeparator.
|
||||
type taggedOpts struct {
|
||||
tags tagLine
|
||||
opts string
|
||||
}
|
||||
|
||||
// optSeparator is a special character inserted between options that appeared
|
||||
// together in a #cgo directive. This allows options to be split, modified,
|
||||
// and escaped by other packages.
|
||||
//
|
||||
// It's important to keep options grouped together in the same string. For
|
||||
// example, if we have "-framework IOKit" together in a #cgo directive,
|
||||
// "-framework" shouldn't be treated as a separate string for the purposes of
|
||||
// sorting and de-duplicating.
|
||||
const optSeparator = "\x1D"
|
||||
|
||||
// ext indicates how a file should be treated, based on extension.
|
||||
type ext int
|
||||
|
||||
const (
|
||||
// unknownExt is applied files that aren't buildable with Go.
|
||||
unknownExt ext = iota
|
||||
|
||||
// goExt is applied to .go files.
|
||||
goExt
|
||||
|
||||
// cExt is applied to C and C++ files.
|
||||
cExt
|
||||
|
||||
// hExt is applied to header files. If cgo code is present, these may be
|
||||
// C or C++ headers. If not, they are treated as Go assembly headers.
|
||||
hExt
|
||||
|
||||
// sExt is applied to Go assembly files, ending with .s.
|
||||
sExt
|
||||
|
||||
// csExt is applied to other assembly files, ending with .S. These are built
|
||||
// with the C compiler if cgo code is present.
|
||||
csExt
|
||||
|
||||
// protoExt is applied to .proto files.
|
||||
protoExt
|
||||
)
|
||||
|
||||
// fileNameInfo returns information that can be inferred from the name of
|
||||
// a file. It does not read data from the file.
|
||||
func fileNameInfo(path_ string) fileInfo {
|
||||
name := filepath.Base(path_)
|
||||
var ext ext
|
||||
switch path.Ext(name) {
|
||||
case ".go":
|
||||
ext = goExt
|
||||
case ".c", ".cc", ".cpp", ".cxx", ".m", ".mm":
|
||||
ext = cExt
|
||||
case ".h", ".hh", ".hpp", ".hxx":
|
||||
ext = hExt
|
||||
case ".s":
|
||||
ext = sExt
|
||||
case ".S":
|
||||
ext = csExt
|
||||
case ".proto":
|
||||
ext = protoExt
|
||||
default:
|
||||
ext = unknownExt
|
||||
}
|
||||
if strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_") {
|
||||
ext = unknownExt
|
||||
}
|
||||
|
||||
// Determine test, goos, and goarch. This is intended to match the logic
|
||||
// in goodOSArchFile in go/build.
|
||||
var isTest bool
|
||||
var goos, goarch string
|
||||
l := strings.Split(name[:len(name)-len(path.Ext(name))], "_")
|
||||
if len(l) >= 2 && l[len(l)-1] == "test" {
|
||||
isTest = ext == goExt
|
||||
l = l[:len(l)-1]
|
||||
}
|
||||
switch {
|
||||
case len(l) >= 3 && rule.KnownOSSet[l[len(l)-2]] && rule.KnownArchSet[l[len(l)-1]]:
|
||||
goos = l[len(l)-2]
|
||||
goarch = l[len(l)-1]
|
||||
case len(l) >= 2 && rule.KnownOSSet[l[len(l)-1]]:
|
||||
goos = l[len(l)-1]
|
||||
case len(l) >= 2 && rule.KnownArchSet[l[len(l)-1]]:
|
||||
goarch = l[len(l)-1]
|
||||
}
|
||||
|
||||
return fileInfo{
|
||||
path: path_,
|
||||
name: name,
|
||||
ext: ext,
|
||||
isTest: isTest,
|
||||
goos: goos,
|
||||
goarch: goarch,
|
||||
}
|
||||
}
|
||||
|
||||
// otherFileInfo returns information about a non-.go file. It will parse
|
||||
// part of the file to determine build tags. If the file can't be read, an
|
||||
// error will be logged, and partial information will be returned.
|
||||
func otherFileInfo(path string) fileInfo {
|
||||
info := fileNameInfo(path)
|
||||
if info.ext == unknownExt {
|
||||
return info
|
||||
}
|
||||
|
||||
tags, err := readTags(info.path)
|
||||
if err != nil {
|
||||
log.Printf("%s: error reading file: %v", info.path, err)
|
||||
return info
|
||||
}
|
||||
info.tags = tags
|
||||
return info
|
||||
}
|
||||
|
||||
// goFileInfo returns information about a .go file. It will parse part of the
|
||||
// file to determine the package name, imports, and build constraints.
|
||||
// If the file can't be read, an error will be logged, and partial information
|
||||
// will be returned.
|
||||
// This function is intended to match go/build.Context.Import.
|
||||
// TODD(#53): extract canonical import path
|
||||
func goFileInfo(path, rel string) fileInfo {
|
||||
info := fileNameInfo(path)
|
||||
fset := token.NewFileSet()
|
||||
pf, err := parser.ParseFile(fset, info.path, nil, parser.ImportsOnly|parser.ParseComments)
|
||||
if err != nil {
|
||||
log.Printf("%s: error reading go file: %v", info.path, err)
|
||||
return info
|
||||
}
|
||||
|
||||
info.packageName = pf.Name.Name
|
||||
if info.isTest && strings.HasSuffix(info.packageName, "_test") {
|
||||
info.packageName = info.packageName[:len(info.packageName)-len("_test")]
|
||||
}
|
||||
|
||||
for _, decl := range pf.Decls {
|
||||
d, ok := decl.(*ast.GenDecl)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, dspec := range d.Specs {
|
||||
spec, ok := dspec.(*ast.ImportSpec)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
quoted := spec.Path.Value
|
||||
path, err := strconv.Unquote(quoted)
|
||||
if err != nil {
|
||||
log.Printf("%s: error reading go file: %v", info.path, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if path == "C" {
|
||||
if info.isTest {
|
||||
log.Printf("%s: warning: use of cgo in test not supported", info.path)
|
||||
}
|
||||
info.isCgo = true
|
||||
cg := spec.Doc
|
||||
if cg == nil && len(d.Specs) == 1 {
|
||||
cg = d.Doc
|
||||
}
|
||||
if cg != nil {
|
||||
if err := saveCgo(&info, rel, cg); err != nil {
|
||||
log.Printf("%s: error reading go file: %v", info.path, err)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
info.imports = append(info.imports, path)
|
||||
}
|
||||
}
|
||||
|
||||
tags, err := readTags(info.path)
|
||||
if err != nil {
|
||||
log.Printf("%s: error reading go file: %v", info.path, err)
|
||||
return info
|
||||
}
|
||||
info.tags = tags
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
// saveCgo extracts CFLAGS, CPPFLAGS, CXXFLAGS, and LDFLAGS directives
// from a comment above a "C" import. This is intended to match logic in
// go/build.Context.saveCgo.
func saveCgo(info *fileInfo, rel string, cg *ast.CommentGroup) error {
	text := cg.Text()
	for _, line := range strings.Split(text, "\n") {
		orig := line

		// Line is
		//	#cgo [GOOS/GOARCH...] LDFLAGS: stuff
		//
		line = strings.TrimSpace(line)
		if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
			continue
		}

		// Split at colon.
		line = strings.TrimSpace(line[4:])
		i := strings.Index(line, ":")
		if i < 0 {
			return fmt.Errorf("%s: invalid #cgo line: %s", info.path, orig)
		}
		line, optstr := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:])

		// Parse tags and verb. The verb (e.g. LDFLAGS) is the last field;
		// anything before it is a build-constraint group.
		f := strings.Fields(line)
		if len(f) < 1 {
			return fmt.Errorf("%s: invalid #cgo line: %s", info.path, orig)
		}
		verb := f[len(f)-1]
		tags := parseTagsInGroups(f[:len(f)-1])

		// Parse options, honoring shell-style quoting.
		opts, err := splitQuoted(optstr)
		if err != nil {
			return fmt.Errorf("%s: invalid #cgo line: %s", info.path, orig)
		}
		var ok bool
		for i, opt := range opts {
			// Expand ${SRCDIR} and reject arguments unsafe for the shell.
			if opt, ok = expandSrcDir(opt, rel); !ok {
				return fmt.Errorf("%s: malformed #cgo argument: %s", info.path, orig)
			}
			opts[i] = opt
		}
		joinedStr := strings.Join(opts, optSeparator)

		// Add tags to appropriate list.
		switch verb {
		case "CFLAGS", "CPPFLAGS", "CXXFLAGS":
			info.copts = append(info.copts, taggedOpts{tags, joinedStr})
		case "LDFLAGS":
			info.clinkopts = append(info.clinkopts, taggedOpts{tags, joinedStr})
		case "pkg-config":
			return fmt.Errorf("%s: pkg-config not supported: %s", info.path, orig)
		default:
			return fmt.Errorf("%s: invalid #cgo verb: %s", info.path, orig)
		}
	}
	return nil
}
|
||||
|
||||
// splitQuoted splits the string s around each instance of one or more consecutive
// white space characters while taking into account quotes and escaping, and
// returns an array of substrings of s or an empty list if s contains only white space.
// Single quotes and double quotes are recognized to prevent splitting within the
// quoted region, and are removed from the resulting substrings. If a quote in s
// isn't closed err will be set and r will have the unclosed argument as the
// last element. The backslash is used for escaping.
//
// For example, the following string:
//
//     a b:"c d" 'e''f' "g\""
//
// Would be parsed as:
//
//     []string{"a", "b:c d", "ef", `g"`}
//
// Copied from go/build.splitQuoted
func splitQuoted(s string) (r []string, err error) {
	var (
		args    []string
		buf     = make([]rune, len(s)) // scratch space for the current argument
		n       = 0                    // number of runes currently in buf
		escaped = false                // previous rune was an unconsumed backslash
		quoted  = false                // current argument contained a quoted region
		quote   = '\x00'               // active quote character, or NUL if none
	)
	for _, c := range s {
		if escaped {
			// Take the rune literally.
			escaped = false
		} else if c == '\\' {
			escaped = true
			continue
		} else if quote != '\x00' {
			// Inside a quoted region: only the matching quote is special.
			if c == quote {
				quote = '\x00'
				continue
			}
		} else if c == '"' || c == '\'' {
			quoted = true
			quote = c
			continue
		} else if unicode.IsSpace(c) {
			// Whitespace terminates the current argument, if any.
			if quoted || n > 0 {
				quoted = false
				args = append(args, string(buf[:n]))
				n = 0
			}
			continue
		}
		buf[n] = c
		n++
	}
	// Flush the trailing argument.
	if quoted || n > 0 {
		args = append(args, string(buf[:n]))
	}
	if quote != 0 {
		err = errors.New("unclosed quote")
	} else if escaped {
		err = errors.New("unfinished escaping")
	}
	return args, err
}
|
||||
|
||||
// expandSrcDir expands any occurrence of ${SRCDIR}, making sure
|
||||
// the result is safe for the shell.
|
||||
//
|
||||
// Copied from go/build.expandSrcDir
|
||||
func expandSrcDir(str string, srcdir string) (string, bool) {
|
||||
// "\" delimited paths cause safeCgoName to fail
|
||||
// so convert native paths with a different delimiter
|
||||
// to "/" before starting (eg: on windows).
|
||||
srcdir = filepath.ToSlash(srcdir)
|
||||
if srcdir == "" {
|
||||
srcdir = "."
|
||||
}
|
||||
|
||||
// Spaces are tolerated in ${SRCDIR}, but not anywhere else.
|
||||
chunks := strings.Split(str, "${SRCDIR}")
|
||||
if len(chunks) < 2 {
|
||||
return str, safeCgoName(str, false)
|
||||
}
|
||||
ok := true
|
||||
for _, chunk := range chunks {
|
||||
ok = ok && (chunk == "" || safeCgoName(chunk, false))
|
||||
}
|
||||
ok = ok && (srcdir == "" || safeCgoName(srcdir, true))
|
||||
res := strings.Join(chunks, srcdir)
|
||||
return res, ok && res != ""
|
||||
}
|
||||
|
||||
// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN.
// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay.
// See golang.org/issue/6038.
// The @ is for OS X. See golang.org/issue/13720.
// The % is for Jenkins. See golang.org/issue/16959.
const safeString = "+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$@%"
const safeSpaces = " "

var safeBytes = []byte(safeSpaces + safeString)

// safeCgoName reports whether every ASCII byte of s appears in the
// allow-list above. When spaces is false, the space character is excluded
// from the allow-list. Bytes outside the ASCII range are not checked.
//
// Copied from go/build.safeCgoName
func safeCgoName(s string, spaces bool) bool {
	if len(s) == 0 {
		return false
	}
	allowed := safeBytes
	if !spaces {
		// safeBytes starts with the space characters; skip them.
		allowed = allowed[len(safeSpaces):]
	}
	for _, c := range []byte(s) {
		if c < utf8.RuneSelf && bytes.IndexByte(allowed, c) < 0 {
			return false
		}
	}
	return true
}
|
||||
|
||||
// readTags reads and extracts build tags from the block of comments
// and blank lines at the start of a file which is separated from the
// rest of the file by a blank line. Each string in the returned slice
// is the trimmed text of a line after a "+build" prefix.
// Based on go/build.Context.shouldBuild.
func readTags(path string) ([]tagLine, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)

	// Pass 1: Identify leading run of // comments and blank lines,
	// which must be followed by a blank line.
	var lines []string
	end := 0
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			// Remember how many comment lines precede the most recent
			// blank line; only those count (the rest may abut code).
			end = len(lines)
			continue
		}
		if strings.HasPrefix(line, "//") {
			lines = append(lines, line[len("//"):])
			continue
		}
		// First non-blank, non-comment line ends the header.
		break
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	// Drop comment lines not followed by a blank line.
	lines = lines[:end]

	// Pass 2: Process each line in the run.
	var tagLines []tagLine
	for _, line := range lines {
		fields := strings.Fields(line)
		if len(fields) > 0 && fields[0] == "+build" {
			tagLines = append(tagLines, parseTagsInGroups(fields[1:]))
		}
	}
	return tagLines, nil
}
|
||||
|
||||
func parseTagsInGroups(groups []string) tagLine {
|
||||
var l tagLine
|
||||
for _, g := range groups {
|
||||
l = append(l, tagGroup(strings.Split(g, ",")))
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func isOSArchSpecific(info fileInfo, cgoTags tagLine) (osSpecific, archSpecific bool) {
|
||||
if info.goos != "" {
|
||||
osSpecific = true
|
||||
}
|
||||
if info.goarch != "" {
|
||||
archSpecific = true
|
||||
}
|
||||
lines := info.tags
|
||||
if len(cgoTags) > 0 {
|
||||
lines = append(lines, cgoTags)
|
||||
}
|
||||
for _, line := range lines {
|
||||
for _, group := range line {
|
||||
for _, tag := range group {
|
||||
if strings.HasPrefix(tag, "!") {
|
||||
tag = tag[1:]
|
||||
}
|
||||
_, osOk := rule.KnownOSSet[tag]
|
||||
if osOk {
|
||||
osSpecific = true
|
||||
}
|
||||
_, archOk := rule.KnownArchSet[tag]
|
||||
if archOk {
|
||||
archSpecific = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return osSpecific, archSpecific
|
||||
}
|
||||
|
||||
// matchesOS checks if a value is equal to either an OS value or to any of its
|
||||
// aliases.
|
||||
func matchesOS(os, value string) bool {
|
||||
if os == value {
|
||||
return true
|
||||
}
|
||||
for _, alias := range rule.OSAliases[os] {
|
||||
if alias == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkConstraints determines whether build constraints are satisfied on
|
||||
// a given platform.
|
||||
//
|
||||
// The first few arguments describe the platform. genericTags is the set
|
||||
// of build tags that are true on all platforms. os and arch are the platform
|
||||
// GOOS and GOARCH strings. If os or arch is empty, checkConstraints will
|
||||
// return false in the presence of OS and architecture constraints, even
|
||||
// if they are negated.
|
||||
//
|
||||
// The remaining arguments describe the file being tested. All of these may
|
||||
// be empty or nil. osSuffix and archSuffix are filename suffixes. fileTags
|
||||
// is a list tags from +build comments found near the top of the file. cgoTags
|
||||
// is an extra set of tags in a #cgo directive.
|
||||
func checkConstraints(c *config.Config, os, arch, osSuffix, archSuffix string, fileTags []tagLine, cgoTags tagLine) bool {
|
||||
if osSuffix != "" && !matchesOS(os, osSuffix) || archSuffix != "" && archSuffix != arch {
|
||||
return false
|
||||
}
|
||||
for _, l := range fileTags {
|
||||
if !l.check(c, os, arch) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if len(cgoTags) > 0 && !cgoTags.check(c, os, arch) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isIgnoredTag returns whether the tag is "cgo" or is a release tag.
// Release tags match the pattern "go[0-9]\.[0-9]+".
// Gazelle won't consider whether an ignored tag is satisfied when evaluating
// build constraints for a file.
func isIgnoredTag(tag string) bool {
	switch tag {
	case "cgo", "race", "msan":
		return true
	}
	// Release tags: "go" + digit + "." + one or more digits.
	if !strings.HasPrefix(tag, "go") || len(tag) < 5 {
		return false
	}
	if tag[2] < '0' || tag[2] > '9' || tag[3] != '.' {
		return false
	}
	for _, r := range tag[4:] {
		if r < '0' || r > '9' {
			return false
		}
	}
	return true
}
|
||||
|
||||
// protoFileInfo extracts metadata from a proto file. The proto extension
|
||||
// already "parses" these and stores metadata in proto.FileInfo, so this is
|
||||
// just processing relevant options.
|
||||
func protoFileInfo(path_ string, protoInfo proto.FileInfo) fileInfo {
|
||||
info := fileNameInfo(path_)
|
||||
|
||||
// Look for "option go_package". If there's no / in the package option, then
|
||||
// it's just a simple package name, not a full import path.
|
||||
for _, opt := range protoInfo.Options {
|
||||
if opt.Key != "go_package" {
|
||||
continue
|
||||
}
|
||||
if strings.LastIndexByte(opt.Value, '/') == -1 {
|
||||
info.packageName = opt.Value
|
||||
} else {
|
||||
if i := strings.LastIndexByte(opt.Value, ';'); i != -1 {
|
||||
info.importPath = opt.Value[:i]
|
||||
info.packageName = opt.Value[i+1:]
|
||||
} else {
|
||||
info.importPath = opt.Value
|
||||
info.packageName = path.Base(opt.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set the Go package name from the proto package name if there was no
|
||||
// option go_package.
|
||||
if info.packageName == "" && protoInfo.PackageName != "" {
|
||||
info.packageName = strings.Replace(protoInfo.PackageName, ".", "_", -1)
|
||||
}
|
||||
|
||||
info.imports = protoInfo.Imports
|
||||
info.hasServices = protoInfo.HasServices
|
||||
return info
|
||||
}
|
||||
|
|
@ -1,253 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/language/proto"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
bzl "github.com/bazelbuild/buildtools/build"
|
||||
)
|
||||
|
||||
// Fix repairs deprecated usage in a build file by applying each migration
// in sequence. Earlier migrations may produce rules and attributes that
// later migrations inspect, so the order below is significant.
func (_ *goLang) Fix(c *config.Config, f *rule.File) {
	migrateLibraryEmbed(c, f)
	migrateGrpcCompilers(c, f)
	flattenSrcs(c, f)
	squashCgoLibrary(c, f)
	squashXtest(c, f)
	removeLegacyProto(c, f)
	removeLegacyGazelle(c, f)
}
|
||||
|
||||
// migrateLibraryEmbed converts "library" attributes to "embed" attributes,
|
||||
// preserving comments. This only applies to Go rules, and only if there is
|
||||
// no keep comment on "library" and no existing "embed" attribute.
|
||||
func migrateLibraryEmbed(c *config.Config, f *rule.File) {
|
||||
for _, r := range f.Rules {
|
||||
if !isGoRule(r.Kind()) {
|
||||
continue
|
||||
}
|
||||
libExpr := r.Attr("library")
|
||||
if libExpr == nil || rule.ShouldKeep(libExpr) || r.Attr("embed") != nil {
|
||||
continue
|
||||
}
|
||||
r.DelAttr("library")
|
||||
r.SetAttr("embed", &bzl.ListExpr{List: []bzl.Expr{libExpr}})
|
||||
}
|
||||
}
|
||||
|
||||
// migrateGrpcCompilers converts "go_grpc_library" rules into "go_proto_library"
|
||||
// rules with a "compilers" attribute.
|
||||
func migrateGrpcCompilers(c *config.Config, f *rule.File) {
|
||||
for _, r := range f.Rules {
|
||||
if r.Kind() != "go_grpc_library" || r.ShouldKeep() || r.Attr("compilers") != nil {
|
||||
continue
|
||||
}
|
||||
r.SetKind("go_proto_library")
|
||||
r.SetAttr("compilers", []string{grpcCompilerLabel})
|
||||
}
|
||||
}
|
||||
|
||||
// squashCgoLibrary removes cgo_library rules with the default name and
// merges their attributes with go_library with the default name. If no
// go_library rule exists, a new one will be created.
//
// Note that the library attribute is disregarded, so cgo_library and
// go_library attributes will be squashed even if the cgo_library was unlinked.
// MergeFile will remove unused values and attributes later.
func squashCgoLibrary(c *config.Config, f *rule.File) {
	// Find the default cgo_library and go_library rules.
	var cgoLibrary, goLibrary *rule.Rule
	for _, r := range f.Rules {
		if r.Kind() == "cgo_library" && r.Name() == "cgo_default_library" && !r.ShouldKeep() {
			if cgoLibrary != nil {
				// Only the first match is squashed; duplicates are reported.
				log.Printf("%s: when fixing existing file, multiple cgo_library rules with default name found", f.Path)
				continue
			}
			cgoLibrary = r
			continue
		}
		if r.Kind() == "go_library" && r.Name() == defaultLibName {
			if goLibrary != nil {
				log.Printf("%s: when fixing existing file, multiple go_library rules with default name referencing cgo_library found", f.Path)
			}
			goLibrary = r
			continue
		}
	}

	// Nothing to do if there is no cgo_library.
	if cgoLibrary == nil {
		return
	}
	// Without -fix, only warn; do not modify the file.
	if !c.ShouldFix {
		log.Printf("%s: cgo_library is deprecated. Run 'gazelle fix' to squash with go_library.", f.Path)
		return
	}

	// No go_library to merge into: convert the cgo_library in place.
	if goLibrary == nil {
		cgoLibrary.SetKind("go_library")
		cgoLibrary.SetName(defaultLibName)
		cgoLibrary.SetAttr("cgo", true)
		return
	}

	// Merge cgo_library attributes into go_library, then delete it.
	if err := rule.SquashRules(cgoLibrary, goLibrary, f.Path); err != nil {
		log.Print(err)
		return
	}
	goLibrary.DelAttr("embed")
	goLibrary.SetAttr("cgo", true)
	cgoLibrary.Delete()
}
|
||||
|
||||
// squashXtest removes go_test rules with the default external name and merges
// their attributes with a go_test rule with the default internal name. If
// no internal go_test rule exists, a new one will be created (effectively
// renaming the old rule).
func squashXtest(c *config.Config, f *rule.File) {
	// Search for internal and external tests.
	var itest, xtest *rule.Rule
	for _, r := range f.Rules {
		if r.Kind() != "go_test" {
			continue
		}
		if r.Name() == defaultTestName {
			itest = r
		} else if r.Name() == "go_default_xtest" {
			xtest = r
		}
	}

	// Respect keep comments on either rule; do nothing without an xtest.
	if xtest == nil || xtest.ShouldKeep() || (itest != nil && itest.ShouldKeep()) {
		return
	}
	// Without -fix, only warn; do not modify the file.
	if !c.ShouldFix {
		if itest == nil {
			log.Printf("%s: go_default_xtest is no longer necessary. Run 'gazelle fix' to rename to go_default_test.", f.Path)
		} else {
			log.Printf("%s: go_default_xtest is no longer necessary. Run 'gazelle fix' to squash with go_default_test.", f.Path)
		}
		return
	}

	// If there was no internal test, we can just rename the external test.
	if itest == nil {
		xtest.SetName(defaultTestName)
		return
	}

	// Attempt to squash.
	if err := rule.SquashRules(xtest, itest, f.Path); err != nil {
		log.Print(err)
		return
	}
	xtest.Delete()
}
|
||||
|
||||
// flattenSrcs transforms srcs attributes structured as concatenations of
|
||||
// lists and selects (generated from PlatformStrings; see
|
||||
// extractPlatformStringsExprs for matching details) into a sorted,
|
||||
// de-duplicated list. Comments are accumulated and de-duplicated across
|
||||
// duplicate expressions.
|
||||
func flattenSrcs(c *config.Config, f *rule.File) {
|
||||
for _, r := range f.Rules {
|
||||
if !isGoRule(r.Kind()) {
|
||||
continue
|
||||
}
|
||||
oldSrcs := r.Attr("srcs")
|
||||
if oldSrcs == nil {
|
||||
continue
|
||||
}
|
||||
flatSrcs := rule.FlattenExpr(oldSrcs)
|
||||
if flatSrcs != oldSrcs {
|
||||
r.SetAttr("srcs", flatSrcs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// removeLegacyProto removes uses of the old proto rules. It deletes loads
// from go_proto_library.bzl. It deletes proto filegroups. It removes
// go_proto_library attributes which are no longer recognized. New rules
// are generated in place of the deleted rules, but attributes and comments
// are not migrated.
func removeLegacyProto(c *config.Config, f *rule.File) {
	// Don't fix if the proto mode was set to something other than the default.
	if pcMode := getProtoMode(c); pcMode != proto.DefaultMode {
		return
	}

	// Scan for definitions to delete.
	var protoLoads []*rule.Load
	for _, l := range f.Loads {
		if l.Name() == "@io_bazel_rules_go//proto:go_proto_library.bzl" {
			protoLoads = append(protoLoads, l)
		}
	}
	var protoFilegroups, protoRules []*rule.Rule
	for _, r := range f.Rules {
		if r.Kind() == "filegroup" && r.Name() == legacyProtoFilegroupName {
			protoFilegroups = append(protoFilegroups, r)
		}
		if r.Kind() == "go_proto_library" {
			protoRules = append(protoRules, r)
		}
	}
	// Nothing legacy found (go_proto_library alone is not enough).
	if len(protoLoads)+len(protoFilegroups) == 0 {
		return
	}
	// Without -fix, only warn; do not modify the file.
	if !c.ShouldFix {
		log.Printf("%s: go_proto_library.bzl is deprecated. Run 'gazelle fix' to replace old rules.", f.Path)
		return
	}

	// Delete legacy proto loads and filegroups. Only delete go_proto_library
	// rules if we deleted a load.
	for _, l := range protoLoads {
		l.Delete()
	}
	for _, r := range protoFilegroups {
		r.Delete()
	}
	if len(protoLoads) > 0 {
		for _, r := range protoRules {
			r.Delete()
		}
	}
}
|
||||
|
||||
// removeLegacyGazelle removes loads of the "gazelle" macro from
|
||||
// @io_bazel_rules_go//go:def.bzl. The definition has moved to
|
||||
// @bazel_gazelle//:def.bzl, and the old one will be deleted soon.
|
||||
func removeLegacyGazelle(c *config.Config, f *rule.File) {
|
||||
for _, l := range f.Loads {
|
||||
if l.Name() == "@io_bazel_rules_go//go:def.bzl" && l.Has("gazelle") {
|
||||
l.Remove("gazelle")
|
||||
if l.IsEmpty() {
|
||||
l.Delete()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// isGoRule reports whether kind names one of the Go rule kinds handled by
// the fix migrations above.
func isGoRule(kind string) bool {
	switch kind {
	case "go_library", "go_binary", "go_test", "go_proto_library", "go_grpc_library":
		return true
	default:
		return false
	}
}
|
||||
|
|
@ -1,638 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/build"
|
||||
"log"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/language"
|
||||
"github.com/bazelbuild/bazel-gazelle/language/proto"
|
||||
"github.com/bazelbuild/bazel-gazelle/pathtools"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// GenerateRules builds Go and go_proto_library rules for the directory
// described by args, combining metadata from Go sources, static and
// generated files, and proto_library rules produced by the proto extension.
// Rules that turn out empty are reported in the result's Empty list so they
// can be deleted; generated rules carry their imports for later resolution.
func (gl *goLang) GenerateRules(args language.GenerateArgs) language.GenerateResult {
	// Extract information about proto files. We need this to exclude .pb.go
	// files and generate go_proto_library rules.
	c := args.Config
	gc := getGoConfig(c)
	pcMode := getProtoMode(c)

	// This is a collection of proto_library rule names that have a corresponding
	// go_proto_library rule already generated.
	goProtoRules := make(map[string]struct{})

	var protoRuleNames []string
	protoPackages := make(map[string]proto.Package)
	protoFileInfo := make(map[string]proto.FileInfo)
	for _, r := range args.OtherGen {
		if r.Kind() == "go_proto_library" {
			// Record protos already claimed via "proto" or "protos".
			if proto := r.AttrString("proto"); proto != "" {
				goProtoRules[proto] = struct{}{}
			}
			if protos := r.AttrStrings("protos"); protos != nil {
				for _, proto := range protos {
					goProtoRules[proto] = struct{}{}
				}
			}

		}
		if r.Kind() != "proto_library" {
			continue
		}
		pkg := r.PrivateAttr(proto.PackageKey).(proto.Package)
		protoPackages[r.Name()] = pkg
		for name, info := range pkg.Files {
			protoFileInfo[name] = info
		}
		protoRuleNames = append(protoRuleNames, r.Name())
	}
	// Sort for deterministic rule generation order.
	sort.Strings(protoRuleNames)
	var emptyProtoRuleNames []string
	for _, r := range args.OtherEmpty {
		if r.Kind() == "proto_library" {
			emptyProtoRuleNames = append(emptyProtoRuleNames, r.Name())
		}
	}

	// If proto rule generation is enabled, exclude .pb.go files that correspond
	// to any .proto files present.
	regularFiles := append([]string{}, args.RegularFiles...)
	genFiles := append([]string{}, args.GenFiles...)
	if !pcMode.ShouldIncludePregeneratedFiles() {
		keep := func(f string) bool {
			if strings.HasSuffix(f, ".pb.go") {
				_, ok := protoFileInfo[strings.TrimSuffix(f, ".pb.go")+".proto"]
				return !ok
			}
			return true
		}
		filterFiles(&regularFiles, keep)
		filterFiles(&genFiles, keep)
	}

	// Split regular files into files which can determine the package name and
	// import path and other files.
	var goFiles, otherFiles []string
	for _, f := range regularFiles {
		if strings.HasSuffix(f, ".go") {
			goFiles = append(goFiles, f)
		} else {
			otherFiles = append(otherFiles, f)
		}
	}

	// Look for a subdirectory named testdata. Only treat it as data if it does
	// not contain a buildable package.
	var hasTestdata bool
	for _, sub := range args.Subdirs {
		if sub == "testdata" {
			hasTestdata = !gl.goPkgRels[path.Join(args.Rel, "testdata")]
			break
		}
	}

	// Build a set of packages from files in this directory.
	goPackageMap, goFilesWithUnknownPackage := buildPackages(c, args.Dir, args.Rel, goFiles, hasTestdata)

	// Select a package to generate rules for. If there is no package, create
	// an empty package so we can generate empty rules.
	var protoName string
	pkg, err := selectPackage(c, args.Dir, goPackageMap)
	if err != nil {
		if _, ok := err.(*build.NoGoError); ok {
			if len(protoPackages) == 1 {
				// Exactly one proto package and no Go files: derive a Go
				// package from the proto package unless a go_proto_library
				// rule already claims it.
				for name, ppkg := range protoPackages {
					if _, ok := goProtoRules[":"+name]; ok {
						// if a go_proto_library rule already exists for this
						// proto package, treat it as if the proto package
						// doesn't exist.
						pkg = emptyPackage(c, args.Dir, args.Rel)
						break
					}
					pkg = &goPackage{
						name:       goProtoPackageName(ppkg),
						importPath: goProtoImportPath(gc, ppkg, args.Rel),
						proto:      protoTargetFromProtoPackage(name, ppkg),
					}
					protoName = name
					break
				}
			} else {
				pkg = emptyPackage(c, args.Dir, args.Rel)
			}
		} else {
			// Other selection errors (e.g. multiple packages) are logged;
			// pkg stays nil and no Go rules are generated below.
			log.Print(err)
		}
	}

	// Try to link the selected package with a proto package.
	if pkg != nil {
		if pkg.importPath == "" {
			if err := pkg.inferImportPath(c); err != nil && pkg.firstGoFile() != "" {
				// Log this class of error only once per process.
				inferImportPathErrorOnce.Do(func() { log.Print(err) })
			}
		}
		for _, name := range protoRuleNames {
			ppkg := protoPackages[name]
			if pkg.importPath == goProtoImportPath(gc, ppkg, args.Rel) {
				protoName = name
				pkg.proto = protoTargetFromProtoPackage(name, ppkg)
				break
			}
		}
	}

	// Generate rules for proto packages. These should come before the other
	// Go rules.
	g := &generator{
		c:                   c,
		rel:                 args.Rel,
		shouldSetVisibility: args.File == nil || !args.File.HasDefaultVisibility(),
	}
	var res language.GenerateResult
	var rules []*rule.Rule
	var protoEmbed string
	for _, name := range protoRuleNames {
		if _, ok := goProtoRules[":"+name]; ok {
			// if a go_proto_library rule exists for this proto_library rule
			// already, skip creating another go_proto_library for it, assuming
			// that a different gazelle extension is responsible for
			// go_proto_library rule generation.
			continue
		}
		ppkg := protoPackages[name]
		var rs []*rule.Rule
		if name == protoName {
			protoEmbed, rs = g.generateProto(pcMode, pkg.proto, pkg.importPath)
		} else {
			target := protoTargetFromProtoPackage(name, ppkg)
			importPath := goProtoImportPath(gc, ppkg, args.Rel)
			_, rs = g.generateProto(pcMode, target, importPath)
		}
		rules = append(rules, rs...)
	}
	for _, name := range emptyProtoRuleNames {
		// Mirror empty proto_library rules with empty go_proto_library rules
		// so stale ones get deleted.
		goProtoName := strings.TrimSuffix(name, "_proto") + "_go_proto"
		res.Empty = append(res.Empty, rule.NewRule("go_proto_library", goProtoName))
	}
	if pkg != nil && pcMode == proto.PackageMode && pkg.firstGoFile() == "" {
		// In proto package mode, don't generate a go_library embedding a
		// go_proto_library unless there are actually go files.
		protoEmbed = ""
	}

	// Complete the Go package and generate rules for that.
	if pkg != nil {
		// Add files with unknown packages. This happens when there are parse
		// or I/O errors. We should keep the file in the srcs list and let the
		// compiler deal with the error.
		cgo := pkg.haveCgo()
		for _, info := range goFilesWithUnknownPackage {
			if err := pkg.addFile(c, info, cgo); err != nil {
				log.Print(err)
			}
		}

		// Process the other static files.
		for _, file := range otherFiles {
			info := otherFileInfo(filepath.Join(args.Dir, file))
			if err := pkg.addFile(c, info, cgo); err != nil {
				log.Print(err)
			}
		}

		// Process generated files. Note that generated files may have the same names
		// as static files. Bazel will use the generated files, but we will look at
		// the content of static files, assuming they will be the same.
		regularFileSet := make(map[string]bool)
		for _, f := range regularFiles {
			regularFileSet[f] = true
		}
		// Some of the generated files may have been consumed by other rules
		consumedFileSet := make(map[string]bool)
		for _, r := range args.OtherGen {
			for _, f := range r.AttrStrings("srcs") {
				consumedFileSet[f] = true
			}
			if f := r.AttrString("src"); f != "" {
				consumedFileSet[f] = true
			}
		}
		for _, f := range genFiles {
			if regularFileSet[f] || consumedFileSet[f] {
				continue
			}
			info := fileNameInfo(filepath.Join(args.Dir, f))
			if err := pkg.addFile(c, info, cgo); err != nil {
				log.Print(err)
			}
		}

		// Generate Go rules.
		if protoName == "" {
			// Empty proto rules for deletion.
			_, rs := g.generateProto(pcMode, pkg.proto, pkg.importPath)
			rules = append(rules, rs...)
		}
		lib := g.generateLib(pkg, protoEmbed)
		var libName string
		if !lib.IsEmpty(goKinds[lib.Kind()]) {
			libName = lib.Name()
		}
		rules = append(rules, lib)
		rules = append(rules,
			g.generateBin(pkg, libName),
			g.generateTest(pkg, libName))
	}

	// Partition the generated rules: empty rules are candidates for deletion;
	// non-empty rules carry their imports for the resolve phase.
	for _, r := range rules {
		if r.IsEmpty(goKinds[r.Kind()]) {
			res.Empty = append(res.Empty, r)
		} else {
			res.Gen = append(res.Gen, r)
			res.Imports = append(res.Imports, r.PrivateAttr(config.GazelleImportsKey))
		}
	}

	// Record whether this directory (or any subdirectory) contains a Go
	// package; used above to decide how testdata directories are treated.
	if args.File != nil || len(res.Gen) > 0 {
		gl.goPkgRels[args.Rel] = true
	} else {
		for _, sub := range args.Subdirs {
			if gl.goPkgRels[path.Join(args.Rel, sub)] {
				gl.goPkgRels[args.Rel] = true
				break
			}
		}
	}

	return res
}
|
||||
|
||||
// filterFiles removes, in place, every element of *files for which pred
// returns false, preserving the relative order of the kept elements.
func filterFiles(files *[]string, pred func(string) bool) {
	// Reuse the backing array: write kept entries over the front of the slice.
	kept := (*files)[:0]
	for _, f := range *files {
		if pred(f) {
			kept = append(kept, f)
		}
	}
	*files = kept
}
|
||||
|
||||
// buildPackages groups the given .go files into goPackage values keyed by
// package name. Files whose package name could not be determined (e.g. due
// to parse or I/O errors in goFileInfo) are returned separately in
// goFilesWithUnknownPackage so the caller can still include them in srcs.
func buildPackages(c *config.Config, dir, rel string, goFiles []string, hasTestdata bool) (packageMap map[string]*goPackage, goFilesWithUnknownPackage []fileInfo) {
	// Process .go and .proto files first, since these determine the package name.
	packageMap = make(map[string]*goPackage)
	for _, f := range goFiles {
		path := filepath.Join(dir, f)
		info := goFileInfo(path, rel)
		if info.packageName == "" {
			goFilesWithUnknownPackage = append(goFilesWithUnknownPackage, info)
			continue
		}
		if info.packageName == "documentation" {
			// go/build ignores this package
			continue
		}

		// Lazily create one goPackage per distinct package name.
		if _, ok := packageMap[info.packageName]; !ok {
			packageMap[info.packageName] = &goPackage{
				name:        info.packageName,
				dir:         dir,
				rel:         rel,
				hasTestdata: hasTestdata,
			}
		}
		if err := packageMap[info.packageName].addFile(c, info, false); err != nil {
			log.Print(err)
		}
	}
	return packageMap, goFilesWithUnknownPackage
}
|
||||
|
||||
var inferImportPathErrorOnce sync.Once
|
||||
|
||||
// selectPackages selects one Go packages out of the buildable packages found
|
||||
// in a directory. If multiple packages are found, it returns the package
|
||||
// whose name matches the directory if such a package exists.
|
||||
func selectPackage(c *config.Config, dir string, packageMap map[string]*goPackage) (*goPackage, error) {
|
||||
buildablePackages := make(map[string]*goPackage)
|
||||
for name, pkg := range packageMap {
|
||||
if pkg.isBuildable(c) {
|
||||
buildablePackages[name] = pkg
|
||||
}
|
||||
}
|
||||
|
||||
if len(buildablePackages) == 0 {
|
||||
return nil, &build.NoGoError{Dir: dir}
|
||||
}
|
||||
|
||||
if len(buildablePackages) == 1 {
|
||||
for _, pkg := range buildablePackages {
|
||||
return pkg, nil
|
||||
}
|
||||
}
|
||||
|
||||
if pkg, ok := buildablePackages[defaultPackageName(c, dir)]; ok {
|
||||
return pkg, nil
|
||||
}
|
||||
|
||||
err := &build.MultiplePackageError{Dir: dir}
|
||||
for name, pkg := range buildablePackages {
|
||||
// Add the first file for each package for the error message.
|
||||
// Error() method expects these lists to be the same length. File
|
||||
// lists must be non-empty. These lists are only created by
|
||||
// buildPackage for packages with .go files present.
|
||||
err.Packages = append(err.Packages, name)
|
||||
err.Files = append(err.Files, pkg.firstGoFile())
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func emptyPackage(c *config.Config, dir, rel string) *goPackage {
|
||||
pkg := &goPackage{
|
||||
name: defaultPackageName(c, dir),
|
||||
dir: dir,
|
||||
rel: rel,
|
||||
}
|
||||
pkg.inferImportPath(c)
|
||||
return pkg
|
||||
}
|
||||
|
||||
func defaultPackageName(c *config.Config, rel string) string {
|
||||
gc := getGoConfig(c)
|
||||
return pathtools.RelBaseName(rel, gc.prefix, "")
|
||||
}
|
||||
|
||||
type generator struct {
|
||||
c *config.Config
|
||||
rel string
|
||||
shouldSetVisibility bool
|
||||
}
|
||||
|
||||
func (g *generator) generateProto(mode proto.Mode, target protoTarget, importPath string) (string, []*rule.Rule) {
|
||||
if !mode.ShouldGenerateRules() && mode != proto.LegacyMode {
|
||||
// Don't create or delete proto rules in this mode. Any existing rules
|
||||
// are likely hand-written.
|
||||
return "", nil
|
||||
}
|
||||
|
||||
gc := getGoConfig(g.c)
|
||||
filegroupName := legacyProtoFilegroupName
|
||||
protoName := target.name
|
||||
if protoName == "" {
|
||||
importPath := inferImportPath(gc, g.rel)
|
||||
protoName = proto.RuleName(importPath)
|
||||
}
|
||||
goProtoName := strings.TrimSuffix(protoName, "_proto") + "_go_proto"
|
||||
visibility := g.commonVisibility(importPath)
|
||||
|
||||
if mode == proto.LegacyMode {
|
||||
filegroup := rule.NewRule("filegroup", filegroupName)
|
||||
if target.sources.isEmpty() {
|
||||
return "", []*rule.Rule{filegroup}
|
||||
}
|
||||
filegroup.SetAttr("srcs", target.sources.build())
|
||||
if g.shouldSetVisibility {
|
||||
filegroup.SetAttr("visibility", visibility)
|
||||
}
|
||||
return "", []*rule.Rule{filegroup}
|
||||
}
|
||||
|
||||
if target.sources.isEmpty() {
|
||||
return "", []*rule.Rule{
|
||||
rule.NewRule("filegroup", filegroupName),
|
||||
rule.NewRule("go_proto_library", goProtoName),
|
||||
}
|
||||
}
|
||||
|
||||
goProtoLibrary := rule.NewRule("go_proto_library", goProtoName)
|
||||
goProtoLibrary.SetAttr("proto", ":"+protoName)
|
||||
g.setImportAttrs(goProtoLibrary, importPath)
|
||||
if target.hasServices {
|
||||
goProtoLibrary.SetAttr("compilers", gc.goGrpcCompilers)
|
||||
} else if gc.goProtoCompilersSet {
|
||||
goProtoLibrary.SetAttr("compilers", gc.goProtoCompilers)
|
||||
}
|
||||
if g.shouldSetVisibility {
|
||||
goProtoLibrary.SetAttr("visibility", visibility)
|
||||
}
|
||||
goProtoLibrary.SetPrivateAttr(config.GazelleImportsKey, target.imports.build())
|
||||
return goProtoName, []*rule.Rule{goProtoLibrary}
|
||||
}
|
||||
|
||||
func (g *generator) generateLib(pkg *goPackage, embed string) *rule.Rule {
|
||||
goLibrary := rule.NewRule("go_library", defaultLibName)
|
||||
if !pkg.library.sources.hasGo() && embed == "" {
|
||||
return goLibrary // empty
|
||||
}
|
||||
var visibility []string
|
||||
if pkg.isCommand() {
|
||||
// Libraries made for a go_binary should not be exposed to the public.
|
||||
visibility = []string{"//visibility:private"}
|
||||
} else {
|
||||
visibility = g.commonVisibility(pkg.importPath)
|
||||
}
|
||||
g.setCommonAttrs(goLibrary, pkg.rel, visibility, pkg.library, embed)
|
||||
g.setImportAttrs(goLibrary, pkg.importPath)
|
||||
return goLibrary
|
||||
}
|
||||
|
||||
func (g *generator) generateBin(pkg *goPackage, library string) *rule.Rule {
|
||||
name := pathtools.RelBaseName(pkg.rel, getGoConfig(g.c).prefix, g.c.RepoRoot)
|
||||
goBinary := rule.NewRule("go_binary", name)
|
||||
if !pkg.isCommand() || pkg.binary.sources.isEmpty() && library == "" {
|
||||
return goBinary // empty
|
||||
}
|
||||
visibility := g.commonVisibility(pkg.importPath)
|
||||
g.setCommonAttrs(goBinary, pkg.rel, visibility, pkg.binary, library)
|
||||
return goBinary
|
||||
}
|
||||
|
||||
func (g *generator) generateTest(pkg *goPackage, library string) *rule.Rule {
|
||||
goTest := rule.NewRule("go_test", defaultTestName)
|
||||
if !pkg.test.sources.hasGo() {
|
||||
return goTest // empty
|
||||
}
|
||||
g.setCommonAttrs(goTest, pkg.rel, nil, pkg.test, library)
|
||||
if pkg.hasTestdata {
|
||||
goTest.SetAttr("data", rule.GlobValue{Patterns: []string{"testdata/**"}})
|
||||
}
|
||||
return goTest
|
||||
}
|
||||
|
||||
func (g *generator) setCommonAttrs(r *rule.Rule, pkgRel string, visibility []string, target goTarget, embed string) {
|
||||
if !target.sources.isEmpty() {
|
||||
r.SetAttr("srcs", target.sources.buildFlat())
|
||||
}
|
||||
if target.cgo {
|
||||
r.SetAttr("cgo", true)
|
||||
}
|
||||
if !target.clinkopts.isEmpty() {
|
||||
r.SetAttr("clinkopts", g.options(target.clinkopts.build(), pkgRel))
|
||||
}
|
||||
if !target.copts.isEmpty() {
|
||||
r.SetAttr("copts", g.options(target.copts.build(), pkgRel))
|
||||
}
|
||||
if g.shouldSetVisibility && len(visibility) > 0 {
|
||||
r.SetAttr("visibility", visibility)
|
||||
}
|
||||
if embed != "" {
|
||||
r.SetAttr("embed", []string{":" + embed})
|
||||
}
|
||||
r.SetPrivateAttr(config.GazelleImportsKey, target.imports.build())
|
||||
}
|
||||
|
||||
func (g *generator) setImportAttrs(r *rule.Rule, importPath string) {
|
||||
gc := getGoConfig(g.c)
|
||||
r.SetAttr("importpath", importPath)
|
||||
|
||||
// Set importpath_aliases if we need minimal module compatibility.
|
||||
// If a package is part of a module with a v2+ semantic import version
|
||||
// suffix, packages that are not part of modules may import it without
|
||||
// the suffix.
|
||||
if gc.goRepositoryMode && gc.moduleMode && pathtools.HasPrefix(importPath, gc.prefix) && gc.prefixRel == "" {
|
||||
if mmcImportPath := pathWithoutSemver(importPath); mmcImportPath != "" {
|
||||
r.SetAttr("importpath_aliases", []string{mmcImportPath})
|
||||
}
|
||||
}
|
||||
|
||||
if gc.importMapPrefix != "" {
|
||||
fromPrefixRel := pathtools.TrimPrefix(g.rel, gc.importMapPrefixRel)
|
||||
importMap := path.Join(gc.importMapPrefix, fromPrefixRel)
|
||||
if importMap != importPath {
|
||||
r.SetAttr("importmap", importMap)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *generator) commonVisibility(importPath string) []string {
|
||||
// If the Bazel package name (rel) contains "internal", add visibility for
|
||||
// subpackages of the parent.
|
||||
// If the import path contains "internal" but rel does not, this is
|
||||
// probably an internal submodule. Add visibility for all subpackages.
|
||||
relIndex := pathtools.Index(g.rel, "internal")
|
||||
importIndex := pathtools.Index(importPath, "internal")
|
||||
visibility := getGoConfig(g.c).goVisibility
|
||||
if relIndex >= 0 {
|
||||
parent := strings.TrimSuffix(g.rel[:relIndex], "/")
|
||||
visibility = append(visibility, fmt.Sprintf("//%s:__subpackages__", parent))
|
||||
} else if importIndex >= 0 {
|
||||
visibility = append(visibility, "//:__subpackages__")
|
||||
} else {
|
||||
return []string{"//visibility:public"}
|
||||
}
|
||||
|
||||
// Add visibility for any submodules that have the internal parent as
|
||||
// a prefix of their module path.
|
||||
if importIndex >= 0 {
|
||||
gc := getGoConfig(g.c)
|
||||
internalRoot := strings.TrimSuffix(importPath[:importIndex], "/")
|
||||
for _, m := range gc.submodules {
|
||||
if strings.HasPrefix(m.modulePath, internalRoot) {
|
||||
visibility = append(visibility, fmt.Sprintf("@%s//:__subpackages__", m.repoName))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return visibility
|
||||
}
|
||||
|
||||
var (
|
||||
// shortOptPrefixes are strings that come at the beginning of an option
|
||||
// argument that includes a path, e.g., -Ifoo/bar.
|
||||
shortOptPrefixes = []string{"-I", "-L", "-F"}
|
||||
|
||||
// longOptPrefixes are separate arguments that come before a path argument,
|
||||
// e.g., -iquote foo/bar.
|
||||
longOptPrefixes = []string{"-I", "-L", "-F", "-iquote", "-isystem"}
|
||||
)
|
||||
|
||||
// options transforms package-relative paths in cgo options into repository-
|
||||
// root-relative paths that Bazel can understand. For example, if a cgo file
|
||||
// in //foo declares an include flag in its copts: "-Ibar", this method
|
||||
// will transform that flag into "-Ifoo/bar".
|
||||
func (g *generator) options(opts rule.PlatformStrings, pkgRel string) rule.PlatformStrings {
|
||||
fixPath := func(opt string) string {
|
||||
if strings.HasPrefix(opt, "/") {
|
||||
return opt
|
||||
}
|
||||
return path.Clean(path.Join(pkgRel, opt))
|
||||
}
|
||||
|
||||
fixGroups := func(groups []string) ([]string, error) {
|
||||
fixedGroups := make([]string, len(groups))
|
||||
for i, group := range groups {
|
||||
opts := strings.Split(group, optSeparator)
|
||||
fixedOpts := make([]string, len(opts))
|
||||
isPath := false
|
||||
for j, opt := range opts {
|
||||
if isPath {
|
||||
opt = fixPath(opt)
|
||||
isPath = false
|
||||
goto next
|
||||
}
|
||||
|
||||
for _, short := range shortOptPrefixes {
|
||||
if strings.HasPrefix(opt, short) && len(opt) > len(short) {
|
||||
opt = short + fixPath(opt[len(short):])
|
||||
goto next
|
||||
}
|
||||
}
|
||||
|
||||
for _, long := range longOptPrefixes {
|
||||
if opt == long {
|
||||
isPath = true
|
||||
goto next
|
||||
}
|
||||
}
|
||||
|
||||
next:
|
||||
fixedOpts[j] = escapeOption(opt)
|
||||
}
|
||||
fixedGroups[i] = strings.Join(fixedOpts, " ")
|
||||
}
|
||||
|
||||
return fixedGroups, nil
|
||||
}
|
||||
|
||||
opts, errs := opts.MapSlice(fixGroups)
|
||||
if errs != nil {
|
||||
log.Panicf("unexpected error when transforming options with pkg %q: %v", pkgRel, errs)
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
func escapeOption(opt string) string {
|
||||
return strings.NewReplacer(
|
||||
`\`, `\\`,
|
||||
`'`, `\'`,
|
||||
`"`, `\"`,
|
||||
` `, `\ `,
|
||||
"\t", "\\\t",
|
||||
"\n", "\\\n",
|
||||
"\r", "\\\r",
|
||||
).Replace(opt)
|
||||
}
|
||||
|
|
@ -1,88 +0,0 @@
|
|||
/* Copyright 2019 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/label"
|
||||
"github.com/bazelbuild/bazel-gazelle/language"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type goDepLockFile struct {
|
||||
ImportPath string
|
||||
GoVersion string
|
||||
GodepVersion string
|
||||
Packages []string
|
||||
Deps []goDepProject
|
||||
}
|
||||
|
||||
type goDepProject struct {
|
||||
ImportPath string
|
||||
Rev string
|
||||
}
|
||||
|
||||
func importReposFromGodep(args language.ImportReposArgs) language.ImportReposResult {
|
||||
data, err := ioutil.ReadFile(args.Path)
|
||||
if err != nil {
|
||||
return language.ImportReposResult{Error: err}
|
||||
}
|
||||
|
||||
file := goDepLockFile{}
|
||||
if err := json.Unmarshal(data, &file); err != nil {
|
||||
return language.ImportReposResult{Error: err}
|
||||
}
|
||||
|
||||
var eg errgroup.Group
|
||||
roots := make([]string, len(file.Deps))
|
||||
for i := range file.Deps {
|
||||
i := i
|
||||
eg.Go(func() error {
|
||||
p := file.Deps[i]
|
||||
repoRoot, _, err := args.Cache.Root(p.ImportPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
roots[i] = repoRoot
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := eg.Wait(); err != nil {
|
||||
return language.ImportReposResult{Error: err}
|
||||
}
|
||||
|
||||
gen := make([]*rule.Rule, 0, len(file.Deps))
|
||||
repoToRev := make(map[string]string)
|
||||
for i, p := range file.Deps {
|
||||
repoRoot := roots[i]
|
||||
if rev, ok := repoToRev[repoRoot]; !ok {
|
||||
r := rule.NewRule("go_repository", label.ImportPathToBazelRepoName(repoRoot))
|
||||
r.SetAttr("importpath", repoRoot)
|
||||
r.SetAttr("commit", p.Rev)
|
||||
repoToRev[repoRoot] = p.Rev
|
||||
gen = append(gen, r)
|
||||
} else {
|
||||
if p.Rev != rev {
|
||||
return language.ImportReposResult{Error: fmt.Errorf("repo %s imported at multiple revisions: %s, %s", repoRoot, p.Rev, rev)}
|
||||
}
|
||||
}
|
||||
}
|
||||
return language.ImportReposResult{Gen: gen}
|
||||
}
|
||||
|
|
@ -1,153 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import "github.com/bazelbuild/bazel-gazelle/rule"
|
||||
|
||||
var goKinds = map[string]rule.KindInfo{
|
||||
"filegroup": {
|
||||
NonEmptyAttrs: map[string]bool{"srcs": true},
|
||||
MergeableAttrs: map[string]bool{"srcs": true},
|
||||
},
|
||||
"go_binary": {
|
||||
MatchAny: true,
|
||||
NonEmptyAttrs: map[string]bool{
|
||||
"deps": true,
|
||||
"embed": true,
|
||||
"srcs": true,
|
||||
},
|
||||
SubstituteAttrs: map[string]bool{"embed": true},
|
||||
MergeableAttrs: map[string]bool{
|
||||
"cgo": true,
|
||||
"clinkopts": true,
|
||||
"copts": true,
|
||||
"embed": true,
|
||||
"srcs": true,
|
||||
},
|
||||
ResolveAttrs: map[string]bool{"deps": true},
|
||||
},
|
||||
"go_library": {
|
||||
MatchAttrs: []string{"importpath"},
|
||||
NonEmptyAttrs: map[string]bool{
|
||||
"deps": true,
|
||||
"embed": true,
|
||||
"srcs": true,
|
||||
},
|
||||
SubstituteAttrs: map[string]bool{
|
||||
"embed": true,
|
||||
},
|
||||
MergeableAttrs: map[string]bool{
|
||||
"cgo": true,
|
||||
"clinkopts": true,
|
||||
"copts": true,
|
||||
"embed": true,
|
||||
"importmap": true,
|
||||
"importpath": true,
|
||||
"srcs": true,
|
||||
},
|
||||
ResolveAttrs: map[string]bool{"deps": true},
|
||||
},
|
||||
"go_proto_library": {
|
||||
MatchAttrs: []string{"importpath"},
|
||||
NonEmptyAttrs: map[string]bool{
|
||||
"deps": true,
|
||||
"embed": true,
|
||||
"proto": true,
|
||||
"srcs": true,
|
||||
},
|
||||
SubstituteAttrs: map[string]bool{"proto": true},
|
||||
MergeableAttrs: map[string]bool{
|
||||
"srcs": true,
|
||||
"importpath": true,
|
||||
"importmap": true,
|
||||
"cgo": true,
|
||||
"clinkopts": true,
|
||||
"copts": true,
|
||||
"embed": true,
|
||||
"proto": true,
|
||||
"compilers": true,
|
||||
},
|
||||
ResolveAttrs: map[string]bool{"deps": true},
|
||||
},
|
||||
"go_repository": {
|
||||
MatchAttrs: []string{"importpath"},
|
||||
NonEmptyAttrs: map[string]bool{
|
||||
"importpath": true,
|
||||
},
|
||||
MergeableAttrs: map[string]bool{
|
||||
"commit": true,
|
||||
"importpath": true,
|
||||
"remote": true,
|
||||
"replace": true,
|
||||
"sha256": true,
|
||||
"strip_prefix": true,
|
||||
"sum": true,
|
||||
"tag": true,
|
||||
"type": true,
|
||||
"urls": true,
|
||||
"vcs": true,
|
||||
"version": true,
|
||||
},
|
||||
},
|
||||
"go_test": {
|
||||
NonEmptyAttrs: map[string]bool{
|
||||
"deps": true,
|
||||
"embed": true,
|
||||
"srcs": true,
|
||||
},
|
||||
MergeableAttrs: map[string]bool{
|
||||
"cgo": true,
|
||||
"clinkopts": true,
|
||||
"copts": true,
|
||||
"embed": true,
|
||||
"srcs": true,
|
||||
},
|
||||
ResolveAttrs: map[string]bool{"deps": true},
|
||||
},
|
||||
}
|
||||
|
||||
var goLoads = []rule.LoadInfo{
|
||||
{
|
||||
Name: "@io_bazel_rules_go//go:def.bzl",
|
||||
Symbols: []string{
|
||||
"cgo_library",
|
||||
"go_binary",
|
||||
"go_library",
|
||||
"go_prefix",
|
||||
"go_repository",
|
||||
"go_test",
|
||||
},
|
||||
}, {
|
||||
Name: "@io_bazel_rules_go//proto:def.bzl",
|
||||
Symbols: []string{
|
||||
"go_grpc_library",
|
||||
"go_proto_library",
|
||||
},
|
||||
}, {
|
||||
Name: "@bazel_gazelle//:deps.bzl",
|
||||
Symbols: []string{
|
||||
"go_repository",
|
||||
},
|
||||
After: []string{
|
||||
"go_rules_dependencies",
|
||||
"go_register_toolchains",
|
||||
"gazelle_dependencies",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func (_ *goLang) Kinds() map[string]rule.KindInfo { return goKinds }
|
||||
func (_ *goLang) Loads() []rule.LoadInfo { return goLoads }
|
||||
|
|
@ -1,201 +0,0 @@
|
|||
// Generated by language/proto/gen/gen_known_imports.go
|
||||
// From language/proto/proto.csv
|
||||
|
||||
package golang
|
||||
|
||||
import "github.com/bazelbuild/bazel-gazelle/label"
|
||||
|
||||
var knownGoProtoImports = map[string]label.Label{
|
||||
|
||||
"github.com/golang/protobuf/ptypes/any": label.New("io_bazel_rules_go", "proto/wkt", "any_go_proto"),
|
||||
"google.golang.org/genproto/protobuf/api": label.New("io_bazel_rules_go", "proto/wkt", "api_go_proto"),
|
||||
"github.com/golang/protobuf/protoc-gen-go/plugin": label.New("io_bazel_rules_go", "proto/wkt", "compiler_plugin_go_proto"),
|
||||
"github.com/golang/protobuf/protoc-gen-go/descriptor": label.New("io_bazel_rules_go", "proto/wkt", "descriptor_go_proto"),
|
||||
"github.com/golang/protobuf/ptypes/duration": label.New("io_bazel_rules_go", "proto/wkt", "duration_go_proto"),
|
||||
"github.com/golang/protobuf/ptypes/empty": label.New("io_bazel_rules_go", "proto/wkt", "empty_go_proto"),
|
||||
"google.golang.org/genproto/protobuf/field_mask": label.New("io_bazel_rules_go", "proto/wkt", "field_mask_go_proto"),
|
||||
"google.golang.org/genproto/protobuf/source_context": label.New("io_bazel_rules_go", "proto/wkt", "source_context_go_proto"),
|
||||
"github.com/golang/protobuf/ptypes/struct": label.New("io_bazel_rules_go", "proto/wkt", "struct_go_proto"),
|
||||
"github.com/golang/protobuf/ptypes/timestamp": label.New("io_bazel_rules_go", "proto/wkt", "timestamp_go_proto"),
|
||||
"google.golang.org/genproto/protobuf/ptype": label.New("io_bazel_rules_go", "proto/wkt", "type_go_proto"),
|
||||
"github.com/golang/protobuf/ptypes/wrappers": label.New("io_bazel_rules_go", "proto/wkt", "wrappers_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/ads/googleads/v1/common": label.New("go_googleapis", "google/ads/googleads/v1/common", "common_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/ads/googleads/v1/enums": label.New("go_googleapis", "google/ads/googleads/v1/enums", "enums_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/ads/googleads/v1/errors": label.New("go_googleapis", "google/ads/googleads/v1/errors", "errors_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/ads/googleads/v1/resources": label.New("go_googleapis", "google/ads/googleads/v1/resources", "resources_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/ads/googleads/v1/services": label.New("go_googleapis", "google/ads/googleads/v1/services", "services_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/ads/googleads/v2/common": label.New("go_googleapis", "google/ads/googleads/v2/common", "common_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/ads/googleads/v2/enums": label.New("go_googleapis", "google/ads/googleads/v2/enums", "enums_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/ads/googleads/v2/errors": label.New("go_googleapis", "google/ads/googleads/v2/errors", "errors_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/ads/googleads/v2/resources": label.New("go_googleapis", "google/ads/googleads/v2/resources", "resources_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/ads/googleads/v2/services": label.New("go_googleapis", "google/ads/googleads/v2/services", "services_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/annotations": label.New("go_googleapis", "google/api", "annotations_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/serviceconfig": label.New("go_googleapis", "google/api", "serviceconfig_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/configchange": label.New("go_googleapis", "google/api", "configchange_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/distribution": label.New("go_googleapis", "google/api", "distribution_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/expr/v1alpha1": label.New("go_googleapis", "google/api/expr/v1alpha1", "expr_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/expr/v1beta1": label.New("go_googleapis", "google/api/expr/v1beta1", "expr_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/httpbody": label.New("go_googleapis", "google/api", "httpbody_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/label": label.New("go_googleapis", "google/api", "label_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api": label.New("go_googleapis", "google/api", "api_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/metric": label.New("go_googleapis", "google/api", "metric_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/monitoredres": label.New("go_googleapis", "google/api", "monitoredres_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/servicecontrol/v1": label.New("go_googleapis", "google/api/servicecontrol/v1", "servicecontrol_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/api/servicemanagement/v1": label.New("go_googleapis", "google/api/servicemanagement/v1", "servicemanagement_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/appengine/legacy": label.New("go_googleapis", "google/appengine/legacy", "legacy_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/appengine/logging/v1": label.New("go_googleapis", "google/appengine/logging/v1", "logging_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/appengine/v1": label.New("go_googleapis", "google/appengine/v1", "appengine_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1": label.New("go_googleapis", "google/assistant/embedded/v1alpha1", "embedded_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/assistant/embedded/v1alpha2": label.New("go_googleapis", "google/assistant/embedded/v1alpha2", "embedded_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1": label.New("go_googleapis", "google/bigtable/admin/cluster/v1", "cluster_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/bigtable/admin/table/v1": label.New("go_googleapis", "google/bigtable/admin/table/v1", "table_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/bigtable/admin/v2": label.New("go_googleapis", "google/bigtable/admin/v2", "admin_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/bigtable/v1": label.New("go_googleapis", "google/bigtable/v1", "bigtable_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/bigtable/v2": label.New("go_googleapis", "google/bigtable/v2", "bigtable_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/bytestream": label.New("go_googleapis", "google/bytestream", "bytestream_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/asset/v1": label.New("go_googleapis", "google/cloud/asset/v1", "asset_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/asset/v1beta1": label.New("go_googleapis", "google/cloud/asset/v1beta1", "asset_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/asset/v1p2beta1": label.New("go_googleapis", "google/cloud/asset/v1p2beta1", "asset_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/audit": label.New("go_googleapis", "google/cloud/audit", "audit_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/automl/v1": label.New("go_googleapis", "google/cloud/automl/v1", "automl_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/automl/v1beta1": label.New("go_googleapis", "google/cloud/automl/v1beta1", "automl_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1": label.New("go_googleapis", "google/cloud/bigquery/datatransfer/v1", "datatransfer_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/bigquery/logging/v1": label.New("go_googleapis", "google/cloud/bigquery/logging/v1", "logging_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1": label.New("go_googleapis", "google/cloud/bigquery/storage/v1beta1", "storage_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/bigquery/v2": label.New("go_googleapis", "google/cloud/bigquery/v2", "bigquery_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/billing/v1": label.New("go_googleapis", "google/cloud/billing/v1", "billing_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/binaryauthorization/v1beta1": label.New("go_googleapis", "google/cloud/binaryauthorization/v1beta1", "binaryauthorization_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/datacatalog/v1beta1": label.New("go_googleapis", "google/cloud/datacatalog/v1beta1", "datacatalog_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/datalabeling/v1beta1": label.New("go_googleapis", "google/cloud/datalabeling/v1beta1", "datalabeling_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/dataproc/v1": label.New("go_googleapis", "google/cloud/dataproc/v1", "dataproc_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2": label.New("go_googleapis", "google/cloud/dataproc/v1beta2", "dataproc_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/dialogflow/v2": label.New("go_googleapis", "google/cloud/dialogflow/v2", "dialogflow_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1": label.New("go_googleapis", "google/cloud/dialogflow/v2beta1", "dialogflow_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/document/v1beta1": label.New("go_googleapis", "google/cloud/document/v1beta1", "document_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/functions/v1beta2": label.New("go_googleapis", "google/cloud/functions/v1beta2", "functions_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/iot/v1": label.New("go_googleapis", "google/cloud/iot/v1", "iot_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/irm/v1alpha2": label.New("go_googleapis", "google/cloud/irm/v1alpha2", "irm_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/kms/v1": label.New("go_googleapis", "google/cloud/kms/v1", "kms_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/language/v1": label.New("go_googleapis", "google/cloud/language/v1", "language_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/language/v1beta1": label.New("go_googleapis", "google/cloud/language/v1beta1", "language_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/language/v1beta2": label.New("go_googleapis", "google/cloud/language/v1beta2", "language_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/location": label.New("go_googleapis", "google/cloud/location", "location_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/ml/v1": label.New("go_googleapis", "google/cloud/ml/v1", "ml_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/oslogin/common": label.New("go_googleapis", "google/cloud/oslogin/common", "common_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/oslogin/v1": label.New("go_googleapis", "google/cloud/oslogin/v1", "oslogin_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/oslogin/v1alpha": label.New("go_googleapis", "google/cloud/oslogin/v1alpha", "oslogin_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/oslogin/v1beta": label.New("go_googleapis", "google/cloud/oslogin/v1beta", "oslogin_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/phishingprotection/v1beta1": label.New("go_googleapis", "google/cloud/phishingprotection/v1beta1", "phishingprotection_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/recaptchaenterprise/v1beta1": label.New("go_googleapis", "google/cloud/recaptchaenterprise/v1beta1", "recaptchaenterprise_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/recommender/v1beta1": label.New("go_googleapis", "google/cloud/recommender/v1beta1", "recommender_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/redis/v1": label.New("go_googleapis", "google/cloud/redis/v1", "redis_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/redis/v1beta1": label.New("go_googleapis", "google/cloud/redis/v1beta1", "redis_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/resourcemanager/v2": label.New("go_googleapis", "google/cloud/resourcemanager/v2", "resourcemanager_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/runtimeconfig/v1beta1": label.New("go_googleapis", "google/cloud/runtimeconfig/v1beta1", "runtimeconfig_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/scheduler/v1": label.New("go_googleapis", "google/cloud/scheduler/v1", "scheduler_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/scheduler/v1beta1": label.New("go_googleapis", "google/cloud/scheduler/v1beta1", "scheduler_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/securitycenter/v1": label.New("go_googleapis", "google/cloud/securitycenter/v1", "securitycenter_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/securitycenter/v1beta1": label.New("go_googleapis", "google/cloud/securitycenter/v1beta1", "securitycenter_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/speech/v1": label.New("go_googleapis", "google/cloud/speech/v1", "speech_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1": label.New("go_googleapis", "google/cloud/speech/v1p1beta1", "speech_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/support/common": label.New("go_googleapis", "google/cloud/support", "common_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/support/v1alpha1": label.New("go_googleapis", "google/cloud/support/v1alpha1", "support_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/talent/v4beta1": label.New("go_googleapis", "google/cloud/talent/v4beta1", "talent_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/tasks/v2": label.New("go_googleapis", "google/cloud/tasks/v2", "tasks_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/tasks/v2beta2": label.New("go_googleapis", "google/cloud/tasks/v2beta2", "tasks_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/tasks/v2beta3": label.New("go_googleapis", "google/cloud/tasks/v2beta3", "tasks_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/texttospeech/v1": label.New("go_googleapis", "google/cloud/texttospeech/v1", "texttospeech_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1": label.New("go_googleapis", "google/cloud/texttospeech/v1beta1", "texttospeech_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/translate/v3": label.New("go_googleapis", "google/cloud/translate/v3", "translate_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/translate/v3beta1": label.New("go_googleapis", "google/cloud/translate/v3beta1", "translate_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/videointelligence/v1": label.New("go_googleapis", "google/cloud/videointelligence/v1", "videointelligence_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1": label.New("go_googleapis", "google/cloud/videointelligence/v1beta1", "videointelligence_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2": label.New("go_googleapis", "google/cloud/videointelligence/v1beta2", "videointelligence_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1": label.New("go_googleapis", "google/cloud/videointelligence/v1p1beta1", "videointelligence_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1": label.New("go_googleapis", "google/cloud/videointelligence/v1p2beta1", "videointelligence_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1": label.New("go_googleapis", "google/cloud/videointelligence/v1p3beta1", "videointelligence_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/vision/v1": label.New("go_googleapis", "google/cloud/vision/v1", "vision_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1": label.New("go_googleapis", "google/cloud/vision/v1p1beta1", "vision_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1": label.New("go_googleapis", "google/cloud/vision/v1p2beta1", "vision_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/vision/v1p3beta1": label.New("go_googleapis", "google/cloud/vision/v1p3beta1", "vision_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/vision/v1p4beta1": label.New("go_googleapis", "google/cloud/vision/v1p4beta1", "vision_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/webrisk/v1beta1": label.New("go_googleapis", "google/cloud/webrisk/v1beta1", "webrisk_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/websecurityscanner/v1alpha": label.New("go_googleapis", "google/cloud/websecurityscanner/v1alpha", "websecurityscanner_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/cloud/websecurityscanner/v1beta": label.New("go_googleapis", "google/cloud/websecurityscanner/v1beta", "websecurityscanner_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/container/v1": label.New("go_googleapis", "google/container/v1", "container_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/container/v1alpha1": label.New("go_googleapis", "google/container/v1alpha1", "container_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/container/v1beta1": label.New("go_googleapis", "google/container/v1beta1", "container_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/datastore/admin/v1": label.New("go_googleapis", "google/datastore/admin/v1", "admin_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/datastore/admin/v1beta1": label.New("go_googleapis", "google/datastore/admin/v1beta1", "admin_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/datastore/v1": label.New("go_googleapis", "google/datastore/v1", "datastore_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/datastore/v1beta3": label.New("go_googleapis", "google/datastore/v1beta3", "datastore_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/build/v1": label.New("go_googleapis", "google/devtools/build/v1", "build_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/cloudbuild/v1": label.New("go_googleapis", "google/devtools/cloudbuild/v1", "cloudbuild_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/clouddebugger/v2": label.New("go_googleapis", "google/devtools/clouddebugger/v2", "clouddebugger_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1": label.New("go_googleapis", "google/devtools/clouderrorreporting/v1beta1", "clouderrorreporting_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2": label.New("go_googleapis", "google/devtools/cloudprofiler/v2", "cloudprofiler_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/cloudtrace/v1": label.New("go_googleapis", "google/devtools/cloudtrace/v1", "cloudtrace_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/cloudtrace/v2": label.New("go_googleapis", "google/devtools/cloudtrace/v2", "cloudtrace_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1": label.New("go_googleapis", "google/devtools/containeranalysis/v1", "containeranalysis_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1alpha1": label.New("go_googleapis", "google/devtools/containeranalysis/v1alpha1", "containeranalysis_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/attestation": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/attestation", "attestation_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/build": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/build", "build_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/common": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/common", "common_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1", "containeranalysis_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/deployment": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/deployment", "deployment_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/discovery": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/discovery", "discovery_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/grafeas": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/grafeas", "grafeas_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/image": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/image", "image_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/package": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/package", "package_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/provenance": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/provenance", "provenance_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/source": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/source", "source_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/containeranalysis/v1beta1/vulnerability": label.New("go_googleapis", "google/devtools/containeranalysis/v1beta1/vulnerability", "vulnerability_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/remoteexecution/v1test": label.New("go_googleapis", "google/devtools/remoteexecution/v1test", "remoteexecution_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/remoteworkers/v1test2": label.New("go_googleapis", "google/devtools/remoteworkers/v1test2", "remoteworkers_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/resultstore/v2": label.New("go_googleapis", "google/devtools/resultstore/v2", "resultstore_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/source/v1": label.New("go_googleapis", "google/devtools/source/v1", "source_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/devtools/sourcerepo/v1": label.New("go_googleapis", "google/devtools/sourcerepo/v1", "sourcerepo_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/example/library/v1": label.New("go_googleapis", "google/example/library/v1", "library_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/firebase/fcm/connection/v1alpha1": label.New("go_googleapis", "google/firebase/fcm/connection/v1alpha1", "connection_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/firestore/admin/v1": label.New("go_googleapis", "google/firestore/admin/v1", "admin_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/firestore/admin/v1beta1": label.New("go_googleapis", "google/firestore/admin/v1beta1", "admin_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/firestore/admin/v1beta2": label.New("go_googleapis", "google/firestore/admin/v1beta2", "admin_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/firestore/v1": label.New("go_googleapis", "google/firestore/v1", "firestore_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/firestore/v1beta1": label.New("go_googleapis", "google/firestore/v1beta1", "firestore_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/genomics/v1": label.New("go_googleapis", "google/genomics/v1", "genomics_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/genomics/v1alpha2": label.New("go_googleapis", "google/genomics/v1alpha2", "genomics_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/geo/type/viewport": label.New("go_googleapis", "google/geo/type", "viewport_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/home/graph/v1": label.New("go_googleapis", "google/home/graph/v1", "graph_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/iam/admin/v1": label.New("go_googleapis", "google/iam/admin/v1", "admin_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/iam/credentials/v1": label.New("go_googleapis", "google/iam/credentials/v1", "credentials_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/iam/v1": label.New("go_googleapis", "google/iam/v1", "iam_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/iam/v1/logging": label.New("go_googleapis", "google/iam/v1/logging", "logging_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/logging/type": label.New("go_googleapis", "google/logging/type", "ltype_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/logging/v2": label.New("go_googleapis", "google/logging/v2", "logging_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/longrunning": label.New("go_googleapis", "google/longrunning", "longrunning_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/monitoring/v3": label.New("go_googleapis", "google/monitoring/v3", "monitoring_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/privacy/dlp/v2": label.New("go_googleapis", "google/privacy/dlp/v2", "dlp_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/pubsub/v1": label.New("go_googleapis", "google/pubsub/v1", "pubsub_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/pubsub/v1beta2": label.New("go_googleapis", "google/pubsub/v1beta2", "pubsub_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/rpc/code": label.New("go_googleapis", "google/rpc", "code_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails": label.New("go_googleapis", "google/rpc", "errdetails_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/rpc/status": label.New("go_googleapis", "google/rpc", "status_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/spanner/admin/database/v1": label.New("go_googleapis", "google/spanner/admin/database/v1", "database_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/spanner/admin/instance/v1": label.New("go_googleapis", "google/spanner/admin/instance/v1", "instance_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/spanner/v1": label.New("go_googleapis", "google/spanner/v1", "spanner_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/storagetransfer/v1": label.New("go_googleapis", "google/storagetransfer/v1", "storagetransfer_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/streetview/publish/v1": label.New("go_googleapis", "google/streetview/publish/v1", "publish_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/calendarperiod": label.New("go_googleapis", "google/type", "calendarperiod_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/color": label.New("go_googleapis", "google/type", "color_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/date": label.New("go_googleapis", "google/type", "date_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/dayofweek": label.New("go_googleapis", "google/type", "dayofweek_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/expr": label.New("go_googleapis", "google/type", "expr_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/fraction": label.New("go_googleapis", "google/type", "fraction_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/latlng": label.New("go_googleapis", "google/type", "latlng_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/money": label.New("go_googleapis", "google/type", "money_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/postaladdress": label.New("go_googleapis", "google/type", "postaladdress_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/quaternion": label.New("go_googleapis", "google/type", "quaternion_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/type/timeofday": label.New("go_googleapis", "google/type", "timeofday_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/watcher/v1": label.New("go_googleapis", "google/watcher/v1", "watcher_go_proto"),
|
||||
"google.golang.org/genproto/googleapis/grafeas/v1": label.New("go_googleapis", "grafeas/v1", "grafeas_go_proto"),
|
||||
}
|
||||
1614
vendor/github.com/bazelbuild/bazel-gazelle/language/go/known_proto_imports.go
generated
vendored
1614
vendor/github.com/bazelbuild/bazel-gazelle/language/go/known_proto_imports.go
generated
vendored
File diff suppressed because it is too large
Load Diff
|
|
@ -1,70 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package golang provides support for Go and Go proto rules. It generates
|
||||
// go_library, go_binary, go_test, and go_proto_library rules.
|
||||
//
|
||||
// Configuration
|
||||
//
|
||||
// Go rules support the flags -build_tags, -go_prefix, and -external.
|
||||
// They also support the directives # gazelle:build_tags, # gazelle:prefix,
|
||||
// and # gazelle:importmap_prefix. See
|
||||
// https://github.com/bazelbuild/bazel-gazelle/blob/master/README.rst#directives
|
||||
// for information on these.
|
||||
//
|
||||
// Rule generation
|
||||
//
|
||||
// Currently, Gazelle generates rules for one Go package per directory. In
|
||||
// general, we aim to support Go code which is compatible with "go build". If
|
||||
// there are no buildable packages, Gazelle will delete existing rules with
|
||||
// default names. If there are multiple packages, Gazelle will pick one that
|
||||
// matches the directory name or will print an error if no such package is
|
||||
// found.
|
||||
//
|
||||
// Gazelle names library and test rules somewhat oddly: go_default_library, and
|
||||
// go_default_test. This is for historic reasons: before the importpath
|
||||
// attribute was mandatory, import paths were inferred from label names. Even if
|
||||
// we never support multiple packages in the future (we should), we should
|
||||
// migrate away from this because it's surprising. Libraries should generally
|
||||
// be named after their directories.
|
||||
//
|
||||
// Dependency resolution
|
||||
//
|
||||
// Go libraries are indexed by their importpath attribute. Gazelle attempts to
|
||||
// resolve libraries by import path using the index, filtered using the
|
||||
// vendoring algorithm. If an import doesn't match any known library, Gazelle
|
||||
// guesses a name for it, locally (if the import path is under the current
|
||||
// prefix), or in an external repository or vendor directory (depending
|
||||
// on external mode).
|
||||
//
|
||||
// Gazelle has special cases for import paths associated with proto Well
|
||||
// Known Types and Google APIs. rules_go declares canonical rules for these.
|
||||
package golang
|
||||
|
||||
import "github.com/bazelbuild/bazel-gazelle/language"
|
||||
|
||||
const goName = "go"
|
||||
|
||||
type goLang struct {
|
||||
// goPkgDirs is a set of relative paths to directories containing buildable
|
||||
// Go code, including in subdirectories.
|
||||
goPkgRels map[string]bool
|
||||
}
|
||||
|
||||
func (_ *goLang) Name() string { return goName }
|
||||
|
||||
func NewLanguage() language.Language {
|
||||
return &goLang{goPkgRels: make(map[string]bool)}
|
||||
}
|
||||
|
|
@ -1,214 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"go/build"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/label"
|
||||
"github.com/bazelbuild/bazel-gazelle/language"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// importReposFromModules generates one go_repository rule for each
// dependency of the go.mod file named by args.Path (the main module itself
// is skipped). Module sums are read from the adjacent go.sum when present
// and fetched with "go mod download" otherwise. Rules are returned sorted
// by repository name for deterministic output.
func importReposFromModules(args language.ImportReposArgs) language.ImportReposResult {
	// Copy go.mod to temporary directory. We may run commands that modify it,
	// and we want to leave the original alone.
	tempDir, err := copyGoModToTemp(args.Path)
	if err != nil {
		return language.ImportReposResult{Error: err}
	}
	defer os.RemoveAll(tempDir)

	// List all modules except for the main module, including implicit indirect
	// dependencies.
	type module struct {
		Path, Version, Sum string
		Main               bool
		Replace            *struct {
			Path, Version string
		}
	}
	// path@version can be used as a unique identifier for looking up sums
	pathToModule := map[string]*module{}
	data, err := goListModules(tempDir)
	if err != nil {
		return language.ImportReposResult{Error: err}
	}
	// "go list -m -json all" emits a stream of concatenated JSON objects,
	// one per module, so decode until the stream is exhausted.
	dec := json.NewDecoder(bytes.NewReader(data))
	for dec.More() {
		mod := new(module)
		if err := dec.Decode(mod); err != nil {
			return language.ImportReposResult{Error: err}
		}
		if mod.Main {
			continue
		}
		if mod.Replace != nil {
			if filepath.IsAbs(mod.Replace.Path) || build.IsLocalImport(mod.Replace.Path) {
				// A local file-system replacement has no canonical download
				// location, so no go_repository rule can be generated for it.
				log.Printf("go_repository does not support file path replacements for %s -> %s", mod.Path,
					mod.Replace.Path)
				continue
			}
			// For replaced modules, the replacement's path@version is what
			// the sum lookups below will use.
			pathToModule[mod.Replace.Path+"@"+mod.Replace.Version] = mod
		} else {
			pathToModule[mod.Path+"@"+mod.Version] = mod
		}
	}
	// Load sums from go.sum. Ideally, they're all there.
	goSumPath := filepath.Join(filepath.Dir(args.Path), "go.sum")
	// A missing or unreadable go.sum is tolerated; missing sums are
	// recovered via "go mod download" below.
	data, _ = ioutil.ReadFile(goSumPath)
	lines := bytes.Split(data, []byte("\n"))
	for _, line := range lines {
		line = bytes.TrimSpace(line)
		// Each go.sum line has the form "path version sum"; skip anything else.
		fields := bytes.Fields(line)
		if len(fields) != 3 {
			continue
		}
		path, version, sum := string(fields[0]), string(fields[1]), string(fields[2])
		if strings.HasSuffix(version, "/go.mod") {
			// "/go.mod" entries cover only the module's go.mod file, not the
			// module content; they are not the sum wanted here.
			continue
		}
		if mod, ok := pathToModule[path+"@"+version]; ok {
			mod.Sum = sum
		}
	}
	// If sums are missing, run go mod download to get them.
	var missingSumArgs []string
	for pathVer, mod := range pathToModule {
		if mod.Sum == "" {
			missingSumArgs = append(missingSumArgs, pathVer)
		}
	}
	if len(missingSumArgs) > 0 {
		data, err := goModDownload(tempDir, missingSumArgs)
		if err != nil {
			return language.ImportReposResult{Error: err}
		}
		dec = json.NewDecoder(bytes.NewReader(data))
		for dec.More() {
			var dl module
			if err := dec.Decode(&dl); err != nil {
				return language.ImportReposResult{Error: err}
			}
			if mod, ok := pathToModule[dl.Path+"@"+dl.Version]; ok {
				mod.Sum = dl.Sum
			}
		}
	}

	// Translate to repository rules.
	gen := make([]*rule.Rule, 0, len(pathToModule))
	for pathVer, mod := range pathToModule {
		if mod.Sum == "" {
			// Still no sum after go mod download; report and skip rather
			// than emitting a rule without one.
			log.Printf("could not determine sum for module %s", pathVer)
			continue
		}
		r := rule.NewRule("go_repository", label.ImportPathToBazelRepoName(mod.Path))
		r.SetAttr("importpath", mod.Path)
		r.SetAttr("sum", mod.Sum)
		if mod.Replace == nil {
			r.SetAttr("version", mod.Version)
		} else {
			r.SetAttr("replace", mod.Replace.Path)
			r.SetAttr("version", mod.Replace.Version)
		}
		gen = append(gen, r)
	}
	// Sort so output does not depend on map iteration order.
	sort.Slice(gen, func(i, j int) bool {
		return gen[i].Name() < gen[j].Name()
	})
	return language.ImportReposResult{Gen: gen}
}
|
||||
|
||||
// goListModules invokes "go list" in a directory containing a go.mod file.
|
||||
var goListModules = func(dir string) ([]byte, error) {
|
||||
goTool := findGoTool()
|
||||
cmd := exec.Command(goTool, "list", "-m", "-json", "all")
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Dir = dir
|
||||
return cmd.Output()
|
||||
}
|
||||
|
||||
// goModDownload invokes "go mod download" in a directory containing a
|
||||
// go.mod file.
|
||||
var goModDownload = func(dir string, args []string) ([]byte, error) {
|
||||
goTool := findGoTool()
|
||||
cmd := exec.Command(goTool, "mod", "download", "-json")
|
||||
cmd.Args = append(cmd.Args, args...)
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Dir = dir
|
||||
return cmd.Output()
|
||||
}
|
||||
|
||||
// copyGoModToTemp copies to given go.mod file to a temporary directory.
|
||||
// go list tends to mutate go.mod files, but gazelle shouldn't do that.
|
||||
func copyGoModToTemp(filename string) (tempDir string, err error) {
|
||||
goModOrig, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer goModOrig.Close()
|
||||
|
||||
tempDir, err = ioutil.TempDir("", "gazelle-temp-gomod")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
goModCopy, err := os.Create(filepath.Join(tempDir, "go.mod"))
|
||||
if err != nil {
|
||||
os.Remove(tempDir)
|
||||
return "", err
|
||||
}
|
||||
defer func() {
|
||||
if cerr := goModCopy.Close(); err == nil && cerr != nil {
|
||||
err = cerr
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = io.Copy(goModCopy, goModOrig)
|
||||
if err != nil {
|
||||
os.RemoveAll(tempDir)
|
||||
return "", err
|
||||
}
|
||||
return tempDir, err
|
||||
}
|
||||
|
||||
// findGoTool attempts to locate the go executable. If GOROOT is set, we'll
|
||||
// prefer the one in there; otherwise, we'll rely on PATH. If the wrapper
|
||||
// script generated by the gazelle rule is invoked by Bazel, it will set
|
||||
// GOROOT to the configured SDK. We don't want to rely on the host SDK in
|
||||
// that situation.
|
||||
func findGoTool() string {
|
||||
path := "go" // rely on PATH by default
|
||||
if goroot, ok := os.LookupEnv("GOROOT"); ok {
|
||||
path = filepath.Join(goroot, "bin", "go")
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
path += ".exe"
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
|
@ -1,507 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/language/proto"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// goPackage contains metadata for a set of .go and .proto files that can be
// used to generate Go rules.
type goPackage struct {
	// name is the Go package name; dir and rel locate the package directory
	// (rel is relative to the repository root — see inferImportPath).
	name, dir, rel string
	// library, binary, and test accumulate files for the corresponding
	// go_library / go_binary / go_test rules (see addFile).
	library, binary, test goTarget
	// proto accumulates .proto files (legacy proto mode only — see addFile).
	proto protoTarget
	// hasTestdata presumably marks a "testdata" subdirectory — set outside
	// this chunk; confirm against the scanner.
	hasTestdata bool
	// importPath is the Go import path, set by inferImportPath.
	importPath string
}

// goTarget contains information used to generate an individual Go rule
// (library, binary, or test).
type goTarget struct {
	// sources, imports, copts, and clinkopts collect possibly
	// platform-constrained strings for the rule's attributes.
	sources, imports, copts, clinkopts platformStringsBuilder
	// cgo is true once any added file contains cgo code.
	cgo bool
}

// protoTarget contains information used to generate a go_proto_library rule.
type protoTarget struct {
	name        string
	sources     platformStringsBuilder
	imports     platformStringsBuilder
	// hasServices is true if any proto file in the target declares a service.
	hasServices bool
}

// platformStringsBuilder is used to construct rule.PlatformStrings. Bazel
// has some requirements for deps list (a dependency cannot appear in more
// than one select expression; dependencies cannot be duplicated), so we need
// to build these carefully.
type platformStringsBuilder struct {
	// strs maps each added string to the constraint set it was filed under.
	strs map[string]platformStringInfo
}

// platformStringInfo contains information about a single string (source,
// import, or option).
type platformStringInfo struct {
	// set selects which of the following maps is meaningful.
	set       platformStringSet
	// oss: operating systems the string applies to (osSet only).
	oss       map[string]bool
	// archs: architectures the string applies to (archSet only).
	archs     map[string]bool
	// platforms: explicit os/arch pairs (platformSet only).
	platforms map[rule.Platform]bool
}

// platformStringSet enumerates the kinds of constraint a string can carry.
type platformStringSet int

const (
	// genericSet: the string applies on every platform.
	genericSet platformStringSet = iota
	// osSet: constrained by operating system only.
	osSet
	// archSet: constrained by architecture only.
	archSet
	// platformSet: constrained to explicit os/arch pairs.
	platformSet
)
|
||||
|
||||
// addFile adds the file described by "info" to a target in the package "p" if
// the file is buildable.
//
// "cgo" tells whether any ".go" file in the package contains cgo code. This
// affects whether C files are added to targets.
//
// An error is returned if a file is buildable but invalid (for example, a
// test .go file containing cgo code). Files that are not buildable will not
// be added to any target (for example, .txt files).
func (pkg *goPackage) addFile(c *config.Config, info fileInfo, cgo bool) error {
	switch {
	case info.ext == unknownExt || !cgo && (info.ext == cExt || info.ext == csExt):
		// Unknown extensions are dropped; cExt/csExt files are only kept
		// when the package actually uses cgo.
		return nil
	case info.ext == protoExt:
		if pcMode := getProtoMode(c); pcMode == proto.LegacyMode {
			// Only add files in legacy mode. This is used to generate a filegroup
			// that contains all protos. In other modes, we get the .proto files
			// from information emitted by the proto language extension.
			pkg.proto.addFile(c, info)
		}
	case info.isTest:
		if info.isCgo {
			return fmt.Errorf("%s: use of cgo in test not supported", info.path)
		}
		pkg.test.addFile(c, info)
	default:
		pkg.library.addFile(c, info)
	}

	return nil
}
|
||||
|
||||
// isCommand returns true if the package name is "main".
|
||||
func (pkg *goPackage) isCommand() bool {
|
||||
return pkg.name == "main"
|
||||
}
|
||||
|
||||
// isBuildable returns true if anything in the package is buildable.
|
||||
// This is true if the package has Go code that satisfies build constraints
|
||||
// on any platform or has proto files not in legacy mode.
|
||||
func (pkg *goPackage) isBuildable(c *config.Config) bool {
|
||||
return pkg.firstGoFile() != "" || !pkg.proto.sources.isEmpty()
|
||||
}
|
||||
|
||||
// firstGoFile returns the name of a .go file if the package contains at least
|
||||
// one .go file, or "" otherwise.
|
||||
func (pkg *goPackage) firstGoFile() string {
|
||||
goSrcs := []platformStringsBuilder{
|
||||
pkg.library.sources,
|
||||
pkg.binary.sources,
|
||||
pkg.test.sources,
|
||||
}
|
||||
for _, sb := range goSrcs {
|
||||
if sb.strs != nil {
|
||||
for s := range sb.strs {
|
||||
if strings.HasSuffix(s, ".go") {
|
||||
return s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (pkg *goPackage) haveCgo() bool {
|
||||
return pkg.library.cgo || pkg.binary.cgo || pkg.test.cgo
|
||||
}
|
||||
|
||||
func (pkg *goPackage) inferImportPath(c *config.Config) error {
|
||||
if pkg.importPath != "" {
|
||||
log.Panic("importPath already set")
|
||||
}
|
||||
gc := getGoConfig(c)
|
||||
if !gc.prefixSet {
|
||||
return fmt.Errorf("%s: go prefix is not set, so importpath can't be determined for rules. Set a prefix with a '# gazelle:prefix' comment or with -go_prefix on the command line", pkg.dir)
|
||||
}
|
||||
pkg.importPath = inferImportPath(gc, pkg.rel)
|
||||
|
||||
if pkg.rel == gc.prefixRel {
|
||||
pkg.importPath = gc.prefix
|
||||
} else {
|
||||
fromPrefixRel := strings.TrimPrefix(pkg.rel, gc.prefixRel+"/")
|
||||
pkg.importPath = path.Join(gc.prefix, fromPrefixRel)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func inferImportPath(gc *goConfig, rel string) string {
|
||||
if rel == gc.prefixRel {
|
||||
return gc.prefix
|
||||
} else {
|
||||
fromPrefixRel := strings.TrimPrefix(rel, gc.prefixRel+"/")
|
||||
return path.Join(gc.prefix, fromPrefixRel)
|
||||
}
|
||||
}
|
||||
|
||||
func goProtoPackageName(pkg proto.Package) string {
|
||||
if value, ok := pkg.Options["go_package"]; ok {
|
||||
if strings.LastIndexByte(value, '/') == -1 {
|
||||
return value
|
||||
} else {
|
||||
if i := strings.LastIndexByte(value, ';'); i != -1 {
|
||||
return value[i+1:]
|
||||
} else {
|
||||
return path.Base(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.Replace(pkg.Name, ".", "_", -1)
|
||||
}
|
||||
|
||||
func goProtoImportPath(gc *goConfig, pkg proto.Package, rel string) string {
|
||||
if value, ok := pkg.Options["go_package"]; ok {
|
||||
if strings.LastIndexByte(value, '/') == -1 {
|
||||
return inferImportPath(gc, rel)
|
||||
} else if i := strings.LastIndexByte(value, ';'); i != -1 {
|
||||
return value[:i]
|
||||
} else {
|
||||
return value
|
||||
}
|
||||
}
|
||||
return inferImportPath(gc, rel)
|
||||
}
|
||||
|
||||
// addFile records a single source file in the target: its name, imports,
// and compiler/linker options, each filed under the platform constraints
// computed from the file's build tags.
func (t *goTarget) addFile(c *config.Config, info fileInfo) {
	t.cgo = t.cgo || info.isCgo
	// add files strings under the constraints of the file itself.
	add := getPlatformStringsAddFunction(c, info, nil)
	add(&t.sources, info.name)
	add(&t.imports, info.imports...)
	for _, copts := range info.copts {
		optAdd := add
		if len(copts.tags) > 0 {
			// An option group with its own tags gets a narrower constraint set.
			optAdd = getPlatformStringsAddFunction(c, info, copts.tags)
		}
		optAdd(&t.copts, copts.opts)
	}
	for _, clinkopts := range info.clinkopts {
		optAdd := add
		if len(clinkopts.tags) > 0 {
			optAdd = getPlatformStringsAddFunction(c, info, clinkopts.tags)
		}
		optAdd(&t.clinkopts, clinkopts.opts)
	}
}
|
||||
|
||||
func protoTargetFromProtoPackage(name string, pkg proto.Package) protoTarget {
|
||||
target := protoTarget{name: name}
|
||||
for f := range pkg.Files {
|
||||
target.sources.addGenericString(f)
|
||||
}
|
||||
for i := range pkg.Imports {
|
||||
target.imports.addGenericString(i)
|
||||
}
|
||||
target.hasServices = pkg.HasServices
|
||||
return target
|
||||
}
|
||||
|
||||
func (t *protoTarget) addFile(c *config.Config, info fileInfo) {
|
||||
t.sources.addGenericString(info.name)
|
||||
for _, imp := range info.imports {
|
||||
t.imports.addGenericString(imp)
|
||||
}
|
||||
t.hasServices = t.hasServices || info.hasServices
|
||||
}
|
||||
|
||||
// getPlatformStringsAddFunction returns a function used to add strings to
// a *platformStringsBuilder under the same set of constraints. This is a
// performance optimization to avoid evaluating constraints repeatedly.
//
// Depending on whether the file's constraints mention OS, architecture,
// both, or neither, the returned function files strings as generic,
// OS-constrained, arch-constrained, or platform-constrained. If the
// constraints can never be satisfied, a no-op function is returned.
func getPlatformStringsAddFunction(c *config.Config, info fileInfo, cgoTags tagLine) func(sb *platformStringsBuilder, ss ...string) {
	isOSSpecific, isArchSpecific := isOSArchSpecific(info, cgoTags)

	switch {
	case !isOSSpecific && !isArchSpecific:
		// No OS/arch constraints: the strings apply everywhere or nowhere.
		if checkConstraints(c, "", "", info.goos, info.goarch, info.tags, cgoTags) {
			return func(sb *platformStringsBuilder, ss ...string) {
				for _, s := range ss {
					sb.addGenericString(s)
				}
			}
		}

	case isOSSpecific && !isArchSpecific:
		// Collect every known OS whose constraints are satisfied.
		var osMatch []string
		for _, os := range rule.KnownOSs {
			if checkConstraints(c, os, "", info.goos, info.goarch, info.tags, cgoTags) {
				osMatch = append(osMatch, os)
			}
		}
		if len(osMatch) > 0 {
			return func(sb *platformStringsBuilder, ss ...string) {
				for _, s := range ss {
					sb.addOSString(s, osMatch)
				}
			}
		}

	case !isOSSpecific && isArchSpecific:
		// Collect every known architecture whose constraints are satisfied.
		var archMatch []string
		for _, arch := range rule.KnownArchs {
			if checkConstraints(c, "", arch, info.goos, info.goarch, info.tags, cgoTags) {
				archMatch = append(archMatch, arch)
			}
		}
		if len(archMatch) > 0 {
			return func(sb *platformStringsBuilder, ss ...string) {
				for _, s := range ss {
					sb.addArchString(s, archMatch)
				}
			}
		}

	default:
		// Constrained by both OS and arch: enumerate full os/arch platforms.
		var platformMatch []rule.Platform
		for _, platform := range rule.KnownPlatforms {
			if checkConstraints(c, platform.OS, platform.Arch, info.goos, info.goarch, info.tags, cgoTags) {
				platformMatch = append(platformMatch, platform)
			}
		}
		if len(platformMatch) > 0 {
			return func(sb *platformStringsBuilder, ss ...string) {
				for _, s := range ss {
					sb.addPlatformString(s, platformMatch)
				}
			}
		}
	}

	// No platform satisfies the constraints: drop the strings entirely.
	return func(_ *platformStringsBuilder, _ ...string) {}
}
|
||||
|
||||
// isEmpty reports whether no strings have been added to the builder.
// The add* methods allocate strs on first use, so a nil map means empty.
func (sb *platformStringsBuilder) isEmpty() bool {
	return sb.strs == nil
}
|
||||
|
||||
func (sb *platformStringsBuilder) hasGo() bool {
|
||||
for s := range sb.strs {
|
||||
if strings.HasSuffix(s, ".go") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (sb *platformStringsBuilder) addGenericString(s string) {
|
||||
if sb.strs == nil {
|
||||
sb.strs = make(map[string]platformStringInfo)
|
||||
}
|
||||
sb.strs[s] = platformStringInfo{set: genericSet}
|
||||
}
|
||||
|
||||
// addOSString records s as constrained to the given operating systems.
// If s is already generic, the broader constraint wins and nothing changes.
// If s already carries arch or platform constraints, the existing entry is
// widened to explicit os/arch platforms and the new OSs are merged in.
func (sb *platformStringsBuilder) addOSString(s string, oss []string) {
	if sb.strs == nil {
		sb.strs = make(map[string]platformStringInfo)
	}
	si, ok := sb.strs[s]
	if !ok {
		// First sighting of s: start a fresh OS-constrained entry.
		si.set = osSet
		si.oss = make(map[string]bool)
	}
	switch si.set {
	case genericSet:
		// Already unconditional; an OS constraint adds nothing.
		return
	case osSet:
		for _, os := range oss {
			si.oss[os] = true
		}
	default:
		// Mixed constraint kinds: fall back to explicit platform pairs.
		si.convertToPlatforms()
		for _, os := range oss {
			for _, arch := range rule.KnownOSArchs[os] {
				si.platforms[rule.Platform{OS: os, Arch: arch}] = true
			}
		}
	}
	// si is a value copy; write the updated entry back.
	sb.strs[s] = si
}
|
||||
|
||||
// addArchString records s as constrained to the given architectures.
// If s is already generic, the broader constraint wins and nothing changes.
// If s already carries OS or platform constraints, the existing entry is
// widened to explicit os/arch platforms and the new archs are merged in.
// Mirrors addOSString.
func (sb *platformStringsBuilder) addArchString(s string, archs []string) {
	if sb.strs == nil {
		sb.strs = make(map[string]platformStringInfo)
	}
	si, ok := sb.strs[s]
	if !ok {
		// First sighting of s: start a fresh arch-constrained entry.
		si.set = archSet
		si.archs = make(map[string]bool)
	}
	switch si.set {
	case genericSet:
		// Already unconditional; an arch constraint adds nothing.
		return
	case archSet:
		for _, arch := range archs {
			si.archs[arch] = true
		}
	default:
		// Mixed constraint kinds: fall back to explicit platform pairs.
		si.convertToPlatforms()
		for _, arch := range archs {
			for _, os := range rule.KnownArchOSs[arch] {
				si.platforms[rule.Platform{OS: os, Arch: arch}] = true
			}
		}
	}
	// si is a value copy; write the updated entry back.
	sb.strs[s] = si
}
|
||||
|
||||
// addPlatformString records s as constrained to explicit os/arch platforms.
// A previously generic entry is left unchanged; OS- or arch-constrained
// entries are first widened to platform form, then merged.
func (sb *platformStringsBuilder) addPlatformString(s string, platforms []rule.Platform) {
	if sb.strs == nil {
		sb.strs = make(map[string]platformStringInfo)
	}
	si, ok := sb.strs[s]
	if !ok {
		// First sighting of s: start a fresh platform-constrained entry.
		si.set = platformSet
		si.platforms = make(map[rule.Platform]bool)
	}
	switch si.set {
	case genericSet:
		// Already unconditional; a platform constraint adds nothing.
		return
	default:
		// convertToPlatforms is a no-op when si is already platformSet.
		si.convertToPlatforms()
		for _, p := range platforms {
			si.platforms[p] = true
		}
	}
	// si is a value copy; write the updated entry back.
	sb.strs[s] = si
}
|
||||
|
||||
// build converts the accumulated strings into a rule.PlatformStrings,
// grouping each string under its constraint kind and sorting every list
// so generated rules are deterministic.
func (sb *platformStringsBuilder) build() rule.PlatformStrings {
	var ps rule.PlatformStrings
	for s, si := range sb.strs {
		switch si.set {
		case genericSet:
			ps.Generic = append(ps.Generic, s)
		case osSet:
			if ps.OS == nil {
				ps.OS = make(map[string][]string)
			}
			for os := range si.oss {
				ps.OS[os] = append(ps.OS[os], s)
			}
		case archSet:
			if ps.Arch == nil {
				ps.Arch = make(map[string][]string)
			}
			for arch := range si.archs {
				ps.Arch[arch] = append(ps.Arch[arch], s)
			}
		case platformSet:
			if ps.Platform == nil {
				ps.Platform = make(map[rule.Platform][]string)
			}
			for p := range si.platforms {
				ps.Platform[p] = append(ps.Platform[p], s)
			}
		}
	}
	// Sort every list: map iteration order above is random.
	sort.Strings(ps.Generic)
	if ps.OS != nil {
		for _, ss := range ps.OS {
			sort.Strings(ss)
		}
	}
	if ps.Arch != nil {
		for _, ss := range ps.Arch {
			sort.Strings(ss)
		}
	}
	if ps.Platform != nil {
		for _, ss := range ps.Platform {
			sort.Strings(ss)
		}
	}
	return ps
}
|
||||
|
||||
func (sb *platformStringsBuilder) buildFlat() []string {
|
||||
strs := make([]string, 0, len(sb.strs))
|
||||
for s := range sb.strs {
|
||||
strs = append(strs, s)
|
||||
}
|
||||
sort.Strings(strs)
|
||||
return strs
|
||||
}
|
||||
|
||||
// convertToPlatforms rewrites an OS- or arch-constrained entry into an
// equivalent set of explicit os/arch platforms using rule.KnownOSArchs /
// rule.KnownArchOSs. Platform entries are already in the target form;
// generic entries cannot be converted (that would narrow them), so that
// case panics.
func (si *platformStringInfo) convertToPlatforms() {
	switch si.set {
	case genericSet:
		log.Panic("cannot convert generic string to platforms")
	case platformSet:
		// Already converted; nothing to do.
		return
	case osSet:
		si.set = platformSet
		si.platforms = make(map[rule.Platform]bool)
		for os := range si.oss {
			for _, arch := range rule.KnownOSArchs[os] {
				si.platforms[rule.Platform{OS: os, Arch: arch}] = true
			}
		}
		si.oss = nil
	case archSet:
		si.set = platformSet
		si.platforms = make(map[rule.Platform]bool)
		for arch := range si.archs {
			for _, os := range rule.KnownArchOSs[arch] {
				si.platforms[rule.Platform{OS: os, Arch: arch}] = true
			}
		}
		si.archs = nil
	}
}
|
||||
|
||||
// semverRex matches the first "/vN" path element in an import path,
// capturing it in group 1.
var semverRex = regexp.MustCompile(`^.*?(/v\d+)(?:/.*)?$`)

// pathWithoutSemver removes a semantic version suffix from path.
// For example, if path is "example.com/foo/v2/bar", pathWithoutSemver
// will return "example.com/foo/bar". If there is no semantic version suffix,
// "" will be returned.
func pathWithoutSemver(path string) string {
	m := semverRex.FindStringSubmatchIndex(path)
	if m == nil {
		return ""
	}
	// m[2]:m[3] bound the "/vN" group; skip the leading "/v" for the digits.
	digits := path[m[2]+2 : m[3]]
	if digits[0] == '0' || digits == "1" {
		// v0 and v1 do not appear as path elements in module import paths.
		return ""
	}
	return path[:m[2]] + path[m[3]:]
}
|
||||
|
|
@ -1,373 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"log"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/label"
|
||||
"github.com/bazelbuild/bazel-gazelle/pathtools"
|
||||
"github.com/bazelbuild/bazel-gazelle/repo"
|
||||
"github.com/bazelbuild/bazel-gazelle/resolve"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
func (_ *goLang) Imports(_ *config.Config, r *rule.Rule, f *rule.File) []resolve.ImportSpec {
|
||||
if !isGoLibrary(r.Kind()) {
|
||||
return nil
|
||||
}
|
||||
if importPath := r.AttrString("importpath"); importPath == "" {
|
||||
return []resolve.ImportSpec{}
|
||||
} else {
|
||||
return []resolve.ImportSpec{{goName, importPath}}
|
||||
}
|
||||
}
|
||||
|
||||
func (_ *goLang) Embeds(r *rule.Rule, from label.Label) []label.Label {
|
||||
embedStrings := r.AttrStrings("embed")
|
||||
if isGoProtoLibrary(r.Kind()) {
|
||||
embedStrings = append(embedStrings, r.AttrString("proto"))
|
||||
}
|
||||
embedLabels := make([]label.Label, 0, len(embedStrings))
|
||||
for _, s := range embedStrings {
|
||||
l, err := label.Parse(s)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
l = l.Abs(from.Repo, from.Pkg)
|
||||
embedLabels = append(embedLabels, l)
|
||||
}
|
||||
return embedLabels
|
||||
}
|
||||
|
||||
func (gl *goLang) Resolve(c *config.Config, ix *resolve.RuleIndex, rc *repo.RemoteCache, r *rule.Rule, importsRaw interface{}, from label.Label) {
|
||||
if importsRaw == nil {
|
||||
// may not be set in tests.
|
||||
return
|
||||
}
|
||||
imports := importsRaw.(rule.PlatformStrings)
|
||||
r.DelAttr("deps")
|
||||
resolve := ResolveGo
|
||||
if r.Kind() == "go_proto_library" {
|
||||
resolve = resolveProto
|
||||
}
|
||||
deps, errs := imports.Map(func(imp string) (string, error) {
|
||||
l, err := resolve(c, ix, rc, imp, from)
|
||||
if err == skipImportError {
|
||||
return "", nil
|
||||
} else if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, embed := range gl.Embeds(r, from) {
|
||||
if embed.Equal(l) {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
l = l.Rel(from.Repo, from.Pkg)
|
||||
return l.String(), nil
|
||||
})
|
||||
for _, err := range errs {
|
||||
log.Print(err)
|
||||
}
|
||||
if !deps.IsEmpty() {
|
||||
if r.Kind() == "go_proto_library" {
|
||||
// protos may import the same library multiple times by different names,
|
||||
// so we need to de-duplicate them. Protos are not platform-specific,
|
||||
// so it's safe to just flatten them.
|
||||
r.SetAttr("deps", deps.Flat())
|
||||
} else {
|
||||
r.SetAttr("deps", deps)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
	// skipImportError marks imports that should produce no dependency:
	// standard-library packages and imports of the rule itself.
	skipImportError = errors.New("std or self import")
	// notFoundError reports that no indexed rule matched an import.
	notFoundError = errors.New("rule not found")
)
|
||||
|
||||
// ResolveGo resolves a Go import path to a Bazel label, possibly using the
// given rule index and remote cache. Some special cases may be applied to
// known proto import paths, depending on the current proto mode.
//
// This may be used directly by other language extensions related to Go
// (gomock). Gazelle calls Language.Resolve instead.
func ResolveGo(c *config.Config, ix *resolve.RuleIndex, rc *repo.RemoteCache, imp string, from label.Label) (label.Label, error) {
	gc := getGoConfig(c)
	pcMode := getProtoMode(c)
	// Turn relative imports into absolute ones under the configured prefix.
	if build.IsLocalImport(imp) {
		cleanRel := path.Clean(path.Join(from.Pkg, imp))
		if build.IsLocalImport(cleanRel) {
			return label.NoLabel, fmt.Errorf("relative import path %q from %q points outside of repository", imp, from.Pkg)
		}
		imp = path.Join(gc.prefix, cleanRel)
	}

	// Standard-library imports never produce deps.
	if IsStandard(imp) {
		return label.NoLabel, skipImportError
	}

	// Explicit "# gazelle:resolve" overrides win over everything else.
	if l, ok := resolve.FindRuleWithOverride(c, resolve.ImportSpec{Lang: "go", Imp: imp}, "go"); ok {
		return l, nil
	}

	if pcMode.ShouldUseKnownImports() {
		// These are commonly used libraries that depend on Well Known Types.
		// They depend on the generated versions of these protos to avoid conflicts.
		// However, since protoc-gen-go depends on these libraries, we generate
		// its rules in disable_global mode (to avoid cyclic dependency), so the
		// "go_default_library" versions of these libraries depend on the
		// pre-generated versions of the proto libraries.
		switch imp {
		case "github.com/golang/protobuf/proto":
			return label.New("com_github_golang_protobuf", "proto", "go_default_library"), nil
		case "github.com/golang/protobuf/jsonpb":
			return label.New("com_github_golang_protobuf", "jsonpb", "go_default_library_gen"), nil
		case "github.com/golang/protobuf/descriptor":
			return label.New("com_github_golang_protobuf", "descriptor", "go_default_library_gen"), nil
		case "github.com/golang/protobuf/ptypes":
			return label.New("com_github_golang_protobuf", "ptypes", "go_default_library_gen"), nil
		case "github.com/golang/protobuf/protoc-gen-go/generator":
			return label.New("com_github_golang_protobuf", "protoc-gen-go/generator", "go_default_library_gen"), nil
		case "google.golang.org/grpc":
			return label.New("org_golang_google_grpc", "", "go_default_library"), nil
		}
		if l, ok := knownGoProtoImports[imp]; ok {
			return l, nil
		}
	}

	// Try the rule index next: libraries declared in this repository.
	if l, err := resolveWithIndexGo(ix, imp, from); err == nil || err == skipImportError {
		return l, err
	} else if err != notFoundError {
		return label.NoLabel, err
	}

	// Special cases for rules_go and bazel_gazelle.
	// These have names that don't follow conventions and they're
	// typically declared with http_archive, not go_repository, so Gazelle
	// won't recognize them.
	if pathtools.HasPrefix(imp, "github.com/bazelbuild/rules_go") {
		pkg := pathtools.TrimPrefix(imp, "github.com/bazelbuild/rules_go")
		return label.New("io_bazel_rules_go", pkg, "go_default_library"), nil
	} else if pathtools.HasPrefix(imp, "github.com/bazelbuild/bazel-gazelle") {
		pkg := pathtools.TrimPrefix(imp, "github.com/bazelbuild/bazel-gazelle")
		return label.New("bazel_gazelle", pkg, "go_default_library"), nil
	}

	if !c.IndexLibraries {
		// packages in current repo were not indexed, relying on prefix to decide what may have been in
		// current repo
		if pathtools.HasPrefix(imp, gc.prefix) {
			pkg := path.Join(gc.prefixRel, pathtools.TrimPrefix(imp, gc.prefix))
			return label.New("", pkg, defaultLibName), nil
		}
	}

	// Otherwise the import lives in another repository; resolve in external
	// or vendored mode per configuration.
	if gc.depMode == externalMode {
		return resolveExternal(gc.moduleMode, rc, imp)
	} else {
		return resolveVendored(rc, imp)
	}
}
|
||||
|
||||
// IsStandard returns whether a package is in the standard library.
// stdPackages is defined elsewhere in this package; presumably a generated
// set of standard-library import paths — confirm at its definition.
func IsStandard(imp string) bool {
	return stdPackages[imp]
}
|
||||
|
||||
func resolveWithIndexGo(ix *resolve.RuleIndex, imp string, from label.Label) (label.Label, error) {
|
||||
matches := ix.FindRulesByImport(resolve.ImportSpec{Lang: "go", Imp: imp}, "go")
|
||||
var bestMatch resolve.FindResult
|
||||
var bestMatchIsVendored bool
|
||||
var bestMatchVendorRoot string
|
||||
var matchError error
|
||||
|
||||
for _, m := range matches {
|
||||
// Apply vendoring logic for Go libraries. A library in a vendor directory
|
||||
// is only visible in the parent tree. Vendored libraries supercede
|
||||
// non-vendored libraries, and libraries closer to from.Pkg supercede
|
||||
// those further up the tree.
|
||||
isVendored := false
|
||||
vendorRoot := ""
|
||||
parts := strings.Split(m.Label.Pkg, "/")
|
||||
for i := len(parts) - 1; i >= 0; i-- {
|
||||
if parts[i] == "vendor" {
|
||||
isVendored = true
|
||||
vendorRoot = strings.Join(parts[:i], "/")
|
||||
break
|
||||
}
|
||||
}
|
||||
if isVendored {
|
||||
}
|
||||
if isVendored && !label.New(m.Label.Repo, vendorRoot, "").Contains(from) {
|
||||
// vendor directory not visible
|
||||
continue
|
||||
}
|
||||
if bestMatch.Label.Equal(label.NoLabel) || isVendored && (!bestMatchIsVendored || len(vendorRoot) > len(bestMatchVendorRoot)) {
|
||||
// Current match is better
|
||||
bestMatch = m
|
||||
bestMatchIsVendored = isVendored
|
||||
bestMatchVendorRoot = vendorRoot
|
||||
matchError = nil
|
||||
} else if (!isVendored && bestMatchIsVendored) || (isVendored && len(vendorRoot) < len(bestMatchVendorRoot)) {
|
||||
// Current match is worse
|
||||
} else {
|
||||
// Match is ambiguous
|
||||
// TODO: consider listing all the ambiguous rules here.
|
||||
matchError = fmt.Errorf("rule %s imports %q which matches multiple rules: %s and %s. # gazelle:resolve may be used to disambiguate", from, imp, bestMatch.Label, m.Label)
|
||||
}
|
||||
}
|
||||
if matchError != nil {
|
||||
return label.NoLabel, matchError
|
||||
}
|
||||
if bestMatch.Label.Equal(label.NoLabel) {
|
||||
return label.NoLabel, notFoundError
|
||||
}
|
||||
if bestMatch.IsSelfImport(from) {
|
||||
return label.NoLabel, skipImportError
|
||||
}
|
||||
return bestMatch.Label, nil
|
||||
}
|
||||
|
||||
var modMajorRex = regexp.MustCompile(`/v\d+(?:/|$)`)
|
||||
|
||||
func resolveExternal(moduleMode bool, rc *repo.RemoteCache, imp string) (label.Label, error) {
|
||||
// If we're in module mode, use "go list" to find the module path and
|
||||
// repository name. Otherwise, use special cases (for github.com, golang.org)
|
||||
// or send a GET with ?go-get=1 to find the root. If the path contains
|
||||
// a major version suffix (e.g., /v2), treat it as a module anyway though.
|
||||
//
|
||||
// Eventually module mode will be the only mode. But for now, it's expensive
|
||||
// and not the common case, especially when known repositories aren't
|
||||
// listed in WORKSPACE (which is currently the case within go_repository).
|
||||
if !moduleMode {
|
||||
moduleMode = pathWithoutSemver(imp) != ""
|
||||
}
|
||||
|
||||
var prefix, repo string
|
||||
var err error
|
||||
if moduleMode {
|
||||
prefix, repo, err = rc.Mod(imp)
|
||||
} else {
|
||||
prefix, repo, err = rc.Root(imp)
|
||||
}
|
||||
if err != nil {
|
||||
return label.NoLabel, err
|
||||
}
|
||||
|
||||
var pkg string
|
||||
if pathtools.HasPrefix(imp, prefix) {
|
||||
pkg = pathtools.TrimPrefix(imp, prefix)
|
||||
} else if impWithoutSemver := pathWithoutSemver(imp); pathtools.HasPrefix(impWithoutSemver, prefix) {
|
||||
// We may have used minimal module compatibility to resolve a path
|
||||
// without a semantic import version suffix to a repository that has one.
|
||||
pkg = pathtools.TrimPrefix(impWithoutSemver, prefix)
|
||||
}
|
||||
|
||||
return label.New(repo, pkg, defaultLibName), nil
|
||||
}
|
||||
|
||||
func resolveVendored(rc *repo.RemoteCache, imp string) (label.Label, error) {
|
||||
return label.New("", path.Join("vendor", imp), defaultLibName), nil
|
||||
}
|
||||
|
||||
func resolveProto(c *config.Config, ix *resolve.RuleIndex, rc *repo.RemoteCache, imp string, from label.Label) (label.Label, error) {
|
||||
pcMode := getProtoMode(c)
|
||||
|
||||
if wellKnownProtos[imp] {
|
||||
return label.NoLabel, skipImportError
|
||||
}
|
||||
|
||||
if l, ok := resolve.FindRuleWithOverride(c, resolve.ImportSpec{Lang: "proto", Imp: imp}, "go"); ok {
|
||||
return l, nil
|
||||
}
|
||||
|
||||
if l, ok := knownProtoImports[imp]; ok && pcMode.ShouldUseKnownImports() {
|
||||
if l.Equal(from) {
|
||||
return label.NoLabel, skipImportError
|
||||
} else {
|
||||
return l, nil
|
||||
}
|
||||
}
|
||||
|
||||
if l, err := resolveWithIndexProto(ix, imp, from); err == nil || err == skipImportError {
|
||||
return l, err
|
||||
} else if err != notFoundError {
|
||||
return label.NoLabel, err
|
||||
}
|
||||
|
||||
// As a fallback, guess the label based on the proto file name. We assume
|
||||
// all proto files in a directory belong to the same package, and the
|
||||
// package name matches the directory base name. We also assume that protos
|
||||
// in the vendor directory must refer to something else in vendor.
|
||||
rel := path.Dir(imp)
|
||||
if rel == "." {
|
||||
rel = ""
|
||||
}
|
||||
if from.Pkg == "vendor" || strings.HasPrefix(from.Pkg, "vendor/") {
|
||||
rel = path.Join("vendor", rel)
|
||||
}
|
||||
return label.New("", rel, defaultLibName), nil
|
||||
}
|
||||
|
||||
// wellKnownProtos is the set of proto sets for which we don't need to add
|
||||
// an explicit dependency in go_proto_library.
|
||||
// TODO(jayconrod): generate from
|
||||
// @io_bazel_rules_go//proto/wkt:WELL_KNOWN_TYPE_PACKAGES
|
||||
var wellKnownProtos = map[string]bool{
|
||||
"google/protobuf/any.proto": true,
|
||||
"google/protobuf/api.proto": true,
|
||||
"google/protobuf/compiler/plugin.proto": true,
|
||||
"google/protobuf/descriptor.proto": true,
|
||||
"google/protobuf/duration.proto": true,
|
||||
"google/protobuf/empty.proto": true,
|
||||
"google/protobuf/field_mask.proto": true,
|
||||
"google/protobuf/source_context.proto": true,
|
||||
"google/protobuf/struct.proto": true,
|
||||
"google/protobuf/timestamp.proto": true,
|
||||
"google/protobuf/type.proto": true,
|
||||
"google/protobuf/wrappers.proto": true,
|
||||
}
|
||||
|
||||
func resolveWithIndexProto(ix *resolve.RuleIndex, imp string, from label.Label) (label.Label, error) {
|
||||
matches := ix.FindRulesByImport(resolve.ImportSpec{Lang: "proto", Imp: imp}, "go")
|
||||
if len(matches) == 0 {
|
||||
return label.NoLabel, notFoundError
|
||||
}
|
||||
if len(matches) > 1 {
|
||||
return label.NoLabel, fmt.Errorf("multiple rules (%s and %s) may be imported with %q from %s", matches[0].Label, matches[1].Label, imp, from)
|
||||
}
|
||||
if matches[0].IsSelfImport(from) {
|
||||
return label.NoLabel, skipImportError
|
||||
}
|
||||
return matches[0].Label, nil
|
||||
}
|
||||
|
||||
func isGoLibrary(kind string) bool {
|
||||
return kind == "go_library" || isGoProtoLibrary(kind)
|
||||
}
|
||||
|
||||
func isGoProtoLibrary(kind string) bool {
|
||||
return kind == "go_proto_library" || kind == "go_grpc_library"
|
||||
}
|
||||
|
|
@ -1,678 +0,0 @@
|
|||
|
||||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Generated by gen_std_package_list.go
|
||||
// DO NOT EDIT
|
||||
|
||||
package golang
|
||||
|
||||
var stdPackages = map[string]bool{
|
||||
"archive/tar": true,
|
||||
"archive/tar/testdata": true,
|
||||
"archive/zip": true,
|
||||
"archive/zip/testdata": true,
|
||||
"bufio": true,
|
||||
"builtin": true,
|
||||
"bytes": true,
|
||||
"cmd": true,
|
||||
"cmd/addr2line": true,
|
||||
"cmd/api": true,
|
||||
"cmd/api/testdata/src/issue21181/dep": true,
|
||||
"cmd/api/testdata/src/issue21181/indirect": true,
|
||||
"cmd/api/testdata/src/issue21181/p": true,
|
||||
"cmd/api/testdata/src/issue29837/p": true,
|
||||
"cmd/api/testdata/src/pkg/p1": true,
|
||||
"cmd/api/testdata/src/pkg/p2": true,
|
||||
"cmd/api/testdata/src/pkg/p3": true,
|
||||
"cmd/asm": true,
|
||||
"cmd/asm/internal/arch": true,
|
||||
"cmd/asm/internal/asm": true,
|
||||
"cmd/asm/internal/asm/testdata": true,
|
||||
"cmd/asm/internal/asm/testdata/avx512enc": true,
|
||||
"cmd/asm/internal/flags": true,
|
||||
"cmd/asm/internal/lex": true,
|
||||
"cmd/buildid": true,
|
||||
"cmd/cgo": true,
|
||||
"cmd/compile": true,
|
||||
"cmd/compile/internal/amd64": true,
|
||||
"cmd/compile/internal/arm": true,
|
||||
"cmd/compile/internal/arm64": true,
|
||||
"cmd/compile/internal/gc": true,
|
||||
"cmd/compile/internal/gc/builtin": true,
|
||||
"cmd/compile/internal/gc/testdata": true,
|
||||
"cmd/compile/internal/gc/testdata/gen": true,
|
||||
"cmd/compile/internal/gc/testdata/reproducible": true,
|
||||
"cmd/compile/internal/mips": true,
|
||||
"cmd/compile/internal/mips64": true,
|
||||
"cmd/compile/internal/ppc64": true,
|
||||
"cmd/compile/internal/s390x": true,
|
||||
"cmd/compile/internal/ssa": true,
|
||||
"cmd/compile/internal/ssa/gen": true,
|
||||
"cmd/compile/internal/ssa/testdata": true,
|
||||
"cmd/compile/internal/syntax": true,
|
||||
"cmd/compile/internal/syntax/testdata": true,
|
||||
"cmd/compile/internal/test": true,
|
||||
"cmd/compile/internal/types": true,
|
||||
"cmd/compile/internal/wasm": true,
|
||||
"cmd/compile/internal/x86": true,
|
||||
"cmd/cover": true,
|
||||
"cmd/cover/testdata": true,
|
||||
"cmd/cover/testdata/html": true,
|
||||
"cmd/dist": true,
|
||||
"cmd/doc": true,
|
||||
"cmd/doc/testdata": true,
|
||||
"cmd/doc/testdata/nested": true,
|
||||
"cmd/doc/testdata/nested/nested": true,
|
||||
"cmd/fix": true,
|
||||
"cmd/go": true,
|
||||
"cmd/go/internal/auth": true,
|
||||
"cmd/go/internal/base": true,
|
||||
"cmd/go/internal/bug": true,
|
||||
"cmd/go/internal/cache": true,
|
||||
"cmd/go/internal/cfg": true,
|
||||
"cmd/go/internal/clean": true,
|
||||
"cmd/go/internal/cmdflag": true,
|
||||
"cmd/go/internal/dirhash": true,
|
||||
"cmd/go/internal/doc": true,
|
||||
"cmd/go/internal/envcmd": true,
|
||||
"cmd/go/internal/fix": true,
|
||||
"cmd/go/internal/fmtcmd": true,
|
||||
"cmd/go/internal/generate": true,
|
||||
"cmd/go/internal/get": true,
|
||||
"cmd/go/internal/help": true,
|
||||
"cmd/go/internal/imports": true,
|
||||
"cmd/go/internal/imports/testdata/android": true,
|
||||
"cmd/go/internal/imports/testdata/illumos": true,
|
||||
"cmd/go/internal/imports/testdata/star": true,
|
||||
"cmd/go/internal/list": true,
|
||||
"cmd/go/internal/load": true,
|
||||
"cmd/go/internal/lockedfile": true,
|
||||
"cmd/go/internal/lockedfile/internal/filelock": true,
|
||||
"cmd/go/internal/modcmd": true,
|
||||
"cmd/go/internal/modconv": true,
|
||||
"cmd/go/internal/modconv/testdata": true,
|
||||
"cmd/go/internal/modfetch": true,
|
||||
"cmd/go/internal/modfetch/codehost": true,
|
||||
"cmd/go/internal/modfile": true,
|
||||
"cmd/go/internal/modfile/testdata": true,
|
||||
"cmd/go/internal/modget": true,
|
||||
"cmd/go/internal/modinfo": true,
|
||||
"cmd/go/internal/modload": true,
|
||||
"cmd/go/internal/module": true,
|
||||
"cmd/go/internal/mvs": true,
|
||||
"cmd/go/internal/note": true,
|
||||
"cmd/go/internal/par": true,
|
||||
"cmd/go/internal/renameio": true,
|
||||
"cmd/go/internal/robustio": true,
|
||||
"cmd/go/internal/run": true,
|
||||
"cmd/go/internal/search": true,
|
||||
"cmd/go/internal/semver": true,
|
||||
"cmd/go/internal/str": true,
|
||||
"cmd/go/internal/sumweb": true,
|
||||
"cmd/go/internal/test": true,
|
||||
"cmd/go/internal/tlog": true,
|
||||
"cmd/go/internal/tool": true,
|
||||
"cmd/go/internal/txtar": true,
|
||||
"cmd/go/internal/version": true,
|
||||
"cmd/go/internal/vet": true,
|
||||
"cmd/go/internal/web": true,
|
||||
"cmd/go/internal/work": true,
|
||||
"cmd/go/testdata": true,
|
||||
"cmd/go/testdata/badmod": true,
|
||||
"cmd/go/testdata/failssh": true,
|
||||
"cmd/go/testdata/generate": true,
|
||||
"cmd/go/testdata/importcom": true,
|
||||
"cmd/go/testdata/importcom/src/bad": true,
|
||||
"cmd/go/testdata/importcom/src/conflict": true,
|
||||
"cmd/go/testdata/importcom/src/works/x": true,
|
||||
"cmd/go/testdata/importcom/src/wrongplace": true,
|
||||
"cmd/go/testdata/importcycle/src/selfimport": true,
|
||||
"cmd/go/testdata/local": true,
|
||||
"cmd/go/testdata/local/easysub": true,
|
||||
"cmd/go/testdata/local/sub": true,
|
||||
"cmd/go/testdata/local/sub/sub": true,
|
||||
"cmd/go/testdata/mod": true,
|
||||
"cmd/go/testdata/modlegacy/src/new": true,
|
||||
"cmd/go/testdata/modlegacy/src/new/p1": true,
|
||||
"cmd/go/testdata/modlegacy/src/new/p2": true,
|
||||
"cmd/go/testdata/modlegacy/src/new/sub": true,
|
||||
"cmd/go/testdata/modlegacy/src/new/sub/inner": true,
|
||||
"cmd/go/testdata/modlegacy/src/new/sub/inner/x": true,
|
||||
"cmd/go/testdata/modlegacy/src/new/sub/x/v1/y": true,
|
||||
"cmd/go/testdata/modlegacy/src/old/p1": true,
|
||||
"cmd/go/testdata/modlegacy/src/old/p2": true,
|
||||
"cmd/go/testdata/norunexample": true,
|
||||
"cmd/go/testdata/rundir": true,
|
||||
"cmd/go/testdata/rundir/sub": true,
|
||||
"cmd/go/testdata/script": true,
|
||||
"cmd/go/testdata/shadow/root1/src/foo": true,
|
||||
"cmd/go/testdata/shadow/root1/src/math": true,
|
||||
"cmd/go/testdata/shadow/root2/src/foo": true,
|
||||
"cmd/go/testdata/src": true,
|
||||
"cmd/go/testdata/src/badc": true,
|
||||
"cmd/go/testdata/src/badpkg": true,
|
||||
"cmd/go/testdata/src/bench": true,
|
||||
"cmd/go/testdata/src/benchfatal": true,
|
||||
"cmd/go/testdata/src/canonical/a": true,
|
||||
"cmd/go/testdata/src/canonical/a/vendor/c": true,
|
||||
"cmd/go/testdata/src/canonical/b": true,
|
||||
"cmd/go/testdata/src/canonical/d": true,
|
||||
"cmd/go/testdata/src/cgoasm": true,
|
||||
"cmd/go/testdata/src/cgocover": true,
|
||||
"cmd/go/testdata/src/cgocover2": true,
|
||||
"cmd/go/testdata/src/cgocover3": true,
|
||||
"cmd/go/testdata/src/cgocover4": true,
|
||||
"cmd/go/testdata/src/cgotest": true,
|
||||
"cmd/go/testdata/src/coverasm": true,
|
||||
"cmd/go/testdata/src/coverbad": true,
|
||||
"cmd/go/testdata/src/coverdep": true,
|
||||
"cmd/go/testdata/src/coverdep/p1": true,
|
||||
"cmd/go/testdata/src/coverdep2/p1": true,
|
||||
"cmd/go/testdata/src/coverdep2/p2": true,
|
||||
"cmd/go/testdata/src/coverdot1": true,
|
||||
"cmd/go/testdata/src/coverdot2": true,
|
||||
"cmd/go/testdata/src/dupload": true,
|
||||
"cmd/go/testdata/src/dupload/p": true,
|
||||
"cmd/go/testdata/src/dupload/p2": true,
|
||||
"cmd/go/testdata/src/dupload/vendor/p": true,
|
||||
"cmd/go/testdata/src/empty/pkg": true,
|
||||
"cmd/go/testdata/src/empty/pkgtest": true,
|
||||
"cmd/go/testdata/src/empty/pkgtestxtest": true,
|
||||
"cmd/go/testdata/src/empty/pkgxtest": true,
|
||||
"cmd/go/testdata/src/empty/test": true,
|
||||
"cmd/go/testdata/src/empty/testxtest": true,
|
||||
"cmd/go/testdata/src/empty/xtest": true,
|
||||
"cmd/go/testdata/src/exclude": true,
|
||||
"cmd/go/testdata/src/exclude/empty": true,
|
||||
"cmd/go/testdata/src/exclude/ignore": true,
|
||||
"cmd/go/testdata/src/gencycle": true,
|
||||
"cmd/go/testdata/src/go-cmd-test": true,
|
||||
"cmd/go/testdata/src/hello": true,
|
||||
"cmd/go/testdata/src/importmain/ismain": true,
|
||||
"cmd/go/testdata/src/importmain/test": true,
|
||||
"cmd/go/testdata/src/main_test": true,
|
||||
"cmd/go/testdata/src/multimain": true,
|
||||
"cmd/go/testdata/src/my.pkg": true,
|
||||
"cmd/go/testdata/src/my.pkg/main": true,
|
||||
"cmd/go/testdata/src/not_main": true,
|
||||
"cmd/go/testdata/src/notest": true,
|
||||
"cmd/go/testdata/src/run": true,
|
||||
"cmd/go/testdata/src/run/internal": true,
|
||||
"cmd/go/testdata/src/run/subdir/internal/private": true,
|
||||
"cmd/go/testdata/src/skipper": true,
|
||||
"cmd/go/testdata/src/sleepy1": true,
|
||||
"cmd/go/testdata/src/sleepy2": true,
|
||||
"cmd/go/testdata/src/sleepybad": true,
|
||||
"cmd/go/testdata/src/syntaxerror": true,
|
||||
"cmd/go/testdata/src/testcache": true,
|
||||
"cmd/go/testdata/src/testcycle/p1": true,
|
||||
"cmd/go/testdata/src/testcycle/p2": true,
|
||||
"cmd/go/testdata/src/testcycle/p3": true,
|
||||
"cmd/go/testdata/src/testcycle/q1": true,
|
||||
"cmd/go/testdata/src/testdep/p1": true,
|
||||
"cmd/go/testdata/src/testdep/p2": true,
|
||||
"cmd/go/testdata/src/testdep/p3": true,
|
||||
"cmd/go/testdata/src/testlist": true,
|
||||
"cmd/go/testdata/src/testnorun": true,
|
||||
"cmd/go/testdata/src/testrace": true,
|
||||
"cmd/go/testdata/src/testregexp": true,
|
||||
"cmd/go/testdata/src/vend": true,
|
||||
"cmd/go/testdata/src/vend/dir1": true,
|
||||
"cmd/go/testdata/src/vend/hello": true,
|
||||
"cmd/go/testdata/src/vend/subdir": true,
|
||||
"cmd/go/testdata/src/vend/vendor/p": true,
|
||||
"cmd/go/testdata/src/vend/vendor/q": true,
|
||||
"cmd/go/testdata/src/vend/vendor/strings": true,
|
||||
"cmd/go/testdata/src/vend/vendor/vend/dir1/dir2": true,
|
||||
"cmd/go/testdata/src/vend/x": true,
|
||||
"cmd/go/testdata/src/vend/x/invalid": true,
|
||||
"cmd/go/testdata/src/vend/x/vendor/p": true,
|
||||
"cmd/go/testdata/src/vend/x/vendor/p/p": true,
|
||||
"cmd/go/testdata/src/vend/x/vendor/r": true,
|
||||
"cmd/go/testdata/src/vetcycle": true,
|
||||
"cmd/go/testdata/src/vetfail/p1": true,
|
||||
"cmd/go/testdata/src/vetfail/p2": true,
|
||||
"cmd/go/testdata/src/vetpkg": true,
|
||||
"cmd/go/testdata/src/xtestonly": true,
|
||||
"cmd/go/testdata/testcover/pkg1": true,
|
||||
"cmd/go/testdata/testcover/pkg2": true,
|
||||
"cmd/go/testdata/testcover/pkg3": true,
|
||||
"cmd/go/testdata/testcover/pkg4": true,
|
||||
"cmd/go/testdata/testimport": true,
|
||||
"cmd/go/testdata/testimport/p1": true,
|
||||
"cmd/go/testdata/testimport/p2": true,
|
||||
"cmd/go/testdata/testinternal": true,
|
||||
"cmd/go/testdata/testinternal2": true,
|
||||
"cmd/go/testdata/testinternal2/x/y/z/internal/w": true,
|
||||
"cmd/go/testdata/testinternal3": true,
|
||||
"cmd/go/testdata/testinternal4/src/p": true,
|
||||
"cmd/go/testdata/testinternal4/src/q/internal/x": true,
|
||||
"cmd/go/testdata/testinternal4/src/q/j": true,
|
||||
"cmd/go/testdata/testonly": true,
|
||||
"cmd/go/testdata/testonly2": true,
|
||||
"cmd/go/testdata/testterminal18153": true,
|
||||
"cmd/go/testdata/testvendor/src/p": true,
|
||||
"cmd/go/testdata/testvendor/src/q/vendor/x": true,
|
||||
"cmd/go/testdata/testvendor/src/q/y": true,
|
||||
"cmd/go/testdata/testvendor/src/q/z": true,
|
||||
"cmd/go/testdata/testvendor2/src/p": true,
|
||||
"cmd/go/testdata/testvendor2/vendor/x": true,
|
||||
"cmd/gofmt": true,
|
||||
"cmd/gofmt/testdata": true,
|
||||
"cmd/internal/bio": true,
|
||||
"cmd/internal/browser": true,
|
||||
"cmd/internal/buildid": true,
|
||||
"cmd/internal/buildid/testdata": true,
|
||||
"cmd/internal/dwarf": true,
|
||||
"cmd/internal/edit": true,
|
||||
"cmd/internal/gcprog": true,
|
||||
"cmd/internal/goobj": true,
|
||||
"cmd/internal/goobj/testdata": true,
|
||||
"cmd/internal/goobj/testdata/mycgo": true,
|
||||
"cmd/internal/obj": true,
|
||||
"cmd/internal/obj/arm": true,
|
||||
"cmd/internal/obj/arm64": true,
|
||||
"cmd/internal/obj/mips": true,
|
||||
"cmd/internal/obj/ppc64": true,
|
||||
"cmd/internal/obj/s390x": true,
|
||||
"cmd/internal/obj/wasm": true,
|
||||
"cmd/internal/obj/x86": true,
|
||||
"cmd/internal/objabi": true,
|
||||
"cmd/internal/objfile": true,
|
||||
"cmd/internal/src": true,
|
||||
"cmd/internal/sys": true,
|
||||
"cmd/internal/test2json": true,
|
||||
"cmd/internal/test2json/testdata": true,
|
||||
"cmd/link": true,
|
||||
"cmd/link/internal/amd64": true,
|
||||
"cmd/link/internal/arm": true,
|
||||
"cmd/link/internal/arm64": true,
|
||||
"cmd/link/internal/ld": true,
|
||||
"cmd/link/internal/ld/testdata/httptest/main": true,
|
||||
"cmd/link/internal/ld/testdata/issue10978": true,
|
||||
"cmd/link/internal/ld/testdata/issue25459/a": true,
|
||||
"cmd/link/internal/ld/testdata/issue25459/main": true,
|
||||
"cmd/link/internal/ld/testdata/issue26237/b.dir": true,
|
||||
"cmd/link/internal/ld/testdata/issue26237/main": true,
|
||||
"cmd/link/internal/ld/testdata/issue32233/lib": true,
|
||||
"cmd/link/internal/ld/testdata/issue32233/main": true,
|
||||
"cmd/link/internal/loadelf": true,
|
||||
"cmd/link/internal/loadmacho": true,
|
||||
"cmd/link/internal/loadpe": true,
|
||||
"cmd/link/internal/loadxcoff": true,
|
||||
"cmd/link/internal/mips": true,
|
||||
"cmd/link/internal/mips64": true,
|
||||
"cmd/link/internal/objfile": true,
|
||||
"cmd/link/internal/ppc64": true,
|
||||
"cmd/link/internal/s390x": true,
|
||||
"cmd/link/internal/sym": true,
|
||||
"cmd/link/internal/wasm": true,
|
||||
"cmd/link/internal/x86": true,
|
||||
"cmd/link/testdata": true,
|
||||
"cmd/nm": true,
|
||||
"cmd/objdump": true,
|
||||
"cmd/objdump/testdata": true,
|
||||
"cmd/pack": true,
|
||||
"cmd/pprof": true,
|
||||
"cmd/test2json": true,
|
||||
"cmd/trace": true,
|
||||
"cmd/vendor": true,
|
||||
"cmd/vendor/github.com/google/pprof": true,
|
||||
"cmd/vendor/github.com/google/pprof/driver": true,
|
||||
"cmd/vendor/github.com/google/pprof/internal/binutils": true,
|
||||
"cmd/vendor/github.com/google/pprof/internal/driver": true,
|
||||
"cmd/vendor/github.com/google/pprof/internal/elfexec": true,
|
||||
"cmd/vendor/github.com/google/pprof/internal/graph": true,
|
||||
"cmd/vendor/github.com/google/pprof/internal/measurement": true,
|
||||
"cmd/vendor/github.com/google/pprof/internal/plugin": true,
|
||||
"cmd/vendor/github.com/google/pprof/internal/report": true,
|
||||
"cmd/vendor/github.com/google/pprof/internal/symbolizer": true,
|
||||
"cmd/vendor/github.com/google/pprof/internal/symbolz": true,
|
||||
"cmd/vendor/github.com/google/pprof/internal/transport": true,
|
||||
"cmd/vendor/github.com/google/pprof/profile": true,
|
||||
"cmd/vendor/github.com/google/pprof/third_party/d3": true,
|
||||
"cmd/vendor/github.com/google/pprof/third_party/d3flamegraph": true,
|
||||
"cmd/vendor/github.com/google/pprof/third_party/svgpan": true,
|
||||
"cmd/vendor/github.com/ianlancetaylor/demangle": true,
|
||||
"cmd/vendor/golang.org/x/arch": true,
|
||||
"cmd/vendor/golang.org/x/arch/arm/armasm": true,
|
||||
"cmd/vendor/golang.org/x/arch/arm64/arm64asm": true,
|
||||
"cmd/vendor/golang.org/x/arch/ppc64/ppc64asm": true,
|
||||
"cmd/vendor/golang.org/x/arch/x86/x86asm": true,
|
||||
"cmd/vendor/golang.org/x/crypto": true,
|
||||
"cmd/vendor/golang.org/x/crypto/ssh/terminal": true,
|
||||
"cmd/vendor/golang.org/x/sys": true,
|
||||
"cmd/vendor/golang.org/x/sys/unix": true,
|
||||
"cmd/vendor/golang.org/x/sys/windows": true,
|
||||
"cmd/vendor/golang.org/x/tools": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/internal/facts": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/assign": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/bools": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/composite": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/inspect": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/printf": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/shift": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/structtag": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/tests": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/analysis/unitchecker": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/ast/astutil": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/ast/inspector": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/cfg": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/types/objectpath": true,
|
||||
"cmd/vendor/golang.org/x/tools/go/types/typeutil": true,
|
||||
"cmd/vet": true,
|
||||
"cmd/vet/testdata/asm": true,
|
||||
"cmd/vet/testdata/assign": true,
|
||||
"cmd/vet/testdata/atomic": true,
|
||||
"cmd/vet/testdata/bool": true,
|
||||
"cmd/vet/testdata/buildtag": true,
|
||||
"cmd/vet/testdata/cgo": true,
|
||||
"cmd/vet/testdata/composite": true,
|
||||
"cmd/vet/testdata/copylock": true,
|
||||
"cmd/vet/testdata/deadcode": true,
|
||||
"cmd/vet/testdata/httpresponse": true,
|
||||
"cmd/vet/testdata/lostcancel": true,
|
||||
"cmd/vet/testdata/method": true,
|
||||
"cmd/vet/testdata/nilfunc": true,
|
||||
"cmd/vet/testdata/print": true,
|
||||
"cmd/vet/testdata/rangeloop": true,
|
||||
"cmd/vet/testdata/shift": true,
|
||||
"cmd/vet/testdata/structtag": true,
|
||||
"cmd/vet/testdata/tagtest": true,
|
||||
"cmd/vet/testdata/testingpkg": true,
|
||||
"cmd/vet/testdata/unmarshal": true,
|
||||
"cmd/vet/testdata/unsafeptr": true,
|
||||
"cmd/vet/testdata/unused": true,
|
||||
"compress/bzip2": true,
|
||||
"compress/bzip2/testdata": true,
|
||||
"compress/flate": true,
|
||||
"compress/flate/testdata": true,
|
||||
"compress/gzip": true,
|
||||
"compress/gzip/testdata": true,
|
||||
"compress/lzw": true,
|
||||
"compress/testdata": true,
|
||||
"compress/zlib": true,
|
||||
"container/heap": true,
|
||||
"container/list": true,
|
||||
"container/ring": true,
|
||||
"context": true,
|
||||
"crypto": true,
|
||||
"crypto/aes": true,
|
||||
"crypto/cipher": true,
|
||||
"crypto/des": true,
|
||||
"crypto/dsa": true,
|
||||
"crypto/ecdsa": true,
|
||||
"crypto/ecdsa/testdata": true,
|
||||
"crypto/ed25519": true,
|
||||
"crypto/ed25519/internal/edwards25519": true,
|
||||
"crypto/ed25519/testdata": true,
|
||||
"crypto/elliptic": true,
|
||||
"crypto/hmac": true,
|
||||
"crypto/internal/randutil": true,
|
||||
"crypto/internal/subtle": true,
|
||||
"crypto/md5": true,
|
||||
"crypto/rand": true,
|
||||
"crypto/rc4": true,
|
||||
"crypto/rsa": true,
|
||||
"crypto/rsa/testdata": true,
|
||||
"crypto/sha1": true,
|
||||
"crypto/sha256": true,
|
||||
"crypto/sha512": true,
|
||||
"crypto/subtle": true,
|
||||
"crypto/tls": true,
|
||||
"crypto/tls/testdata": true,
|
||||
"crypto/x509": true,
|
||||
"crypto/x509/pkix": true,
|
||||
"crypto/x509/testdata": true,
|
||||
"database/sql": true,
|
||||
"database/sql/driver": true,
|
||||
"debug/dwarf": true,
|
||||
"debug/dwarf/testdata": true,
|
||||
"debug/elf": true,
|
||||
"debug/elf/testdata": true,
|
||||
"debug/gosym": true,
|
||||
"debug/gosym/testdata": true,
|
||||
"debug/macho": true,
|
||||
"debug/macho/testdata": true,
|
||||
"debug/pe": true,
|
||||
"debug/pe/testdata": true,
|
||||
"debug/plan9obj": true,
|
||||
"debug/plan9obj/testdata": true,
|
||||
"encoding": true,
|
||||
"encoding/ascii85": true,
|
||||
"encoding/asn1": true,
|
||||
"encoding/base32": true,
|
||||
"encoding/base64": true,
|
||||
"encoding/binary": true,
|
||||
"encoding/csv": true,
|
||||
"encoding/gob": true,
|
||||
"encoding/hex": true,
|
||||
"encoding/json": true,
|
||||
"encoding/json/testdata": true,
|
||||
"encoding/pem": true,
|
||||
"encoding/xml": true,
|
||||
"errors": true,
|
||||
"expvar": true,
|
||||
"flag": true,
|
||||
"fmt": true,
|
||||
"go/ast": true,
|
||||
"go/build": true,
|
||||
"go/build/testdata/doc": true,
|
||||
"go/build/testdata/empty": true,
|
||||
"go/build/testdata/multi": true,
|
||||
"go/build/testdata/other": true,
|
||||
"go/build/testdata/other/file": true,
|
||||
"go/build/testdata/withvendor/src/a/b": true,
|
||||
"go/build/testdata/withvendor/src/a/vendor/c/d": true,
|
||||
"go/constant": true,
|
||||
"go/doc": true,
|
||||
"go/doc/testdata": true,
|
||||
"go/format": true,
|
||||
"go/importer": true,
|
||||
"go/internal/gccgoimporter": true,
|
||||
"go/internal/gccgoimporter/testdata": true,
|
||||
"go/internal/gcimporter": true,
|
||||
"go/internal/gcimporter/testdata": true,
|
||||
"go/internal/gcimporter/testdata/versions": true,
|
||||
"go/internal/srcimporter": true,
|
||||
"go/internal/srcimporter/testdata/issue20855": true,
|
||||
"go/internal/srcimporter/testdata/issue23092": true,
|
||||
"go/internal/srcimporter/testdata/issue24392": true,
|
||||
"go/parser": true,
|
||||
"go/parser/testdata": true,
|
||||
"go/printer": true,
|
||||
"go/printer/testdata": true,
|
||||
"go/scanner": true,
|
||||
"go/token": true,
|
||||
"go/types": true,
|
||||
"go/types/testdata": true,
|
||||
"hash": true,
|
||||
"hash/adler32": true,
|
||||
"hash/crc32": true,
|
||||
"hash/crc64": true,
|
||||
"hash/fnv": true,
|
||||
"html": true,
|
||||
"html/template": true,
|
||||
"image": true,
|
||||
"image/color": true,
|
||||
"image/color/palette": true,
|
||||
"image/draw": true,
|
||||
"image/gif": true,
|
||||
"image/internal/imageutil": true,
|
||||
"image/jpeg": true,
|
||||
"image/png": true,
|
||||
"image/png/testdata": true,
|
||||
"image/png/testdata/pngsuite": true,
|
||||
"image/testdata": true,
|
||||
"index/suffixarray": true,
|
||||
"internal/bytealg": true,
|
||||
"internal/cfg": true,
|
||||
"internal/cpu": true,
|
||||
"internal/fmtsort": true,
|
||||
"internal/goroot": true,
|
||||
"internal/goversion": true,
|
||||
"internal/lazyregexp": true,
|
||||
"internal/lazytemplate": true,
|
||||
"internal/nettrace": true,
|
||||
"internal/oserror": true,
|
||||
"internal/poll": true,
|
||||
"internal/race": true,
|
||||
"internal/reflectlite": true,
|
||||
"internal/singleflight": true,
|
||||
"internal/syscall/unix": true,
|
||||
"internal/syscall/windows": true,
|
||||
"internal/syscall/windows/registry": true,
|
||||
"internal/syscall/windows/sysdll": true,
|
||||
"internal/testenv": true,
|
||||
"internal/testlog": true,
|
||||
"internal/trace": true,
|
||||
"internal/trace/testdata": true,
|
||||
"internal/xcoff": true,
|
||||
"internal/xcoff/testdata": true,
|
||||
"io": true,
|
||||
"io/ioutil": true,
|
||||
"io/ioutil/testdata": true,
|
||||
"log": true,
|
||||
"log/syslog": true,
|
||||
"math": true,
|
||||
"math/big": true,
|
||||
"math/bits": true,
|
||||
"math/cmplx": true,
|
||||
"math/rand": true,
|
||||
"mime": true,
|
||||
"mime/multipart": true,
|
||||
"mime/multipart/testdata": true,
|
||||
"mime/quotedprintable": true,
|
||||
"mime/testdata": true,
|
||||
"net": true,
|
||||
"net/http": true,
|
||||
"net/http/cgi": true,
|
||||
"net/http/cgi/testdata": true,
|
||||
"net/http/cookiejar": true,
|
||||
"net/http/fcgi": true,
|
||||
"net/http/httptest": true,
|
||||
"net/http/httptrace": true,
|
||||
"net/http/httputil": true,
|
||||
"net/http/internal": true,
|
||||
"net/http/pprof": true,
|
||||
"net/http/testdata": true,
|
||||
"net/internal/socktest": true,
|
||||
"net/mail": true,
|
||||
"net/rpc": true,
|
||||
"net/rpc/jsonrpc": true,
|
||||
"net/smtp": true,
|
||||
"net/testdata": true,
|
||||
"net/textproto": true,
|
||||
"net/url": true,
|
||||
"os": true,
|
||||
"os/exec": true,
|
||||
"os/signal": true,
|
||||
"os/signal/internal/pty": true,
|
||||
"os/user": true,
|
||||
"path": true,
|
||||
"path/filepath": true,
|
||||
"plugin": true,
|
||||
"reflect": true,
|
||||
"regexp": true,
|
||||
"regexp/syntax": true,
|
||||
"regexp/testdata": true,
|
||||
"runtime": true,
|
||||
"runtime/cgo": true,
|
||||
"runtime/debug": true,
|
||||
"runtime/internal/atomic": true,
|
||||
"runtime/internal/math": true,
|
||||
"runtime/internal/sys": true,
|
||||
"runtime/msan": true,
|
||||
"runtime/pprof": true,
|
||||
"runtime/pprof/internal/profile": true,
|
||||
"runtime/pprof/testdata": true,
|
||||
"runtime/pprof/testdata/mappingtest": true,
|
||||
"runtime/race": true,
|
||||
"runtime/race/testdata": true,
|
||||
"runtime/testdata/testprog": true,
|
||||
"runtime/testdata/testprogcgo": true,
|
||||
"runtime/testdata/testprogcgo/windows": true,
|
||||
"runtime/testdata/testprognet": true,
|
||||
"runtime/trace": true,
|
||||
"sort": true,
|
||||
"strconv": true,
|
||||
"strconv/testdata": true,
|
||||
"strings": true,
|
||||
"sync": true,
|
||||
"sync/atomic": true,
|
||||
"syscall": true,
|
||||
"syscall/js": true,
|
||||
"testdata": true,
|
||||
"testing": true,
|
||||
"testing/internal/testdeps": true,
|
||||
"testing/iotest": true,
|
||||
"testing/quick": true,
|
||||
"text/scanner": true,
|
||||
"text/tabwriter": true,
|
||||
"text/template": true,
|
||||
"text/template/parse": true,
|
||||
"text/template/testdata": true,
|
||||
"time": true,
|
||||
"unicode": true,
|
||||
"unicode/utf16": true,
|
||||
"unicode/utf8": true,
|
||||
"unsafe": true,
|
||||
"vendor": true,
|
||||
"vendor/golang.org/x/crypto": true,
|
||||
"vendor/golang.org/x/crypto/chacha20poly1305": true,
|
||||
"vendor/golang.org/x/crypto/cryptobyte": true,
|
||||
"vendor/golang.org/x/crypto/cryptobyte/asn1": true,
|
||||
"vendor/golang.org/x/crypto/curve25519": true,
|
||||
"vendor/golang.org/x/crypto/hkdf": true,
|
||||
"vendor/golang.org/x/crypto/internal/chacha20": true,
|
||||
"vendor/golang.org/x/crypto/internal/subtle": true,
|
||||
"vendor/golang.org/x/crypto/poly1305": true,
|
||||
"vendor/golang.org/x/net": true,
|
||||
"vendor/golang.org/x/net/dns/dnsmessage": true,
|
||||
"vendor/golang.org/x/net/http/httpguts": true,
|
||||
"vendor/golang.org/x/net/http/httpproxy": true,
|
||||
"vendor/golang.org/x/net/http2/hpack": true,
|
||||
"vendor/golang.org/x/net/idna": true,
|
||||
"vendor/golang.org/x/net/lif": true,
|
||||
"vendor/golang.org/x/net/nettest": true,
|
||||
"vendor/golang.org/x/net/route": true,
|
||||
"vendor/golang.org/x/sys": true,
|
||||
"vendor/golang.org/x/sys/cpu": true,
|
||||
"vendor/golang.org/x/text": true,
|
||||
"vendor/golang.org/x/text/secure/bidirule": true,
|
||||
"vendor/golang.org/x/text/transform": true,
|
||||
"vendor/golang.org/x/text/unicode/bidi": true,
|
||||
"vendor/golang.org/x/text/unicode/norm": true,
|
||||
}
|
||||
|
|
@ -1,109 +0,0 @@
|
|||
/* Copyright 2019 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package golang
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/language"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// UpdateRepos generates go_repository rules corresponding to modules in
|
||||
// args.Imports. Each module argument may specify a version with an '@' suffix
|
||||
// (in the same format as 'go get'). If no version is specified, @latest
|
||||
// is requested.
|
||||
func (*goLang) UpdateRepos(args language.UpdateReposArgs) language.UpdateReposResult {
|
||||
gen := make([]*rule.Rule, len(args.Imports))
|
||||
var eg errgroup.Group
|
||||
for i := range args.Imports {
|
||||
i := i
|
||||
eg.Go(func() error {
|
||||
arg := args.Imports[i]
|
||||
modPath, query := arg, "latest"
|
||||
if i := strings.IndexByte(arg, '@'); i >= 0 {
|
||||
modPath, query = arg[:i], arg[i+1:]
|
||||
}
|
||||
name, version, sum, err := args.Cache.ModVersion(modPath, query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gen[i] = rule.NewRule("go_repository", name)
|
||||
gen[i].SetAttr("importpath", modPath)
|
||||
gen[i].SetAttr("version", version)
|
||||
gen[i].SetAttr("sum", sum)
|
||||
setBuildAttrs(getGoConfig(args.Config), gen[i])
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := eg.Wait(); err != nil {
|
||||
return language.UpdateReposResult{Error: err}
|
||||
}
|
||||
return language.UpdateReposResult{Gen: gen}
|
||||
}
|
||||
|
||||
var repoImportFuncs = map[string]func(args language.ImportReposArgs) language.ImportReposResult{
|
||||
"Gopkg.lock": importReposFromDep,
|
||||
"go.mod": importReposFromModules,
|
||||
"Godeps.json": importReposFromGodep,
|
||||
}
|
||||
|
||||
func (*goLang) CanImport(path string) bool {
|
||||
return repoImportFuncs[filepath.Base(path)] != nil
|
||||
}
|
||||
|
||||
func (*goLang) ImportRepos(args language.ImportReposArgs) language.ImportReposResult {
|
||||
res := repoImportFuncs[filepath.Base(args.Path)](args)
|
||||
for _, r := range res.Gen {
|
||||
setBuildAttrs(getGoConfig(args.Config), r)
|
||||
}
|
||||
if args.Prune {
|
||||
genNamesSet := make(map[string]bool)
|
||||
for _, r := range res.Gen {
|
||||
genNamesSet[r.Name()] = true
|
||||
}
|
||||
for _, r := range args.Config.Repos {
|
||||
if name := r.Name(); r.Kind() == "go_repository" && !genNamesSet[name] {
|
||||
res.Empty = append(res.Empty, rule.NewRule("go_repository", name))
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func setBuildAttrs(gc *goConfig, r *rule.Rule) {
|
||||
if gc.buildExternalAttr != "" {
|
||||
r.SetAttr("build_external", gc.buildExternalAttr)
|
||||
}
|
||||
if gc.buildFileNamesAttr != "" {
|
||||
r.SetAttr("build_file_name", gc.buildFileNamesAttr)
|
||||
}
|
||||
if gc.buildFileGenerationAttr != "" {
|
||||
r.SetAttr("build_file_generation", gc.buildFileGenerationAttr)
|
||||
}
|
||||
if gc.buildTagsAttr != "" {
|
||||
r.SetAttr("build_tags", gc.buildTagsAttr)
|
||||
}
|
||||
if gc.buildFileProtoModeAttr != "" {
|
||||
r.SetAttr("build_file_proto_mode", gc.buildFileProtoModeAttr)
|
||||
}
|
||||
if gc.buildExtraArgsAttr != "" {
|
||||
extraArgs := strings.Split(gc.buildExtraArgsAttr, ",")
|
||||
r.SetAttr("build_extra_args", extraArgs)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,151 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package language provides an interface for language extensions in Gazelle.
|
||||
// Support for a new language can be added by defining a package with a
|
||||
// function named "New" that returns a value assignable to this interface.
|
||||
//
|
||||
// TODO(jayconrod): document how to incorporate languages into a gazelle
|
||||
// binary that can be run by Bazel.
|
||||
package language
|
||||
|
||||
import (
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/resolve"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// Language describes an extension for Gazelle that provides support for
|
||||
// a set of Bazel rules.
|
||||
//
|
||||
// Languages are used primarily by the fix and update commands. The order
|
||||
// in which languages are used matters, since languages may depend on
|
||||
// one another. For example, go depends on proto, since go_proto_libraries
|
||||
// are generated from metadata stored in proto_libraries.
|
||||
//
|
||||
// A single instance of Language is created for each fix / update run. Some
|
||||
// state may be stored in this instance, but stateless behavior is encouraged,
|
||||
// especially since some operations may be concurrent in the future.
|
||||
//
|
||||
// Tasks languages are used for
|
||||
//
|
||||
// * Configuration (embedded interface config.Configurer). Languages may
|
||||
// define command line flags and alter the configuration in a directory
|
||||
// based on directives in build files.
|
||||
//
|
||||
// * Fixing deprecated usage of rules in build files.
|
||||
//
|
||||
// * Generating rules from source files in a directory.
|
||||
//
|
||||
// * Resolving library imports (embedded interface resolve.Resolver). For
|
||||
// example, import strings like "github.com/foo/bar" in Go can be resolved
|
||||
// into Bazel labels like "@com_github_foo_bar//:go_default_library".
|
||||
//
|
||||
// Tasks languages support
|
||||
//
|
||||
// * Generating load statements: languages list files and symbols that may
|
||||
// be loaded.
|
||||
//
|
||||
// * Merging generated rules into existing rules: languages provide metadata
|
||||
// that helps with rule matching, merging, and deletion.
|
||||
type Language interface {
|
||||
// TODO(jayconrod): is embedding Configurer strictly necessary?
|
||||
config.Configurer
|
||||
resolve.Resolver
|
||||
|
||||
// Kinds returns a map of maps rule names (kinds) and information on how to
|
||||
// match and merge attributes that may be found in rules of those kinds. All
|
||||
// kinds of rules generated for this language may be found here.
|
||||
Kinds() map[string]rule.KindInfo
|
||||
|
||||
// Loads returns .bzl files and symbols they define. Every rule generated by
|
||||
// GenerateRules, now or in the past, should be loadable from one of these
|
||||
// files.
|
||||
Loads() []rule.LoadInfo
|
||||
|
||||
// GenerateRules extracts build metadata from source files in a directory.
|
||||
// GenerateRules is called in each directory where an update is requested
|
||||
// in depth-first post-order.
|
||||
//
|
||||
// args contains the arguments for GenerateRules. This is passed as a
|
||||
// struct to avoid breaking implementations in the future when new
|
||||
// fields are added.
|
||||
//
|
||||
// A GenerateResult struct is returned. Optional fields may be added to this
|
||||
// type in the future.
|
||||
//
|
||||
// Any non-fatal errors this function encounters should be logged using
|
||||
// log.Print.
|
||||
GenerateRules(args GenerateArgs) GenerateResult
|
||||
|
||||
// Fix repairs deprecated usage of language-specific rules in f. This is
|
||||
// called before the file is indexed. Unless c.ShouldFix is true, fixes
|
||||
// that delete or rename rules should not be performed.
|
||||
Fix(c *config.Config, f *rule.File)
|
||||
}
|
||||
|
||||
// GenerateArgs contains arguments for language.GenerateRules. Arguments are
|
||||
// passed in a struct value so that new fields may be added in the future
|
||||
// without breaking existing implementations.
|
||||
type GenerateArgs struct {
|
||||
// Config is the configuration for the directory where rules are being
|
||||
// generated.
|
||||
Config *config.Config
|
||||
|
||||
// Dir is the canonical absolute path to the directory.
|
||||
Dir string
|
||||
|
||||
// Rel is the slash-separated path to the directory, relative to the
|
||||
// repository root ("" for the root directory itself). This may be used
|
||||
// as the package name in labels.
|
||||
Rel string
|
||||
|
||||
// File is the build file for the directory. File is nil if there is
|
||||
// no existing build file.
|
||||
File *rule.File
|
||||
|
||||
// Subdirs is a list of subdirectories in the directory, including
|
||||
// symbolic links to directories that Gazelle will follow.
|
||||
// RegularFiles is a list of regular files including other symbolic
|
||||
// links.
|
||||
// GeneratedFiles is a list of generated files in the directory
|
||||
// (usually these are mentioned as "out" or "outs" attributes in rules).
|
||||
Subdirs, RegularFiles, GenFiles []string
|
||||
|
||||
// OtherEmpty is a list of empty rules generated by other languages.
|
||||
// OtherGen is a list of generated rules generated by other languages.
|
||||
OtherEmpty, OtherGen []*rule.Rule
|
||||
}
|
||||
|
||||
// GenerateResult contains return values for language.GenerateRules.
|
||||
// Results are returned through a struct value so that new (optional)
|
||||
// fields may be added without breaking existing implementations.
|
||||
type GenerateResult struct {
|
||||
// Gen is a list of rules generated from files found in the directory
|
||||
// GenerateRules was asked to process. These will be merged with existing
|
||||
// rules or added to the build file.
|
||||
Gen []*rule.Rule
|
||||
|
||||
// Empty is a list of rules that cannot be built with the files found in the
|
||||
// directory GenerateRules was asked to process. These will be merged with
|
||||
// existing rules. If ther merged rules are empty, they will be deleted.
|
||||
Empty []*rule.Rule
|
||||
|
||||
// Imports contains information about the imported libraries for each
|
||||
// rule in Gen. Gen and Imports must have the same length, since they
|
||||
// correspond. These values are passed to Resolve after merge. The type
|
||||
// is opaque since different languages may use different representations.
|
||||
Imports []interface{}
|
||||
}
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"config.go",
|
||||
"constants.go",
|
||||
"fileinfo.go",
|
||||
"fix.go",
|
||||
"generate.go",
|
||||
"kinds.go",
|
||||
"known_imports.go",
|
||||
"lang.go",
|
||||
"package.go",
|
||||
"resolve.go",
|
||||
],
|
||||
importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/language/proto",
|
||||
importpath = "github.com/bazelbuild/bazel-gazelle/language/proto",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/config:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/label:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/language:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/repo:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/resolve:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/rule:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
@ -1,284 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// ProtoConfig contains configuration values related to protos.
|
||||
//
|
||||
// This type is public because other languages need to generate rules based
|
||||
// on protos, so this configuration may be relevant to them.
|
||||
type ProtoConfig struct {
|
||||
// Mode determines how rules are generated for protos.
|
||||
Mode Mode
|
||||
|
||||
// ModeExplicit indicates whether the proto mode was set explicitly.
|
||||
ModeExplicit bool
|
||||
|
||||
// GoPrefix is the current Go prefix (the Go extension may set this in the
|
||||
// root directory only). Used to generate proto rule names in the root
|
||||
// directory when there are no proto files or the proto package name
|
||||
// can't be determined.
|
||||
// TODO(jayconrod): deprecate and remove Go-specific behavior.
|
||||
GoPrefix string
|
||||
|
||||
// groupOption is an option name that Gazelle will use to group .proto
|
||||
// files into proto_library rules. If unset, the proto package name is used.
|
||||
groupOption string
|
||||
|
||||
// stripImportPrefix The prefix to strip from the paths of the .proto files.
|
||||
// If set, Gazelle will apply this value to the strip_import_prefix attribute
|
||||
// within the proto_library_rule.
|
||||
stripImportPrefix string
|
||||
|
||||
// importPrefix The prefix to add to the paths of the .proto files.
|
||||
// If set, Gazelle will apply this value to the import_prefix attribute
|
||||
// within the proto_library_rule.
|
||||
importPrefix string
|
||||
}
|
||||
|
||||
// GetProtoConfig returns the proto language configuration. If the proto
|
||||
// extension was not run, it will return nil.
|
||||
func GetProtoConfig(c *config.Config) *ProtoConfig {
|
||||
pc := c.Exts[protoName]
|
||||
if pc == nil {
|
||||
return nil
|
||||
}
|
||||
return pc.(*ProtoConfig)
|
||||
}
|
||||
|
||||
// Mode determines how proto rules are generated.
|
||||
type Mode int
|
||||
|
||||
const (
|
||||
// DefaultMode generates proto_library rules. Other languages should generate
|
||||
// library rules based on these (e.g., go_proto_library) and should ignore
|
||||
// checked-in generated files (e.g., .pb.go files) when there is a .proto
|
||||
// file with a similar name.
|
||||
DefaultMode Mode = iota
|
||||
|
||||
// DisableMode ignores .proto files and generates empty proto_library rules.
|
||||
// Checked-in generated files (e.g., .pb.go files) should be treated as
|
||||
// normal sources.
|
||||
DisableMode
|
||||
|
||||
// DisableGlobalMode is similar to DisableMode, but it also prevents
|
||||
// the use of special cases in dependency resolution for well known types
|
||||
// and Google APIs.
|
||||
DisableGlobalMode
|
||||
|
||||
// LegacyMode generates filegroups for .proto files if .pb.go files are
|
||||
// present in the same directory.
|
||||
LegacyMode
|
||||
|
||||
// PackageMode generates a proto_library for each set of .proto files with
|
||||
// the same package name in each directory.
|
||||
PackageMode
|
||||
)
|
||||
|
||||
func ModeFromString(s string) (Mode, error) {
|
||||
switch s {
|
||||
case "default":
|
||||
return DefaultMode, nil
|
||||
case "disable":
|
||||
return DisableMode, nil
|
||||
case "disable_global":
|
||||
return DisableGlobalMode, nil
|
||||
case "legacy":
|
||||
return LegacyMode, nil
|
||||
case "package":
|
||||
return PackageMode, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("unrecognized proto mode: %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
func (m Mode) String() string {
|
||||
switch m {
|
||||
case DefaultMode:
|
||||
return "default"
|
||||
case DisableMode:
|
||||
return "disable"
|
||||
case DisableGlobalMode:
|
||||
return "disable_global"
|
||||
case LegacyMode:
|
||||
return "legacy"
|
||||
case PackageMode:
|
||||
return "package"
|
||||
default:
|
||||
log.Panicf("unknown mode %d", m)
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func (m Mode) ShouldGenerateRules() bool {
|
||||
switch m {
|
||||
case DisableMode, DisableGlobalMode, LegacyMode:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (m Mode) ShouldIncludePregeneratedFiles() bool {
|
||||
switch m {
|
||||
case DisableMode, DisableGlobalMode, LegacyMode:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (m Mode) ShouldUseKnownImports() bool {
|
||||
return m != DisableGlobalMode
|
||||
}
|
||||
|
||||
type modeFlag struct {
|
||||
mode *Mode
|
||||
}
|
||||
|
||||
func (f *modeFlag) Set(value string) error {
|
||||
if mode, err := ModeFromString(value); err != nil {
|
||||
return err
|
||||
} else {
|
||||
*f.mode = mode
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (f *modeFlag) String() string {
|
||||
var mode Mode
|
||||
if f != nil && f.mode != nil {
|
||||
mode = *f.mode
|
||||
}
|
||||
return mode.String()
|
||||
}
|
||||
|
||||
func (_ *protoLang) RegisterFlags(fs *flag.FlagSet, cmd string, c *config.Config) {
|
||||
pc := &ProtoConfig{}
|
||||
c.Exts[protoName] = pc
|
||||
|
||||
// Note: the -proto flag does not set the ModeExplicit flag. We want to
|
||||
// be able to switch to DisableMode in vendor directories, even when
|
||||
// this is set for compatibility with older versions.
|
||||
fs.Var(&modeFlag{&pc.Mode}, "proto", "default: generates a proto_library rule for one package\n\tpackage: generates a proto_library rule for for each package\n\tdisable: does not touch proto rules\n\tdisable_global: does not touch proto rules and does not use special cases for protos in dependency resolution")
|
||||
fs.StringVar(&pc.groupOption, "proto_group", "", "option name used to group .proto files into proto_library rules")
|
||||
fs.StringVar(&pc.importPrefix, "proto_import_prefix", "", "When set, .proto source files in the srcs attribute of the rule are accessible at their path with this prefix appended on.")
|
||||
}
|
||||
|
||||
func (_ *protoLang) CheckFlags(fs *flag.FlagSet, c *config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_ *protoLang) KnownDirectives() []string {
|
||||
return []string{"proto", "proto_group", "proto_strip_import_prefix", "proto_import_prefix"}
|
||||
}
|
||||
|
||||
func (_ *protoLang) Configure(c *config.Config, rel string, f *rule.File) {
|
||||
pc := &ProtoConfig{}
|
||||
*pc = *GetProtoConfig(c)
|
||||
c.Exts[protoName] = pc
|
||||
if f != nil {
|
||||
for _, d := range f.Directives {
|
||||
switch d.Key {
|
||||
case "proto":
|
||||
mode, err := ModeFromString(d.Value)
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
continue
|
||||
}
|
||||
pc.Mode = mode
|
||||
pc.ModeExplicit = true
|
||||
case "proto_group":
|
||||
pc.groupOption = d.Value
|
||||
case "proto_strip_import_prefix":
|
||||
pc.stripImportPrefix = d.Value
|
||||
if rel != "" {
|
||||
if err := checkStripImportPrefix(pc.stripImportPrefix, rel); err != nil {
|
||||
log.Print(err)
|
||||
}
|
||||
}
|
||||
case "proto_import_prefix":
|
||||
pc.importPrefix = d.Value
|
||||
}
|
||||
}
|
||||
}
|
||||
inferProtoMode(c, rel, f)
|
||||
}
|
||||
|
||||
// inferProtoMode sets ProtoConfig.Mode based on the directory name and the
|
||||
// contents of f. If the proto mode is set explicitly, this function does not
|
||||
// change it. If this is a vendor directory, or go_proto_library is loaded from
|
||||
// another file, proto rule generation is disabled.
|
||||
//
|
||||
// TODO(jayconrod): this logic is archaic, now that rules are generated by
|
||||
// separate language extensions. Proto rule generation should be independent
|
||||
// from Go.
|
||||
func inferProtoMode(c *config.Config, rel string, f *rule.File) {
|
||||
pc := GetProtoConfig(c)
|
||||
if pc.Mode != DefaultMode || pc.ModeExplicit {
|
||||
return
|
||||
}
|
||||
if pc.GoPrefix == wellKnownTypesGoPrefix {
|
||||
pc.Mode = LegacyMode
|
||||
return
|
||||
}
|
||||
if path.Base(rel) == "vendor" {
|
||||
pc.Mode = DisableMode
|
||||
return
|
||||
}
|
||||
if f == nil {
|
||||
return
|
||||
}
|
||||
mode := DefaultMode
|
||||
outer:
|
||||
for _, l := range f.Loads {
|
||||
name := l.Name()
|
||||
if name == "@io_bazel_rules_go//proto:def.bzl" {
|
||||
break
|
||||
}
|
||||
if name == "@io_bazel_rules_go//proto:go_proto_library.bzl" {
|
||||
mode = LegacyMode
|
||||
break
|
||||
}
|
||||
for _, sym := range l.Symbols() {
|
||||
if sym == "go_proto_library" {
|
||||
mode = DisableMode
|
||||
break outer
|
||||
}
|
||||
}
|
||||
}
|
||||
if mode == DefaultMode || pc.Mode == mode || c.ShouldFix && mode == LegacyMode {
|
||||
return
|
||||
}
|
||||
pc.Mode = mode
|
||||
}
|
||||
|
||||
func checkStripImportPrefix(prefix, rel string) error {
|
||||
if !strings.HasPrefix(prefix, "/") || !strings.HasPrefix(rel, prefix[1:]) {
|
||||
return fmt.Errorf("invalid proto_strip_import_prefix %q at %s", prefix, rel)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proto
|
||||
|
||||
const (
|
||||
// PackageInfoKey is the name of a private attribute set on generated
|
||||
// proto_library rules. This attribute contains a Package record which
|
||||
// describes the library and its sources.
|
||||
PackageKey = "_package"
|
||||
|
||||
// wellKnownTypesGoPrefix is the import path for the Go repository containing
|
||||
// pre-generated code for the Well Known Types.
|
||||
wellKnownTypesGoPrefix = "github.com/golang/protobuf"
|
||||
)
|
||||
|
|
@ -1,138 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FileInfo contains metadata extracted from a .proto file.
|
||||
type FileInfo struct {
|
||||
Path, Name string
|
||||
|
||||
PackageName string
|
||||
|
||||
Options []Option
|
||||
Imports []string
|
||||
|
||||
HasServices bool
|
||||
}
|
||||
|
||||
// Option represents a top-level option statement in a .proto file. Only
|
||||
// string options are supported for now.
|
||||
type Option struct {
|
||||
Key, Value string
|
||||
}
|
||||
|
||||
var protoRe = buildProtoRegexp()
|
||||
|
||||
func protoFileInfo(dir, name string) FileInfo {
|
||||
info := FileInfo{
|
||||
Path: filepath.Join(dir, name),
|
||||
Name: name,
|
||||
}
|
||||
content, err := ioutil.ReadFile(info.Path)
|
||||
if err != nil {
|
||||
log.Printf("%s: error reading proto file: %v", info.Path, err)
|
||||
return info
|
||||
}
|
||||
|
||||
for _, match := range protoRe.FindAllSubmatch(content, -1) {
|
||||
switch {
|
||||
case match[importSubexpIndex] != nil:
|
||||
imp := unquoteProtoString(match[importSubexpIndex])
|
||||
info.Imports = append(info.Imports, imp)
|
||||
|
||||
case match[packageSubexpIndex] != nil:
|
||||
pkg := string(match[packageSubexpIndex])
|
||||
if info.PackageName == "" {
|
||||
info.PackageName = pkg
|
||||
}
|
||||
|
||||
case match[optkeySubexpIndex] != nil:
|
||||
key := string(match[optkeySubexpIndex])
|
||||
value := unquoteProtoString(match[optvalSubexpIndex])
|
||||
info.Options = append(info.Options, Option{key, value})
|
||||
|
||||
case match[serviceSubexpIndex] != nil:
|
||||
info.HasServices = true
|
||||
|
||||
default:
|
||||
// Comment matched. Nothing to extract.
|
||||
}
|
||||
}
|
||||
sort.Strings(info.Imports)
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
const (
|
||||
importSubexpIndex = 1
|
||||
packageSubexpIndex = 2
|
||||
optkeySubexpIndex = 3
|
||||
optvalSubexpIndex = 4
|
||||
serviceSubexpIndex = 5
|
||||
)
|
||||
|
||||
// Based on https://developers.google.com/protocol-buffers/docs/reference/proto3-spec
|
||||
func buildProtoRegexp() *regexp.Regexp {
|
||||
hexEscape := `\\[xX][0-9a-fA-f]{2}`
|
||||
octEscape := `\\[0-7]{3}`
|
||||
charEscape := `\\[abfnrtv'"\\]`
|
||||
charValue := strings.Join([]string{hexEscape, octEscape, charEscape, "[^\x00\\'\\\"\\\\]"}, "|")
|
||||
strLit := `'(?:` + charValue + `|")*'|"(?:` + charValue + `|')*"`
|
||||
ident := `[A-Za-z][A-Za-z0-9_]*`
|
||||
fullIdent := ident + `(?:\.` + ident + `)*`
|
||||
importStmt := `\bimport\s*(?:public|weak)?\s*(?P<import>` + strLit + `)\s*;`
|
||||
packageStmt := `\bpackage\s*(?P<package>` + fullIdent + `)\s*;`
|
||||
optionStmt := `\boption\s*(?P<optkey>` + fullIdent + `)\s*=\s*(?P<optval>` + strLit + `)\s*;`
|
||||
serviceStmt := `(?P<service>service)`
|
||||
comment := `//[^\n]*`
|
||||
protoReSrc := strings.Join([]string{importStmt, packageStmt, optionStmt, serviceStmt, comment}, "|")
|
||||
return regexp.MustCompile(protoReSrc)
|
||||
}
|
||||
|
||||
func unquoteProtoString(q []byte) string {
|
||||
// Adjust quotes so that Unquote is happy. We need a double quoted string
|
||||
// without unescaped double quote characters inside.
|
||||
noQuotes := bytes.Split(q[1:len(q)-1], []byte{'"'})
|
||||
if len(noQuotes) != 1 {
|
||||
for i := 0; i < len(noQuotes)-1; i++ {
|
||||
if len(noQuotes[i]) == 0 || noQuotes[i][len(noQuotes[i])-1] != '\\' {
|
||||
noQuotes[i] = append(noQuotes[i], '\\')
|
||||
}
|
||||
}
|
||||
q = append([]byte{'"'}, bytes.Join(noQuotes, []byte{'"'})...)
|
||||
q = append(q, '"')
|
||||
}
|
||||
if q[0] == '\'' {
|
||||
q[0] = '"'
|
||||
q[len(q)-1] = '"'
|
||||
}
|
||||
|
||||
s, err := strconv.Unquote(string(q))
|
||||
if err != nil {
|
||||
log.Panicf("unquoting string literal %s from proto: %v", q, err)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
|
@ -1,24 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
func (_ *protoLang) Fix(c *config.Config, f *rule.File) {
|
||||
}
|
||||
|
|
@ -1,265 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/language"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
func (_ *protoLang) GenerateRules(args language.GenerateArgs) language.GenerateResult {
|
||||
c := args.Config
|
||||
pc := GetProtoConfig(c)
|
||||
if !pc.Mode.ShouldGenerateRules() {
|
||||
// Don't create or delete proto rules in this mode. Any existing rules
|
||||
// are likely hand-written.
|
||||
return language.GenerateResult{}
|
||||
}
|
||||
|
||||
var regularProtoFiles []string
|
||||
for _, name := range args.RegularFiles {
|
||||
if strings.HasSuffix(name, ".proto") {
|
||||
regularProtoFiles = append(regularProtoFiles, name)
|
||||
}
|
||||
}
|
||||
var genProtoFiles []string
|
||||
for _, name := range args.GenFiles {
|
||||
if strings.HasSuffix(name, ".proto") {
|
||||
genProtoFiles = append(genProtoFiles, name)
|
||||
}
|
||||
}
|
||||
pkgs := buildPackages(pc, args.Dir, args.Rel, regularProtoFiles, genProtoFiles)
|
||||
shouldSetVisibility := args.File == nil || !args.File.HasDefaultVisibility()
|
||||
var res language.GenerateResult
|
||||
for _, pkg := range pkgs {
|
||||
r := generateProto(pc, args.Rel, pkg, shouldSetVisibility)
|
||||
if r.IsEmpty(protoKinds[r.Kind()]) {
|
||||
res.Empty = append(res.Empty, r)
|
||||
} else {
|
||||
res.Gen = append(res.Gen, r)
|
||||
}
|
||||
}
|
||||
sort.SliceStable(res.Gen, func(i, j int) bool {
|
||||
return res.Gen[i].Name() < res.Gen[j].Name()
|
||||
})
|
||||
res.Imports = make([]interface{}, len(res.Gen))
|
||||
for i, r := range res.Gen {
|
||||
res.Imports[i] = r.PrivateAttr(config.GazelleImportsKey)
|
||||
}
|
||||
res.Empty = append(res.Empty, generateEmpty(args.File, regularProtoFiles, genProtoFiles)...)
|
||||
return res
|
||||
}
|
||||
|
||||
// RuleName returns a name for a proto_library derived from the given strings.
// For each string, RuleName will look for a non-empty suffix of identifier
// characters and then append "_proto" to that. The first string yielding a
// non-empty suffix wins; if none does, "root_proto" is returned.
func RuleName(names ...string) string {
	isIdent := func(c rune) bool {
		return 'A' <= c && c <= 'Z' ||
			'a' <= c && c <= 'z' ||
			'0' <= c && c <= '9' ||
			c == '_'
	}
	for _, candidate := range names {
		// Trim everything up to and including the last non-identifier rune.
		if i := strings.LastIndexFunc(candidate, func(c rune) bool { return !isIdent(c) }); i >= 0 {
			candidate = candidate[i+1:]
		}
		if candidate != "" {
			return candidate + "_proto"
		}
	}
	return "root_proto"
}
|
||||
|
||||
// buildPackages extracts metadata from the .proto files in a directory and
// constructs possibly several packages, then selects the packages to
// generate proto_library rules for, depending on pc.Mode.
func buildPackages(pc *ProtoConfig, dir, rel string, protoFiles, genFiles []string) []*Package {
	packageMap := make(map[string]*Package)
	for _, name := range protoFiles {
		info := protoFileInfo(dir, name)
		// Files are grouped by proto package name by default, or by the
		// value of the chosen file option when pc.groupOption is set.
		key := info.PackageName
		if pc.groupOption != "" {
			for _, opt := range info.Options {
				if opt.Key == pc.groupOption {
					key = opt.Value
					break
				}
			}
		}
		if packageMap[key] == nil {
			packageMap[key] = newPackage(info.PackageName)
		}
		packageMap[key].addFile(info)
	}

	switch pc.Mode {
	case DefaultMode:
		// At most one proto_library per directory in this mode.
		pkg, err := selectPackage(dir, rel, packageMap)
		if err != nil {
			log.Print(err)
		}
		if pkg == nil {
			return nil // empty rule created in generateEmpty
		}
		// Generated files join the selected package only.
		for _, name := range genFiles {
			pkg.addGenFile(dir, name)
		}
		return []*Package{pkg}

	case PackageMode:
		// One proto_library per proto package (or per group key).
		pkgs := make([]*Package, 0, len(packageMap))
		for _, pkg := range packageMap {
			pkgs = append(pkgs, pkg)
		}
		return pkgs

	default:
		return nil
	}
}
|
||||
|
||||
// selectPackage chooses a package to generate rules for.
|
||||
func selectPackage(dir, rel string, packageMap map[string]*Package) (*Package, error) {
|
||||
if len(packageMap) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(packageMap) == 1 {
|
||||
for _, pkg := range packageMap {
|
||||
return pkg, nil
|
||||
}
|
||||
}
|
||||
defaultPackageName := strings.Replace(rel, "/", "_", -1)
|
||||
for _, pkg := range packageMap {
|
||||
if pkgName := goPackageName(pkg); pkgName != "" && pkgName == defaultPackageName {
|
||||
return pkg, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("%s: directory contains multiple proto packages. Gazelle can only generate a proto_library for one package.", dir)
|
||||
}
|
||||
|
||||
// goPackageName guesses the identifier in package declarations at the top of
|
||||
// the .pb.go files that will be generated for this package. "" is returned
|
||||
// if the package name cannot be determined.
|
||||
//
|
||||
// TODO(jayconrod): remove all Go-specific functionality. This is here
|
||||
// temporarily for compatibility.
|
||||
func goPackageName(pkg *Package) string {
|
||||
if opt, ok := pkg.Options["go_package"]; ok {
|
||||
if i := strings.IndexByte(opt, ';'); i >= 0 {
|
||||
return opt[i+1:]
|
||||
} else if i := strings.LastIndexByte(opt, '/'); i >= 0 {
|
||||
return opt[i+1:]
|
||||
} else {
|
||||
return opt
|
||||
}
|
||||
}
|
||||
if pkg.Name != "" {
|
||||
return strings.Replace(pkg.Name, ".", "_", -1)
|
||||
}
|
||||
if len(pkg.Files) == 1 {
|
||||
for s := range pkg.Files {
|
||||
return strings.TrimSuffix(s, ".proto")
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// generateProto creates a new proto_library rule for a package. The rule may
// be empty if there are no sources.
func generateProto(pc *ProtoConfig, rel string, pkg *Package, shouldSetVisibility bool) *rule.Rule {
	var name string
	if pc.Mode == DefaultMode {
		// One rule per directory: derive the name from the Go package name,
		// the configured prefix, or the directory, in that order.
		name = RuleName(goPackageName(pkg), pc.GoPrefix, rel)
	} else {
		name = RuleName(pkg.Options[pc.groupOption], pkg.Name, rel)
	}
	r := rule.NewRule("proto_library", name)
	srcs := make([]string, 0, len(pkg.Files))
	for f := range pkg.Files {
		srcs = append(srcs, f)
	}
	sort.Strings(srcs) // deterministic attribute order
	if len(srcs) > 0 {
		r.SetAttr("srcs", srcs)
	}
	// Stash the full package metadata on the rule for this extension's use.
	r.SetPrivateAttr(PackageKey, *pkg)
	imports := make([]string, 0, len(pkg.Imports))
	for i := range pkg.Imports {
		imports = append(imports, i)
	}
	sort.Strings(imports)
	// NOTE: This attribute should not be used outside this extension. It's still
	// convenient for testing though.
	r.SetPrivateAttr(config.GazelleImportsKey, imports)
	for k, v := range pkg.Options {
		r.SetPrivateAttr(k, v)
	}
	if shouldSetVisibility {
		vis := rule.CheckInternalVisibility(rel, "//visibility:public")
		r.SetAttr("visibility", []string{vis})
	}
	if pc.stripImportPrefix != "" {
		r.SetAttr("strip_import_prefix", pc.stripImportPrefix)
	}
	if pc.importPrefix != "" {
		r.SetAttr("import_prefix", pc.importPrefix)
	}
	return r
}
|
||||
|
||||
// generateEmpty generates a list of proto_library rules that may be deleted.
|
||||
// This is generated from existing proto_library rules with srcs lists that
|
||||
// don't match any static or generated files.
|
||||
func generateEmpty(f *rule.File, regularFiles, genFiles []string) []*rule.Rule {
|
||||
if f == nil {
|
||||
return nil
|
||||
}
|
||||
knownFiles := make(map[string]bool)
|
||||
for _, f := range regularFiles {
|
||||
knownFiles[f] = true
|
||||
}
|
||||
for _, f := range genFiles {
|
||||
knownFiles[f] = true
|
||||
}
|
||||
var empty []*rule.Rule
|
||||
outer:
|
||||
for _, r := range f.Rules {
|
||||
if r.Kind() != "proto_library" {
|
||||
continue
|
||||
}
|
||||
srcs := r.AttrStrings("srcs")
|
||||
if len(srcs) == 0 && r.Attr("srcs") != nil {
|
||||
// srcs is not a string list; leave it alone
|
||||
continue
|
||||
}
|
||||
for _, src := range r.AttrStrings("srcs") {
|
||||
if knownFiles[src] {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
empty = append(empty, rule.NewRule("proto_library", r.Name()))
|
||||
}
|
||||
return empty
|
||||
}
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proto
|
||||
|
||||
import "github.com/bazelbuild/bazel-gazelle/rule"
|
||||
|
||||
// protoKinds describes the rule kinds generated by this extension: which
// attributes make a rule non-empty, which may be merged, and which are
// filled in during dependency resolution.
var protoKinds = map[string]rule.KindInfo{
	"proto_library": {
		// A proto_library without srcs is considered empty.
		NonEmptyAttrs: map[string]bool{"srcs": true},
		MergeableAttrs: map[string]bool{
			"srcs": true,
		},
		// deps is set during resolution (see Resolve).
		ResolveAttrs: map[string]bool{"deps": true},
	},
}
|
||||
|
||||
// Kinds returns the kind metadata for rules generated by the proto extension.
func (*protoLang) Kinds() map[string]rule.KindInfo { return protoKinds }
|
||||
// Loads returns no load statement info for the proto extension.
func (*protoLang) Loads() []rule.LoadInfo { return nil }
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,72 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package proto provides support for protocol buffer rules.
|
||||
// It generates proto_library rules only (not go_proto_library or any other
|
||||
// language-specific implementations).
|
||||
//
|
||||
// Configuration
|
||||
//
|
||||
// Configuration is largely controlled by Mode. In disable mode, proto rules are
|
||||
// left alone (neither generated nor deleted). In legacy mode, filegroups are
|
||||
// emitted containing protos. In default mode, proto_library rules are
|
||||
// emitted. The proto mode may be set with the -proto command line flag or the
|
||||
// "# gazelle:proto" directive.
|
||||
//
|
||||
// The configuration is largely public, and other languages may depend on it.
|
||||
// For example, go uses Mode to determine whether to generate go_proto_library
|
||||
// rules and ignore static .pb.go files.
|
||||
//
|
||||
// Rule generation
|
||||
//
|
||||
// Currently, Gazelle generates at most one proto_library per directory. Protos
|
||||
// in the same package are grouped together into a proto_library. If there are
|
||||
// sources for multiple packages, the package name that matches the directory
|
||||
// name will be chosen; if there is no such package, an error will be printed.
|
||||
// We expect to provide support for multiple proto_libraries in the future
|
||||
// when Go has support for multiple packages and we have better rule matching.
|
||||
// The generated proto_library will be named after the directory, not the
|
||||
// proto or the package. For example, for foo/bar/baz.proto, a proto_library
|
||||
// rule will be generated named //foo/bar:bar_proto.
|
||||
//
|
||||
// Dependency resolution
|
||||
//
|
||||
// proto_library rules are indexed by their srcs attribute. Gazelle attempts
|
||||
// to resolve proto imports (e.g., import foo/bar/bar.proto) to the
|
||||
// proto_library that contains the named source file
|
||||
// (e.g., //foo/bar:bar_proto). If no indexed proto_library provides the source
|
||||
// file, Gazelle will guess a label, following conventions.
|
||||
//
|
||||
// No attempt is made to resolve protos to rules in external repositories,
|
||||
// since there's no indication that a proto import comes from an external
|
||||
// repository. In the future, build files in external repos will be indexed,
|
||||
// so we can support this (#12).
|
||||
//
|
||||
// Gazelle has special cases for Well Known Types (i.e., imports of the form
|
||||
// google/protobuf/*.proto). These are resolved to rules in
|
||||
// @com_google_protobuf.
|
||||
package proto
|
||||
|
||||
import "github.com/bazelbuild/bazel-gazelle/language"
|
||||
|
||||
// protoName is the extension name returned by (*protoLang).Name.
const protoName = "proto"

// protoLang implements language.Language for protocol buffers.
type protoLang struct{}
|
||||
|
||||
// Name returns the name of the proto extension.
func (*protoLang) Name() string { return protoName }
|
||||
|
||||
func NewLanguage() language.Language {
|
||||
return &protoLang{}
|
||||
}
|
||||
|
|
@ -1,55 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proto
|
||||
|
||||
import "path/filepath"
|
||||
|
||||
// Package contains metadata for a set of .proto files that have the
// same package name. This translates to a proto_library rule.
type Package struct {
	// Name is the proto package name shared by the files.
	Name string
	// Files maps file names to their parsed metadata.
	Files map[string]FileInfo
	// Imports is the set of proto paths imported by any file in the package.
	Imports map[string]bool
	// Options holds file option key/value pairs; when files repeat an
	// option key, the last file added wins (see addFile).
	Options map[string]string
	// HasServices is true if any file in the package declares a service.
	HasServices bool
}
|
||||
|
||||
func newPackage(name string) *Package {
|
||||
return &Package{
|
||||
Name: name,
|
||||
Files: map[string]FileInfo{},
|
||||
Imports: map[string]bool{},
|
||||
Options: map[string]string{},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Package) addFile(info FileInfo) {
|
||||
p.Files[info.Name] = info
|
||||
for _, imp := range info.Imports {
|
||||
p.Imports[imp] = true
|
||||
}
|
||||
for _, opt := range info.Options {
|
||||
p.Options[opt.Key] = opt.Value
|
||||
}
|
||||
p.HasServices = p.HasServices || info.HasServices
|
||||
}
|
||||
|
||||
// addGenFile records a generated .proto file in the package. Only the name
// and the on-disk path are known for generated files, so no imports or
// options are recorded for them.
func (p *Package) addGenFile(dir, name string) {
	p.Files[name] = FileInfo{
		Name: name,
		Path: filepath.Join(dir, filepath.FromSlash(name)),
	}
}
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,136 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/label"
|
||||
"github.com/bazelbuild/bazel-gazelle/repo"
|
||||
"github.com/bazelbuild/bazel-gazelle/resolve"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// Imports returns an ImportSpec for each source of a proto_library rule so
// the rule can be indexed by the .proto paths it provides. The path is the
// package path adjusted by strip_import_prefix / import_prefix.
func (_ *protoLang) Imports(c *config.Config, r *rule.Rule, f *rule.File) []resolve.ImportSpec {
	rel := f.Pkg
	srcs := r.AttrStrings("srcs")
	imports := make([]resolve.ImportSpec, len(srcs))
	pc := GetProtoConfig(c)
	prefix := rel
	if pc.stripImportPrefix != "" {
		// [1:] drops an assumed leading "/" from stripImportPrefix —
		// TODO(review): confirm the config layer guarantees that form.
		prefix = strings.TrimPrefix(rel, pc.stripImportPrefix[1:])
		if rel == prefix {
			// The package is outside the stripped prefix; don't index it.
			return nil
		}
	}
	if pc.importPrefix != "" {
		prefix = path.Join(pc.importPrefix, prefix)
	}
	for i, src := range srcs {
		imports[i] = resolve.ImportSpec{Lang: "proto", Imp: path.Join(prefix, src)}
	}
	return imports
}
|
||||
|
||||
func (_ *protoLang) Embeds(r *rule.Rule, from label.Label) []label.Label {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resolve converts the proto import strings recorded during rule generation
// into a sorted, deduplicated "deps" attribute on r. Imports that fail to
// resolve are logged and skipped rather than failing the run.
func (_ *protoLang) Resolve(c *config.Config, ix *resolve.RuleIndex, rc *repo.RemoteCache, r *rule.Rule, importsRaw interface{}, from label.Label) {
	if importsRaw == nil {
		// may not be set in tests.
		return
	}
	imports := importsRaw.([]string)
	r.DelAttr("deps")
	depSet := make(map[string]bool) // dedupes labels across imports
	for _, imp := range imports {
		l, err := resolveProto(c, ix, r, imp, from)
		if err == skipImportError {
			// Deliberate skip (e.g. self-import); not an error.
			continue
		} else if err != nil {
			log.Print(err)
		} else {
			// Shorten the label relative to the importing package.
			l = l.Rel(from.Repo, from.Pkg)
			depSet[l.String()] = true
		}
	}
	if len(depSet) > 0 {
		deps := make([]string, 0, len(depSet))
		for dep := range depSet {
			deps = append(deps, dep)
		}
		sort.Strings(deps) // deterministic deps order
		r.SetAttr("deps", deps)
	}
}
|
||||
|
||||
var (
	// skipImportError marks an import that should not produce a dep
	// (e.g. a rule importing one of its own sources).
	skipImportError = errors.New("std import")
	// notFoundError means the rule index had no match for an import.
	notFoundError = errors.New("not found")
)
|
||||
|
||||
// resolveProto maps a single proto import path to the label of the
// proto_library that provides it. Resolution order: explicit overrides,
// well-known imports (when enabled by the mode), the rule index, and
// finally a conventional label guessed from the import's directory.
func resolveProto(c *config.Config, ix *resolve.RuleIndex, r *rule.Rule, imp string, from label.Label) (label.Label, error) {
	pc := GetProtoConfig(c)
	if !strings.HasSuffix(imp, ".proto") {
		return label.NoLabel, fmt.Errorf("can't import non-proto: %q", imp)
	}

	// Explicit overrides take precedence over everything else.
	if l, ok := resolve.FindRuleWithOverride(c, resolve.ImportSpec{Imp: imp, Lang: "proto"}, "proto"); ok {
		return l, nil
	}

	if l, ok := knownImports[imp]; ok && pc.Mode.ShouldUseKnownImports() {
		if l.Equal(from) {
			// A rule never depends on itself.
			return label.NoLabel, skipImportError
		} else {
			return l, nil
		}
	}

	if l, err := resolveWithIndex(ix, imp, from); err == nil || err == skipImportError {
		return l, err
	} else if err != notFoundError {
		return label.NoLabel, err
	}

	// Not indexed: fall back to the conventional //<dir>:<dir>_proto label.
	rel := path.Dir(imp)
	if rel == "." {
		rel = ""
	}
	name := RuleName(rel)
	return label.New("", rel, name), nil
}
|
||||
|
||||
// resolveWithIndex looks up imp in the rule index. It returns notFoundError
// when nothing matches, an error when the import is ambiguous (more than one
// match), and skipImportError when the only match is the importing rule.
func resolveWithIndex(ix *resolve.RuleIndex, imp string, from label.Label) (label.Label, error) {
	matches := ix.FindRulesByImport(resolve.ImportSpec{Lang: "proto", Imp: imp}, "proto")
	if len(matches) == 0 {
		return label.NoLabel, notFoundError
	}
	if len(matches) > 1 {
		return label.NoLabel, fmt.Errorf("multiple rules (%s and %s) may be imported with %q from %s", matches[0].Label, matches[1].Label, imp, from)
	}
	if matches[0].IsSelfImport(from) {
		return label.NoLabel, skipImportError
	}
	return matches[0].Label, nil
}
|
||||
|
|
@ -1,120 +0,0 @@
|
|||
/* Copyright 2019 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/repo"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// RepoUpdater may be implemented by languages that support updating
// repository rules that provide named libraries.
//
// EXPERIMENTAL: this may change or be removed.
type RepoUpdater interface {
	// UpdateRepos returns repository rules providing the libraries named
	// in args.Imports (and possibly their transitive dependencies).
	UpdateRepos(args UpdateReposArgs) UpdateReposResult
}
|
||||
|
||||
// UpdateReposArgs contains arguments for RepoUpdater.UpdateRepos.
// Arguments are passed in a struct value so that new fields may be added
// in the future without breaking existing implementations.
//
// EXPERIMENTAL: this may change or be removed.
type UpdateReposArgs struct {
	// Config is the configuration for the main workspace.
	Config *config.Config

	// Imports is a list of libraries to update. UpdateRepos should return
	// repository rules that provide these libraries. It may also return
	// repository rules providing transitive dependencies.
	Imports []string

	// Cache stores information fetched from the network and ensures that
	// the same request isn't made multiple times.
	Cache *repo.RemoteCache
}
|
||||
|
||||
// UpdateReposResult contains return values for RepoUpdater.UpdateRepos.
// Results are returned through a struct so that new (optional) fields may be
// added without breaking existing implementations.
//
// EXPERIMENTAL: this may change or be removed.
type UpdateReposResult struct {
	// Gen is a list of repository rules that provide libraries named by
	// UpdateReposArgs.Imports. These will be merged with existing rules or
	// added to WORKSPACE. This list may be shorter or longer than the list
	// of imports, since a single repository may provide multiple imports,
	// and additional repositories may be needed for transitive dependencies.
	Gen []*rule.Rule

	// Error is any fatal error that occurred. Non-fatal errors should be logged.
	Error error
}
|
||||
|
||||
// RepoImporter may be implemented by languages that support importing
// repository rules from another build system.
//
// EXPERIMENTAL: this may change or be removed.
type RepoImporter interface {
	// CanImport returns whether a given configuration file may be imported
	// with this extension. Only one extension may import any given file.
	// ImportRepos will not be called unless this returns true.
	CanImport(path string) bool

	// ImportRepos generates a list of repository rules by reading a
	// configuration file from another build system.
	ImportRepos(args ImportReposArgs) ImportReposResult
}
|
||||
|
||||
// ImportReposArgs contains arguments for RepoImporter.ImportRepos.
// Arguments are passed in a struct value so that new fields may be added
// in the future without breaking existing implementations.
//
// EXPERIMENTAL: this may change or be removed.
type ImportReposArgs struct {
	// Config is the configuration for the main workspace.
	Config *config.Config

	// Path is the name of the configuration file to import.
	Path string

	// Prune indicates whether repository rules that are no longer needed
	// should be deleted. This means the Empty list in the result should be
	// filled in.
	Prune bool

	// Cache stores information fetched from the network and ensures that
	// the same request isn't made multiple times.
	Cache *repo.RemoteCache
}
|
||||
|
||||
// ImportReposResult contains return values for RepoImporter.ImportRepos.
// Results are returned through a struct so that new (optional) fields may
// be added without breaking existing implementations.
//
// EXPERIMENTAL: this may change or be removed.
type ImportReposResult struct {
	// Gen is a list of imported repository rules.
	Gen []*rule.Rule

	// Empty is a list of repository rules that may be deleted. This should only
	// be set if ImportReposArgs.Prune is true.
	Empty []*rule.Rule

	// Error is any fatal error that occurred. Non-fatal errors should be logged.
	Error error
}
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
# Build definition for the vendored bazel-gazelle merger package.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "fix.go",
        "merger.go",
    ],
    importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/merger",
    importpath = "github.com/bazelbuild/bazel-gazelle/merger",
    visibility = ["//visibility:public"],
    deps = ["//vendor/github.com/bazelbuild/bazel-gazelle/rule:go_default_library"],
)
|
||||
|
|
@ -1,179 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package merger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// FixLoads removes loads of unused go rules and adds loads of newly used rules.
// This should be called after FixFile and MergeFile, since symbols
// may be introduced that aren't loaded.
//
// This function calls File.Sync before processing loads.
func FixLoads(f *rule.File, knownLoads []rule.LoadInfo) {
	// Index the known load files, and map each known symbol to the file
	// it must be loaded from.
	knownFiles := make(map[string]bool)
	knownKinds := make(map[string]string)
	for _, l := range knownLoads {
		knownFiles[l.Name] = true
		for _, k := range l.Symbols {
			knownKinds[k] = l.Name
		}
	}

	// Sync the file. We need File.Loads and File.Rules to contain inserted
	// statements and not deleted statements.
	f.Sync()

	// Scan load statements in the file. Keep track of loads of known files,
	// since these may be changed. Keep track of symbols loaded from unknown
	// files; we will not add loads for these.
	var loads []*rule.Load
	otherLoadedKinds := make(map[string]bool)
	for _, l := range f.Loads {
		if knownFiles[l.Name()] {
			loads = append(loads, l)
			continue
		}
		for _, sym := range l.Symbols() {
			otherLoadedKinds[sym] = true
		}
	}

	// Make a map of all the symbols from known files used in this file.
	usedKinds := make(map[string]map[string]bool)
	for _, r := range f.Rules {
		kind := r.Kind()
		if file, ok := knownKinds[kind]; ok && !otherLoadedKinds[kind] {
			if usedKinds[file] == nil {
				usedKinds[file] = make(map[string]bool)
			}
			usedKinds[file][kind] = true
		}
	}

	// Fix the load statements. The order is important, so we iterate over
	// knownLoads instead of knownFiles.
	for _, known := range knownLoads {
		file := known.Name
		first := true
		for _, l := range loads {
			if l.Name() != file {
				continue
			}
			if first {
				// The first load of a file receives all used symbols;
				// later duplicates only have stale symbols removed.
				fixLoad(l, file, usedKinds[file], knownKinds)
				first = false
			} else {
				fixLoad(l, file, nil, knownKinds)
			}
			if l.IsEmpty() {
				l.Delete()
			}
		}
		if first {
			// No existing load for this file: create one if any of its
			// symbols are used.
			load := fixLoad(nil, file, usedKinds[file], knownKinds)
			if load != nil {
				index := newLoadIndex(f, known.After)
				load.Insert(f, index)
			}
		}
	}
}
|
||||
|
||||
// fixLoad updates a load statement with the given symbols. If load is nil,
// a new load may be created and returned. Symbols in kinds will be added
// to the load if they're not already present. Known symbols not in kinds
// will be removed if present. Other symbols will be preserved. If load is
// empty, nil is returned.
func fixLoad(load *rule.Load, file string, kinds map[string]bool, knownKinds map[string]string) *rule.Load {
	if load == nil {
		if len(kinds) == 0 {
			// Nothing to add, so no load statement is needed.
			return nil
		}
		load = rule.NewLoad(file)
	}

	for k := range kinds {
		load.Add(k)
	}
	// Drop known symbols that are no longer used; leave unknown symbols alone.
	for _, k := range load.Symbols() {
		if knownKinds[k] != "" && !kinds[k] {
			load.Remove(k)
		}
	}
	return load
}
|
||||
|
||||
// newLoadIndex returns the index in stmts where a new load statement should
|
||||
// be inserted. after is a list of function names that the load should not
|
||||
// be inserted before.
|
||||
func newLoadIndex(f *rule.File, after []string) int {
|
||||
if len(after) == 0 {
|
||||
return 0
|
||||
}
|
||||
index := 0
|
||||
for _, r := range f.Rules {
|
||||
for _, a := range after {
|
||||
if r.Kind() == a && r.Index() >= index {
|
||||
index = r.Index() + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
return index
|
||||
}
|
||||
|
||||
// CheckGazelleLoaded searches the given WORKSPACE file for a repository named
// "bazel_gazelle". If no such repository is found *and* the repo is not
// declared with a directive *and* at least one load statement mentions
// the repository, a descriptive error will be returned.
//
// This should be called after modifications have been made to WORKSPACE
// (i.e., after FixLoads) before writing it to disk.
func CheckGazelleLoaded(f *rule.File) error {
	// Only require the repo when something is actually loaded from it.
	needGazelle := false
	for _, l := range f.Loads {
		if strings.HasPrefix(l.Name(), "@bazel_gazelle//") {
			needGazelle = true
		}
	}
	if !needGazelle {
		return nil
	}
	// A rule named bazel_gazelle satisfies the requirement.
	for _, r := range f.Rules {
		if r.Name() == "bazel_gazelle" {
			return nil
		}
	}
	// So does a "# gazelle:repo bazel_gazelle" directive.
	for _, d := range f.Directives {
		if d.Key != "repo" {
			continue
		}
		if fs := strings.Fields(d.Value); len(fs) > 0 && fs[0] == "bazel_gazelle" {
			return nil
		}
	}
	return fmt.Errorf(`%s: error: bazel_gazelle is not declared in WORKSPACE.
Without this repository, Gazelle cannot safely modify the WORKSPACE file.
See the instructions at https://github.com/bazelbuild/bazel-gazelle.
If the bazel_gazelle is declared inside a macro, you can suppress this error
by adding a comment like this to WORKSPACE:
    # gazelle:repo bazel_gazelle
`, f.Path)
}
|
||||
|
|
@ -1,250 +0,0 @@
|
|||
/* Copyright 2016 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package merger provides functions for merging generated rules into
|
||||
// existing build files.
|
||||
//
|
||||
// Gazelle's normal workflow is roughly as follows:
|
||||
//
|
||||
// 1. Read metadata from sources.
|
||||
//
|
||||
// 2. Generate new rules.
|
||||
//
|
||||
// 3. Merge newly generated rules with rules in the existing build file
|
||||
// if there is one.
|
||||
//
|
||||
// 4. Build an index of merged library rules for dependency resolution.
|
||||
//
|
||||
// 5. Resolve dependencies (i.e., convert import strings to deps labels).
|
||||
//
|
||||
// 6. Merge the newly resolved dependencies.
|
||||
//
|
||||
// 7. Write the merged file back to disk.
|
||||
//
|
||||
// This package is used for steps 3 and 6 above.
|
||||
package merger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// Phase indicates which attributes should be merged in matching rules.
type Phase int

const (
	// PreResolve is the merge performed before rules are indexed for dependency
	// resolution. All attributes not related to dependencies are merged
	// (i.e., rule.KindInfo.MergeableAttrs). This merge must be performed
	// before indexing because attributes related to indexing (e.g.,
	// srcs, importpath) will be affected.
	PreResolve Phase = iota

	// PostResolve is the merge performed after rules are indexed. All attributes
	// related to dependencies are merged (i.e., rule.KindInfo.ResolveAttrs).
	PostResolve
)
|
||||
|
||||
// MergeFile combines information from newly generated rules with matching
// rules in an existing build file. MergeFile can also delete rules which
// are empty after merging.
//
// oldFile is the file to merge. It must not be nil.
//
// emptyRules is a list of stub rules (with no attributes other than name)
// which were not generated. These are merged with matching rules. The merged
// rules are deleted if they contain no attributes that make them buildable
// (e.g., srcs, deps, anything in rule.KindInfo.NonEmptyAttrs).
//
// genRules is a list of newly generated rules. These are merged with
// matching rules. A rule matches if it has the same kind and name or if
// some other attribute in rule.KindInfo.MatchAttrs matches (e.g.,
// "importpath" in go_library). Elements of genRules that don't match
// any existing rule are appended to the end of oldFile.
//
// phase indicates whether this is a pre- or post-resolve merge. Different
// attributes (rule.KindInfo.MergeableAttrs or ResolveAttrs) will be merged.
//
// kinds maps rule kinds (e.g., "go_library") to metadata that helps merge
// rules of that kind.
//
// When a generated and existing rule are merged, each attribute is merged
// separately. If an attribute is mergeable (according to KindInfo), values
// from the existing attribute are replaced by values from the generated
// attribute. Comments are preserved on values that are present in both
// versions of the attribute. If an attribute is not mergeable, the generated
// version of the attribute will be added if no existing attribute is present;
// otherwise, the existing attribute will be preserved.
//
// Note that "# keep" comments affect merging. If a value within an existing
// attribute is marked with a "# keep" comment, it will not be removed.
// If an attribute is marked with a "# keep" comment, it will not be merged.
// If a rule is marked with a "# keep" comment, the whole rule will not
// be modified.
func MergeFile(oldFile *rule.File, emptyRules, genRules []*rule.Rule, phase Phase, kinds map[string]rule.KindInfo) {
	// getMergeAttrs selects the attribute set to merge based on the phase.
	getMergeAttrs := func(r *rule.Rule) map[string]bool {
		if phase == PreResolve {
			return kinds[r.Kind()].MergeableAttrs
		} else {
			return kinds[r.Kind()].ResolveAttrs
		}
	}

	// Merge empty rules into the file and delete any rules which become empty.
	for _, emptyRule := range emptyRules {
		if oldRule, _ := Match(oldFile.Rules, emptyRule, kinds[emptyRule.Kind()]); oldRule != nil {
			if oldRule.ShouldKeep() {
				// "# keep" rules are never modified or deleted.
				continue
			}
			rule.MergeRules(emptyRule, oldRule, getMergeAttrs(emptyRule), oldFile.Path)
			if oldRule.IsEmpty(kinds[oldRule.Kind()]) {
				oldRule.Delete()
			}
		}
	}
	oldFile.Sync()

	// Match generated rules with existing rules in the file. Keep track of
	// rules with non-standard names.
	matchRules := make([]*rule.Rule, len(genRules))
	matchErrors := make([]error, len(genRules))
	substitutions := make(map[string]string)
	for i, genRule := range genRules {
		oldRule, err := Match(oldFile.Rules, genRule, kinds[genRule.Kind()])
		if err != nil {
			// TODO(jayconrod): add a verbose mode and log errors. They are too chatty
			// to print by default.
			matchErrors[i] = err
			continue
		}
		matchRules[i] = oldRule
		if oldRule != nil {
			if oldRule.Name() != genRule.Name() {
				substitutions[genRule.Name()] = oldRule.Name()
			}
		}
	}

	// Rename labels in generated rules that refer to other generated rules.
	if len(substitutions) > 0 {
		for _, genRule := range genRules {
			substituteRule(genRule, substitutions, kinds[genRule.Kind()])
		}
	}

	// Merge generated rules with existing rules or append to the end of the file.
	for i, genRule := range genRules {
		if matchErrors[i] != nil {
			// Ambiguous match; leave the file untouched for this rule.
			continue
		}
		if matchRules[i] == nil {
			genRule.Insert(oldFile)
		} else {
			rule.MergeRules(genRule, matchRules[i], getMergeAttrs(genRule), oldFile.Path)
		}
	}
}
|
||||
|
||||
// substituteRule replaces local labels (those beginning with ":", referring to
|
||||
// targets in the same package) according to a substitution map. This is used
|
||||
// to update generated rules before merging when the corresponding existing
|
||||
// rules have different names. If substituteRule replaces a string, it returns
|
||||
// a new expression; it will not modify the original expression.
|
||||
func substituteRule(r *rule.Rule, substitutions map[string]string, info rule.KindInfo) {
|
||||
for attr := range info.SubstituteAttrs {
|
||||
if expr := r.Attr(attr); expr != nil {
|
||||
expr = rule.MapExprStrings(expr, func(s string) string {
|
||||
if rename, ok := substitutions[strings.TrimPrefix(s, ":")]; ok {
|
||||
return ":" + rename
|
||||
} else {
|
||||
return s
|
||||
}
|
||||
})
|
||||
r.SetAttr(attr, expr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Match searches for a rule that can be merged with x in rules.
//
// A rule is considered a match if its kind is equal to x's kind AND either its
// name is equal OR at least one of the attributes in matchAttrs is equal.
//
// If there are no matches, nil and nil are returned.
//
// If a rule has the same name but a different kind, nil and an error
// are returned.
//
// If there is exactly one match, the rule and nil are returned.
//
// If there are multiple matches, match will attempt to disambiguate, based on
// the quality of the match (name match is best, then attribute match in the
// order that attributes are listed). If disambiguation is successful,
// the rule and nil are returned. Otherwise, nil and an error are returned.
func Match(rules []*rule.Rule, x *rule.Rule, info rule.KindInfo) (*rule.Rule, error) {
	xname := x.Name()
	xkind := x.Kind()
	var nameMatches []*rule.Rule
	var kindMatches []*rule.Rule
	for _, y := range rules {
		if xname == y.Name() {
			nameMatches = append(nameMatches, y)
		}
		if xkind == y.Kind() {
			kindMatches = append(kindMatches, y)
		}
	}

	// A unique name match is the strongest signal, but kinds must agree.
	if len(nameMatches) == 1 {
		y := nameMatches[0]
		if xkind != y.Kind() {
			return nil, fmt.Errorf("could not merge %s(%s): a rule of the same name has kind %s", xkind, xname, y.Kind())
		}
		return y, nil
	}
	if len(nameMatches) > 1 {
		return nil, fmt.Errorf("could not merge %s(%s): multiple rules have the same name", xkind, xname)
	}

	// Fall back to matching attributes (e.g., "importpath"), tried in the
	// order they appear in info.MatchAttrs.
	for _, key := range info.MatchAttrs {
		var attrMatches []*rule.Rule
		xvalue := x.AttrString(key)
		if xvalue == "" {
			continue
		}
		for _, y := range kindMatches {
			if xvalue == y.AttrString(key) {
				attrMatches = append(attrMatches, y)
			}
		}
		if len(attrMatches) == 1 {
			return attrMatches[0], nil
		} else if len(attrMatches) > 1 {
			return nil, fmt.Errorf("could not merge %s(%s): multiple rules have the same attribute %s = %q", xkind, xname, key, xvalue)
		}
	}

	// As a last resort, kinds that declare MatchAny may match on kind alone.
	if info.MatchAny {
		if len(kindMatches) == 1 {
			return kindMatches[0], nil
		} else if len(kindMatches) > 1 {
			return nil, fmt.Errorf("could not merge %s(%s): multiple rules have the same kind but different names", xkind, xname)
		}
	}

	return nil, nil
}
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
# Build rules for the vendored bazel-gazelle pathtools package.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["path.go"],
    importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/pathtools",
    importpath = "github.com/bazelbuild/bazel-gazelle/pathtools",
    visibility = ["//visibility:public"],
)
|
||||
|
|
@ -1,111 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package pathtools provides utilities for manipulating paths. Most paths
|
||||
// within Gazelle are slash-separated paths, relative to the repository root
|
||||
// directory. The repository root directory is represented by the empty
|
||||
// string. Paths in this format may be used directly as package names in labels.
|
||||
package pathtools
|
||||
|
||||
import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// HasPrefix returns whether the slash-separated path p has the given
// prefix. Unlike strings.HasPrefix, this function respects component
// boundaries, so "/home/foo" is not a prefix of "/home/foobar/baz". If the
// prefix is empty, this function always returns true.
func HasPrefix(p, prefix string) bool {
	switch {
	case prefix == "":
		return true
	case p == prefix:
		return true
	default:
		return strings.HasPrefix(p, prefix+"/")
	}
}
|
||||
|
||||
// TrimPrefix returns p without the provided prefix. If p doesn't start
// with prefix, it returns p unchanged. Unlike strings.TrimPrefix, this
// function respects component boundaries (assuming slash-separated paths), so
// TrimPrefix("foo/bar", "foo") returns "bar" while
// TrimPrefix("foobar/baz", "foo") returns "foobar/baz" unchanged.
func TrimPrefix(p, prefix string) string {
	if prefix == "" {
		return p
	}
	if prefix == p {
		return ""
	}
	// Trim "prefix/" so only whole-component prefixes are removed.
	return strings.TrimPrefix(p, prefix+"/")
}
|
||||
|
||||
// RelBaseName returns the base name for rel, a slash-separated path relative
// to the repository root. If rel is empty, RelBaseName returns the base name
// of prefix. If prefix is empty, RelBaseName returns the base name of root,
// the absolute file path of the repository root directory. If that's empty
// too, then RelBaseName returns "root".
func RelBaseName(rel, prefix, root string) string {
	// Try each candidate in order; "." and "/" mean the component was empty.
	for _, base := range []string{path.Base(rel), path.Base(prefix), filepath.Base(root)} {
		if base != "." && base != "/" {
			return base
		}
	}
	return "root"
}
|
||||
|
||||
// Index returns the starting index of the string sub within the non-absolute
|
||||
// slash-separated path p. sub must start and end at component boundaries
|
||||
// within p.
|
||||
func Index(p, sub string) int {
|
||||
if sub == "" {
|
||||
return 0
|
||||
}
|
||||
p = path.Clean(p)
|
||||
sub = path.Clean(sub)
|
||||
if path.IsAbs(sub) {
|
||||
if HasPrefix(p, sub) {
|
||||
return 0
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
if p == "" || p == "/" {
|
||||
return -1
|
||||
}
|
||||
|
||||
i := 0 // i is the index of the first byte of a path element
|
||||
if len(p) > 0 && p[0] == '/' {
|
||||
i++
|
||||
}
|
||||
for {
|
||||
suffix := p[i:]
|
||||
if len(suffix) < len(sub) {
|
||||
return -1
|
||||
}
|
||||
if suffix[:len(sub)] == sub && (len(suffix) == len(sub) || suffix[len(sub)] == '/') {
|
||||
return i
|
||||
}
|
||||
j := strings.IndexByte(suffix, '/')
|
||||
if j < 0 {
|
||||
return -1
|
||||
}
|
||||
i += j + 1
|
||||
if i >= len(p) {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
# Build rules for the vendored bazel-gazelle repo package.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "remote.go",
        "repo.go",
    ],
    importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/repo",
    importpath = "github.com/bazelbuild/bazel-gazelle/repo",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/bazelbuild/bazel-gazelle/label:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/pathtools:go_default_library",
        "//vendor/github.com/bazelbuild/bazel-gazelle/rule:go_default_library",
        "//vendor/golang.org/x/tools/go/vcs:go_default_library",
    ],
)
|
||||
|
|
@ -1,583 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package repo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/label"
|
||||
"github.com/bazelbuild/bazel-gazelle/pathtools"
|
||||
"golang.org/x/tools/go/vcs"
|
||||
)
|
||||
|
||||
// RemoteCache stores information about external repositories. The cache may
// be initialized with information about known repositories, i.e., those listed
// in the WORKSPACE file and mentioned on the command line. Other information
// is retrieved over the network.
//
// Public methods of RemoteCache may be slow in cases where a network fetch
// is needed. Public methods may be called concurrently.
//
// TODO(jayconrod): this is very Go-centric. It should be moved to language/go.
// Unfortunately, doing so would break the resolve.Resolver interface.
type RemoteCache struct {
	// RepoRootForImportPath is vcs.RepoRootForImportPath by default. It may
	// be overridden so that tests may avoid accessing the network.
	RepoRootForImportPath func(string, bool) (*vcs.RepoRoot, error)

	// HeadCmd returns the latest commit on the default branch in the given
	// repository. This is used by Head. It may be stubbed out for tests.
	HeadCmd func(remote, vcs string) (string, error)

	// ModInfo returns the module path and version that provides the package
	// with the given import path. This is used by Mod. It may be stubbed
	// out for tests.
	ModInfo func(importPath string) (modPath string, err error)

	// ModVersionInfo returns the module path, true version, and sum for
	// the module that provides the package with the given import path.
	// This is used by ModVersion. It may be stubbed out for tests.
	ModVersionInfo func(modPath, query string) (version, sum string, err error)

	// root, remote, head, mod, and modVersion memoize the results of the
	// corresponding public lookup methods.
	root, remote, head, mod, modVersion remoteCacheMap

	// tmpOnce guards lazy creation of tmpDir (see initTmp), a scratch
	// directory used to run "go" commands; tmpErr records any failure.
	tmpOnce sync.Once
	tmpDir  string
	tmpErr  error
}

// remoteCacheMap is a thread-safe, idempotent cache. It is used to store
// information which should be fetched over the network no more than once.
// This follows the Memo pattern described in The Go Programming Language,
// section 9.7.
type remoteCacheMap struct {
	mu    sync.Mutex
	cache map[string]*remoteCacheEntry
}

type remoteCacheEntry struct {
	value interface{}
	err   error

	// ready is nil for entries that were added when the cache was initialized.
	// It is non-nil for other entries. It is closed when an entry is ready,
	// i.e., the operation loading the entry completed.
	ready chan struct{}
}

// rootValue is the value type stored in RemoteCache.root.
type rootValue struct {
	root, name string
}

// remoteValue is the value type stored in RemoteCache.remote.
type remoteValue struct {
	remote, vcs string
}

// headValue is the value type stored in RemoteCache.head.
type headValue struct {
	commit, tag string
}

// modValue is the value type stored in RemoteCache.mod. known is true for
// entries populated from repositories declared up front, false for entries
// discovered via ModInfo.
type modValue struct {
	path, name string
	known      bool
}

// modVersionValue is the value type stored in RemoteCache.modVersion.
type modVersionValue struct {
	path, name, version, sum string
}

// Repo describes details of a Go repository known in advance. It is used to
// initialize RemoteCache so that some repositories don't need to be looked up.
//
// DEPRECATED: Go-specific details should be removed from RemoteCache, and
// lookup logic should be moved to language/go. This means RemoteCache will
// need to be initialized in a different way.
type Repo struct {
	Name, GoPrefix, Remote, VCS string
}
|
||||
|
||||
// NewRemoteCache creates a new RemoteCache with a set of known repositories.
// The Root and Remote methods will return information about repositories listed
// here without accessing the network. However, the Head method will still
// access the network for these repositories to retrieve information about new
// versions.
//
// A cleanup function is also returned. The caller must call this when
// RemoteCache is no longer needed. RemoteCache may write files to a temporary
// directory. This will delete them.
func NewRemoteCache(knownRepos []Repo) (r *RemoteCache, cleanup func() error) {
	r = &RemoteCache{
		RepoRootForImportPath: vcs.RepoRootForImportPath,
		HeadCmd:               defaultHeadCmd,
		root:                  remoteCacheMap{cache: make(map[string]*remoteCacheEntry)},
		remote:                remoteCacheMap{cache: make(map[string]*remoteCacheEntry)},
		head:                  remoteCacheMap{cache: make(map[string]*remoteCacheEntry)},
		mod:                   remoteCacheMap{cache: make(map[string]*remoteCacheEntry)},
		modVersion:            remoteCacheMap{cache: make(map[string]*remoteCacheEntry)},
	}
	// These defaults close over r so they can share its scratch directory.
	r.ModInfo = func(importPath string) (string, error) {
		return defaultModInfo(r, importPath)
	}
	r.ModVersionInfo = func(modPath, query string) (string, string, error) {
		return defaultModVersionInfo(r, modPath, query)
	}
	// Pre-populate the caches from the known repositories.
	for _, repo := range knownRepos {
		r.root.cache[repo.GoPrefix] = &remoteCacheEntry{
			value: rootValue{
				root: repo.GoPrefix,
				name: repo.Name,
			},
		}
		if repo.Remote != "" {
			r.remote.cache[repo.GoPrefix] = &remoteCacheEntry{
				value: remoteValue{
					remote: repo.Remote,
					vcs:    repo.VCS,
				},
			}
		}
		r.mod.cache[repo.GoPrefix] = &remoteCacheEntry{
			value: modValue{
				path:  repo.GoPrefix,
				name:  repo.Name,
				known: true,
			},
		}
	}

	// Augment knownRepos with additional prefixes for
	// minimal module compatibility. For example, if repo "com_example_foo_v2"
	// has prefix "example.com/foo/v2", map "example.com/foo" to the same
	// entry.
	// TODO(jayconrod): there should probably be some control over whether
	// callers can use these mappings: packages within modules should not be
	// allowed to use them. However, we'll return the same result nearly all
	// the time, and simpler is better.
	for _, repo := range knownRepos {
		path := pathWithoutSemver(repo.GoPrefix)
		if path == "" || r.root.cache[path] != nil {
			continue
		}
		// Alias the existing entry pointers rather than copying the values,
		// so both keys always resolve to the same entry.
		r.root.cache[path] = r.root.cache[repo.GoPrefix]
		if e := r.remote.cache[repo.GoPrefix]; e != nil {
			r.remote.cache[path] = e
		}
		r.mod.cache[path] = r.mod.cache[repo.GoPrefix]
	}

	return r, r.cleanup
}
|
||||
|
||||
func (r *RemoteCache) cleanup() error {
|
||||
if r.tmpDir == "" {
|
||||
return nil
|
||||
}
|
||||
return os.RemoveAll(r.tmpDir)
|
||||
}
|
||||
|
||||
// gopkginPattern matches gopkg.in import paths, capturing the repository
// root (e.g., "gopkg.in/yaml.v2" or "gopkg.in/user/pkg.v3").
var gopkginPattern = regexp.MustCompile("^(gopkg.in/(?:[^/]+/)?[^/]+\\.v\\d+)(?:/|$)")

// knownPrefixes lists common hosting prefixes whose repository roots sit a
// fixed number of path elements ("missing") below the prefix.
var knownPrefixes = []struct {
	prefix  string
	missing int
}{
	{prefix: "golang.org/x", missing: 1},
	{prefix: "google.golang.org", missing: 1},
	{prefix: "cloud.google.com", missing: 1},
	{prefix: "github.com", missing: 2},
}
|
||||
|
||||
// Root returns the portion of an import path that corresponds to the root
// directory of the repository containing the given import path. For example,
// given "golang.org/x/tools/go/loader", this will return "golang.org/x/tools".
// The workspace name of the repository is also returned. This may be a custom
// name set in WORKSPACE, or it may be a generated name based on the root path.
func (r *RemoteCache) Root(importPath string) (root, name string, err error) {
	// Try prefixes of the import path in the cache, but don't actually go out
	// to vcs yet. We do this before handling known special cases because
	// the cache is pre-populated with repository rules, and we want to use their
	// names if we can.
	prefix := importPath
	for {
		v, ok, err := r.root.get(prefix)
		if ok {
			if err != nil {
				return "", "", err
			}
			value := v.(rootValue)
			return value.root, value.name, nil
		}

		// Walk up one path element at a time until the top is reached.
		prefix = path.Dir(prefix)
		if prefix == "." || prefix == "/" {
			break
		}
	}

	// Try known prefixes.
	for _, p := range knownPrefixes {
		if pathtools.HasPrefix(importPath, p.prefix) {
			rest := pathtools.TrimPrefix(importPath, p.prefix)
			var components []string
			if rest != "" {
				components = strings.Split(rest, "/")
			}
			if len(components) < p.missing {
				return "", "", fmt.Errorf("import path %q is shorter than the known prefix %q", importPath, p.prefix)
			}
			// Extend the prefix with the fixed number of elements (p.missing)
			// needed to reach the repository root.
			root = p.prefix
			for _, c := range components[:p.missing] {
				root = path.Join(root, c)
			}
			name = label.ImportPathToBazelRepoName(root)
			return root, name, nil
		}
	}

	// gopkg.in is special, and might have either one or two levels of
	// missing paths. See http://labix.org/gopkg.in for URL patterns.
	if match := gopkginPattern.FindStringSubmatch(importPath); len(match) > 0 {
		root = match[1]
		name = label.ImportPathToBazelRepoName(root)
		return root, name, nil
	}

	// Find the prefix using vcs and cache the result.
	v, err := r.root.ensure(importPath, func() (interface{}, error) {
		res, err := r.RepoRootForImportPath(importPath, false)
		if err != nil {
			return nil, err
		}
		return rootValue{res.Root, label.ImportPathToBazelRepoName(res.Root)}, nil
	})
	if err != nil {
		return "", "", err
	}
	value := v.(rootValue)
	return value.root, value.name, nil
}
|
||||
|
||||
// Remote returns the VCS name and the remote URL for a repository with the
|
||||
// given root import path. This is suitable for creating new repository rules.
|
||||
func (r *RemoteCache) Remote(root string) (remote, vcs string, err error) {
|
||||
v, err := r.remote.ensure(root, func() (interface{}, error) {
|
||||
repo, err := r.RepoRootForImportPath(root, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return remoteValue{remote: repo.Repo, vcs: repo.VCS.Cmd}, nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
value := v.(remoteValue)
|
||||
return value.remote, value.vcs, nil
|
||||
}
|
||||
|
||||
// Head returns the most recent commit id on the default branch and latest
|
||||
// version tag for the given remote repository. The tag "" is returned if
|
||||
// no latest version was found.
|
||||
//
|
||||
// TODO(jayconrod): support VCS other than git.
|
||||
// TODO(jayconrod): support version tags. "" is always returned.
|
||||
func (r *RemoteCache) Head(remote, vcs string) (commit, tag string, err error) {
|
||||
if vcs != "git" {
|
||||
return "", "", fmt.Errorf("could not locate recent commit in repo %q with unknown version control scheme %q", remote, vcs)
|
||||
}
|
||||
|
||||
v, err := r.head.ensure(remote, func() (interface{}, error) {
|
||||
commit, err := r.HeadCmd(remote, vcs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return headValue{commit: commit}, nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
value := v.(headValue)
|
||||
return value.commit, value.tag, nil
|
||||
}
|
||||
|
||||
// defaultHeadCmd is the default implementation of RemoteCache.HeadCmd. It
// returns an empty commit for "local" repositories and resolves HEAD via
// "git ls-remote" for git; other version control systems are rejected.
func defaultHeadCmd(remote, vcs string) (string, error) {
	switch vcs {
	case "local":
		return "", nil

	case "git":
		// Old versions of git ls-remote exit with code 129 when "--" is passed.
		// We'll try to validate the argument here instead.
		if strings.HasPrefix(remote, "-") {
			return "", fmt.Errorf("remote must not start with '-': %q", remote)
		}
		cmd := exec.Command("git", "ls-remote", remote, "HEAD")
		out, err := cmd.Output()
		if err != nil {
			var stdErr []byte
			if e, ok := err.(*exec.ExitError); ok {
				stdErr = e.Stderr
			}
			return "", fmt.Errorf("git ls-remote for %s : %v : %s", remote, err, stdErr)
		}
		// Output is "<commit>\t<ref>"; keep everything before the first tab.
		ix := bytes.IndexByte(out, '\t')
		if ix < 0 {
			return "", fmt.Errorf("could not parse output for git ls-remote for %q", remote)
		}
		return string(out[:ix]), nil

	default:
		return "", fmt.Errorf("unknown version control system: %s", vcs)
	}
}
|
||||
|
||||
// Mod returns the module path for the module that contains the package
// named by importPath. The name of the go_repository rule for the module
// is also returned. For example, calling Mod on "github.com/foo/bar/v2/baz"
// would give the module path "github.com/foo/bar/v2" and the name
// "com_github_foo_bar_v2".
//
// If a known repository *could* provide importPath (because its "importpath"
// is a prefix of importPath), Mod will assume that it does. This may give
// inaccurate results if importPath is in an undeclared nested module. Run
// "gazelle update-repos -from_file=go.mod" first for best results.
//
// If no known repository could provide importPath, Mod will run "go list" to
// find the module. The special patterns that Root uses are ignored. Results are
// cached. Use GOPROXY for faster results.
func (r *RemoteCache) Mod(importPath string) (modPath, name string, err error) {
	// Check if any of the known repositories is a prefix.
	prefix := importPath
	for {
		v, ok, err := r.mod.get(prefix)
		if ok {
			if err != nil {
				return "", "", err
			}
			value := v.(modValue)
			if value.known {
				return value.path, value.name, nil
			} else {
				// Entries with known == false came from "go list", not from
				// declared repositories; fall through and query for
				// importPath itself.
				break
			}
		}

		prefix = path.Dir(prefix)
		if prefix == "." || prefix == "/" {
			break
		}
	}

	// Ask "go list".
	v, err := r.mod.ensure(importPath, func() (interface{}, error) {
		modPath, err := r.ModInfo(importPath)
		if err != nil {
			return nil, err
		}
		return modValue{
			path: modPath,
			name: label.ImportPathToBazelRepoName(modPath),
		}, nil
	})
	if err != nil {
		return "", "", err
	}
	value := v.(modValue)
	return value.path, value.name, nil
}
|
||||
|
||||
// defaultModInfo is the default implementation of RemoteCache.ModInfo. It
// runs "go list" inside the cache's scratch module directory to resolve the
// module path providing importPath.
func defaultModInfo(rc *RemoteCache, importPath string) (modPath string, err error) {
	rc.initTmp()
	if rc.tmpErr != nil {
		return "", rc.tmpErr
	}

	goTool := findGoTool()
	cmd := exec.Command(goTool, "list", "-find", "-f", "{{.Module.Path}}", "--", importPath)
	cmd.Dir = rc.tmpDir
	// Force module mode regardless of the caller's environment.
	cmd.Env = append(os.Environ(), "GO111MODULE=on")
	out, err := cmd.Output()
	if err != nil {
		var stdErr []byte
		if e, ok := err.(*exec.ExitError); ok {
			stdErr = e.Stderr
		}
		return "", fmt.Errorf("finding module path for import %s: %v: %s", importPath, err, stdErr)
	}
	return strings.TrimSpace(string(out)), nil
}
|
||||
|
||||
// ModVersion looks up information about a module at a given version.
// The path must be the module path, not a package within the module.
// The version may be a canonical semantic version, a query like "latest",
// or a branch, tag, or revision name. ModVersion returns the name of
// the repository rule providing the module (if any), the true version,
// and the sum.
func (r *RemoteCache) ModVersion(modPath, query string) (name, version, sum string, err error) {
	// Ask "go list". The cache is keyed by "path@query" so distinct queries
	// for the same module are resolved independently.
	arg := modPath + "@" + query
	v, err := r.modVersion.ensure(arg, func() (interface{}, error) {
		version, sum, err := r.ModVersionInfo(modPath, query)
		if err != nil {
			return nil, err
		}
		return modVersionValue{
			path:    modPath,
			version: version,
			sum:     sum,
		}, nil
	})
	if err != nil {
		return "", "", "", err
	}
	value := v.(modVersionValue)

	// Try to find the repository name for the module, if there's already
	// a repository rule that provides it.
	v, ok, err := r.mod.get(modPath)
	if ok && err == nil {
		name = v.(modValue).name
	} else {
		// Fall back to a generated name based on the module path.
		name = label.ImportPathToBazelRepoName(modPath)
	}

	return name, value.version, value.sum, nil
}
|
||||
|
||||
func defaultModVersionInfo(rc *RemoteCache, modPath, query string) (version, sum string, err error) {
|
||||
rc.initTmp()
|
||||
if rc.tmpErr != nil {
|
||||
return "", "", rc.tmpErr
|
||||
}
|
||||
|
||||
goTool := findGoTool()
|
||||
cmd := exec.Command(goTool, "mod", "download", "-json", "--", modPath+"@"+query)
|
||||
cmd.Dir = rc.tmpDir
|
||||
cmd.Env = append(os.Environ(), "GO111MODULE=on")
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
var stdErr []byte
|
||||
if e, ok := err.(*exec.ExitError); ok {
|
||||
stdErr = e.Stderr
|
||||
}
|
||||
return "", "", fmt.Errorf("finding module version and sum for %s@%s: %v: %s", modPath, query, err, stdErr)
|
||||
}
|
||||
|
||||
var result struct{ Version, Sum string }
|
||||
if err := json.Unmarshal(out, &result); err != nil {
|
||||
fmt.Println(out)
|
||||
return "", "", fmt.Errorf("finding module version and sum for %s@%s: invalid output from 'go mod download': %v", modPath, query, err)
|
||||
}
|
||||
return result.Version, result.Sum, nil
|
||||
}
|
||||
|
||||
// get retrieves a value associated with the given key from the cache. ok will
|
||||
// be true if the key exists in the cache, even if it's in the process of
|
||||
// being fetched.
|
||||
func (m *remoteCacheMap) get(key string) (value interface{}, ok bool, err error) {
|
||||
m.mu.Lock()
|
||||
e, ok := m.cache[key]
|
||||
m.mu.Unlock()
|
||||
if !ok {
|
||||
return nil, ok, nil
|
||||
}
|
||||
if e.ready != nil {
|
||||
<-e.ready
|
||||
}
|
||||
return e.value, ok, e.err
|
||||
}
|
||||
|
||||
// ensure retrieves a value associated with the given key from the cache. If
// the key does not exist in the cache, the load function will be called,
// and its result will be associated with the key. The load function will not
// be called more than once for any key.
func (m *remoteCacheMap) ensure(key string, load func() (interface{}, error)) (interface{}, error) {
	m.mu.Lock()
	e, ok := m.cache[key]
	if !ok {
		// This goroutine is responsible for loading the entry. The lock is
		// released before load runs so other keys aren't blocked; waiters on
		// this key block on e.ready instead.
		e = &remoteCacheEntry{ready: make(chan struct{})}
		m.cache[key] = e
		m.mu.Unlock()
		e.value, e.err = load()
		close(e.ready)
	} else {
		m.mu.Unlock()
		// Entries created during initialization have a nil ready channel and
		// are always complete; otherwise wait for the loader to finish.
		if e.ready != nil {
			<-e.ready
		}
	}
	return e.value, e.err
}
|
||||
|
||||
func (rc *RemoteCache) initTmp() {
|
||||
rc.tmpOnce.Do(func() {
|
||||
rc.tmpDir, rc.tmpErr = ioutil.TempDir("", "gazelle-remotecache-")
|
||||
if rc.tmpErr != nil {
|
||||
return
|
||||
}
|
||||
rc.tmpErr = ioutil.WriteFile(filepath.Join(rc.tmpDir, "go.mod"), []byte(`module gazelle_remote_cache__\n`), 0666)
|
||||
})
|
||||
}
|
||||
|
||||
var semverRex = regexp.MustCompile(`^.*?(/v\d+)(?:/.*)?$`)
|
||||
|
||||
// pathWithoutSemver removes a semantic version suffix from path.
|
||||
// For example, if path is "example.com/foo/v2/bar", pathWithoutSemver
|
||||
// will return "example.com/foo/bar". If there is no semantic version suffix,
|
||||
// "" will be returned.
|
||||
// TODO(jayconrod): copied from language/go. This whole type should be
|
||||
// migrated there.
|
||||
func pathWithoutSemver(path string) string {
|
||||
m := semverRex.FindStringSubmatchIndex(path)
|
||||
if m == nil {
|
||||
return ""
|
||||
}
|
||||
v := path[m[2]+2 : m[3]]
|
||||
if v == "0" || v == "1" {
|
||||
return ""
|
||||
}
|
||||
return path[:m[2]] + path[m[3]:]
|
||||
}
|
||||
|
||||
// findGoTool attempts to locate the go executable. If GOROOT is set, we'll
|
||||
// prefer the one in there; otherwise, we'll rely on PATH. If the wrapper
|
||||
// script generated by the gazelle rule is invoked by Bazel, it will set
|
||||
// GOROOT to the configured SDK. We don't want to rely on the host SDK in
|
||||
// that situation.
|
||||
//
|
||||
// TODO(jayconrod): copied from language/go (though it was originally in this
|
||||
// package). Go-specific details should be removed from RemoteCache, and
|
||||
// this copy should be deleted.
|
||||
func findGoTool() string {
|
||||
path := "go" // rely on PATH by default
|
||||
if goroot, ok := os.LookupEnv("GOROOT"); ok {
|
||||
path = filepath.Join(goroot, "bin", "go")
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
path += ".exe"
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
|
@ -1,166 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package repo provides functionality for managing Go repository rules.
|
||||
//
|
||||
// UNSTABLE: The exported APIs in this package may change. In the future,
|
||||
// language extensions should implement an interface for repository
|
||||
// rule management. The update-repos command will call interface methods,
|
||||
// and most if this package's functionality will move to language/go.
|
||||
// Moving this package to an internal directory would break existing
|
||||
// extensions, since RemoteCache is referenced through the resolve.Resolver
|
||||
// interface, which extensions are required to implement.
|
||||
package repo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
type byRuleName []*rule.Rule
|
||||
|
||||
func (s byRuleName) Len() int { return len(s) }
|
||||
func (s byRuleName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
|
||||
func (s byRuleName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// FindExternalRepo attempts to locate the directory where Bazel has fetched
|
||||
// the external repository with the given name. An error is returned if the
|
||||
// repository directory cannot be located.
|
||||
func FindExternalRepo(repoRoot, name string) (string, error) {
|
||||
// See https://docs.bazel.build/versions/master/output_directories.html
|
||||
// for documentation on Bazel directory layout.
|
||||
// We expect the bazel-out symlink in the workspace root directory to point to
|
||||
// <output-base>/execroot/<workspace-name>/bazel-out
|
||||
// We expect the external repository to be checked out at
|
||||
// <output-base>/external/<name>
|
||||
// Note that users can change the prefix for most of the Bazel symlinks with
|
||||
// --symlink_prefix, but this does not include bazel-out.
|
||||
externalPath := strings.Join([]string{repoRoot, "bazel-out", "..", "..", "..", "external", name}, string(os.PathSeparator))
|
||||
cleanPath, err := filepath.EvalSymlinks(externalPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
st, err := os.Stat(cleanPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !st.IsDir() {
|
||||
return "", fmt.Errorf("%s: not a directory", externalPath)
|
||||
}
|
||||
return cleanPath, nil
|
||||
}
|
||||
|
||||
// ListRepositories extracts metadata about repositories declared in a
|
||||
// file.
|
||||
func ListRepositories(workspace *rule.File) (repos []*rule.Rule, repoFileMap map[string]*rule.File, err error) {
|
||||
repoIndexMap := make(map[string]int)
|
||||
repoFileMap = make(map[string]*rule.File)
|
||||
for _, repo := range workspace.Rules {
|
||||
if name := repo.Name(); name != "" {
|
||||
repos = append(repos, repo)
|
||||
repoFileMap[name] = workspace
|
||||
repoIndexMap[name] = len(repos) - 1
|
||||
}
|
||||
}
|
||||
extraRepos, err := parseRepositoryDirectives(workspace.Directives)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for _, repo := range extraRepos {
|
||||
if i, ok := repoIndexMap[repo.Name()]; ok {
|
||||
repos[i] = repo
|
||||
} else {
|
||||
repos = append(repos, repo)
|
||||
}
|
||||
repoFileMap[repo.Name()] = workspace
|
||||
}
|
||||
|
||||
for _, d := range workspace.Directives {
|
||||
switch d.Key {
|
||||
case "repository_macro":
|
||||
f, defName, err := parseRepositoryMacroDirective(d.Value)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
f = filepath.Join(filepath.Dir(workspace.Path), filepath.Clean(f))
|
||||
macroFile, err := rule.LoadMacroFile(f, "", defName)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for _, repo := range macroFile.Rules {
|
||||
if name := repo.Name(); name != "" {
|
||||
repos = append(repos, repo)
|
||||
repoFileMap[name] = macroFile
|
||||
repoIndexMap[name] = len(repos) - 1
|
||||
}
|
||||
}
|
||||
extraRepos, err = parseRepositoryDirectives(macroFile.Directives)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for _, repo := range extraRepos {
|
||||
if i, ok := repoIndexMap[repo.Name()]; ok {
|
||||
repos[i] = repo
|
||||
} else {
|
||||
repos = append(repos, repo)
|
||||
}
|
||||
repoFileMap[repo.Name()] = macroFile
|
||||
}
|
||||
}
|
||||
}
|
||||
return repos, repoFileMap, nil
|
||||
}
|
||||
|
||||
func parseRepositoryDirectives(directives []rule.Directive) (repos []*rule.Rule, err error) {
|
||||
for _, d := range directives {
|
||||
switch d.Key {
|
||||
case "repository":
|
||||
vals := strings.Fields(d.Value)
|
||||
if len(vals) < 2 {
|
||||
return nil, fmt.Errorf("failure parsing repository: %s, expected repository kind and attributes", d.Value)
|
||||
}
|
||||
kind := vals[0]
|
||||
r := rule.NewRule(kind, "")
|
||||
for _, val := range vals[1:] {
|
||||
kv := strings.SplitN(val, "=", 2)
|
||||
if len(kv) != 2 {
|
||||
return nil, fmt.Errorf("failure parsing repository: %s, expected format for attributes is attr1_name=attr1_value", d.Value)
|
||||
}
|
||||
r.SetAttr(kv[0], kv[1])
|
||||
}
|
||||
if r.Name() == "" {
|
||||
return nil, fmt.Errorf("failure parsing repository: %s, expected a name attribute for the given repository", d.Value)
|
||||
}
|
||||
repos = append(repos, r)
|
||||
}
|
||||
}
|
||||
return repos, nil
|
||||
}
|
||||
|
||||
func parseRepositoryMacroDirective(directive string) (string, string, error) {
|
||||
vals := strings.Split(directive, "%")
|
||||
if len(vals) != 2 {
|
||||
return "", "", fmt.Errorf("Failure parsing repository_macro: %s, expected format is macroFile%%defName", directive)
|
||||
}
|
||||
f := vals[0]
|
||||
if strings.HasPrefix(f, "..") {
|
||||
return "", "", fmt.Errorf("Failure parsing repository_macro: %s, macro file path %s should not start with \"..\"", directive, f)
|
||||
}
|
||||
return f, vals[1], nil
|
||||
}
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"config.go",
|
||||
"index.go",
|
||||
],
|
||||
importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/resolve",
|
||||
importpath = "github.com/bazelbuild/bazel-gazelle/resolve",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/config:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/label:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/repo:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/rule:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
@ -1,115 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resolve
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/label"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// FindRuleWithOverride searches the current configuration for user-specified
|
||||
// dependency resolution overrides. Overrides specified later (in configuration
|
||||
// files in deeper directories, or closer to the end of the file) are
|
||||
// returned first. If no override is found, label.NoLabel is returned.
|
||||
func FindRuleWithOverride(c *config.Config, imp ImportSpec, lang string) (label.Label, bool) {
|
||||
rc := getResolveConfig(c)
|
||||
for i := len(rc.overrides) - 1; i >= 0; i-- {
|
||||
o := rc.overrides[i]
|
||||
if o.matches(imp, lang) {
|
||||
return o.dep, true
|
||||
}
|
||||
}
|
||||
return label.NoLabel, false
|
||||
}
|
||||
|
||||
type overrideSpec struct {
|
||||
imp ImportSpec
|
||||
lang string
|
||||
dep label.Label
|
||||
}
|
||||
|
||||
func (o overrideSpec) matches(imp ImportSpec, lang string) bool {
|
||||
return imp.Lang == o.imp.Lang &&
|
||||
imp.Imp == o.imp.Imp &&
|
||||
(o.lang == "" || o.lang == lang)
|
||||
}
|
||||
|
||||
type resolveConfig struct {
|
||||
overrides []overrideSpec
|
||||
}
|
||||
|
||||
const resolveName = "_resolve"
|
||||
|
||||
func getResolveConfig(c *config.Config) *resolveConfig {
|
||||
return c.Exts[resolveName].(*resolveConfig)
|
||||
}
|
||||
|
||||
type Configurer struct{}
|
||||
|
||||
func (_ *Configurer) RegisterFlags(fs *flag.FlagSet, cmd string, c *config.Config) {
|
||||
c.Exts[resolveName] = &resolveConfig{}
|
||||
}
|
||||
|
||||
func (_ *Configurer) CheckFlags(fs *flag.FlagSet, c *config.Config) error { return nil }
|
||||
|
||||
func (_ *Configurer) KnownDirectives() []string {
|
||||
return []string{"resolve"}
|
||||
}
|
||||
|
||||
func (_ *Configurer) Configure(c *config.Config, rel string, f *rule.File) {
|
||||
rc := getResolveConfig(c)
|
||||
rcCopy := &resolveConfig{
|
||||
overrides: rc.overrides[:],
|
||||
}
|
||||
|
||||
if f != nil {
|
||||
for _, d := range f.Directives {
|
||||
if d.Key == "resolve" {
|
||||
parts := strings.Fields(d.Value)
|
||||
o := overrideSpec{}
|
||||
var lbl string
|
||||
if len(parts) == 3 {
|
||||
o.imp.Lang = parts[0]
|
||||
o.imp.Imp = parts[1]
|
||||
lbl = parts[2]
|
||||
} else if len(parts) == 4 {
|
||||
o.imp.Lang = parts[0]
|
||||
o.lang = parts[1]
|
||||
o.imp.Imp = parts[2]
|
||||
lbl = parts[3]
|
||||
} else {
|
||||
log.Printf("could not parse directive: %s\n\texpected gazelle:resolve source-language [import-language] import-string label", d.Value)
|
||||
continue
|
||||
}
|
||||
var err error
|
||||
o.dep, err = label.Parse(lbl)
|
||||
if err != nil {
|
||||
log.Printf("gazelle:resolve %s: %v", d.Value, err)
|
||||
continue
|
||||
}
|
||||
o.dep = o.dep.Abs("", rel)
|
||||
rcCopy.overrides = append(rcCopy.overrides, o)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.Exts[resolveName] = rcCopy
|
||||
}
|
||||
|
|
@ -1,246 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resolve
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/config"
|
||||
"github.com/bazelbuild/bazel-gazelle/label"
|
||||
"github.com/bazelbuild/bazel-gazelle/repo"
|
||||
"github.com/bazelbuild/bazel-gazelle/rule"
|
||||
)
|
||||
|
||||
// ImportSpec describes a library to be imported. Imp is an import string for
|
||||
// the library. Lang is the language in which the import string appears (this
|
||||
// should match Resolver.Name).
|
||||
type ImportSpec struct {
|
||||
Lang, Imp string
|
||||
}
|
||||
|
||||
// Resolver is an interface that language extensions can implement to resolve
|
||||
// dependencies in rules they generate.
|
||||
type Resolver interface {
|
||||
// Name returns the name of the language. This should be a prefix of the
|
||||
// kinds of rules generated by the language, e.g., "go" for the Go extension
|
||||
// since it generates "go_library" rules.
|
||||
Name() string
|
||||
|
||||
// Imports returns a list of ImportSpecs that can be used to import the rule
|
||||
// r. This is used to populate RuleIndex.
|
||||
//
|
||||
// If nil is returned, the rule will not be indexed. If any non-nil slice is
|
||||
// returned, including an empty slice, the rule will be indexed.
|
||||
Imports(c *config.Config, r *rule.Rule, f *rule.File) []ImportSpec
|
||||
|
||||
// Embeds returns a list of labels of rules that the given rule embeds. If
|
||||
// a rule is embedded by another importable rule of the same language, only
|
||||
// the embedding rule will be indexed. The embedding rule will inherit
|
||||
// the imports of the embedded rule.
|
||||
Embeds(r *rule.Rule, from label.Label) []label.Label
|
||||
|
||||
// Resolve translates imported libraries for a given rule into Bazel
|
||||
// dependencies. Information about imported libraries is returned for each
|
||||
// rule generated by language.GenerateRules in
|
||||
// language.GenerateResult.Imports. Resolve generates a "deps" attribute (or
|
||||
// the appropriate language-specific equivalent) for each import according to
|
||||
// language-specific rules and heuristics.
|
||||
Resolve(c *config.Config, ix *RuleIndex, rc *repo.RemoteCache, r *rule.Rule, imports interface{}, from label.Label)
|
||||
}
|
||||
|
||||
// RuleIndex is a table of rules in a workspace, indexed by label and by
|
||||
// import path. Used by Resolver to map import paths to labels.
|
||||
type RuleIndex struct {
|
||||
rules []*ruleRecord
|
||||
labelMap map[label.Label]*ruleRecord
|
||||
importMap map[ImportSpec][]*ruleRecord
|
||||
mrslv func(r *rule.Rule, pkgRel string) Resolver
|
||||
}
|
||||
|
||||
// ruleRecord contains information about a rule relevant to import indexing.
|
||||
type ruleRecord struct {
|
||||
rule *rule.Rule
|
||||
label label.Label
|
||||
file *rule.File
|
||||
|
||||
// importedAs is a list of ImportSpecs by which this rule may be imported.
|
||||
// Used to build a map from ImportSpecs to ruleRecords.
|
||||
importedAs []ImportSpec
|
||||
|
||||
// embeds is the transitive closure of labels for rules that this rule embeds
|
||||
// (as determined by the Embeds method). This only includes rules in the same
|
||||
// language (i.e., it includes a go_library embedding a go_proto_library, but
|
||||
// not a go_proto_library embedding a proto_library).
|
||||
embeds []label.Label
|
||||
|
||||
// embedded indicates whether another rule of the same language embeds this
|
||||
// rule. Embedded rules should not be indexed.
|
||||
embedded bool
|
||||
|
||||
didCollectEmbeds bool
|
||||
}
|
||||
|
||||
// NewRuleIndex creates a new index.
|
||||
//
|
||||
// kindToResolver is a map from rule kinds (for example, "go_library") to
|
||||
// Resolvers that support those kinds.
|
||||
func NewRuleIndex(mrslv func(r *rule.Rule, pkgRel string) Resolver) *RuleIndex {
|
||||
return &RuleIndex{
|
||||
labelMap: make(map[label.Label]*ruleRecord),
|
||||
mrslv: mrslv,
|
||||
}
|
||||
}
|
||||
|
||||
// AddRule adds a rule r to the index. The rule will only be indexed if there
|
||||
// is a known resolver for the rule's kind and Resolver.Imports returns a
|
||||
// non-nil slice.
|
||||
//
|
||||
// AddRule may only be called before Finish.
|
||||
func (ix *RuleIndex) AddRule(c *config.Config, r *rule.Rule, f *rule.File) {
|
||||
var imps []ImportSpec
|
||||
if rslv := ix.mrslv(r, f.Pkg); rslv != nil {
|
||||
imps = rslv.Imports(c, r, f)
|
||||
}
|
||||
// If imps == nil, the rule is not importable. If imps is the empty slice,
|
||||
// it may still be importable if it embeds importable libraries.
|
||||
if imps == nil {
|
||||
return
|
||||
}
|
||||
|
||||
record := &ruleRecord{
|
||||
rule: r,
|
||||
label: label.New(c.RepoName, f.Pkg, r.Name()),
|
||||
file: f,
|
||||
importedAs: imps,
|
||||
}
|
||||
if _, ok := ix.labelMap[record.label]; ok {
|
||||
log.Printf("multiple rules found with label %s", record.label)
|
||||
return
|
||||
}
|
||||
ix.rules = append(ix.rules, record)
|
||||
ix.labelMap[record.label] = record
|
||||
}
|
||||
|
||||
// Finish constructs the import index and performs any other necessary indexing
|
||||
// actions after all rules have been added. This step is necessary because
|
||||
// a rule may be indexed differently based on what rules are added later.
|
||||
//
|
||||
// Finish must be called after all AddRule calls and before any
|
||||
// FindRulesByImport calls.
|
||||
func (ix *RuleIndex) Finish() {
|
||||
for _, r := range ix.rules {
|
||||
ix.collectEmbeds(r)
|
||||
}
|
||||
ix.buildImportIndex()
|
||||
}
|
||||
|
||||
func (ix *RuleIndex) collectEmbeds(r *ruleRecord) {
|
||||
if r.didCollectEmbeds {
|
||||
return
|
||||
}
|
||||
resolver := ix.mrslv(r.rule, r.file.Pkg)
|
||||
r.didCollectEmbeds = true
|
||||
embedLabels := resolver.Embeds(r.rule, r.label)
|
||||
r.embeds = embedLabels
|
||||
for _, e := range embedLabels {
|
||||
er, ok := ix.findRuleByLabel(e, r.label)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
ix.collectEmbeds(er)
|
||||
if resolver == ix.mrslv(er.rule, er.file.Pkg) {
|
||||
er.embedded = true
|
||||
r.embeds = append(r.embeds, er.embeds...)
|
||||
}
|
||||
r.importedAs = append(r.importedAs, er.importedAs...)
|
||||
}
|
||||
}
|
||||
|
||||
// buildImportIndex constructs the map used by FindRulesByImport.
|
||||
func (ix *RuleIndex) buildImportIndex() {
|
||||
ix.importMap = make(map[ImportSpec][]*ruleRecord)
|
||||
for _, r := range ix.rules {
|
||||
if r.embedded {
|
||||
continue
|
||||
}
|
||||
indexed := make(map[ImportSpec]bool)
|
||||
for _, imp := range r.importedAs {
|
||||
if indexed[imp] {
|
||||
continue
|
||||
}
|
||||
indexed[imp] = true
|
||||
ix.importMap[imp] = append(ix.importMap[imp], r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ix *RuleIndex) findRuleByLabel(label label.Label, from label.Label) (*ruleRecord, bool) {
|
||||
label = label.Abs(from.Repo, from.Pkg)
|
||||
r, ok := ix.labelMap[label]
|
||||
return r, ok
|
||||
}
|
||||
|
||||
type FindResult struct {
|
||||
// Label is the absolute label (including repository and package name) for
|
||||
// a matched rule.
|
||||
Label label.Label
|
||||
|
||||
// Embeds is the transitive closure of labels for rules that the matched
|
||||
// rule embeds. It may contains duplicates and does not include the label
|
||||
// for the rule itself.
|
||||
Embeds []label.Label
|
||||
}
|
||||
|
||||
// FindRulesByImport attempts to resolve an import string to a rule record.
|
||||
// imp is the import to resolve (which includes the target language). lang is
|
||||
// the language of the rule with the dependency (for example, in
|
||||
// go_proto_library, imp will have ProtoLang and lang will be GoLang).
|
||||
// from is the rule which is doing the dependency. This is used to check
|
||||
// vendoring visibility and to check for self-imports.
|
||||
//
|
||||
// FindRulesByImport returns a list of rules, since any number of rules may
|
||||
// provide the same import. Callers may need to resolve ambiguities using
|
||||
// language-specific heuristics.
|
||||
func (ix *RuleIndex) FindRulesByImport(imp ImportSpec, lang string) []FindResult {
|
||||
matches := ix.importMap[imp]
|
||||
results := make([]FindResult, 0, len(matches))
|
||||
for _, m := range matches {
|
||||
if ix.mrslv(m.rule, "").Name() != lang {
|
||||
continue
|
||||
}
|
||||
results = append(results, FindResult{
|
||||
Label: m.label,
|
||||
Embeds: m.embeds,
|
||||
})
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// IsSelfImport returns true if the result's label matches the given label
|
||||
// or the result's rule transitively embeds the rule with the given label.
|
||||
// Self imports cause cyclic dependencies, so the caller may want to omit
|
||||
// the dependency or report an error.
|
||||
func (r FindResult) IsSelfImport(from label.Label) bool {
|
||||
if from.Equal(r.Label) {
|
||||
return true
|
||||
}
|
||||
for _, e := range r.Embeds {
|
||||
if from.Equal(e) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
@ -1,24 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"directives.go",
|
||||
"expr.go",
|
||||
"merge.go",
|
||||
"platform.go",
|
||||
"platform_strings.go",
|
||||
"rule.go",
|
||||
"sort_labels.go",
|
||||
"types.go",
|
||||
"value.go",
|
||||
],
|
||||
importmap = "k8s.io/kops/vendor/github.com/bazelbuild/bazel-gazelle/rule",
|
||||
importpath = "github.com/bazelbuild/bazel-gazelle/rule",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/bazelbuild/bazel-gazelle/label:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/buildtools/build:go_default_library",
|
||||
"//vendor/github.com/bazelbuild/buildtools/tables:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
@ -1,75 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rule
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
|
||||
bzl "github.com/bazelbuild/buildtools/build"
|
||||
)
|
||||
|
||||
// Directive is a key-value pair extracted from a top-level comment in
|
||||
// a build file. Directives have the following format:
|
||||
//
|
||||
// # gazelle:key value
|
||||
//
|
||||
// Keys may not contain spaces. Values may be empty and may contain spaces,
|
||||
// but surrounding space is trimmed.
|
||||
type Directive struct {
|
||||
Key, Value string
|
||||
}
|
||||
|
||||
// TODO(jayconrod): annotation directives will apply to an individual rule.
|
||||
// They must appear in the block of comments above that rule.
|
||||
|
||||
// ParseDirectives scans f for Gazelle directives. The full list of directives
|
||||
// is returned. Errors are reported for unrecognized directives and directives
|
||||
// out of place (after the first statement).
|
||||
func ParseDirectives(f *bzl.File) []Directive {
|
||||
return parseDirectives(f.Stmt)
|
||||
}
|
||||
|
||||
// ParseDirectivesFromMacro scans a macro body for Gazelle directives. The
|
||||
// full list of directives is returned. Errors are reported for unrecognized
|
||||
// directives and directives out of place (after the first statement).
|
||||
func ParseDirectivesFromMacro(f *bzl.DefStmt) []Directive {
|
||||
return parseDirectives(f.Body)
|
||||
}
|
||||
|
||||
func parseDirectives(stmt []bzl.Expr) []Directive {
|
||||
var directives []Directive
|
||||
parseComment := func(com bzl.Comment) {
|
||||
match := directiveRe.FindStringSubmatch(com.Token)
|
||||
if match == nil {
|
||||
return
|
||||
}
|
||||
key, value := match[1], match[2]
|
||||
directives = append(directives, Directive{key, value})
|
||||
}
|
||||
|
||||
for _, s := range stmt {
|
||||
coms := s.Comment()
|
||||
for _, com := range coms.Before {
|
||||
parseComment(com)
|
||||
}
|
||||
for _, com := range coms.After {
|
||||
parseComment(com)
|
||||
}
|
||||
}
|
||||
return directives
|
||||
}
|
||||
|
||||
var directiveRe = regexp.MustCompile(`^#\s*gazelle:(\w+)\s*(.*?)\s*$`)
|
||||
|
|
@ -1,354 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rule
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/bazelbuild/bazel-gazelle/label"
|
||||
bzl "github.com/bazelbuild/buildtools/build"
|
||||
)
|
||||
|
||||
// MapExprStrings applies a function to string sub-expressions within e.
|
||||
// An expression containing the results with the same structure as e is
|
||||
// returned.
|
||||
func MapExprStrings(e bzl.Expr, f func(string) string) bzl.Expr {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
switch expr := e.(type) {
|
||||
case *bzl.StringExpr:
|
||||
s := f(expr.Value)
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
ret := *expr
|
||||
ret.Value = s
|
||||
return &ret
|
||||
|
||||
case *bzl.ListExpr:
|
||||
var list []bzl.Expr
|
||||
for _, elem := range expr.List {
|
||||
elem = MapExprStrings(elem, f)
|
||||
if elem != nil {
|
||||
list = append(list, elem)
|
||||
}
|
||||
}
|
||||
if len(list) == 0 && len(expr.List) > 0 {
|
||||
return nil
|
||||
}
|
||||
ret := *expr
|
||||
ret.List = list
|
||||
return &ret
|
||||
|
||||
case *bzl.DictExpr:
|
||||
var cases []bzl.Expr
|
||||
isEmpty := true
|
||||
for _, kv := range expr.List {
|
||||
keyval, ok := kv.(*bzl.KeyValueExpr)
|
||||
if !ok {
|
||||
log.Panicf("unexpected expression in generated imports dict: %#v", kv)
|
||||
}
|
||||
value := MapExprStrings(keyval.Value, f)
|
||||
if value != nil {
|
||||
cases = append(cases, &bzl.KeyValueExpr{Key: keyval.Key, Value: value})
|
||||
if key, ok := keyval.Key.(*bzl.StringExpr); !ok || key.Value != "//conditions:default" {
|
||||
isEmpty = false
|
||||
}
|
||||
}
|
||||
}
|
||||
if isEmpty {
|
||||
return nil
|
||||
}
|
||||
ret := *expr
|
||||
ret.List = cases
|
||||
return &ret
|
||||
|
||||
case *bzl.CallExpr:
|
||||
if x, ok := expr.X.(*bzl.Ident); !ok || x.Name != "select" || len(expr.List) != 1 {
|
||||
log.Panicf("unexpected call expression in generated imports: %#v", e)
|
||||
}
|
||||
arg := MapExprStrings(expr.List[0], f)
|
||||
if arg == nil {
|
||||
return nil
|
||||
}
|
||||
call := *expr
|
||||
call.List[0] = arg
|
||||
return &call
|
||||
|
||||
case *bzl.BinaryExpr:
|
||||
x := MapExprStrings(expr.X, f)
|
||||
y := MapExprStrings(expr.Y, f)
|
||||
if x == nil {
|
||||
return y
|
||||
}
|
||||
if y == nil {
|
||||
return x
|
||||
}
|
||||
binop := *expr
|
||||
binop.X = x
|
||||
binop.Y = y
|
||||
return &binop
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// FlattenExpr takes an expression that may have been generated from
|
||||
// PlatformStrings and returns its values in a flat, sorted, de-duplicated
|
||||
// list. Comments are accumulated and de-duplicated across duplicate
|
||||
// expressions. If the expression could not have been generted by
|
||||
// PlatformStrings, the expression will be returned unmodified.
|
||||
func FlattenExpr(e bzl.Expr) bzl.Expr {
|
||||
ps, err := extractPlatformStringsExprs(e)
|
||||
if err != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
ls := makeListSquasher()
|
||||
addElem := func(e bzl.Expr) bool {
|
||||
s, ok := e.(*bzl.StringExpr)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
ls.add(s)
|
||||
return true
|
||||
}
|
||||
addList := func(e bzl.Expr) bool {
|
||||
l, ok := e.(*bzl.ListExpr)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
for _, elem := range l.List {
|
||||
if !addElem(elem) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
addDict := func(d *bzl.DictExpr) bool {
|
||||
for _, kv := range d.List {
|
||||
if !addList(kv.(*bzl.KeyValueExpr).Value) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if ps.generic != nil {
|
||||
if !addList(ps.generic) {
|
||||
return e
|
||||
}
|
||||
}
|
||||
for _, d := range []*bzl.DictExpr{ps.os, ps.arch, ps.platform} {
|
||||
if d == nil {
|
||||
continue
|
||||
}
|
||||
if !addDict(d) {
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
return ls.list()
|
||||
}
|
||||
|
||||
func isScalar(e bzl.Expr) bool {
|
||||
switch e.(type) {
|
||||
case *bzl.StringExpr, *bzl.LiteralExpr, *bzl.Ident:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func dictEntryKeyValue(e bzl.Expr) (string, *bzl.ListExpr, error) {
|
||||
kv, ok := e.(*bzl.KeyValueExpr)
|
||||
if !ok {
|
||||
return "", nil, fmt.Errorf("dict entry was not a key-value pair: %#v", e)
|
||||
}
|
||||
k, ok := kv.Key.(*bzl.StringExpr)
|
||||
if !ok {
|
||||
return "", nil, fmt.Errorf("dict key was not string: %#v", kv.Key)
|
||||
}
|
||||
v, ok := kv.Value.(*bzl.ListExpr)
|
||||
if !ok {
|
||||
return "", nil, fmt.Errorf("dict value was not list: %#v", kv.Value)
|
||||
}
|
||||
return k.Value, v, nil
|
||||
}
|
||||
|
||||
func stringValue(e bzl.Expr) string {
|
||||
s, ok := e.(*bzl.StringExpr)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return s.Value
|
||||
}
|
||||
|
||||
// platformStringsExprs is a set of sub-expressions that match the structure
// of package.PlatformStrings. ExprFromValue produces expressions that
// follow this structure for srcs, deps, and other attributes, so this matches
// all non-scalar expressions generated by Gazelle.
//
// The matched expression has the form:
//
//     [] + select({}) + select({}) + select({})
//
// The four collections may appear in any order, and some or all of them may
// be omitted (all fields are nil for a nil expression).
type platformStringsExprs struct {
	// generic is the plain string list not guarded by any select.
	generic *bzl.ListExpr
	// os, arch, and platform are the dict arguments of the OS-specific,
	// arch-specific, and OS/arch-specific select calls, respectively.
	os, arch, platform *bzl.DictExpr
}
|
||||
|
||||
// extractPlatformStringsExprs matches an expression and attempts to extract
// sub-expressions in platformStringsExprs. The sub-expressions can then be
// merged with corresponding sub-expressions. Any field in the returned
// structure may be nil. An error is returned if the given expression does
// not follow the pattern described by platformStringsExprs.
func extractPlatformStringsExprs(expr bzl.Expr) (platformStringsExprs, error) {
	var ps platformStringsExprs
	if expr == nil {
		return ps, nil
	}

	// Break the expression into a sequence of expressions combined with +.
	// "+" parses left-associatively, so we peel operands off the right; the
	// parts end up in reverse order, which doesn't matter below.
	var parts []bzl.Expr
	for {
		binop, ok := expr.(*bzl.BinaryExpr)
		if !ok {
			parts = append(parts, expr)
			break
		}
		parts = append(parts, binop.Y)
		expr = binop.X
	}

	// Process each part. They may be in any order.
	// NOTE(review): parts that are neither lists nor calls fall through this
	// switch and are silently dropped — confirm callers never produce them,
	// or whether that should be an error.
	for _, part := range parts {
		switch part := part.(type) {
		case *bzl.ListExpr:
			// The platform-independent string list; at most one is allowed.
			if ps.generic != nil {
				return platformStringsExprs{}, fmt.Errorf("expression could not be matched: multiple list expressions")
			}
			ps.generic = part

		case *bzl.CallExpr:
			// Must be select({...}) with exactly one dict argument.
			x, ok := part.X.(*bzl.Ident)
			if !ok || x.Name != "select" || len(part.List) != 1 {
				return platformStringsExprs{}, fmt.Errorf("expression could not be matched: callee other than select or wrong number of args")
			}
			arg, ok := part.List[0].(*bzl.DictExpr)
			if !ok {
				return platformStringsExprs{}, fmt.Errorf("expression could not be matched: select argument not dict")
			}
			// Classify the select by its first non-default key: a known OS
			// name, a known arch name, or an "os_arch" pair.
			var dict **bzl.DictExpr
			for _, item := range arg.List {
				kv := item.(*bzl.KeyValueExpr) // parser guarantees this
				k, ok := kv.Key.(*bzl.StringExpr)
				if !ok {
					return platformStringsExprs{}, fmt.Errorf("expression could not be matched: dict keys are not all strings")
				}
				if k.Value == "//conditions:default" {
					// The default case doesn't identify the dict; keep looking.
					continue
				}
				key, err := label.Parse(k.Value)
				if err != nil {
					return platformStringsExprs{}, fmt.Errorf("expression could not be matched: dict key is not label: %q", k.Value)
				}
				if KnownOSSet[key.Name] {
					dict = &ps.os
					break
				}
				if KnownArchSet[key.Name] {
					dict = &ps.arch
					break
				}
				osArch := strings.Split(key.Name, "_")
				if len(osArch) != 2 || !KnownOSSet[osArch[0]] || !KnownArchSet[osArch[1]] {
					return platformStringsExprs{}, fmt.Errorf("expression could not be matched: dict key contains unknown platform: %q", k.Value)
				}
				dict = &ps.platform
				break
			}
			if dict == nil {
				// We could not identify the dict because it's empty or only contains
				// //conditions:default. We'll call it the platform dict to avoid
				// dropping it.
				dict = &ps.platform
			}
			if *dict != nil {
				return platformStringsExprs{}, fmt.Errorf("expression could not be matched: multiple selects that are either os-specific, arch-specific, or platform-specific")
			}
			*dict = arg
		}
	}
	return ps, nil
}
|
||||
|
||||
// makePlatformStringsExpr constructs a single expression from the
// sub-expressions in ps. The result has the form
// generic + select(os) + select(arch) + select(platform), with nil parts
// omitted; it is nil when all parts are nil, and a lone part is returned
// without a surrounding "+".
func makePlatformStringsExpr(ps platformStringsExprs) bzl.Expr {
	// makeSelect wraps a dict in a select(...) call.
	makeSelect := func(dict *bzl.DictExpr) bzl.Expr {
		return &bzl.CallExpr{
			X:    &bzl.Ident{Name: "select"},
			List: []bzl.Expr{dict},
		}
	}
	// forceMultiline makes the printer format a part across multiple lines
	// (the dict inside a select, or the list itself).
	forceMultiline := func(e bzl.Expr) {
		switch e := e.(type) {
		case *bzl.ListExpr:
			e.ForceMultiLine = true
		case *bzl.CallExpr:
			e.List[0].(*bzl.DictExpr).ForceMultiLine = true
		}
	}

	// Collect the non-nil parts in canonical order.
	var parts []bzl.Expr
	if ps.generic != nil {
		parts = append(parts, ps.generic)
	}
	if ps.os != nil {
		parts = append(parts, makeSelect(ps.os))
	}
	if ps.arch != nil {
		parts = append(parts, makeSelect(ps.arch))
	}
	if ps.platform != nil {
		parts = append(parts, makeSelect(ps.platform))
	}

	if len(parts) == 0 {
		return nil
	}
	if len(parts) == 1 {
		// A single part keeps its natural formatting.
		return parts[0]
	}
	// Two or more parts: chain them with "+" and force multi-line layout.
	expr := parts[0]
	forceMultiline(expr)
	for _, part := range parts[1:] {
		forceMultiline(part)
		expr = &bzl.BinaryExpr{
			Op: "+",
			X:  expr,
			Y:  part,
		}
	}
	return expr
}
|
||||
|
|
@ -1,489 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rule
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
|
||||
bzl "github.com/bazelbuild/buildtools/build"
|
||||
)
|
||||
|
||||
// MergeRules copies information from src into dst, usually discarding
// information in dst when they have the same attributes.
//
// If dst is marked with a "# keep" comment, either above the rule or as
// a suffix, nothing will be changed.
//
// If src has an attribute that is not in dst, it will be copied into dst.
//
// If src and dst have the same attribute and the attribute is mergeable and the
// attribute in dst is not marked with a "# keep" comment, values in the dst
// attribute not marked with a "# keep" comment will be dropped, and values from
// src will be copied in.
//
// If dst has an attribute not in src, and the attribute is mergeable and not
// marked with a "# keep" comment, values in the attribute not marked with
// a "# keep" comment will be dropped. If the attribute is empty afterward,
// it will be deleted.
//
// filename is used only in log messages when an expression can't be merged.
func MergeRules(src, dst *Rule, mergeable map[string]bool, filename string) {
	if dst.ShouldKeep() {
		return
	}

	// Process attributes that are in dst but not in src.
	// Merging against a nil src drops unprotected values; the attribute is
	// deleted entirely when nothing survives.
	for key, dstAttr := range dst.attrs {
		if _, ok := src.attrs[key]; ok || !mergeable[key] || ShouldKeep(dstAttr) {
			continue
		}
		dstValue := dstAttr.RHS
		if mergedValue, err := mergeExprs(nil, dstValue); err != nil {
			// Unmergeable expressions are left as-is; just report them.
			start, end := dstValue.Span()
			log.Printf("%s:%d.%d-%d.%d: could not merge expression", filename, start.Line, start.LineRune, end.Line, end.LineRune)
		} else if mergedValue == nil {
			dst.DelAttr(key)
		} else {
			dst.SetAttr(key, mergedValue)
		}
	}

	// Merge attributes from src into dst.
	for key, srcAttr := range src.attrs {
		srcValue := srcAttr.RHS
		if dstAttr, ok := dst.attrs[key]; !ok {
			// Attribute only in src: copy it over verbatim.
			dst.SetAttr(key, srcValue)
		} else if mergeable[key] && !ShouldKeep(dstAttr) {
			dstValue := dstAttr.RHS
			if mergedValue, err := mergeExprs(srcValue, dstValue); err != nil {
				start, end := dstValue.Span()
				log.Printf("%s:%d.%d-%d.%d: could not merge expression", filename, start.Line, start.LineRune, end.Line, end.LineRune)
			} else {
				dst.SetAttr(key, mergedValue)
			}
		}
	}
}
|
||||
|
||||
// mergeExprs combines information from src and dst and returns a merged
// expression. dst may be modified during this process. The returned expression
// may be different from dst when a structural change is needed.
//
// The following kinds of expressions are recognized.
//
//   * nil
//   * strings (can only be merged with strings)
//   * lists of strings
//   * a call to select with a dict argument. The dict keys must be strings,
//     and the values must be lists of strings.
//   * a list of strings combined with a select call using +. The list must
//     be the left operand.
//
// An error is returned if the expressions can't be merged, for example
// because they are not in one of the above formats.
func mergeExprs(src, dst bzl.Expr) (bzl.Expr, error) {
	if ShouldKeep(dst) {
		// dst is protected by a "# keep" comment; report nothing to merge.
		return nil, nil
	}
	if src == nil && (dst == nil || isScalar(dst)) {
		// Nothing on the src side and nothing mergeable on the dst side.
		return nil, nil
	}
	if isScalar(src) {
		// Scalars are not merged element-wise; src simply replaces dst.
		return src, nil
	}

	// Both sides should be list/select combinations: decompose them, merge
	// each corresponding sub-expression, and rebuild a single expression.
	srcExprs, err := extractPlatformStringsExprs(src)
	if err != nil {
		return nil, err
	}
	dstExprs, err := extractPlatformStringsExprs(dst)
	if err != nil {
		return nil, err
	}
	mergedExprs, err := mergePlatformStringsExprs(srcExprs, dstExprs)
	if err != nil {
		return nil, err
	}
	return makePlatformStringsExpr(mergedExprs), nil
}
|
||||
|
||||
func mergePlatformStringsExprs(src, dst platformStringsExprs) (platformStringsExprs, error) {
|
||||
var ps platformStringsExprs
|
||||
var err error
|
||||
ps.generic = mergeList(src.generic, dst.generic)
|
||||
if ps.os, err = mergeDict(src.os, dst.os); err != nil {
|
||||
return platformStringsExprs{}, err
|
||||
}
|
||||
if ps.arch, err = mergeDict(src.arch, dst.arch); err != nil {
|
||||
return platformStringsExprs{}, err
|
||||
}
|
||||
if ps.platform, err = mergeDict(src.platform, dst.platform); err != nil {
|
||||
return platformStringsExprs{}, err
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// mergeList merges two list expressions: the result contains the strings in
// src plus any dst elements protected by a "# keep" comment. Comments on dst
// elements that also appear in src are preserved. A nil dst returns src
// unchanged; a nil src behaves like an empty list (dropping unprotected dst
// values). Returns nil when the merged list would be empty.
func mergeList(src, dst *bzl.ListExpr) *bzl.ListExpr {
	if dst == nil {
		return src
	}
	if src == nil {
		src = &bzl.ListExpr{List: []bzl.Expr{}}
	}

	// Build a list of strings from the src list and keep matching strings
	// in the dst list. This preserves comments. Also keep anything with
	// a "# keep" comment, whether or not it's in the src list.
	srcSet := make(map[string]bool)
	for _, v := range src.List {
		if s := stringValue(v); s != "" {
			srcSet[s] = true
		}
	}

	var merged []bzl.Expr
	kept := make(map[string]bool)
	keepComment := false
	for _, v := range dst.List {
		s := stringValue(v)
		if keep := ShouldKeep(v); keep || srcSet[s] {
			keepComment = keepComment || keep
			merged = append(merged, v)
			if s != "" {
				kept[s] = true
			}
		}
	}

	// Add anything in the src list that wasn't kept.
	for _, v := range src.List {
		if s := stringValue(v); kept[s] {
			continue
		}
		merged = append(merged, v)
	}

	if len(merged) == 0 {
		return nil
	}
	// Keep multi-line formatting if either input had it, or if any element
	// carries a "# keep" comment (so the comment stays visible).
	return &bzl.ListExpr{
		List:           merged,
		ForceMultiLine: src.ForceMultiLine || dst.ForceMultiLine || keepComment,
	}
}
|
||||
|
||||
// mergeDict merges two select dicts case by case with mergeList. The
// "//conditions:default" case is always kept (even when empty) and sorted
// last; other cases are sorted by key and dropped when their merged value is
// empty. Returns (nil, nil) when nothing remains. An error is returned when
// either dict is malformed or dst has duplicate cases.
func mergeDict(src, dst *bzl.DictExpr) (*bzl.DictExpr, error) {
	if dst == nil {
		return src, nil
	}
	if src == nil {
		// Treat a missing src as empty so unprotected dst values get dropped.
		src = &bzl.DictExpr{List: []bzl.Expr{}}
	}

	// Index dst entries by case key, preserving discovery order.
	var entries []*dictEntry
	entryMap := make(map[string]*dictEntry)

	for _, kv := range dst.List {
		k, v, err := dictEntryKeyValue(kv)
		if err != nil {
			return nil, err
		}
		if _, ok := entryMap[k]; ok {
			return nil, fmt.Errorf("dst dict contains more than one case named %q", k)
		}
		e := &dictEntry{key: k, dstValue: v}
		entries = append(entries, e)
		entryMap[k] = e
	}

	// Attach src values to existing entries, creating entries for new keys.
	// Duplicate src keys are tolerated; the last value wins.
	for _, kv := range src.List {
		k, v, err := dictEntryKeyValue(kv)
		if err != nil {
			return nil, err
		}
		e, ok := entryMap[k]
		if !ok {
			e = &dictEntry{key: k}
			entries = append(entries, e)
			entryMap[k] = e
		}
		e.srcValue = v
	}

	// Merge each case and collect the keys whose merged value is non-empty.
	keys := make([]string, 0, len(entries))
	haveDefault := false
	for _, e := range entries {
		e.mergedValue = mergeList(e.srcValue, e.dstValue)
		if e.key == "//conditions:default" {
			// Keep the default case, even if it's empty.
			haveDefault = true
			if e.mergedValue == nil {
				e.mergedValue = &bzl.ListExpr{}
			}
		} else if e.mergedValue != nil {
			keys = append(keys, e.key)
		}
	}
	// Nothing survived (or only an empty default): drop the whole dict.
	if len(keys) == 0 && (!haveDefault || len(entryMap["//conditions:default"].mergedValue.List) == 0) {
		return nil, nil
	}
	sort.Strings(keys)
	// Always put the default case last.
	if haveDefault {
		keys = append(keys, "//conditions:default")
	}

	// Rebuild the dict in sorted key order.
	mergedEntries := make([]bzl.Expr, len(keys))
	for i, k := range keys {
		e := entryMap[k]
		mergedEntries[i] = &bzl.KeyValueExpr{
			Key:   &bzl.StringExpr{Value: e.key},
			Value: e.mergedValue,
		}
	}

	return &bzl.DictExpr{List: mergedEntries, ForceMultiLine: true}, nil
}
|
||||
|
||||
// dictEntry tracks one select case (keyed by its condition label) while
// mergeDict combines two dicts.
type dictEntry struct {
	key string
	// dstValue and srcValue are the case's lists in the original dicts
	// (nil when absent); mergedValue holds the result of merging them.
	dstValue, srcValue, mergedValue *bzl.ListExpr
}
|
||||
|
||||
// SquashRules copies information from src into dst without discarding
// information in dst. SquashRules detects duplicate elements in lists and
// dictionaries, but it doesn't sort elements after squashing. If squashing
// fails because the expression is not understood, an error is returned,
// and neither rule is modified.
//
// NOTE(review): despite the sentence above, attributes squashed before a
// failure have already been written into dst when the error is returned —
// confirm whether callers rely on full atomicity.
//
// filename is used only to locate the offending expression in the error.
func SquashRules(src, dst *Rule, filename string) error {
	if dst.ShouldKeep() {
		return nil
	}

	for key, srcAttr := range src.attrs {
		srcValue := srcAttr.RHS
		if dstAttr, ok := dst.attrs[key]; !ok {
			// Attribute only in src: copy it over verbatim.
			dst.SetAttr(key, srcValue)
		} else if !ShouldKeep(dstAttr) {
			dstValue := dstAttr.RHS
			if squashedValue, err := squashExprs(srcValue, dstValue); err != nil {
				start, end := dstValue.Span()
				return fmt.Errorf("%s:%d.%d-%d.%d: could not squash expression", filename, start.Line, start.LineRune, end.Line, end.LineRune)
			} else {
				dst.SetAttr(key, squashedValue)
			}
		}
	}
	// Carry over comments attached to the src rule itself.
	dst.expr.Comment().Before = append(dst.expr.Comment().Before, src.expr.Comment().Before...)
	dst.expr.Comment().Suffix = append(dst.expr.Comment().Suffix, src.expr.Comment().Suffix...)
	dst.expr.Comment().After = append(dst.expr.Comment().After, src.expr.Comment().After...)
	return nil
}
|
||||
|
||||
func squashExprs(src, dst bzl.Expr) (bzl.Expr, error) {
|
||||
if ShouldKeep(dst) {
|
||||
return dst, nil
|
||||
}
|
||||
if isScalar(dst) {
|
||||
// may lose src, but they should always be the same.
|
||||
return dst, nil
|
||||
}
|
||||
srcExprs, err := extractPlatformStringsExprs(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dstExprs, err := extractPlatformStringsExprs(dst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
squashedExprs, err := squashPlatformStringsExprs(srcExprs, dstExprs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return makePlatformStringsExpr(squashedExprs), nil
|
||||
}
|
||||
|
||||
func squashPlatformStringsExprs(x, y platformStringsExprs) (platformStringsExprs, error) {
|
||||
var ps platformStringsExprs
|
||||
var err error
|
||||
if ps.generic, err = squashList(x.generic, y.generic); err != nil {
|
||||
return platformStringsExprs{}, err
|
||||
}
|
||||
if ps.os, err = squashDict(x.os, y.os); err != nil {
|
||||
return platformStringsExprs{}, err
|
||||
}
|
||||
if ps.arch, err = squashDict(x.arch, y.arch); err != nil {
|
||||
return platformStringsExprs{}, err
|
||||
}
|
||||
if ps.platform, err = squashDict(x.platform, y.platform); err != nil {
|
||||
return platformStringsExprs{}, err
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
func squashList(x, y *bzl.ListExpr) (*bzl.ListExpr, error) {
|
||||
if x == nil {
|
||||
return y, nil
|
||||
}
|
||||
if y == nil {
|
||||
return x, nil
|
||||
}
|
||||
|
||||
ls := makeListSquasher()
|
||||
for _, e := range x.List {
|
||||
s, ok := e.(*bzl.StringExpr)
|
||||
if !ok {
|
||||
return nil, errors.New("could not squash non-string")
|
||||
}
|
||||
ls.add(s)
|
||||
}
|
||||
for _, e := range y.List {
|
||||
s, ok := e.(*bzl.StringExpr)
|
||||
if !ok {
|
||||
return nil, errors.New("could not squash non-string")
|
||||
}
|
||||
ls.add(s)
|
||||
}
|
||||
squashed := ls.list()
|
||||
squashed.Comments.Before = append(x.Comments.Before, y.Comments.Before...)
|
||||
squashed.Comments.Suffix = append(x.Comments.Suffix, y.Comments.Suffix...)
|
||||
squashed.Comments.After = append(x.Comments.After, y.Comments.After...)
|
||||
return squashed, nil
|
||||
}
|
||||
|
||||
// squashDict combines the cases of two select dicts without dropping
// anything. Cases present in both dicts have their list values squashed
// together; the result is sorted by key with "//conditions:default" forced
// last, and the dicts' own comments are concatenated. A nil input returns
// the other dict unchanged. An error is returned for non-string keys or
// non-list values.
func squashDict(x, y *bzl.DictExpr) (*bzl.DictExpr, error) {
	if x == nil {
		return y, nil
	}
	if y == nil {
		return x, nil
	}

	// cases accumulates one key-value pair per condition label.
	cases := make(map[string]*bzl.KeyValueExpr)
	// addCase folds one entry into cases, squashing list values when the
	// key was already seen. Entries are copied so the inputs stay intact.
	addCase := func(e bzl.Expr) error {
		kv := e.(*bzl.KeyValueExpr)
		key, ok := kv.Key.(*bzl.StringExpr)
		if !ok {
			return errors.New("could not squash non-string dict key")
		}
		if _, ok := kv.Value.(*bzl.ListExpr); !ok {
			return errors.New("could not squash non-list dict value")
		}
		if c, ok := cases[key.Value]; ok {
			if sq, err := squashList(kv.Value.(*bzl.ListExpr), c.Value.(*bzl.ListExpr)); err != nil {
				return err
			} else {
				c.Value = sq
			}
		} else {
			kvCopy := *kv
			cases[key.Value] = &kvCopy
		}
		return nil
	}

	for _, e := range x.List {
		if err := addCase(e); err != nil {
			return nil, err
		}
	}
	for _, e := range y.List {
		if err := addCase(e); err != nil {
			return nil, err
		}
	}

	// Sort the case keys, holding the default case out for last.
	keys := make([]string, 0, len(cases))
	haveDefault := false
	for k := range cases {
		if k == "//conditions:default" {
			haveDefault = true
			continue
		}
		keys = append(keys, k)
	}
	sort.Strings(keys)
	if haveDefault {
		keys = append(keys, "//conditions:default") // must be last
	}

	// Rebuild on a copy of x so its formatting flags carry over, with the
	// two dicts' comments concatenated.
	squashed := *x
	squashed.Comments.Before = append(x.Comments.Before, y.Comments.Before...)
	squashed.Comments.Suffix = append(x.Comments.Suffix, y.Comments.Suffix...)
	squashed.Comments.After = append(x.Comments.After, y.Comments.After...)
	squashed.List = make([]bzl.Expr, 0, len(cases))
	for _, k := range keys {
		squashed.List = append(squashed.List, cases[k])
	}
	return &squashed, nil
}
|
||||
|
||||
// listSquasher builds a sorted, deduplicated list of string expressions. If
// a string expression is added multiple times, comments are consolidated.
// The original expressions are not modified.
type listSquasher struct {
	// unique maps each string value to its (copied) expression.
	unique map[string]*bzl.StringExpr
	// seenComments records which (value, comment) pairs have already been
	// attached, so duplicate comments are added only once.
	seenComments map[elemComment]bool
}

// elemComment identifies one comment token attached to one list element.
type elemComment struct {
	elem, com string
}
|
||||
|
||||
func makeListSquasher() listSquasher {
|
||||
return listSquasher{
|
||||
unique: make(map[string]*bzl.StringExpr),
|
||||
seenComments: make(map[elemComment]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// add merges s into the squasher. The first occurrence of a value is copied
// with empty comment lists; comments from this and later occurrences of the
// same value are appended to the copy, skipping comments already seen for
// that value.
func (ls *listSquasher) add(s *bzl.StringExpr) {
	sCopy, ok := ls.unique[s.Value]
	if !ok {
		// Make a copy of s. We may modify it when we consolidate comments from
		// duplicate strings. We don't want to modify the original in case this
		// function fails (due to a later failed pattern match).
		sCopy = new(bzl.StringExpr)
		*sCopy = *s
		sCopy.Comments.Before = make([]bzl.Comment, 0, len(s.Comments.Before))
		sCopy.Comments.Suffix = make([]bzl.Comment, 0, len(s.Comments.Suffix))
		ls.unique[s.Value] = sCopy
	}
	// Fold in this occurrence's comments, de-duplicated per (value, token).
	for _, c := range s.Comment().Before {
		if key := (elemComment{s.Value, c.Token}); !ls.seenComments[key] {
			sCopy.Comments.Before = append(sCopy.Comments.Before, c)
			ls.seenComments[key] = true
		}
	}
	for _, c := range s.Comment().Suffix {
		if key := (elemComment{s.Value, c.Token}); !ls.seenComments[key] {
			sCopy.Comments.Suffix = append(sCopy.Comments.Suffix, c)
			ls.seenComments[key] = true
		}
	}
}
|
||||
|
||||
func (ls *listSquasher) list() *bzl.ListExpr {
|
||||
sortedExprs := make([]bzl.Expr, 0, len(ls.unique))
|
||||
for _, e := range ls.unique {
|
||||
sortedExprs = append(sortedExprs, e)
|
||||
}
|
||||
sort.Slice(sortedExprs, func(i, j int) bool {
|
||||
return sortedExprs[i].(*bzl.StringExpr).Value < sortedExprs[j].(*bzl.StringExpr).Value
|
||||
})
|
||||
return &bzl.ListExpr{List: sortedExprs}
|
||||
}
|
||||
|
|
@ -1,142 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rule
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Platform represents a GOOS/GOARCH pair. When Platform is used to describe
// sources, dependencies, or flags, either OS or Arch may be empty.
//
// DEPRECATED: do not use outside language/go. This type is Go-specific
// and should be moved to the Go extension.
type Platform struct {
	// OS and Arch hold GOOS and GOARCH values; either may be "" when the
	// constraint applies to only one dimension.
	OS, Arch string
}
|
||||
|
||||
// String returns OS, Arch, or "OS_Arch" if both are set. This must match
|
||||
// the names of config_setting rules in @io_bazel_rules_go//go/platform.
|
||||
func (p Platform) String() string {
|
||||
switch {
|
||||
case p.OS != "" && p.Arch != "":
|
||||
return p.OS + "_" + p.Arch
|
||||
case p.OS != "":
|
||||
return p.OS
|
||||
case p.Arch != "":
|
||||
return p.Arch
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// KnownPlatforms is the set of target platforms that Go supports. Gazelle
// will generate multi-platform build files using these tags. rules_go and
// Bazel may not actually support all of these.
//
// Entries are ordered by OS, then by architecture; the lookup tables below
// are derived from this list in init.
//
// DEPRECATED: do not use outside language/go.
var KnownPlatforms = []Platform{
	{"android", "386"},
	{"android", "amd64"},
	{"android", "arm"},
	{"android", "arm64"},
	{"darwin", "386"},
	{"darwin", "amd64"},
	{"darwin", "arm"},
	{"darwin", "arm64"},
	{"dragonfly", "amd64"},
	{"freebsd", "386"},
	{"freebsd", "amd64"},
	{"freebsd", "arm"},
	{"ios", "386"},
	{"ios", "amd64"},
	{"ios", "arm"},
	{"ios", "arm64"},
	{"linux", "386"},
	{"linux", "amd64"},
	{"linux", "arm"},
	{"linux", "arm64"},
	{"linux", "mips"},
	{"linux", "mips64"},
	{"linux", "mips64le"},
	{"linux", "mipsle"},
	{"linux", "ppc64"},
	{"linux", "ppc64le"},
	{"linux", "s390x"},
	{"nacl", "386"},
	{"nacl", "amd64p32"},
	{"nacl", "arm"},
	{"netbsd", "386"},
	{"netbsd", "amd64"},
	{"netbsd", "arm"},
	{"openbsd", "386"},
	{"openbsd", "amd64"},
	{"openbsd", "arm"},
	{"plan9", "386"},
	{"plan9", "amd64"},
	{"plan9", "arm"},
	{"solaris", "amd64"},
	{"windows", "386"},
	{"windows", "amd64"},
}
|
||||
|
||||
// OSAliases maps an OS name to additional OS names it should be treated as
// (e.g. Android implies Linux, iOS implies Darwin).
// NOTE(review): usage is not visible in this file; presumably this mirrors
// Go build-constraint satisfaction — confirm at call sites.
//
// DEPRECATED: do not use outside language/go.
var OSAliases = map[string][]string{
	"android": {"linux"},
	"ios":     {"darwin"},
}
|
||||
|
||||
// The following lookup tables are all derived from KnownPlatforms in init.
var (
	// KnownOSs is the sorted list of operating systems that Go supports.
	KnownOSs []string

	// KnownOSSet is the set of operating systems that Go supports.
	KnownOSSet map[string]bool

	// KnownArchs is the sorted list of architectures that Go supports.
	KnownArchs []string

	// KnownArchSet is the set of architectures that Go supports.
	KnownArchSet map[string]bool

	// KnownOSArchs is a map from OS to the architectures they run on.
	KnownOSArchs map[string][]string

	// KnownArchOSs is a map from architectures to the OSs that run on them.
	KnownArchOSs map[string][]string
)
|
||||
|
||||
// init derives the KnownOS*/KnownArch* lookup tables above from
// KnownPlatforms.
func init() {
	KnownOSSet = make(map[string]bool)
	KnownArchSet = make(map[string]bool)
	KnownOSArchs = make(map[string][]string)
	KnownArchOSs = make(map[string][]string)
	for _, p := range KnownPlatforms {
		KnownOSSet[p.OS] = true
		KnownArchSet[p.Arch] = true
		KnownOSArchs[p.OS] = append(KnownOSArchs[p.OS], p.Arch)
		KnownArchOSs[p.Arch] = append(KnownArchOSs[p.Arch], p.OS)
	}
	// Flatten the sets into sorted lists.
	KnownOSs = make([]string, 0, len(KnownOSSet))
	KnownArchs = make([]string, 0, len(KnownArchSet))
	for os := range KnownOSSet {
		KnownOSs = append(KnownOSs, os)
	}
	for arch := range KnownArchSet {
		KnownArchs = append(KnownArchs, arch)
	}
	sort.Strings(KnownOSs)
	sort.Strings(KnownArchs)
}
|
||||
|
|
@ -1,245 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rule
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
bzl "github.com/bazelbuild/buildtools/build"
|
||||
)
|
||||
|
||||
// PlatformStrings contains a set of strings associated with a buildable
// target in a package. This is used to store source file names,
// import paths, and flags.
//
// Strings are stored in four sets: generic strings, OS-specific strings,
// arch-specific strings, and OS-and-arch-specific strings. A string may not
// be duplicated within a list or across sets; however, a string may appear
// in more than one list within a set (e.g., in "linux" and "windows" within
// the OS set). Strings within each list should be sorted, though this may
// not be relied upon.
//
// The zero value is an empty set.
//
// DEPRECATED: do not use outside language/go. This type is Go-specific and
// should be moved to the Go extension.
type PlatformStrings struct {
	// Generic is a list of strings not specific to any platform.
	Generic []string

	// OS is a map from OS name (anything in KnownOSs) to
	// OS-specific strings.
	OS map[string][]string

	// Arch is a map from architecture name (anything in KnownArchs) to
	// architecture-specific strings.
	Arch map[string][]string

	// Platform is a map from platforms to OS and architecture-specific strings.
	Platform map[Platform][]string
}
|
||||
|
||||
// HasExt returns whether this set contains a file with the given extension.
|
||||
func (ps *PlatformStrings) HasExt(ext string) bool {
|
||||
return ps.firstExtFile(ext) != ""
|
||||
}
|
||||
|
||||
func (ps *PlatformStrings) IsEmpty() bool {
|
||||
return len(ps.Generic) == 0 && len(ps.OS) == 0 && len(ps.Arch) == 0 && len(ps.Platform) == 0
|
||||
}
|
||||
|
||||
// Flat returns all the strings in the set, sorted and de-duplicated.
|
||||
func (ps *PlatformStrings) Flat() []string {
|
||||
unique := make(map[string]struct{})
|
||||
for _, s := range ps.Generic {
|
||||
unique[s] = struct{}{}
|
||||
}
|
||||
for _, ss := range ps.OS {
|
||||
for _, s := range ss {
|
||||
unique[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for _, ss := range ps.Arch {
|
||||
for _, s := range ss {
|
||||
unique[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for _, ss := range ps.Platform {
|
||||
for _, s := range ss {
|
||||
unique[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
flat := make([]string, 0, len(unique))
|
||||
for s := range unique {
|
||||
flat = append(flat, s)
|
||||
}
|
||||
sort.Strings(flat)
|
||||
return flat
|
||||
}
|
||||
|
||||
func (ps *PlatformStrings) firstExtFile(ext string) string {
|
||||
for _, f := range ps.Generic {
|
||||
if strings.HasSuffix(f, ext) {
|
||||
return f
|
||||
}
|
||||
}
|
||||
for _, fs := range ps.OS {
|
||||
for _, f := range fs {
|
||||
if strings.HasSuffix(f, ext) {
|
||||
return f
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, fs := range ps.Arch {
|
||||
for _, f := range fs {
|
||||
if strings.HasSuffix(f, ext) {
|
||||
return f
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, fs := range ps.Platform {
|
||||
for _, f := range fs {
|
||||
if strings.HasSuffix(f, ext) {
|
||||
return f
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Map applies a function that processes individual strings to the strings
// in "ps" and returns a new PlatformStrings with the result. Empty strings
// returned by the function are dropped.
//
// Errors returned by f are collected and returned; strings whose mapping
// failed are omitted from the result.
func (ps *PlatformStrings) Map(f func(s string) (string, error)) (PlatformStrings, []error) {
	var errors []error
	// mapSlice adapts the per-string f to the slice-level interface used by
	// MapSlice. It never returns an error itself — per-string errors go into
	// the errors slice captured by the closure — so MapSlice's error list
	// (discarded below) is always empty.
	mapSlice := func(ss []string) ([]string, error) {
		rs := make([]string, 0, len(ss))
		for _, s := range ss {
			if r, err := f(s); err != nil {
				errors = append(errors, err)
			} else if r != "" {
				rs = append(rs, r)
			}
		}
		return rs, nil
	}
	result, _ := ps.MapSlice(mapSlice)
	return result, errors
}
|
||||
|
||||
// MapSlice applies a function that processes slices of strings to the strings
// in "ps" and returns a new PlatformStrings with the results.
func (ps *PlatformStrings) MapSlice(f func([]string) ([]string, error)) (PlatformStrings, []error) {
	var errors []error

	// mapSlice adapts f, collecting any error and treating a failed
	// slice as empty.
	mapSlice := func(ss []string) []string {
		rs, err := f(ss)
		if err != nil {
			errors = append(errors, err)
			return nil
		}
		return rs
	}

	// mapStringMap applies mapSlice to each value of an OS- or arch-keyed
	// map, dropping empty results and returning nil for an empty map so
	// the field stays "unset".
	mapStringMap := func(m map[string][]string) map[string][]string {
		if m == nil {
			return nil
		}
		rm := make(map[string][]string)
		for k, ss := range m {
			ss = mapSlice(ss)
			if len(ss) > 0 {
				rm[k] = ss
			}
		}
		if len(rm) == 0 {
			return nil
		}
		return rm
	}

	// mapPlatformMap is the same as mapStringMap for Platform-keyed maps.
	mapPlatformMap := func(m map[Platform][]string) map[Platform][]string {
		if m == nil {
			return nil
		}
		rm := make(map[Platform][]string)
		for k, ss := range m {
			ss = mapSlice(ss)
			if len(ss) > 0 {
				rm[k] = ss
			}
		}
		if len(rm) == 0 {
			return nil
		}
		return rm
	}

	result := PlatformStrings{
		Generic:  mapSlice(ps.Generic),
		OS:       mapStringMap(ps.OS),
		Arch:     mapStringMap(ps.Arch),
		Platform: mapPlatformMap(ps.Platform),
	}
	return result, errors
}
|
||||
|
||||
func (ps PlatformStrings) BzlExpr() bzl.Expr {
|
||||
var pieces []bzl.Expr
|
||||
if len(ps.Generic) > 0 {
|
||||
pieces = append(pieces, ExprFromValue(ps.Generic))
|
||||
}
|
||||
if len(ps.OS) > 0 {
|
||||
pieces = append(pieces, platformStringsOSArchDictExpr(ps.OS))
|
||||
}
|
||||
if len(ps.Arch) > 0 {
|
||||
pieces = append(pieces, platformStringsOSArchDictExpr(ps.Arch))
|
||||
}
|
||||
if len(ps.Platform) > 0 {
|
||||
pieces = append(pieces, platformStringsPlatformDictExpr(ps.Platform))
|
||||
}
|
||||
if len(pieces) == 0 {
|
||||
return &bzl.ListExpr{}
|
||||
} else if len(pieces) == 1 {
|
||||
return pieces[0]
|
||||
} else {
|
||||
e := pieces[0]
|
||||
if list, ok := e.(*bzl.ListExpr); ok {
|
||||
list.ForceMultiLine = true
|
||||
}
|
||||
for _, piece := range pieces[1:] {
|
||||
e = &bzl.BinaryExpr{X: e, Y: piece, Op: "+"}
|
||||
}
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
func platformStringsOSArchDictExpr(m map[string][]string) bzl.Expr {
|
||||
s := make(SelectStringListValue)
|
||||
for key, value := range m {
|
||||
s["@io_bazel_rules_go//go/platform:"+key] = value
|
||||
}
|
||||
s["//conditions:default"] = nil
|
||||
return s.BzlExpr()
|
||||
}
|
||||
|
||||
func platformStringsPlatformDictExpr(m map[Platform][]string) bzl.Expr {
|
||||
s := make(SelectStringListValue)
|
||||
for key, value := range m {
|
||||
s["@io_bazel_rules_go//go/platform:"+key.String()] = value
|
||||
}
|
||||
s["//conditions:default"] = nil
|
||||
return s.BzlExpr()
|
||||
}
|
||||
|
|
@ -1,859 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package rule provides tools for editing Bazel build files. It is intended to
|
||||
// be a more powerful replacement for
|
||||
// github.com/bazelbuild/buildtools/build.Rule, adapted for Gazelle's usage. It
|
||||
// is language agnostic, but it may be used for language-specific rules by
|
||||
// providing configuration.
|
||||
//
|
||||
// File is the primary interface to this package. A File represents an
|
||||
// individual build file. It comprises a list of Rules and a list of Loads.
|
||||
// Rules and Loads may be inserted, modified, or deleted. When all changes
|
||||
// are done, File.Save() may be called to write changes back to a file.
|
||||
package rule
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
bzl "github.com/bazelbuild/buildtools/build"
|
||||
bt "github.com/bazelbuild/buildtools/tables"
|
||||
)
|
||||
|
||||
// File provides editing functionality for a build file. You can create a
// new file with EmptyFile or load an existing file with LoadFile. After
// changes have been made, call Save to write changes back to a file.
type File struct {
	// File is the underlying build file syntax tree. Some editing operations
	// may modify this, but editing is not complete until Sync() is called.
	File *bzl.File

	// function is the underlying syntax tree of a bzl file function.
	// This is used for editing the bzl file function specified by the
	// update-repos -to_macro option.
	function *function

	// Pkg is the Bazel package this build file defines.
	Pkg string

	// Path is the file system path to the build file (same as File.Path).
	Path string

	// DefName is the name of the function definition this File refers to
	// if loaded with LoadMacroFile or a similar function. Normally empty.
	DefName string

	// Directives is a list of configuration directives found in top-level
	// comments in the file. This should not be modified after the file is read.
	Directives []Directive

	// Loads is a list of load statements within the file. This should not
	// be modified directly; use Load methods instead.
	Loads []*Load

	// Rules is a list of rules within the file (or function calls that look like
	// rules). This should not be modified directly; use Rule methods instead.
	Rules []*Rule
}
|
||||
|
||||
// EmptyFile creates a File wrapped around an empty syntax tree.
|
||||
func EmptyFile(path, pkg string) *File {
|
||||
return &File{
|
||||
File: &bzl.File{Path: path, Type: bzl.TypeBuild},
|
||||
Path: path,
|
||||
Pkg: pkg,
|
||||
}
|
||||
}
|
||||
|
||||
// LoadFile loads a build file from disk, parses it, and scans for rules and
// load statements. The syntax tree within the returned File will be modified
// by editing methods.
//
// This function returns I/O and parse errors without modification. It's safe
// to use os.IsNotExist and similar predicates.
func LoadFile(path, pkg string) (*File, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return LoadData(path, pkg, data)
}

// LoadWorkspaceFile is similar to LoadFile but parses the file as a WORKSPACE
// file.
func LoadWorkspaceFile(path, pkg string) (*File, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return LoadWorkspaceData(path, pkg, data)
}

// LoadMacroFile loads a bzl file from disk, parses it, then scans for the load
// statements and the rules called from the given Starlark function. If there is
// no matching function name, then a new function with that name will be created.
// The function's syntax tree will be returned within File and can be modified by
// Sync and Save calls.
func LoadMacroFile(path, pkg, defName string) (*File, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return LoadMacroData(path, pkg, defName, data)
}
|
||||
|
||||
// EmptyMacroFile creates a bzl file at the given path and within the file creates
|
||||
// a Starlark function with the provided name. The function can then be modified
|
||||
// by Sync and Save calls.
|
||||
func EmptyMacroFile(path, pkg, defName string) (*File, error) {
|
||||
_, err := os.Create(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return LoadMacroData(path, pkg, defName, nil)
|
||||
}
|
||||
|
||||
// LoadData parses a build file from a byte slice and scans it for rules and
// load statements. The syntax tree within the returned File will be modified
// by editing methods.
func LoadData(path, pkg string, data []byte) (*File, error) {
	ast, err := bzl.ParseBuild(path, data)
	if err != nil {
		return nil, err
	}
	return ScanAST(pkg, ast), nil
}

// LoadWorkspaceData is similar to LoadData but parses the data as a
// WORKSPACE file.
func LoadWorkspaceData(path, pkg string, data []byte) (*File, error) {
	ast, err := bzl.ParseWorkspace(path, data)
	if err != nil {
		return nil, err
	}
	return ScanAST(pkg, ast), nil
}

// LoadMacroData parses a bzl file from a byte slice and scans for the load
// statements and the rules called from the given Starlark function. If there is
// no matching function name, then a new function will be created, and added to the
// File the next time Sync is called. The function's syntax tree will be returned
// within File and can be modified by Sync and Save calls.
func LoadMacroData(path, pkg, defName string, data []byte) (*File, error) {
	ast, err := bzl.ParseBzl(path, data)
	if err != nil {
		return nil, err
	}
	return ScanASTBody(pkg, defName, ast), nil
}
|
||||
|
||||
// ScanAST creates a File wrapped around the given syntax tree. This tree
// will be modified by editing methods.
func ScanAST(pkg string, bzlFile *bzl.File) *File {
	return ScanASTBody(pkg, "", bzlFile)
}

// function tracks the state of a macro definition being edited.
type function struct {
	// stmt is the function definition's syntax tree.
	stmt *bzl.DefStmt
	// inserted records whether stmt is already part of the file's tree;
	// hasPass records whether the body is a lone "pass" placeholder.
	inserted, hasPass bool
}
|
||||
|
||||
// ScanASTBody creates a File wrapped around the given syntax tree. It will also
// scan the AST for a function matching the given defName, and if the function
// does not exist it will create a new one and mark it to be added to the File
// the next time Sync is called.
func ScanASTBody(pkg, defName string, bzlFile *bzl.File) *File {
	f := &File{
		File:    bzlFile,
		Pkg:     pkg,
		Path:    bzlFile.Path,
		DefName: defName,
	}
	var defStmt *bzl.DefStmt
	f.Rules, f.Loads, defStmt = scanExprs(defName, bzlFile.Stmt)
	if defStmt != nil {
		// The macro exists: its body, not the top level, provides the rules.
		f.Rules, _, _ = scanExprs("", defStmt.Body)
		f.function = &function{
			stmt:     defStmt,
			inserted: true,
		}
		// Detect a placeholder body consisting of a single "pass".
		if len(defStmt.Body) == 1 {
			if v, ok := defStmt.Body[0].(*bzl.BranchStmt); ok && v.Token == "pass" {
				f.function.hasPass = true
			}
		}
	} else if defName != "" {
		// The macro doesn't exist yet; Sync will append it to the file.
		f.function = &function{
			stmt:     &bzl.DefStmt{Name: defName},
			inserted: false,
		}
	}
	// Directives come from the macro definition when editing one,
	// otherwise from the file's top-level comments.
	if f.function != nil {
		f.Directives = ParseDirectivesFromMacro(f.function.stmt)
	} else {
		f.Directives = ParseDirectives(bzlFile)
	}
	return f
}
|
||||
|
||||
func scanExprs(defName string, stmt []bzl.Expr) (rules []*Rule, loads []*Load, fn *bzl.DefStmt) {
|
||||
for i, expr := range stmt {
|
||||
switch expr := expr.(type) {
|
||||
case *bzl.LoadStmt:
|
||||
l := loadFromExpr(i, expr)
|
||||
loads = append(loads, l)
|
||||
case *bzl.CallExpr:
|
||||
if r := ruleFromExpr(i, expr); r != nil {
|
||||
rules = append(rules, r)
|
||||
}
|
||||
case *bzl.DefStmt:
|
||||
if expr.Name == defName {
|
||||
fn = expr
|
||||
}
|
||||
}
|
||||
}
|
||||
return rules, loads, fn
|
||||
}
|
||||
|
||||
// MatchBuildFileName looks for a file in files that has a name from names.
// If there is at least one matching file, a path will be returned by joining
// dir and the first matching name. If there are no matching files, the
// empty string is returned.
func MatchBuildFileName(dir string, names []string, files []os.FileInfo) string {
	for _, candidate := range names {
		for _, info := range files {
			if info.IsDir() || info.Name() != candidate {
				continue
			}
			return filepath.Join(dir, candidate)
		}
	}
	return ""
}
|
||||
|
||||
// SyncMacroFile syncs the file's syntax tree with another file's. This is
// useful for keeping multiple macro definitions from the same .bzl file in sync.
func (f *File) SyncMacroFile(from *File) {
	// Copy the function definition by value, then either overwrite the
	// matching definition already in f or append it as a new statement.
	fromFunc := *from.function.stmt
	_, _, toFunc := scanExprs(from.function.stmt.Name, f.File.Stmt)
	if toFunc != nil {
		*toFunc = fromFunc
	} else {
		f.File.Stmt = append(f.File.Stmt, &fromFunc)
	}
}
|
||||
|
||||
// MacroName returns the name of the macro function that this file is editing,
|
||||
// or an empty string if a macro function is not being edited.
|
||||
func (f *File) MacroName() string {
|
||||
if f.function != nil && f.function.stmt != nil {
|
||||
return f.function.stmt.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Sync writes all changes back to the wrapped syntax tree. This should be
// called after editing operations, before reading the syntax tree again.
func (f *File) Sync() {
	// Partition load statements into inserted, deleted, and surviving
	// sets, compacting f.Loads in place to drop deleted entries.
	var loadInserts, loadDeletes, loadStmts []*stmt
	var r, w int
	for r, w = 0, 0; r < len(f.Loads); r++ {
		s := f.Loads[r]
		s.sync()
		if s.deleted {
			loadDeletes = append(loadDeletes, &s.stmt)
			continue
		}
		if s.inserted {
			loadInserts = append(loadInserts, &s.stmt)
			s.inserted = false
		} else {
			loadStmts = append(loadStmts, &s.stmt)
		}
		f.Loads[w] = s
		w++
	}
	f.Loads = f.Loads[:w]
	// Same partitioning for rules.
	var ruleInserts, ruleDeletes, ruleStmts []*stmt
	for r, w = 0, 0; r < len(f.Rules); r++ {
		s := f.Rules[r]
		s.sync()
		if s.deleted {
			ruleDeletes = append(ruleDeletes, &s.stmt)
			continue
		}
		if s.inserted {
			ruleInserts = append(ruleInserts, &s.stmt)
			s.inserted = false
		} else {
			ruleStmts = append(ruleStmts, &s.stmt)
		}
		f.Rules[w] = s
		w++
	}
	f.Rules = f.Rules[:w]

	if f.function == nil {
		// Plain build file: loads and rules live in the same top-level
		// statement list, so apply all edits in one pass.
		deletes := append(ruleDeletes, loadDeletes...)
		inserts := append(ruleInserts, loadInserts...)
		stmts := append(ruleStmts, loadStmts...)
		updateStmt(&f.File.Stmt, inserts, deletes, stmts)
	} else {
		// Macro file: loads go at the top level, rules inside the
		// function body.
		updateStmt(&f.File.Stmt, loadInserts, loadDeletes, loadStmts)
		// Drop a placeholder "pass" body before inserting real rules.
		if f.function.hasPass && len(ruleInserts) > 0 {
			f.function.stmt.Body = []bzl.Expr{}
			f.function.hasPass = false
		}
		updateStmt(&f.function.stmt.Body, ruleInserts, ruleDeletes, ruleStmts)
		// An empty function body is invalid Starlark; keep a "pass".
		if len(f.function.stmt.Body) == 0 {
			f.function.stmt.Body = append(f.function.stmt.Body, &bzl.BranchStmt{Token: "pass"})
			f.function.hasPass = true
		}
		// Append the function definition to the file on first sync.
		if !f.function.inserted {
			f.File.Stmt = append(f.File.Stmt, f.function.stmt)
			f.function.inserted = true
		}
	}
}
|
||||
|
||||
// updateStmt rebuilds *oldStmt, dropping statements listed in deletes,
// keeping statements listed in stmts, and splicing each insert in at its
// requested index. The index field of inserts and stmts is rewritten to
// each statement's new position.
func updateStmt(oldStmt *[]bzl.Expr, inserts, deletes, stmts []*stmt) {
	sort.Stable(byIndex(deletes))
	sort.Stable(byIndex(inserts))
	sort.Stable(byIndex(stmts))
	newStmt := make([]bzl.Expr, 0, len(*oldStmt)-len(deletes)+len(inserts))
	var ii, di, si int
	for i, stmt := range *oldStmt {
		// Splice in any statements requested at position i.
		for ii < len(inserts) && inserts[ii].index == i {
			inserts[ii].index = len(newStmt)
			newStmt = append(newStmt, inserts[ii].expr)
			ii++
		}
		// Skip statements marked deleted.
		if di < len(deletes) && deletes[di].index == i {
			di++
			continue
		}
		// Record the new position of surviving tracked statements.
		if si < len(stmts) && stmts[si].expr == stmt {
			stmts[si].index = len(newStmt)
			si++
		}
		newStmt = append(newStmt, stmt)
	}
	// Remaining inserts (past the old end) go at the end.
	for ii < len(inserts) {
		inserts[ii].index = len(newStmt)
		newStmt = append(newStmt, inserts[ii].expr)
		ii++
	}
	*oldStmt = newStmt
}
|
||||
|
||||
// Format formats the build file in a form that can be written to disk.
|
||||
// This method calls Sync internally.
|
||||
func (f *File) Format() []byte {
|
||||
f.Sync()
|
||||
return bzl.Format(f.File)
|
||||
}
|
||||
|
||||
// Save writes the build file to disk. This method calls Sync internally.
|
||||
func (f *File) Save(path string) error {
|
||||
f.Sync()
|
||||
data := bzl.Format(f.File)
|
||||
return ioutil.WriteFile(path, data, 0666)
|
||||
}
|
||||
|
||||
// HasDefaultVisibility returns whether the File contains a "package" rule with
|
||||
// a "default_visibility" attribute. Rules generated by Gazelle should not
|
||||
// have their own visibility attributes if this is the case.
|
||||
func (f *File) HasDefaultVisibility() bool {
|
||||
for _, r := range f.Rules {
|
||||
if r.Kind() == "package" && r.Attr("default_visibility") != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// stmt is the common part of Load and Rule: the statement's position in
// the file and its pending-edit flags.
type stmt struct {
	index                      int
	deleted, inserted, updated bool
	expr                       bzl.Expr
}

// Index returns the index for this statement within the build file. For
// inserted rules, this is where the rule will be inserted (rules with the
// same index will be inserted in the order Insert was called). For existing
// rules, this is the index of the original statement.
func (s *stmt) Index() int { return s.index }

// Delete marks this statement for deletion. It will be removed from the
// syntax tree when File.Sync is called.
func (s *stmt) Delete() { s.deleted = true }

// byIndex sorts statements by their position in the file.
type byIndex []*stmt

func (s byIndex) Len() int {
	return len(s)
}

func (s byIndex) Less(i, j int) bool {
	return s[i].index < s[j].index
}

func (s byIndex) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
|
||||
|
||||
// identPair represents one symbol, with or without remapping, in a load
// statement within a build file.
type identPair struct {
	to, from *bzl.Ident
}

// Load represents a load statement within a build file.
type Load struct {
	stmt
	// name is the label of the file being loaded.
	name string
	// symbols maps each local (possibly remapped) name to its ident pair.
	symbols map[string]identPair
}
|
||||
|
||||
// NewLoad creates a new, empty load statement for the given file name.
|
||||
func NewLoad(name string) *Load {
|
||||
return &Load{
|
||||
stmt: stmt{
|
||||
expr: &bzl.LoadStmt{
|
||||
Module: &bzl.StringExpr{Value: name},
|
||||
ForceCompact: true,
|
||||
},
|
||||
},
|
||||
name: name,
|
||||
symbols: make(map[string]identPair),
|
||||
}
|
||||
}
|
||||
|
||||
func loadFromExpr(index int, loadStmt *bzl.LoadStmt) *Load {
|
||||
l := &Load{
|
||||
stmt: stmt{index: index, expr: loadStmt},
|
||||
name: loadStmt.Module.Value,
|
||||
symbols: make(map[string]identPair),
|
||||
}
|
||||
for i := range loadStmt.From {
|
||||
to, from := loadStmt.To[i], loadStmt.From[i]
|
||||
l.symbols[to.Name] = identPair{to: to, from: from}
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// Name returns the name of the file this statement loads.
func (l *Load) Name() string {
	return l.name
}

// Symbols returns a sorted list of symbols this statement loads.
func (l *Load) Symbols() []string {
	syms := make([]string, 0, len(l.symbols))
	for sym := range l.symbols {
		syms = append(syms, sym)
	}
	sort.Strings(syms)
	return syms
}

// Has returns true if sym is loaded by this statement.
func (l *Load) Has(sym string) bool {
	_, ok := l.symbols[sym]
	return ok
}
|
||||
|
||||
// Add inserts a new symbol into the load statement. This has no effect if
|
||||
// the symbol is already loaded. Symbols will be sorted, so the order
|
||||
// doesn't matter.
|
||||
func (l *Load) Add(sym string) {
|
||||
if _, ok := l.symbols[sym]; !ok {
|
||||
i := &bzl.Ident{Name: sym}
|
||||
l.symbols[sym] = identPair{to: i, from: i}
|
||||
l.updated = true
|
||||
}
|
||||
}
|
||||
|
||||
// Remove deletes a symbol from the load statement. This has no effect if
|
||||
// the symbol is not loaded.
|
||||
func (l *Load) Remove(sym string) {
|
||||
if _, ok := l.symbols[sym]; ok {
|
||||
delete(l.symbols, sym)
|
||||
l.updated = true
|
||||
}
|
||||
}
|
||||
|
||||
// IsEmpty returns whether this statement loads any symbols.
func (l *Load) IsEmpty() bool {
	return len(l.symbols) == 0
}

// Insert marks this statement for insertion at the given index. If multiple
// statements are inserted at the same index, they will be inserted in the
// order Insert is called.
func (l *Load) Insert(f *File, index int) {
	l.index = index
	l.inserted = true
	f.Loads = append(f.Loads, l)
}
|
||||
|
||||
// sync rebuilds the underlying LoadStmt from the symbols map. Plain
// symbols are listed first, then remapped ones, each group sorted
// alphabetically.
func (l *Load) sync() {
	if !l.updated {
		return
	}
	l.updated = false

	// args1 and args2 are two different sort groups based on whether a remap of the identifier is present.
	var args1, args2, args []string
	for sym, pair := range l.symbols {
		if pair.from.Name == pair.to.Name {
			args1 = append(args1, sym)
		} else {
			args2 = append(args2, sym)
		}
	}
	sort.Strings(args1)
	sort.Strings(args2)
	args = append(args, args1...)
	args = append(args, args2...)

	loadStmt := l.expr.(*bzl.LoadStmt)
	loadStmt.Module.Value = l.name
	loadStmt.From = make([]*bzl.Ident, 0, len(args))
	loadStmt.To = make([]*bzl.Ident, 0, len(args))
	for _, sym := range args {
		pair := l.symbols[sym]
		loadStmt.From = append(loadStmt.From, pair.from)
		loadStmt.To = append(loadStmt.To, pair.to)
		// A remapped symbol ("to = from") can't be printed in the
		// compact single-line form.
		if pair.from.Name != pair.to.Name {
			loadStmt.ForceCompact = false
		}
	}
}
|
||||
|
||||
// Rule represents a rule statement within a build file.
type Rule struct {
	stmt
	// kind is the function being called (for example, "go_library").
	kind string
	// args holds positional (non-keyword) arguments.
	args []bzl.Expr
	// attrs maps attribute names to their assignment expressions.
	attrs map[string]*bzl.AssignExpr
	// private holds values attached via SetPrivateAttr; never written
	// back to the build file.
	private map[string]interface{}
}
|
||||
|
||||
// NewRule creates a new, empty rule with the given kind and name.
|
||||
func NewRule(kind, name string) *Rule {
|
||||
nameAttr := &bzl.AssignExpr{
|
||||
LHS: &bzl.Ident{Name: "name"},
|
||||
RHS: &bzl.StringExpr{Value: name},
|
||||
Op: "=",
|
||||
}
|
||||
r := &Rule{
|
||||
stmt: stmt{
|
||||
expr: &bzl.CallExpr{
|
||||
X: &bzl.Ident{Name: kind},
|
||||
List: []bzl.Expr{nameAttr},
|
||||
},
|
||||
},
|
||||
kind: kind,
|
||||
attrs: map[string]*bzl.AssignExpr{"name": nameAttr},
|
||||
private: map[string]interface{}{},
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func ruleFromExpr(index int, expr bzl.Expr) *Rule {
|
||||
call, ok := expr.(*bzl.CallExpr)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
x, ok := call.X.(*bzl.Ident)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
kind := x.Name
|
||||
var args []bzl.Expr
|
||||
attrs := make(map[string]*bzl.AssignExpr)
|
||||
for _, arg := range call.List {
|
||||
if attr, ok := arg.(*bzl.AssignExpr); ok {
|
||||
key := attr.LHS.(*bzl.Ident) // required by parser
|
||||
attrs[key.Name] = attr
|
||||
} else {
|
||||
args = append(args, arg)
|
||||
}
|
||||
}
|
||||
return &Rule{
|
||||
stmt: stmt{
|
||||
index: index,
|
||||
expr: call,
|
||||
},
|
||||
kind: kind,
|
||||
args: args,
|
||||
attrs: attrs,
|
||||
private: map[string]interface{}{},
|
||||
}
|
||||
}
|
||||
|
||||
// ShouldKeep returns whether the rule is marked with a "# keep" comment. Rules
// that are kept should not be modified. This does not check whether
// subexpressions within the rule should be kept.
func (r *Rule) ShouldKeep() bool {
	return ShouldKeep(r.expr)
}

// Kind returns the kind of rule this is (for example, "go_library").
func (r *Rule) Kind() string {
	return r.kind
}

// SetKind changes the kind of rule this is.
func (r *Rule) SetKind(kind string) {
	r.kind = kind
	r.updated = true
}

// Name returns the value of the rule's "name" attribute if it is a string
// or "" if the attribute does not exist or is not a string.
func (r *Rule) Name() string {
	return r.AttrString("name")
}

// SetName sets the value of the rule's "name" attribute.
func (r *Rule) SetName(name string) {
	r.SetAttr("name", name)
}
|
||||
|
||||
// AttrKeys returns a sorted list of attribute keys used in this rule.
|
||||
func (r *Rule) AttrKeys() []string {
|
||||
keys := make([]string, 0, len(r.attrs))
|
||||
for k := range r.attrs {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.SliceStable(keys, func(i, j int) bool {
|
||||
if cmp := bt.NamePriority[keys[i]] - bt.NamePriority[keys[j]]; cmp != 0 {
|
||||
return cmp < 0
|
||||
}
|
||||
return keys[i] < keys[j]
|
||||
})
|
||||
return keys
|
||||
}
|
||||
|
||||
// Attr returns the value of the named attribute. nil is returned when the
// attribute is not set.
func (r *Rule) Attr(key string) bzl.Expr {
	attr, ok := r.attrs[key]
	if !ok {
		return nil
	}
	return attr.RHS
}

// AttrString returns the value of the named attribute if it is a scalar string.
// "" is returned if the attribute is not set or is not a string.
func (r *Rule) AttrString(key string) string {
	attr, ok := r.attrs[key]
	if !ok {
		return ""
	}
	str, ok := attr.RHS.(*bzl.StringExpr)
	if !ok {
		return ""
	}
	return str.Value
}

// AttrStrings returns the string values of an attribute if it is a list.
// nil is returned if the attribute is not set or is not a list. Non-string
// values within the list won't be returned.
func (r *Rule) AttrStrings(key string) []string {
	attr, ok := r.attrs[key]
	if !ok {
		return nil
	}
	list, ok := attr.RHS.(*bzl.ListExpr)
	if !ok {
		return nil
	}
	strs := make([]string, 0, len(list.List))
	for _, e := range list.List {
		if str, ok := e.(*bzl.StringExpr); ok {
			strs = append(strs, str.Value)
		}
	}
	return strs
}
|
||||
|
||||
// DelAttr removes the named attribute from the rule.
|
||||
func (r *Rule) DelAttr(key string) {
|
||||
delete(r.attrs, key)
|
||||
r.updated = true
|
||||
}
|
||||
|
||||
// SetAttr adds or replaces the named attribute with an expression produced
|
||||
// by ExprFromValue.
|
||||
func (r *Rule) SetAttr(key string, value interface{}) {
|
||||
rhs := ExprFromValue(value)
|
||||
if attr, ok := r.attrs[key]; ok {
|
||||
attr.RHS = rhs
|
||||
} else {
|
||||
r.attrs[key] = &bzl.AssignExpr{
|
||||
LHS: &bzl.Ident{Name: key},
|
||||
RHS: rhs,
|
||||
Op: "=",
|
||||
}
|
||||
}
|
||||
r.updated = true
|
||||
}
|
||||
|
||||
// PrivateAttrKeys returns a sorted list of private attribute names.
func (r *Rule) PrivateAttrKeys() []string {
	keys := make([]string, 0, len(r.private))
	for k := range r.private {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

// PrivateAttr return the private value associated with a key.
// nil is returned when the key is not set.
func (r *Rule) PrivateAttr(key string) interface{} {
	return r.private[key]
}

// SetPrivateAttr associates a value with a key. Unlike SetAttr, this value
// is not converted to a build syntax tree and will not be written to a build
// file.
func (r *Rule) SetPrivateAttr(key string, value interface{}) {
	r.private[key] = value
}

// Args returns positional arguments passed to a rule.
func (r *Rule) Args() []bzl.Expr {
	return r.args
}
|
||||
|
||||
// Insert marks this statement for insertion at the end of the file. Multiple
|
||||
// statements will be inserted in the order Insert is called.
|
||||
func (r *Rule) Insert(f *File) {
|
||||
// TODO(jayconrod): should rules always be inserted at the end? Should there
|
||||
// be some sort order?
|
||||
var stmt []bzl.Expr
|
||||
if f.function == nil {
|
||||
stmt = f.File.Stmt
|
||||
} else {
|
||||
stmt = f.function.stmt.Body
|
||||
}
|
||||
r.index = len(stmt)
|
||||
r.inserted = true
|
||||
f.Rules = append(f.Rules, r)
|
||||
}
|
||||
|
||||
// IsEmpty returns true when the rule contains none of the attributes in attrs
|
||||
// for its kind. attrs should contain attributes that make the rule buildable
|
||||
// like srcs or deps and not descriptive attributes like name or visibility.
|
||||
func (r *Rule) IsEmpty(info KindInfo) bool {
|
||||
if info.NonEmptyAttrs == nil {
|
||||
return false
|
||||
}
|
||||
for k := range info.NonEmptyAttrs {
|
||||
if _, ok := r.attrs[k]; ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *Rule) sync() {
|
||||
if !r.updated {
|
||||
return
|
||||
}
|
||||
r.updated = false
|
||||
|
||||
for _, k := range []string{"srcs", "deps"} {
|
||||
if attr, ok := r.attrs[k]; ok {
|
||||
bzl.Walk(attr.RHS, sortExprLabels)
|
||||
}
|
||||
}
|
||||
|
||||
call := r.expr.(*bzl.CallExpr)
|
||||
call.X.(*bzl.Ident).Name = r.kind
|
||||
if len(r.attrs) > 1 {
|
||||
call.ForceMultiLine = true
|
||||
}
|
||||
|
||||
list := make([]bzl.Expr, 0, len(r.args)+len(r.attrs))
|
||||
list = append(list, r.args...)
|
||||
for _, attr := range r.attrs {
|
||||
list = append(list, attr)
|
||||
}
|
||||
sortedAttrs := list[len(r.args):]
|
||||
key := func(e bzl.Expr) string { return e.(*bzl.AssignExpr).LHS.(*bzl.Ident).Name }
|
||||
sort.SliceStable(sortedAttrs, func(i, j int) bool {
|
||||
ki := key(sortedAttrs[i])
|
||||
kj := key(sortedAttrs[j])
|
||||
if cmp := bt.NamePriority[ki] - bt.NamePriority[kj]; cmp != 0 {
|
||||
return cmp < 0
|
||||
}
|
||||
return ki < kj
|
||||
})
|
||||
|
||||
call.List = list
|
||||
r.updated = false
|
||||
}
|
||||
|
||||
// ShouldKeep returns whether e is marked with a "# keep" comment. Kept
|
||||
// expressions should not be removed or modified.
|
||||
func ShouldKeep(e bzl.Expr) bool {
|
||||
for _, c := range append(e.Comment().Before, e.Comment().Suffix...) {
|
||||
text := strings.TrimSpace(strings.TrimPrefix(c.Token, "#"))
|
||||
if text == "keep" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CheckInternalVisibility overrides the given visibility if the package is
// internal: a package under an internal/ directory is visible only to
// subpackages of the directory containing internal/.
func CheckInternalVisibility(rel, visibility string) string {
	switch i := strings.LastIndex(rel, "/internal/"); {
	case i >= 0:
		return fmt.Sprintf("//%s:__subpackages__", rel[:i])
	case strings.HasPrefix(rel, "internal/"):
		return "//:__subpackages__"
	}
	return visibility
}
|
||||
|
||||
// byAttrName sorts attribute key/value pairs by buildifier name priority,
// then alphabetically by key.
type byAttrName []KeyValue

var _ sort.Interface = byAttrName{}

func (s byAttrName) Len() int {
	return len(s)
}

func (s byAttrName) Less(i, j int) bool {
	if cmp := bt.NamePriority[s[i].Key] - bt.NamePriority[s[j].Key]; cmp != 0 {
		return cmp < 0
	}
	return s[i].Key < s[j].Key
}

func (s byAttrName) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
|
||||
|
|
@ -1,114 +0,0 @@
|
|||
/* Copyright 2017 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rule
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
bzl "github.com/bazelbuild/buildtools/build"
|
||||
)
|
||||
|
||||
// sortExprLabels sorts lists of strings using the same order as buildifier.
// Buildifier also sorts string lists, but not those involved with "select"
// expressions. This function is intended to be used with bzl.Walk.
func sortExprLabels(e bzl.Expr, _ []bzl.Expr) {
	list, ok := e.(*bzl.ListExpr)
	if !ok || len(list.List) == 0 {
		return
	}

	// Build a sort key for every element up front.
	keys := make([]stringSortKey, len(list.List))
	for i, elem := range list.List {
		s, ok := elem.(*bzl.StringExpr)
		if !ok {
			return // don't sort lists unless all elements are strings
		}
		keys[i] = makeSortKey(i, s)
	}

	// Comments attached before the first element are detached so they stay
	// at the top of the list instead of moving with that element, then
	// re-attached ahead of whichever element ends up first after sorting.
	before := keys[0].x.Comment().Before
	keys[0].x.Comment().Before = nil
	sort.Sort(byStringExpr(keys))
	keys[0].x.Comment().Before = append(before, keys[0].x.Comment().Before...)
	// Write the sorted expressions back into the list in place.
	for i, k := range keys {
		list.List[i] = k.x
	}
}
|
||||
|
||||
// Code below this point is adapted from
|
||||
// github.com/bazelbuild/buildtools/build/rewrite.go
|
||||
|
||||
// A stringSortKey records information about a single string literal to be
// sorted. The strings are first grouped into four phases: most strings,
// strings beginning with ":", strings beginning with "//", and strings
// beginning with "@". The next significant part of the comparison is the list
// of elements in the value, where elements are split at `.' and `:'. Finally
// we compare by value and break ties by original index.
type stringSortKey struct {
	// phase is 0 for most strings, 1 for ":…", 2 for "//…", 3 for "@…".
	phase int
	// split is the value broken at "." and ":" separators.
	split []string
	// value is the raw string literal being sorted.
	value string
	// original is the element's index before sorting, used as the final
	// tie-breaker so the sort is stable with respect to the input order.
	original int
	// x is the underlying expression, written back after sorting.
	x bzl.Expr
}
|
||||
|
||||
func makeSortKey(index int, x *bzl.StringExpr) stringSortKey {
|
||||
key := stringSortKey{
|
||||
value: x.Value,
|
||||
original: index,
|
||||
x: x,
|
||||
}
|
||||
|
||||
switch {
|
||||
case strings.HasPrefix(x.Value, ":"):
|
||||
key.phase = 1
|
||||
case strings.HasPrefix(x.Value, "//"):
|
||||
key.phase = 2
|
||||
case strings.HasPrefix(x.Value, "@"):
|
||||
key.phase = 3
|
||||
}
|
||||
|
||||
key.split = strings.Split(strings.Replace(x.Value, ":", ".", -1), ".")
|
||||
return key
|
||||
}
|
||||
|
||||
// byStringExpr implements sort.Interface for a list of stringSortKey.
|
||||
type byStringExpr []stringSortKey
|
||||
|
||||
func (x byStringExpr) Len() int { return len(x) }
|
||||
func (x byStringExpr) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byStringExpr) Less(i, j int) bool {
|
||||
xi := x[i]
|
||||
xj := x[j]
|
||||
|
||||
if xi.phase != xj.phase {
|
||||
return xi.phase < xj.phase
|
||||
}
|
||||
for k := 0; k < len(xi.split) && k < len(xj.split); k++ {
|
||||
if xi.split[k] != xj.split[k] {
|
||||
return xi.split[k] < xj.split[k]
|
||||
}
|
||||
}
|
||||
if len(xi.split) != len(xj.split) {
|
||||
return len(xi.split) < len(xj.split)
|
||||
}
|
||||
if xi.value != xj.value {
|
||||
return xi.value < xj.value
|
||||
}
|
||||
return xi.original < xj.original
|
||||
}
|
||||
|
|
@ -1,56 +0,0 @@
|
|||
/* Copyright 2018 The Bazel Authors. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rule
|
||||
|
||||
// LoadInfo describes a file that Gazelle knows about and the symbols
// it defines.
type LoadInfo struct {
	// Name identifies the file to load (presumably a Bazel label such as
	// "//:def.bzl" — confirm against callers).
	Name string

	// Symbols lists the symbols this file defines.
	Symbols []string

	// After lists other names related to load ordering; presumably loads
	// that must appear before this one in a build file — TODO confirm.
	After []string
}
|
||||
|
||||
// KindInfo stores metadata for a kind of rule, for example, "go_library".
type KindInfo struct {
	// MatchAny is true if a rule of this kind may be matched with any rule
	// of the same kind, regardless of attributes, if exactly one rule is
	// present in a build file.
	MatchAny bool

	// MatchAttrs is a list of attributes used in matching. For example,
	// for go_library, this list contains "importpath". Attributes are matched
	// in order.
	MatchAttrs []string

	// NonEmptyAttrs is a set of attributes that, if present, disqualify a rule
	// from being deleted after merge.
	NonEmptyAttrs map[string]bool

	// SubstituteAttrs is a set of attributes that should be substituted
	// after matching and before merging. For example, suppose generated rule A
	// references B via an "embed" attribute, and B matches against rule C.
	// The label for B in A's "embed" must be substituted with a label for C.
	// "embed" would need to be in this set.
	SubstituteAttrs map[string]bool

	// MergeableAttrs is a set of attributes that should be merged before
	// dependency resolution. See rule.Merge.
	MergeableAttrs map[string]bool

	// ResolveAttrs is a set of attributes that should be merged after
	// dependency resolution. See rule.Merge.
	ResolveAttrs map[string]bool
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue