Compare commits

55 commits, comparing master ... v1.8.3-dev.

Commit SHA1s (author and date columns were empty in this view):
0a8299cf03, 1d169b4815, f2d355ced2, 2c6c74e71e, 03e63e04e0, 9e4232ef27, b92aeff384, c003e29c68,
6c69d0d810, fdeb7376ea, 412091f86a, 723ac6dfe0, bfe0d82e3a, 599782297e, b1f08b3a28, 6853aca682,
2913110c1d, 80b1547a85, 92b94b9799, 2f2f70d777, ff4906a475, 84a52d80fd, 5531dd96c4, d2f91cb92e,
921e27873a, 39bf8c2e22, 739b3c65e0, c90c78eaee, 5f9e19891c, a358b5553d, 3417339a88, 7c1673855e,
76affd1c7c, c9c752d1cb, 7ce4758061, e71fd9d317, 0121ec1618, 9ba57954c3, b507682cfa, 9e82a2c6c0,
7be7776cdb, 9e0c6c09aa, 9049b2a1e4, 2f8e326fc9, 07aa477ece, 718e1d4712, e00132222e, 082a1370fc,
725bce048e, 11dafd49ad, ba5191db91, 2d49b7844e, 19510aab27, bdaa5e390e, 1c25a26fad

@@ -10,11 +10,49 @@ on:
jobs:
  build:
    name: Build binaries
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    outputs:
      version_major: ${{ steps.build_info.outputs.version_major }}
      version_minor: ${{ steps.build_info.outputs.version_minor }}
      version_patch: ${{ steps.build_info.outputs.version_patch }}
      image_tag: ${{ steps.build_info.outputs.image_tag }}

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - id: build_info
        name: Declare build info
        run: |
          version_major=''
          version_minor=''
          version_patch=''
          image_tag=''

          branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}
          ref=${{ github.ref }}
          if [[ "$ref" =~ 'refs/tags/' ]]; then
            version=$(sed -E 's/^v([0-9]*\.[0-9]*\.[0-9]*).*$/\1/' <<<$ref )
            version_major=$(cut -d. -f1 <<<$version)
            version_minor=$(cut -d. -f2 <<<$version)
            version_patch=$(cut -d. -f3 <<<$version)
            image_tag=${{ github.ref_name }}
          elif [[ "$ref" =~ 'refs/heads/' ]]; then
            image_tag="${branch}-head"
          fi

          echo "version_major=${version_major}" >>$GITHUB_OUTPUT
          echo "version_minor=${version_minor}" >>$GITHUB_OUTPUT
          echo "version_patch=${version_patch}" >>$GITHUB_OUTPUT
          echo "image_tag=${image_tag}" >>$GITHUB_OUTPUT

          cat <<EOF
          version_major=${version_major}
          version_minor=${version_minor}
          version_patch=${version_patch}
          image_tag=${image_tag}
          EOF

      # Build binaries
      - name: Run ci
        run: make ci

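As a side note, a minimal sketch of how the sed/cut chain in the build_info step splits a version out of a tag string; the tag value here is only an illustrative example, not taken from a workflow run:

    # Illustrative only: split an example tag string the same way the step above does.
    tag='v1.8.3'                                                         # hypothetical tag name
    version=$(sed -E 's/^v([0-9]*\.[0-9]*\.[0-9]*).*$/\1/' <<<"$tag")    # -> 1.8.3
    version_major=$(cut -d. -f1 <<<"$version")                           # -> 1
    version_minor=$(cut -d. -f2 <<<"$version")                           # -> 8
    version_patch=$(cut -d. -f3 <<<"$version")                           # -> 3
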
@@ -25,45 +63,21 @@ jobs:
          flags: unittests
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload binaries
        uses: actions/upload-artifact@v4
        with:
          name: binaries_artifact
          path: ./bin/*

  build_push_image:
    name: Build and push image
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    needs: build
    if: ${{ startsWith(github.ref, 'refs/heads/') || startsWith(github.ref, 'refs/tags/') }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Download binaries
        uses: actions/download-artifact@v4
        with:
          name: binaries_artifact
          path: ./bin/

      - name: Add executable permission
        run: |
          chmod +x ./bin/*

      - name: Copy bin folder to package
        run: |
          cp -r ./bin ./package/

      # For multi-platform support
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Declare branch
        run: |
          echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> "$GITHUB_ENV"

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:

@@ -71,24 +85,9 @@ jobs:
          password: ${{ secrets.DOCKER_PASSWORD }}

      # longhornio/longhorn-share-manager image
      - name: docker-publish
        if: ${{ startsWith(github.ref, 'refs/heads/') }}
        uses: docker/build-push-action@v5
        with:
          context: ./
          push: true
          platforms: linux/amd64,linux/arm64
          tags: longhornio/longhorn-share-manager:${{ env.branch }}-head
          file: package/Dockerfile
          sbom: true

      - name: docker-publish-with-tag
        if: ${{ startsWith(github.ref, 'refs/tags/') }}
        uses: docker/build-push-action@v5
        with:
          context: ./
          push: true
          platforms: linux/amd64,linux/arm64
          tags: longhornio/longhorn-share-manager:${{ github.ref_name }}
          file: package/Dockerfile
          sbom: true
      - name: Build and publish image
        env:
          REPO: docker.io/longhornio
          TAG: ${{ needs.build.outputs.image_tag }}
          TARGET_PLATFORMS: linux/amd64,linux/arm64
        run: make workflow-image-build-push

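Taken together with the Makefile target it calls, the replacement step publishes ${REPO}/longhorn-share-manager:${TAG}. Under the values shown above (REPO=docker.io/longhornio, TAG from build_info), example refs would map roughly as follows; the branch and tag names are illustrative only:

    # Illustrative mapping; branch and tag names are example values.
    #   refs/heads/master -> image_tag=master-head -> docker.io/longhornio/longhorn-share-manager:master-head
    #   refs/tags/v1.8.3  -> image_tag=v1.8.3      -> docker.io/longhornio/longhorn-share-manager:v1.8.3
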
@@ -1,16 +1,20 @@
FROM registry.suse.com/bci/golang:1.23

ARG DAPPER_HOST_ARCH
ARG SRC_BRANCH=main
ARG SRC_TAG
ARG http_proxy
ARG https_proxy

ENV HOST_ARCH=${DAPPER_HOST_ARCH} ARCH=${DAPPER_HOST_ARCH}
ENV PATH /go/bin:$PATH
ENV DAPPER_DOCKER_SOCKET true
ENV DAPPER_ENV TAG REPO DRONE_REPO DRONE_PULL_REQUEST DRONE_COMMIT_REF
ENV DAPPER_ENV TAG REPO DRONE_REPO DRONE_PULL_REQUEST DRONE_COMMIT_REF ARCHS
ENV DAPPER_OUTPUT bin coverage.out
ENV DAPPER_RUN_ARGS --privileged -v /dev:/host/dev -v /proc:/host/proc
ENV DAPPER_SOURCE /go/src/github.com/longhorn/longhorn-share-manager
ENV SRC_BRANCH ${SRC_BRANCH}
ENV SRC_TAG ${SRC_TAG}

ENV GOLANGCI_LINT_VERSION="v1.60.3"

Makefile (25 changed lines)

@@ -1,4 +1,13 @@
PROJECT := longhorn-share-manager
TARGETS := $(shell ls scripts)
MACHINE := longhorn
# Define the target platforms that can be used across the ecosystem.
# Note that what would actually be used for a given project will be
# defined in TARGET_PLATFORMS, and must be a subset of the below:
DEFAULT_PLATFORMS := linux/amd64,linux/arm64

export SRC_BRANCH := $(shell bash -c 'source <(curl -s "https://raw.githubusercontent.com/longhorn/dep-versions/main/scripts/common.sh") && get_branch')
export SRC_TAG := $(shell git tag --points-at HEAD | head -n 1)

.dapper:
	@echo Downloading dapper

@@ -10,6 +19,22 @@ TARGETS := $(shell ls scripts)
$(TARGETS): .dapper
	./.dapper $@

.PHONY: buildx-machine
buildx-machine:
	@docker buildx create --name=$(MACHINE) --platform=$(DEFAULT_PLATFORMS) 2>/dev/null || true
	docker buildx inspect $(MACHINE)

# variables needed from GHA caller:
# - REPO: image repo, include $registry/$repo_path
# - TAG: image tag
# - TARGET_PLATFORMS: optional, to be passed for buildx's --platform option
# - IID_FILE_FLAG: optional, options to generate image ID file
.PHONY: workflow-image-build-push workflow-image-build-push-secure
workflow-image-build-push: buildx-machine
	MACHINE=$(MACHINE) PUSH='true' IMAGE_NAME=$(PROJECT) bash scripts/package
workflow-image-build-push-secure: buildx-machine
	MACHINE=$(MACHINE) PUSH='true' IMAGE_NAME=$(PROJECT) IS_SECURE=true bash scripts/package

.DEFAULT_GOAL := ci

.PHONY: $(TARGETS)

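The comment block in the Makefile above lists the variables the GitHub Actions caller is expected to provide. A hypothetical manual invocation mirroring the workflow step (the values are illustrative) could look like this:

    # Illustrative values; REPO, TAG and TARGET_PLATFORMS mirror the workflow's env block.
    REPO=docker.io/longhornio \
    TAG=v1.8.3-dev \
    TARGET_PLATFORMS=linux/amd64,linux/arm64 \
    make workflow-image-build-push
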
@@ -1,6 +1,7 @@
package cmd

import (
    "fmt"
    "net"
    "os"
    "os/signal"

@@ -16,6 +17,7 @@ import (

    "github.com/longhorn/longhorn-share-manager/pkg/rpc"
    "github.com/longhorn/longhorn-share-manager/pkg/server"
    "github.com/longhorn/longhorn-share-manager/pkg/types"
    "github.com/longhorn/longhorn-share-manager/pkg/util"
    "github.com/longhorn/longhorn-share-manager/pkg/volume"
)

@@ -33,6 +35,11 @@ func ServerCmd() cli.Command {
            Usage: "The volume to export via the nfs server",
            Required: true,
        },
        cli.StringFlag{
            Name: "data-engine",
            Usage: "The volume data engine",
            Required: true,
        },
        cli.BoolFlag{
            Name: "encrypted",
            Usage: "signals that a volume is encrypted",

@@ -84,6 +91,7 @@ func ServerCmd() cli.Command {
        Action: func(c *cli.Context) {
            vol := volume.Volume{
                Name: c.String("volume"),
                DataEngine: c.String("data-engine"),
                Passphrase: c.String("passphrase"),
                CryptoKeyCipher: c.String("crytpokeycipher"),
                CryptoKeyHash: c.String("crytpokeyhash"),

@@ -106,6 +114,11 @@ func ServerCmd() cli.Command {

func start(vol volume.Volume) error {
    logger := util.NewLogger()
    if vol.DataEngine != types.DataEngineTypeV1 && vol.DataEngine != types.DataEngineTypeV2 {
        logger.Errorf("Invalid data engine value: %s", vol.DataEngine)
        return fmt.Errorf("invalid data engine value: %s", vol.DataEngine)
    }

    manager, err := server.NewShareManager(logger, vol)
    if err != nil {
        return err

go.mod (100 changed lines)

@@ -2,7 +2,7 @@ module github.com/longhorn/longhorn-share-manager

go 1.23.0

toolchain go1.23.4
toolchain go1.23.6

// Replace directives are required for dependencies in this section because:
// - This module imports k8s.io/kubernetes.

@@ -19,60 +19,60 @@ toolchain go1.23.4
// the portions of k8s.io/kubernetes code this module actually uses, not all of the replace directives may strictly be
// necessary. However, it is better to include all of them for consistency.
replace (
    k8s.io/api => k8s.io/api v0.32.0
    k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.0
    k8s.io/apimachinery => k8s.io/apimachinery v0.32.0
    k8s.io/apiserver => k8s.io/apiserver v0.32.0
    k8s.io/cli-runtime => k8s.io/cli-runtime v0.32.0
    k8s.io/client-go => k8s.io/client-go v0.32.0
    k8s.io/cloud-provider => k8s.io/cloud-provider v0.32.0
    k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.32.0
    k8s.io/code-generator => k8s.io/code-generator v0.32.0
    k8s.io/component-base => k8s.io/component-base v0.32.0
    k8s.io/component-helpers => k8s.io/component-helpers v0.32.0
    k8s.io/controller-manager => k8s.io/controller-manager v0.32.0
    k8s.io/cri-api => k8s.io/cri-api v0.32.0
    k8s.io/cri-client => k8s.io/cri-client v0.32.0
    k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.32.0
    k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.32.0
    k8s.io/endpointslice => k8s.io/endpointslice v0.32.0
    k8s.io/kms => k8s.io/kms v0.32.0
    k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.32.0
    k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.32.0
    k8s.io/kube-proxy => k8s.io/kube-proxy v0.32.0
    k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.32.0
    k8s.io/kubectl => k8s.io/kubectl v0.32.0
    k8s.io/kubelet => k8s.io/kubelet v0.32.0
    k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.8
    k8s.io/metrics => k8s.io/metrics v0.32.0
    k8s.io/mount-utils => k8s.io/mount-utils v0.32.0
    k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.32.0
    k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.32.0
    k8s.io/api => k8s.io/api v0.32.3
    k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.3
    k8s.io/apimachinery => k8s.io/apimachinery v0.32.3
    k8s.io/apiserver => k8s.io/apiserver v0.32.3
    k8s.io/cli-runtime => k8s.io/cli-runtime v0.32.3
    k8s.io/client-go => k8s.io/client-go v0.32.3
    k8s.io/cloud-provider => k8s.io/cloud-provider v0.32.3
    k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.32.3
    k8s.io/code-generator => k8s.io/code-generator v0.32.3
    k8s.io/component-base => k8s.io/component-base v0.32.3
    k8s.io/component-helpers => k8s.io/component-helpers v0.32.3
    k8s.io/controller-manager => k8s.io/controller-manager v0.32.3
    k8s.io/cri-api => k8s.io/cri-api v0.32.3
    k8s.io/cri-client => k8s.io/cri-client v0.32.3
    k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.32.3
    k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.32.3
    k8s.io/endpointslice => k8s.io/endpointslice v0.32.3
    k8s.io/kms => k8s.io/kms v0.32.3
    k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.32.3
    k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.32.3
    k8s.io/kube-proxy => k8s.io/kube-proxy v0.32.3
    k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.32.3
    k8s.io/kubectl => k8s.io/kubectl v0.32.3
    k8s.io/kubelet => k8s.io/kubelet v0.32.3
    k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.14
    k8s.io/metrics => k8s.io/metrics v0.32.3
    k8s.io/mount-utils => k8s.io/mount-utils v0.32.3
    k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.32.3
    k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.32.3
)

require (
    github.com/google/fscrypt v0.3.5
    github.com/longhorn/go-common-libs v0.0.0-20241217170247-b8a12cee7f65
    github.com/longhorn/types v0.0.0-20241217083824-2e0ecb487ccf
    github.com/longhorn/go-common-libs v0.0.0-20250725014231-2c0bac610814
    github.com/longhorn/types v0.0.0-20250710112743-e3a1e9e2a9c1
    github.com/mitchellh/go-ps v1.0.0
    github.com/pkg/errors v0.9.1
    github.com/sirupsen/logrus v1.9.3
    github.com/urfave/cli v1.22.16
    golang.org/x/net v0.33.0
    golang.org/x/sys v0.28.0
    google.golang.org/grpc v1.69.0
    google.golang.org/protobuf v1.36.0
    k8s.io/api v0.32.0
    k8s.io/apimachinery v0.32.0
    k8s.io/client-go v0.32.0
    k8s.io/kubernetes v1.32.0
    k8s.io/mount-utils v0.32.0
    k8s.io/utils v0.0.0-20241210054802-24370beab758
    github.com/urfave/cli v1.22.17
    golang.org/x/net v0.38.0
    golang.org/x/sys v0.31.0
    google.golang.org/grpc v1.73.0
    google.golang.org/protobuf v1.36.6
    k8s.io/api v0.32.3
    k8s.io/apimachinery v0.32.3
    k8s.io/client-go v0.32.3
    k8s.io/kubernetes v1.32.3
    k8s.io/mount-utils v0.32.3
    k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
)

require (
    github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/emicklei/go-restful/v3 v3.11.0 // indirect
    github.com/fsnotify/fsnotify v1.7.0 // indirect

@@ -85,7 +85,7 @@ require (
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/google/gnostic-models v0.6.8 // indirect
    github.com/google/go-cmp v0.6.0 // indirect
    github.com/google/go-cmp v0.7.0 // indirect
    github.com/google/gofuzz v1.2.0 // indirect
    github.com/google/uuid v1.6.0 // indirect
    github.com/josharian/intern v1.0.0 // indirect

@@ -102,12 +102,12 @@ require (
    github.com/shirou/gopsutil/v3 v3.24.5 // indirect
    github.com/x448/float16 v0.8.4 // indirect
    github.com/yusufpapurcu/wmi v1.2.4 // indirect
    golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect
    golang.org/x/oauth2 v0.23.0 // indirect
    golang.org/x/term v0.27.0 // indirect
    golang.org/x/text v0.21.0 // indirect
    golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
    golang.org/x/oauth2 v0.28.0 // indirect
    golang.org/x/term v0.30.0 // indirect
    golang.org/x/text v0.23.0 // indirect
    golang.org/x/time v0.7.0 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
    gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum (116 changed lines)

@ -1,10 +1,10 @@
|
|||
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 h1:SjZ2GvvOononHOpK84APFuMvxqsk3tEIaKH/z4Rpu3g=
|
||||
github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -41,8 +41,8 @@ github.com/google/fscrypt v0.3.5/go.mod h1:HyY8Z/kUPrnIKAwuhjrn2tSTM5/s9zfRRTqRM
|
|||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
|
@ -63,10 +63,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20241217170247-b8a12cee7f65 h1:8lLw5EGeU3roroeg+fN6B2LCnm0Z425tWpwbA8vzmZk=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20241217170247-b8a12cee7f65/go.mod h1:LiwSR56HRbeCok7CXYUO3lOwgAW7Spg73qIkM3ZyTJw=
|
||||
github.com/longhorn/types v0.0.0-20241217083824-2e0ecb487ccf h1:7TFVWpGl0MB9osrliT+j6+Jdr1cAuR/U2noPx0H9yYY=
|
||||
github.com/longhorn/types v0.0.0-20241217083824-2e0ecb487ccf/go.mod h1:3jHuVDtpkXQzpnp4prguDBskVRric2kmF8aSPkRJ4jw=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250725014231-2c0bac610814 h1:9qcRV8oIOS2J+QWLIk//BxsTMTaM63LceKCXTPG3f8Y=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250725014231-2c0bac610814/go.mod h1:c7BqzWJP4iuRwl/Di+OPGIfC18SoIXy5+SokJsmmKMg=
|
||||
github.com/longhorn/types v0.0.0-20250710112743-e3a1e9e2a9c1 h1:Lox/NlebN9jOc9JXokB270iyeMlyUw9gRePBy5LKwz0=
|
||||
github.com/longhorn/types v0.0.0-20250710112743-e3a1e9e2a9c1/go.mod h1:3bhH8iUZGZT3kA/B1DYMGzpdzfacqeexOt4SHo4/C2I=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
|
||||
|
@ -115,43 +115,45 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ=
|
||||
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/urfave/cli v1.22.17 h1:SYzXoiPfQjHBbkYxbew5prZHS1TOLT3ierW8SYLqtVQ=
|
||||
github.com/urfave/cli v1.22.17/go.mod h1:b0ht0aqgH/6pBYzzxURyrM4xXNgsoT/n2ZzwQiEhNVo=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
|
||||
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
||||
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
|
||||
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
|
||||
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
|
||||
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
|
||||
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
|
||||
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4=
|
||||
golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
|
||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
|
||||
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -161,32 +163,32 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
|
||||
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
|
||||
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
|
||||
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
|
||||
google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
|
||||
google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
|
||||
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
|
||||
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
@ -198,22 +200,22 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
|||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
|
||||
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
|
||||
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
|
||||
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
|
||||
k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
|
||||
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
|
||||
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
|
||||
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
|
||||
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
|
||||
k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
|
||||
k8s.io/kubernetes v1.32.0 h1:4BDBWSolqPrv8GC3YfZw0CJvh5kA1TPnoX0FxDVd+qc=
|
||||
k8s.io/kubernetes v1.32.0/go.mod h1:tiIKO63GcdPRBHW2WiUFm3C0eoLczl3f7qi56Dm1W8I=
|
||||
k8s.io/mount-utils v0.32.0 h1:KOQAhPzJICATXnc6XCkWoexKbkOexRnMCUW8APFfwg4=
|
||||
k8s.io/mount-utils v0.32.0/go.mod h1:Kun5c2svjAPx0nnvJKYQWhfeNW+O0EpzHgRhDcYoSY0=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/kubernetes v1.32.3 h1:2A58BlNME8NwsMawmnM6InYo3Jf35Nw5G79q46kXwoA=
|
||||
k8s.io/kubernetes v1.32.3/go.mod h1:GvhiBeolvSRzBpFlgM0z/Bbu3Oxs9w3P6XfEgYaMi8k=
|
||||
k8s.io/mount-utils v0.32.3 h1:ZPXXHblfBhYP89OnaozpFg9Ojl6HhDfxBLcdWNkaxW8=
|
||||
k8s.io/mount-utils v0.32.3/go.mod h1:Kun5c2svjAPx0nnvJKYQWhfeNW+O0EpzHgRhDcYoSY0=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
|
|
|
@@ -1,6 +1,21 @@
# syntax=docker/dockerfile:1.12.1
# syntax=docker/dockerfile:1.13.0
FROM registry.suse.com/bci/golang:1.23 AS app_builder

FROM registry.suse.com/bci/bci-base:15.6 AS build
WORKDIR /app

# Copy the build script and source code
COPY . /app

# Make the build script executable
RUN chmod +x /app/scripts/build

# Run the build script
RUN /app/scripts/build

FROM registry.suse.com/bci/bci-base:15.6 AS lib_builder

ARG SRC_BRANCH=main
ARG SRC_TAG

RUN zypper -n ref && \
    zypper update -y

@@ -14,43 +29,30 @@ RUN zypper -n addrepo --refresh https://download.opensuse.org/repositories/syste

RUN zypper -n install autoconf bison curl cmake doxygen make git gcc14 gcc14-c++ flex Mesa-libGL-devel libdbus-1-3 \
    nfsidmap-devel liburcu-devel libblkid-devel e2fsprogs e2fsprogs-devel xfsprogs xfsprogs-devel \
    tar gzip dbus-1-devel lsb-release graphviz-devel libnsl-devel libcurl-devel libjson-c-devel libacl-devel && \
    tar gzip dbus-1-devel lsb-release graphviz-devel libnsl-devel libcurl-devel libjson-c-devel libacl-devel jq && \
    rm -rf /var/cache/zypp/*

RUN curl -L https://github.com/rancher/nfs-ganesha/archive/refs/tags/v6_20241216.tar.gz | tar zx \
    && curl -L https://github.com/nfs-ganesha/ntirpc/archive/refs/tags/v6.0.1.tar.gz | tar zx \
    && mv nfs-ganesha-6_20241216 nfs-ganesha \
    && rm -r nfs-ganesha/src/libntirpc \
    && mv ntirpc-6.0.1 nfs-ganesha/src/libntirpc
WORKDIR /nfs-ganesha
RUN git clone https://github.com/longhorn/dep-versions.git -b ${SRC_BRANCH} /usr/src/dep-versions && \
    cd /usr/src/dep-versions && \
    if [ -n "${SRC_TAG}" ] && git show-ref --tags ${SRC_TAG} > /dev/null 2>&1; then \
        echo "Checking out tag ${SRC_TAG}"; \
        cd /usr/src/dep-versions && git checkout tags/${SRC_TAG}; \
    fi

# build ganesha only supporting nfsv4 and vfs
# Set NFS_V4_RECOV_ROOT to /tmp we don't support recovery in this release
# we disable dbus (-DUSE_DBUS=OFF) for the single share manager since we don't use dynamic exports
ENV CC=/usr/bin/gcc-14 \
    CXX=/usr/bin/g++-14
RUN mkdir -p /usr/local \
    && cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_CONFIG=vfs_only \
    -DUSE_DBUS=OFF -DUSE_NLM=OFF -DUSE_RQUOTA=OFF -DUSE_9P=OFF -D_MSPAC_SUPPORT=OFF -DRPCBIND=OFF \
    -DUSE_RADOS_RECOV=OFF -DRADOS_URLS=OFF -DUSE_FSAL_VFS=ON -DUSE_FSAL_XFS=OFF \
    -DUSE_FSAL_PROXY_V4=OFF -DUSE_FSAL_PROXY_V3=OFF -DUSE_FSAL_LUSTRE=OFF -DUSE_FSAL_LIZARDFS=OFF \
    -DUSE_FSAL_KVSFS=OFF -DUSE_FSAL_CEPH=OFF -DUSE_FSAL_GPFS=OFF -DUSE_FSAL_PANFS=OFF -DUSE_FSAL_GLUSTER=OFF \
    -DUSE_GSS=NO -DHAVE_ACL_GET_FD_NP=ON -DHAVE_ACL_SET_FD_NP=ON \
    -DCMAKE_INSTALL_PREFIX=/usr/local src/ \
    && make \
    && make install
RUN mkdir -p /ganesha-extra \
    && mkdir -p /ganesha-extra/etc/dbus-1/system.d \
    && cp src/scripts/ganeshactl/org.ganesha.nfsd.conf /ganesha-extra/etc/dbus-1/system.d/
# Build nfs-ganesha
RUN export REPO_OVERRIDE="" && \
    export COMMIT_ID_OVERRIDE="" && \
    bash /usr/src/dep-versions/scripts/build-nfs-ganesha.sh "${REPO_OVERRIDE}" "${COMMIT_ID_OVERRIDE}"

FROM registry.suse.com/bci/bci-base:15.6

FROM registry.suse.com/bci/bci-base:15.6 AS release

ARG TARGETPLATFORM
RUN if [ "$TARGETPLATFORM" != "linux/amd64" ] && [ "$TARGETPLATFORM" != "linux/arm64" ]; then \
    echo "Error: Unsupported TARGETPLATFORM: $TARGETPLATFORM" && \
    exit 1; \
    fi
ENV ARCH ${TARGETPLATFORM#linux/}
ENV ARCH=${TARGETPLATFORM#linux/}

RUN zypper -n ref && \
    zypper update -y

@@ -59,7 +61,8 @@ RUN zypper addrepo --refresh https://download.opensuse.org/repositories/system:/
    zypper --gpg-auto-import-keys ref

# RUN microdnf install -y nano tar lsof e2fsprogs fuse-libs libss libblkid userspace-rcu dbus-x11 rpcbind hostname nfs-utils xfsprogs jemalloc libnfsidmap && microdnf clean all
RUN zypper -n install rpcbind hostname libblkid1 liburcu6 libjson-c* dbus-1-x11 dbus-1 nfsidmap-devel nfs-kernel-server nfs-client nfs4-acl-tools xfsprogs e2fsprogs awk && \
RUN zypper -n install rpcbind hostname libblkid1 liburcu6 libjson-c* dbus-1-x11 dbus-1 nfsidmap-devel \
    nfs-kernel-server nfs-client nfs4-acl-tools xfsprogs e2fsprogs awk && \
    rm -rf /var/cache/zypp/*

RUN mkdir -p /var/run/dbus && mkdir -p /export

@@ -73,9 +76,9 @@ RUN sed -i s/systemd// /etc/nsswitch.conf
# ganesha reads /etc/mtab for mounted volumes
RUN ln -sf /proc/self/mounts /etc/mtab

COPY --from=build /usr/local /usr/local/
COPY --from=build /ganesha-extra /
COPY package/bin/longhorn-share-manager-${ARCH} /longhorn-share-manager
COPY --from=lib_builder /usr/local /usr/local/
COPY --from=lib_builder /ganesha-extra /
COPY --from=app_builder /app/bin/longhorn-share-manager-${ARCH} /longhorn-share-manager

# run ldconfig after libs have been copied
RUN ldconfig

@@ -29,8 +29,9 @@ func EncryptVolume(devicePath, passphrase, keyCipher, keyHash, keySize, pbkdf st
}

// OpenVolume opens volume so that it can be used by the client.
func OpenVolume(volume, devicePath, passphrase string) error {
    devPath := types.GetVolumeDevicePath(volume, true)
// devicePath is the path of the volume on the host that will be opened for instance '/dev/longhorn/volume1'
func OpenVolume(volume, dataEngine, devicePath, passphrase string) error {
    devPath := types.GetVolumeDevicePath(volume, dataEngine, true)
    if isOpen, _ := IsDeviceOpen(devPath); isOpen {
        logrus.Debugf("Device %s is already opened at %s", devicePath, devPath)
        return nil

@@ -42,33 +43,36 @@ func OpenVolume(volume, devicePath, passphrase string) error {
        return err
    }

    logrus.Debugf("Opening device %s with LUKS on %s", devicePath, volume)
    _, err = nsexec.LuksOpen(volume, devicePath, passphrase, lhtypes.LuksTimeout)
    encryptedDevName := types.GetEncryptVolumeName(volume, dataEngine)
    logrus.Debugf("Opening device %s with LUKS on %s", devicePath, encryptedDevName)
    _, err = nsexec.LuksOpen(encryptedDevName, devicePath, passphrase, lhtypes.LuksTimeout)
    if err != nil {
        logrus.WithError(err).Warnf("Failed to open LUKS device %s", devicePath)
        logrus.WithError(err).Warnf("Failed to open LUKS device %s to %s", devicePath, encryptedDevName)
    }
    return err
}

// CloseVolume closes encrypted volume so it can be detached.
func CloseVolume(volume string) error {
func CloseVolume(volume, dataEngine string) error {
    namespaces := []lhtypes.Namespace{lhtypes.NamespaceMnt, lhtypes.NamespaceIpc}
    nsexec, err := lhns.NewNamespaceExecutor(lhtypes.ProcessNone, lhtypes.HostProcDirectory, namespaces)
    if err != nil {
        return err
    }

    logrus.Debugf("Closing LUKS device %s", volume)
    _, err = nsexec.LuksClose(volume, lhtypes.LuksTimeout)
    deviceName := types.GetEncryptVolumeName(volume, dataEngine)
    logrus.Debugf("Closing LUKS device %s", deviceName)
    _, err = nsexec.LuksClose(deviceName, lhtypes.LuksTimeout)
    return err
}

func ResizeEncryptoDevice(volume, passphrase string) error {
    devPath := types.GetVolumeDevicePath(volume, true)
func ResizeEncryptoDevice(volume, dataEngine, passphrase string) error {
    // devPath is the full path of the encrypted device on the host that will be resized
    devPath := types.GetVolumeDevicePath(volume, dataEngine, true)
    if isOpen, err := IsDeviceOpen(devPath); err != nil {
        return err
    } else if !isOpen {
        return fmt.Errorf("volume %v encrypto device is closed for resizing", volume)
        return fmt.Errorf("volume %v encrypto device %s is closed for resizing", volume, devPath)
    }

    namespaces := []lhtypes.Namespace{lhtypes.NamespaceMnt, lhtypes.NamespaceIpc}

@@ -77,7 +81,7 @@ func ResizeEncryptoDevice(volume, passphrase string) error {
        return err
    }

    _, err = nsexec.LuksResize(volume, passphrase, lhtypes.LuksTimeout)
    _, err = nsexec.LuksResize(types.GetEncryptVolumeName(volume, dataEngine), passphrase, lhtypes.LuksTimeout)
    return err
}

@@ -101,6 +105,17 @@ func DeviceEncryptionStatus(devicePath string) (mappedDevice, mapper string, err
        return devicePath, "", err
    }

    // Check the mapper device using `cryptsetup status`. Sample output:
    // /dev/mapper/pvc-e2a1c50a-9409-4afd-9e7d-3c1f5a9afa7f is active and is in use.
    // type: LUKS2
    // cipher: aes-xts-plain64
    // keysize: 256 bits
    // key location: keyring
    // device: /dev/sda
    // sector size: 512
    // offset: 32768 sectors
    // size: 110592 sectors
    // mode: read/write
    volume := strings.TrimPrefix(devicePath, types.MapperDevPath+"/")
    stdout, err := nsexec.LuksStatus(volume, lhtypes.LuksTimeout)
    if err != nil {

@@ -111,7 +126,11 @@ func DeviceEncryptionStatus(devicePath string) (mappedDevice, mapper string, err
    if len(lines) < 1 {
        return "", "", fmt.Errorf("device encryption status returned no stdout for %s", devicePath)
    }
    if !strings.HasSuffix(lines[0], " is active.") {

    // There are two possible cases to an encrypted mapper device:
    // - "/path/to/device is active.": the mapper device is activated by `cryptsetup luksOpen`
    // - "/path/to/device is active and is in use.": the activated device is mounted or exposed (e.g., NFS)
    if !strings.Contains(lines[0], " is active") {
        // Implies this is not a LUKS device
        return devicePath, "", nil
    }

@@ -69,7 +69,7 @@ func (s *ShareManagerServer) FilesystemTrim(ctx context.Context, req *smrpc.File
        }
    }()

    devicePath := types.GetVolumeDevicePath(vol.Name, req.EncryptedDevice)
    devicePath := types.GetVolumeDevicePath(vol.Name, vol.DataEngine, req.EncryptedDevice)
    if !volume.CheckDeviceValid(devicePath) {
        return &emptypb.Empty{}, grpcstatus.Errorf(grpccodes.FailedPrecondition, "volume %v is not valid", vol.Name)
    }

@@ -134,7 +134,7 @@ func (s *ShareManagerServer) FilesystemResize(ctx context.Context, req *emptypb.
        }
    }()

    devicePath := types.GetVolumeDevicePath(vol.Name, vol.IsEncrypted())
    devicePath := types.GetVolumeDevicePath(vol.Name, vol.DataEngine, vol.IsEncrypted())
    if !volume.CheckDeviceValid(devicePath) {
        return &emptypb.Empty{}, grpcstatus.Errorf(grpccodes.FailedPrecondition, "volume %v is not valid", vol.Name)
    }

@@ -151,17 +151,18 @@ func (s *ShareManagerServer) FilesystemResize(ctx context.Context, req *emptypb.

    // Note that cryptsetup resize is only necessary for volumes resized while online. For offline, it will happen automatically during 'open'.
    if vol.IsEncrypted() {
        diskFormat, err := volume.GetDiskFormat(devicePath)
        rawDevicePath := types.GetRawVolumeDevicePath(vol.Name)
        diskFormat, err := volume.GetDiskFormat(rawDevicePath)
        if err != nil {
            return &emptypb.Empty{}, grpcstatus.Errorf(grpccodes.Internal, "failed to determine disk format of volume %v: %v", vol.Name, err)
        }
        log.Infof("Device %v contains filesystem of format %v", devicePath, diskFormat)
        log.WithField("mappedDevice", devicePath).Infof("Encrypted volume device %v contains filesystem of format %v", rawDevicePath, diskFormat)

        if diskFormat != "crypto_LUKS" {
            return &emptypb.Empty{}, grpcstatus.Errorf(grpccodes.InvalidArgument, "unsupported disk encryption format %v", diskFormat)
        }

        if err = crypto.ResizeEncryptoDevice(vol.Name, vol.Passphrase); err != nil {
        if err = crypto.ResizeEncryptoDevice(vol.Name, vol.DataEngine, vol.Passphrase); err != nil {
            return &emptypb.Empty{}, grpcstatus.Errorf(grpccodes.Internal, "failed to resize crypto device %v for volume %v node expansion: %v", devicePath, vol.Name, err)
        }
    }

@@ -309,7 +310,7 @@ func (s *ShareManagerServer) Mount(ctx context.Context, req *emptypb.Empty) (res

    log.Info("Mounting and exporting volume")

    devicePath := types.GetVolumeDevicePath(vol.Name, false)
    devicePath := types.GetVolumeDevicePath(vol.Name, vol.DataEngine, false)
    mountPath := types.GetMountPath(vol.Name)

    defer func() {

@@ -388,6 +389,16 @@ func (s *ShareManagerHealthCheckServer) Watch(req *healthpb.HealthCheckRequest,
    }
}

func (s *ShareManagerHealthCheckServer) List(context.Context, *healthpb.HealthListRequest) (*healthpb.HealthListResponse, error) {
    return &healthpb.HealthListResponse{
        Statuses: map[string]*healthpb.HealthCheckResponse{
            "grpc": {
                Status: healthpb.HealthCheckResponse_SERVING,
            },
        },
    }, nil
}

func nfsServerIsRunning() bool {
    _, err := util.FindProcessByName("ganesha.nfsd")
    return err == nil

@@ -103,7 +103,7 @@ func NewShareManager(logger logrus.FieldLogger, volume volume.Volume) (*ShareMan
func (m *ShareManager) Run() error {
    vol := m.volume
    mountPath := types.GetMountPath(vol.Name)
    devicePath := types.GetVolumeDevicePath(vol.Name, false)
    devicePath := types.GetVolumeDevicePath(vol.Name, vol.DataEngine, false)

    defer func() {
        // if the server is exiting, try to unmount & teardown device before we terminate the container

@@ -201,9 +201,9 @@ func (m *ShareManager) setupDevice(vol volume.Volume, devicePath string) (string
        }
    }

    cryptoDevice := types.GetVolumeDevicePath(vol.Name, true)
    cryptoDevice := types.GetVolumeDevicePath(vol.Name, vol.DataEngine, true)
    m.logger.Infof("Volume %s requires crypto device %s", vol.Name, cryptoDevice)
    if err := crypto.OpenVolume(vol.Name, devicePath, vol.Passphrase); err != nil {
    if err := crypto.OpenVolume(vol.Name, vol.DataEngine, devicePath, vol.Passphrase); err != nil {
        m.logger.WithError(err).Error("Failed to open encrypted volume")
        return "", err
    }

@@ -217,12 +217,12 @@ func (m *ShareManager) setupDevice(vol volume.Volume, devicePath string) (string

func (m *ShareManager) tearDownDevice(vol volume.Volume) error {
    // close any matching crypto device for this volume
    cryptoDevice := types.GetVolumeDevicePath(vol.Name, true)
    cryptoDevice := types.GetVolumeDevicePath(vol.Name, vol.DataEngine, true)
    if isOpen, err := crypto.IsDeviceOpen(cryptoDevice); err != nil {
        return err
    } else if isOpen {
        m.logger.Infof("Volume %s has active crypto device %s", vol.Name, cryptoDevice)
        if err := crypto.CloseVolume(vol.Name); err != nil {
        if err := crypto.CloseVolume(vol.Name, vol.DataEngine); err != nil {
            return err
        }
        m.logger.Infof("Volume %s closed active crypto device %s", vol.Name, cryptoDevice)

@@ -12,13 +12,35 @@ const (
    DevPath = "/dev"
    MapperDevPath = "/dev/mapper"

    // The suffix for dm device name of the encrypted v2 volume.
    MapperV2VolumeSuffix = "-encrypted"

    ExportPath = "/export"

    DataEngineTypeV1 = "v1"
    DataEngineTypeV2 = "v2"
)

func GetVolumeDevicePath(volumeName string, EncryptedDevice bool) string {
func GetEncryptVolumeName(volume, dataEngine string) string {
    if dataEngine == DataEngineTypeV2 {
        return volume + MapperV2VolumeSuffix
    }
    return volume
}

func GetVolumeDevicePath(volumeName, dataEngine string, EncryptedDevice bool) string {
    if EncryptedDevice {
        if dataEngine == DataEngineTypeV2 {
            // v2 volume will use a dm device as default to control IO path when attaching. This dm device will be created with the same name as the volume name.
            // The encrypted volume will be created with the volume name with "-encrypted" suffix to resolve the naming conflict.
            return path.Join(MapperDevPath, GetEncryptVolumeName(volumeName, dataEngine))
        }
        return path.Join(MapperDevPath, volumeName)
    }
    return GetRawVolumeDevicePath(volumeName)
}

func GetRawVolumeDevicePath(volumeName string) string {
    return filepath.Join(DevPath, "longhorn", volumeName)
}

@@ -11,6 +11,7 @@ import (

type Volume struct {
    Name string
    DataEngine string
    Passphrase string
    CryptoKeyCipher string
    CryptoKeyHash string

@@ -19,7 +19,23 @@ fi
cd $(dirname $0)/..

mkdir -p bin
archs=("amd64" "arm64")
for arch in "${archs[@]}"; do
    CGO_ENABLED=0 GOARCH="$arch" go build -o "bin/longhorn-share-manager-$arch" -ldflags "$LINKFLAGS" $COVER $COVERPKG

ARCHS=${ARCHS:-''}
if [[ -z "${ARCHS}" ]]; then
    case $(uname -m) in
    aarch64 | arm64)
        ARCHS=(arm64)
        ;;
    x86_64)
        ARCHS=(amd64)
        ;;
    *)
        echo "$(uname -a): unsupported architecture"
        exit 1
    esac
else
    IFS=' ' read -r -a ARCHS <<<"${ARCHS}"
fi
for arch in "${ARCHS[@]}"; do
    CGO_ENABLED=0 GOARCH="${arch}" go build -o "bin/longhorn-share-manager-${arch}" -ldflags "${LINKFLAGS}" ${COVER} ${COVERPKG}
done

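With this change the build script derives ARCHS from uname -m when it is unset, and accepts a space-separated override. A hypothetical direct invocation for a multi-arch build:

    # Illustrative only; normally the script runs inside dapper via make.
    ARCHS="amd64 arm64" ./scripts/build
    # expected outputs: bin/longhorn-share-manager-amd64 and bin/longhorn-share-manager-arm64
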
@@ -5,25 +5,92 @@ source $(dirname $0)/version

cd $(dirname $0)/..

PROJECT=`basename "$PWD"`
PROJECT=$(basename "${PWD}")

if [ ! -x ./bin/longhorn ]; then
    ./scripts/build
command -v buildx >/dev/null && BUILD_CMD=(buildx) || BUILD_CMD=(docker buildx)

# read configurable parameters
REPO=${REPO:-longhornio}
IMAGE_NAME=${IMAGE_NAME:-${PROJECT}}
TAG=${TAG:-''}
PUSH=${PUSH:-'false'}
IS_SECURE=${IS_SECURE:-'false'}
MACHINE=${MACHINE:-''}
TARGET_PLATFORMS=${TARGET_PLATFORMS:-''}
IID_FILE=${IID_FILE:-''}
IID_FILE_FLAG=${IID_FILE_FLAG:-''}
SRC_BRANCH=${SRC_BRANCH:-''}
SRC_TAG=${SRC_TAG:-''}

BUILDER_ARGS=()
[[ $MACHINE ]] && BUILDER_ARGS+=('--builder' "$MACHINE")

BUILDX_ARGS=()

case $(uname -m) in
aarch64 | arm64)
    BUILDERARCH=arm64
    ;;
x86_64)
    BUILDERARCH=amd64
    ;;
*)
    echo "$(uname -a): unsupported architecture"
    exit 1
esac

if [[ ${TARGET_PLATFORMS} ]] ; then
    IFS='/' read -r OS ARCH <<<"${TARGET_PLATFORMS}"
    BUILDX_ARGS+=('--platform' "${TARGET_PLATFORMS}")
else
    BUILDX_ARGS+=('--platform' "linux/${BUILDERARCH}")
fi

cp -r bin package/
if [[ -z $TAG ]]; then
    if API_VERSION=$(./bin/longhorn-share-manager-"${BUILDERARCH}" version --client-only | jq ".clientVersion.apiVersion"); then
        TAG="v${API_VERSION}_$(date -u +%Y%m%d)"
    else
        TAG="${VERSION}"
    fi
fi

APIVERSION=`./bin/longhorn-share-manager version --client-only|jq ".clientVersion.apiVersion"`
TAG=${TAG:-"v${APIVERSION}_`date -u +%Y%m%d`"}
REPO=${REPO:-longhornio}
IMAGE=${REPO}/${PROJECT}:${TAG}
IMAGE="${REPO}/${IMAGE_NAME}:${TAG}"

# update base image to get latest changes
BASE_IMAGE=`grep FROM package/Dockerfile | grep -v AS | awk '{print $2}'`
docker pull ${BASE_IMAGE}
IFS=' ' read -r -a IID_FILE_ARGS <<<"${IID_FILE_FLAG}"
[[ -n "${IID_FILE}" && ${#IID_FILE_ARGS} == 0 ]] && IID_FILE_ARGS=('--iidfile' "${IID_FILE}")

buildx build --load -t ${IMAGE} -f package/Dockerfile .
if [[ "${PUSH}" == 'true' ]]; then
    BUILDX_ARGS+=('--push')
else
    BUILDX_ARGS+=('--load')
fi

echo Built ${IMAGE}
[[ $IS_SECURE == 'true' ]] && BUILDX_ARGS+=('--sbom=true' '--attest' 'type=provenance,mode=max')

echo ${IMAGE} > ./bin/latest_image
IMAGE_ARGS=()
[[ -n "${SRC_BRANCH}" ]] && IMAGE_ARGS+=(--build-arg SRC_BRANCH="${SRC_BRANCH}")
[[ -n "${SRC_TAG}" ]] && IMAGE_ARGS+=(--build-arg SRC_TAG="${SRC_TAG}")

# update base IMAGE to get latest changes
grep 'FROM.*/' package/Dockerfile | awk '{print $2}' | while read -r STAGE_BASE_IMAGE
do
    docker pull "${STAGE_BASE_IMAGE}"
done
BASE_IMAGE=$(grep 'FROM.*AS release' package/Dockerfile | awk '{print $2}')

echo "Building image ${IMAGE} based on ${BASE_IMAGE} with ARCH=${ARCH} SRC_BRANCH=${SRC_BRANCH} SRC_TAG=${SRC_TAG}"
IMAGE_BUILD_CMD_ARGS=(
    build --no-cache \
    "${BUILDER_ARGS[@]}" \
    "${IID_FILE_ARGS[@]}" \
    "${BUILDX_ARGS[@]}" \
    "${IMAGE_ARGS[@]}" \
    -t "${IMAGE}" -f package/Dockerfile .
)
echo "${BUILD_CMD[@]}" "${IMAGE_BUILD_CMD_ARGS[@]}"
"${BUILD_CMD[@]}" "${IMAGE_BUILD_CMD_ARGS[@]}"

echo "Built ${IMAGE}"

mkdir -p ./bin
echo "${IMAGE}" > ./bin/latest_image

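The rewritten scripts/package is driven entirely by environment variables. A hypothetical local, single-arch, no-push invocation (values are illustrative, not taken from CI):

    # Illustrative only; in CI the Makefile's workflow-image-build-push target sets these via dapper.
    MACHINE=longhorn \
    IMAGE_NAME=longhorn-share-manager \
    REPO=docker.io/longhornio \
    TAG=dev-test \
    PUSH=false \
    ./scripts/package
    # The resulting image reference is written to ./bin/latest_image.
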
@@ -1,3 +1,4 @@
// Package md2man aims in converting markdown into roff (man pages).
package md2man

import (

|
|||
tableStart = "\n.TS\nallbox;\n"
|
||||
tableEnd = ".TE\n"
|
||||
tableCellStart = "T{\n"
|
||||
tableCellEnd = "\nT}\n"
|
||||
tableCellEnd = "\nT}"
|
||||
tablePreprocessor = `'\" t`
|
||||
)
|
||||
|
||||
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
|
||||
// from markdown
|
||||
func NewRoffRenderer() *roffRenderer { // nolint: golint
|
||||
func NewRoffRenderer() *roffRenderer {
|
||||
return &roffRenderer{}
|
||||
}
|
||||
|
||||
|
@ -104,7 +104,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
|
|||
node.Parent.Prev.Type == blackfriday.Heading &&
|
||||
node.Parent.Prev.FirstChild != nil &&
|
||||
bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) {
|
||||
before, after, found := bytes.Cut(node.Literal, []byte(" - "))
|
||||
before, after, found := bytesCut(node.Literal, []byte(" - "))
|
||||
escapeSpecialChars(w, before)
|
||||
if found {
|
||||
out(w, ` \- `)
|
||||
|
@ -316,9 +316,8 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente
|
|||
} else if nodeLiteralSize(node) > 30 {
|
||||
end = tableCellEnd
|
||||
}
|
||||
if node.Next == nil && end != tableCellEnd {
|
||||
// Last cell: need to carriage return if we are at the end of the
|
||||
// header row and content isn't wrapped in a "tablecell"
|
||||
if node.Next == nil {
|
||||
// Last cell: need to carriage return if we are at the end of the header row.
|
||||
end += crTag
|
||||
}
|
||||
out(w, end)
|
||||
|
@ -356,7 +355,7 @@ func countColumns(node *blackfriday.Node) int {
|
|||
}
|
||||
|
||||
func out(w io.Writer, output string) {
|
||||
io.WriteString(w, output) // nolint: errcheck
|
||||
io.WriteString(w, output) //nolint:errcheck
|
||||
}
|
||||
|
||||
func escapeSpecialChars(w io.Writer, text []byte) {
|
||||
|
@ -395,7 +394,7 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
|
|||
i++
|
||||
}
|
||||
if i > org {
|
||||
w.Write(text[org:i]) // nolint: errcheck
|
||||
w.Write(text[org:i]) //nolint:errcheck
|
||||
}
|
||||
|
||||
// escape a character
|
||||
|
@ -403,6 +402,15 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) {
|
|||
break
|
||||
}
|
||||
|
||||
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
|
||||
w.Write([]byte{'\\', text[i]}) //nolint:errcheck
|
||||
}
|
||||
}
|
||||
|
||||
// bytesCut is a copy of [bytes.Cut] to provide compatibility with go1.17
|
||||
// and older. We can remove this once we drop support for go1.17 and older.
|
||||
func bytesCut(s, sep []byte) (before, after []byte, found bool) {
|
||||
if i := bytes.Index(s, sep); i >= 0 {
|
||||
return s[:i], s[i+len(sep):], true
|
||||
}
|
||||
return s, nil, false
|
||||
}
|
||||
|
|
|
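The bytesCut helper added above is a drop-in copy of the standard library's bytes.Cut for pre-1.18 toolchains. Below is a minimal sketch of how the NAME-section split behaves; the helper is copied verbatim from the hunk, while the sample input string is an assumption for illustration.

```go
package main

import (
	"bytes"
	"fmt"
)

// bytesCut behaves like bytes.Cut: it splits s around the first instance of sep.
func bytesCut(s, sep []byte) (before, after []byte, found bool) {
	if i := bytes.Index(s, sep); i >= 0 {
		return s[:i], s[i+len(sep):], true
	}
	return s, nil, false
}

func main() {
	// A typical NAME section line: "name - short description".
	before, after, found := bytesCut([]byte("md2man - convert markdown to roff"), []byte(" - "))
	fmt.Println(string(before), "|", string(after), "|", found) // md2man | convert markdown to roff | true
}
```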
@@ -19,6 +19,7 @@ const (

    tbFunc  // func(T) bool
    ttbFunc // func(T, T) bool
    ttiFunc // func(T, T) int
    trbFunc // func(T, R) bool
    tibFunc // func(T, I) bool
    trFunc  // func(T) R

@@ -28,11 +29,13 @@ const (
    Transformer       = trFunc  // func(T) R
    ValueFilter       = ttbFunc // func(T, T) bool
    Less              = ttbFunc // func(T, T) bool
    Compare           = ttiFunc // func(T, T) int
    ValuePredicate    = tbFunc  // func(T) bool
    KeyValuePredicate = trbFunc // func(T, R) bool
)

var boolType = reflect.TypeOf(true)
var intType = reflect.TypeOf(0)

// IsType reports whether the reflect.Type is of the specified function type.
func IsType(t reflect.Type, ft funcType) bool {

@@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool {
        if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
            return true
        }
    case ttiFunc: // func(T, T) int
        if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType {
            return true
        }
    case trbFunc: // func(T, R) bool
        if ni == 2 && no == 1 && t.Out(0) == boolType {
            return true

@@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
        if t := s.curPath.Index(-2).Type(); t.Name() != "" {
            // Named type with unexported fields.
            name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
            if _, ok := reflect.New(t).Interface().(error); ok {
            isProtoMessage := func(t reflect.Type) bool {
                m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect")
                return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 &&
                    m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" &&
                    m.Type.Out(0).Name() == "Message"
            }
            if isProtoMessage(t) {
                help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types`
            } else if _, ok := reflect.New(t).Interface().(error); ok {
                help = "consider using cmpopts.EquateErrors to compare error values"
            } else if t.Comparable() {
                help = "consider using cmpopts.EquateComparable to compare comparable Go types"

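The validator hunk above detects proto.Message types purely by reflection, so cmp does not need to import the protobuf module. Here is a standalone sketch of the same check, assuming a plain struct as the negative example; the check itself mirrors the code in the hunk.

```go
package main

import (
	"fmt"
	"reflect"
)

// isProtoMessage reports whether *T has a ProtoReflect() method returning the
// protoreflect.Message interface, mirroring the check added in the validator.
func isProtoMessage(t reflect.Type) bool {
	m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect")
	return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 &&
		m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" &&
		m.Type.Out(0).Name() == "Message"
}

type plainStruct struct{ name string }

func main() {
	// A plain struct has no ProtoReflect method, so the check reports false.
	fmt.Println(isProtoMessage(reflect.TypeOf(plainStruct{})))
}
```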
@@ -122,7 +122,11 @@ func CopyFile(sourcePath, destinationPath string, overWrite bool) error {
    if err != nil {
        return err
    }
    defer sourceFile.Close()
    defer func() {
        if errClose := sourceFile.Close(); errClose != nil {
            logrus.WithError(errClose).Errorf("Failed to close source file %v", sourcePath)
        }
    }()

    _, err = CreateDirectory(filepath.Dir(destinationPath), sourceFileInfo.ModTime())
    if err != nil {

@@ -133,7 +137,11 @@ func CopyFile(sourcePath, destinationPath string, overWrite bool) error {
    if err != nil {
        return err
    }
    defer destinationFile.Close()
    defer func() {
        if errClose := destinationFile.Close(); errClose != nil {
            logrus.WithError(errClose).Errorf("Failed to close destination file %v", destinationPath)
        }
    }()

    _, err = io.Copy(destinationFile, sourceFile)
    if err != nil {

@@ -217,7 +225,11 @@ func SyncFile(filePath string) error {
    if err != nil {
        return err
    }
    defer file.Close()
    defer func() {
        if errClose := file.Close(); errClose != nil {
            logrus.WithError(errClose).Errorf("Failed to close file %v", filePath)
        }
    }()

    return file.Sync()
}

@@ -315,7 +327,11 @@ func IsDirectoryEmpty(directory string) (bool, error) {
    if err != nil {
        return false, err
    }
    defer f.Close()
    defer func() {
        if errClose := f.Close(); errClose != nil {
            logrus.WithError(errClose).Errorf("Failed to close directory %v", directory)
        }
    }()

    _, err = f.Readdirnames(1)
    if err == io.EOF {

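The file.go hunks above swap bare `defer Close()` calls for closures that log the close error instead of silently discarding it. A minimal sketch of that pattern outside the library, assuming logrus is available and using a hypothetical file path:

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func readSomething(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// Log (rather than drop) any error returned by Close.
	defer func() {
		if errClose := f.Close(); errClose != nil {
			logrus.WithError(errClose).Errorf("Failed to close file %v", path)
		}
	}()

	// ... read from f ...
	return nil
}

func main() {
	if err := readSomething("/etc/hostname"); err != nil {
		logrus.WithError(err).Warn("read failed")
	}
}
```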
@@ -1,15 +1,18 @@
package ns

import (
    "os/exec"
    "time"

    "github.com/pkg/errors"

    "github.com/longhorn/go-common-libs/types"
)

// LuksOpen runs cryptsetup luksOpen with the given passphrase and
// returns the stdout and error.
func (nsexec *Executor) LuksOpen(volume, devicePath, passphrase string, timeout time.Duration) (stdout string, err error) {
    args := []string{"luksOpen", devicePath, volume, "-d", "/dev/stdin"}
    args := []string{"luksOpen", devicePath, volume, "-d", "-"}
    return nsexec.CryptsetupWithPassphrase(passphrase, args, timeout)
}

@@ -29,7 +32,7 @@ func (nsexec *Executor) LuksFormat(devicePath, passphrase, keyCipher, keyHash, k
        "--hash", keyHash,
        "--key-size", keySize,
        "--pbkdf", pbkdf,
        devicePath, "-d", "/dev/stdin",
        devicePath, "-d", "-",
    }
    return nsexec.CryptsetupWithPassphrase(passphrase, args, timeout)
}

@@ -47,6 +50,24 @@ func (nsexec *Executor) LuksStatus(volume string, timeout time.Duration) (stdout
    return nsexec.Cryptsetup(args, timeout)
}

// IsLuks checks if the device is encrypted with LUKS.
func (nsexec *Executor) IsLuks(devicePath string, timeout time.Duration) (bool, error) {
    args := []string{"isLuks", devicePath}
    _, err := nsexec.Cryptsetup(args, timeout)
    if err == nil {
        return true, nil
    }
    var exitErr *exec.ExitError
    if errors.As(err, &exitErr) {
        if exitErr.ExitCode() == 1 {
            // The device is not encrypted if exit code of 1 is returned
            // Ref https://gitlab.com/cryptsetup/cryptsetup/-/blob/main/FAQ.md?plain=1#L2848
            return false, nil
        }
    }
    return false, err
}

// Cryptsetup runs cryptsetup without passphrase. It will return
// 0 on success and a non-zero value on error.
func (nsexec *Executor) Cryptsetup(args []string, timeout time.Duration) (stdout string, err error) {

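IsLuks above relies on `cryptsetup isLuks` returning exit code 1 for a device that exists but is not LUKS-encrypted. A hedged sketch of the same exit-code handling with os/exec invoked directly rather than through the nsexec namespace wrapper; the device path is hypothetical:

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

// isLuks shells out to cryptsetup directly; the library's nsexec wrapper
// additionally runs the command inside the host namespaces.
func isLuks(devicePath string) (bool, error) {
	err := exec.Command("cryptsetup", "isLuks", devicePath).Run()
	if err == nil {
		return true, nil
	}
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 {
		// Exit code 1 means the device is not LUKS-encrypted.
		return false, nil
	}
	return false, err
}

func main() {
	ok, err := isLuks("/dev/sdb") // hypothetical device path
	fmt.Println(ok, err)
}
```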
@@ -10,6 +10,31 @@ import (
    "github.com/longhorn/go-common-libs/types"
)

// GetArch switches to the host namespace and retrieves the system architecture.
func GetArch() (string, error) {
    var err error
    defer func() {
        err = errors.Wrap(err, "failed to get system architecture")
    }()

    fn := func() (interface{}, error) {
        return sys.GetArch()
    }

    rawResult, err := RunFunc(fn, 0)
    if err != nil {
        return "", err
    }

    var result string
    var ableToCast bool
    result, ableToCast = rawResult.(string)
    if !ableToCast {
        return "", errors.Errorf(types.ErrNamespaceCastResultFmt, result, rawResult)
    }
    return result, nil
}

// GetKernelRelease switches to the host namespace and retrieves the kernel release.
func GetKernelRelease() (string, error) {
    var err error

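The namespace-side GetArch above wraps sys.GetArch in RunFunc so the syscall runs in the host namespaces, then type-asserts the interface{} result back to a string. A small usage sketch; the github.com/longhorn/go-common-libs/ns import path is an assumption based on the `package ns` declaration shown earlier in this diff:

```go
package main

import (
	"fmt"

	"github.com/longhorn/go-common-libs/ns"
)

func main() {
	// Runs uname(2) in the host namespaces and returns e.g. "x86_64" or "aarch64".
	arch, err := ns.GetArch()
	if err != nil {
		fmt.Println("failed to get architecture:", err)
		return
	}
	fmt.Println("host architecture:", arch)
}
```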
@@ -19,7 +19,31 @@ import (
    commonio "github.com/longhorn/go-common-libs/io"
)

// GetKernelRelease returns the kernel release string.
// GetArch retrieves the system architecture by calling the unix.Uname function
// and extracting the architecture information from the Utsname structure.
// It returns the architecture as a string and an error if the operation fails.
func GetArch() (string, error) {
    utsname := &unix.Utsname{}
    if err := unix.Uname(utsname); err != nil {
        logrus.WithError(err).Warn("Failed to get system architecture")
        return "", err
    }

    // Extract the architecture from the Utsname structure
    arch := make([]byte, 0, len(utsname.Machine))
    for _, b := range utsname.Machine {
        if b == 0x00 {
            logrus.Trace("Found end of architecture string [0x00]")
            break
        }
        arch = append(arch, byte(b))
    }
    return string(arch), nil
}

// GetKernelRelease retrieves the kernel release by calling the unix.Uname function
// and extracting the release information from the Utsname structure.
// It returns the kernel release as a string and an error if the operation fails.
func GetKernelRelease() (string, error) {
    utsname := &unix.Utsname{}
    if err := unix.Uname(utsname); err != nil {

@@ -172,12 +196,20 @@ func GetProcKernelConfigMap(procDir string) (configMap map[string]string, err er
    if err != nil {
        return nil, err
    }
    defer configFile.Close()
    defer func() {
        if errClose := configFile.Close(); errClose != nil {
            logrus.WithError(errClose).Errorf("Failed to close config file %s", configPath)
        }
    }()
    gzReader, err := gzip.NewReader(configFile)
    if err != nil {
        return nil, err
    }
    defer gzReader.Close()
    defer func() {
        if errClose := gzReader.Close(); errClose != nil {
            logrus.WithError(errClose).Errorf("Failed to close gzip reader for config file %s", configPath)
        }
    }()
    return parseKernelModuleConfigMap(gzReader)
}

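The NUL-terminated Utsname.Machine field trimmed by the loop above can also be converted with unix.ByteSliceToString from golang.org/x/sys/unix. A short Linux-only sketch of that alternative, not what the library itself uses:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func getArch() (string, error) {
	utsname := &unix.Utsname{}
	if err := unix.Uname(utsname); err != nil {
		return "", err
	}
	// ByteSliceToString stops at the first NUL byte, like the manual loop above.
	return unix.ByteSliceToString(utsname.Machine[:]), nil
}

func main() {
	arch, err := getArch()
	fmt.Println(arch, err)
}
```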
@@ -72,7 +72,7 @@ func RandomID(randomIDLenth int) string {
        randomIDLenth = types.RandomIDDefaultLength
    }

    uuid := strings.Replace(UUID(), "-", "", -1)
    uuid := strings.ReplaceAll(UUID(), "-", "")

    if len(uuid) > randomIDLenth {
        uuid = uuid[:randomIDLenth]

@@ -6,6 +6,8 @@
// with type parameters.
package constraints

import "cmp"

// Signed is a constraint that permits any signed integer type.
// If future releases of Go add new predeclared signed integer types,
// this constraint will be modified to include them.

@@ -47,6 +49,6 @@ type Complex interface {
// this constraint will be modified to include them.
//
// This type is redundant since Go 1.21 introduced [cmp.Ordered].
type Ordered interface {
    Integer | Float | ~string
}
//
//go:fix inline
type Ordered = cmp.Ordered

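With Ordered now an alias for cmp.Ordered (Go 1.21+), existing generic code constrained on it keeps compiling unchanged. A small example of such a constraint in use, assuming the golang.org/x/exp/constraints import path for this vendored package:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

// maxOf returns the larger of two ordered values.
func maxOf[T constraints.Ordered](a, b T) T {
	if a > b {
		return a
	}
	return b
}

func main() {
	fmt.Println(maxOf(3, 7))         // 7
	fmt.Println(maxOf("ant", "bee")) // bee
}
```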
@ -3,29 +3,31 @@
|
|||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package context defines the Context type, which carries deadlines,
|
||||
// cancelation signals, and other request-scoped values across API boundaries
|
||||
// cancellation signals, and other request-scoped values across API boundaries
|
||||
// and between processes.
|
||||
// As of Go 1.7 this package is available in the standard library under the
|
||||
// name context. https://golang.org/pkg/context.
|
||||
// name [context], and migrating to it can be done automatically with [go fix].
|
||||
//
|
||||
// Incoming requests to a server should create a Context, and outgoing calls to
|
||||
// servers should accept a Context. The chain of function calls between must
|
||||
// propagate the Context, optionally replacing it with a modified copy created
|
||||
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
|
||||
// Incoming requests to a server should create a [Context], and outgoing
|
||||
// calls to servers should accept a Context. The chain of function
|
||||
// calls between them must propagate the Context, optionally replacing
|
||||
// it with a derived Context created using [WithCancel], [WithDeadline],
|
||||
// [WithTimeout], or [WithValue].
|
||||
//
|
||||
// Programs that use Contexts should follow these rules to keep interfaces
|
||||
// consistent across packages and enable static analysis tools to check context
|
||||
// propagation:
|
||||
//
|
||||
// Do not store Contexts inside a struct type; instead, pass a Context
|
||||
// explicitly to each function that needs it. The Context should be the first
|
||||
// explicitly to each function that needs it. This is discussed further in
|
||||
// https://go.dev/blog/context-and-structs. The Context should be the first
|
||||
// parameter, typically named ctx:
|
||||
//
|
||||
// func DoSomething(ctx context.Context, arg Arg) error {
|
||||
// // ... use ctx ...
|
||||
// }
|
||||
//
|
||||
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
|
||||
// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO]
|
||||
// if you are unsure about which Context to use.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
|
@ -34,9 +36,30 @@
|
|||
// The same Context may be passed to functions running in different goroutines;
|
||||
// Contexts are safe for simultaneous use by multiple goroutines.
|
||||
//
|
||||
// See http://blog.golang.org/context for example code for a server that uses
|
||||
// See https://go.dev/blog/context for example code for a server that uses
|
||||
// Contexts.
|
||||
package context // import "golang.org/x/net/context"
|
||||
//
|
||||
// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs
|
||||
package context
|
||||
|
||||
import (
|
||||
"context" // standard library's context, as of Go 1.7
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Context carries a deadline, a cancellation signal, and other values across
|
||||
// API boundaries.
|
||||
//
|
||||
// Context's methods may be called by multiple goroutines simultaneously.
|
||||
type Context = context.Context
|
||||
|
||||
// Canceled is the error returned by [Context.Err] when the context is canceled
|
||||
// for some reason other than its deadline passing.
|
||||
var Canceled = context.Canceled
|
||||
|
||||
// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
|
||||
// due to its deadline passing.
|
||||
var DeadlineExceeded = context.DeadlineExceeded
|
||||
|
||||
// Background returns a non-nil, empty Context. It is never canceled, has no
|
||||
// values, and has no deadline. It is typically used by the main function,
|
||||
|
@ -49,8 +72,73 @@ func Background() Context {
|
|||
// TODO returns a non-nil, empty Context. Code should use context.TODO when
|
||||
// it's unclear which Context to use or it is not yet available (because the
|
||||
// surrounding function has not yet been extended to accept a Context
|
||||
// parameter). TODO is recognized by static analysis tools that determine
|
||||
// whether Contexts are propagated correctly in a program.
|
||||
// parameter).
|
||||
func TODO() Context {
|
||||
return todo
|
||||
}
|
||||
|
||||
var (
|
||||
background = context.Background()
|
||||
todo = context.TODO()
|
||||
)
|
||||
|
||||
// A CancelFunc tells an operation to abandon its work.
|
||||
// A CancelFunc does not wait for the work to stop.
|
||||
// A CancelFunc may be called by multiple goroutines simultaneously.
|
||||
// After the first call, subsequent calls to a CancelFunc do nothing.
|
||||
type CancelFunc = context.CancelFunc
|
||||
|
||||
// WithCancel returns a derived context that points to the parent context
|
||||
// but has a new Done channel. The returned context's Done channel is closed
|
||||
// when the returned cancel function is called or when the parent context's
|
||||
// Done channel is closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this [Context] complete.
|
||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
||||
return context.WithCancel(parent)
|
||||
}
|
||||
|
||||
// WithDeadline returns a derived context that points to the parent context
|
||||
// but has the deadline adjusted to be no later than d. If the parent's
|
||||
// deadline is already earlier than d, WithDeadline(parent, d) is semantically
|
||||
// equivalent to parent. The returned [Context.Done] channel is closed when
|
||||
// the deadline expires, when the returned cancel function is called,
|
||||
// or when the parent context's Done channel is closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this [Context] complete.
|
||||
func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
|
||||
return context.WithDeadline(parent, d)
|
||||
}
|
||||
|
||||
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this [Context] complete:
|
||||
//
|
||||
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
|
||||
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
// defer cancel() // releases resources if slowOperation completes before timeout elapses
|
||||
// return slowOperation(ctx)
|
||||
// }
|
||||
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
|
||||
return context.WithTimeout(parent, timeout)
|
||||
}
|
||||
|
||||
// WithValue returns a derived context that points to the parent Context.
|
||||
// In the derived context, the value associated with key is val.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
// APIs, not for passing optional parameters to functions.
|
||||
//
|
||||
// The provided key must be comparable and should not be of type
|
||||
// string or any other built-in type to avoid collisions between
|
||||
// packages using context. Users of WithValue should define their own
|
||||
// types for keys. To avoid allocating when assigning to an
|
||||
// interface{}, context keys often have concrete type
|
||||
// struct{}. Alternatively, exported context key variables' static
|
||||
// type should be a pointer or interface.
|
||||
func WithValue(parent Context, key, val interface{}) Context {
|
||||
return context.WithValue(parent, key, val)
|
||||
}
|
||||
|
|
|
@ -1,72 +0,0 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.7
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"context" // standard library's context, as of Go 1.7
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
todo = context.TODO()
|
||||
background = context.Background()
|
||||
)
|
||||
|
||||
// Canceled is the error returned by Context.Err when the context is canceled.
|
||||
var Canceled = context.Canceled
|
||||
|
||||
// DeadlineExceeded is the error returned by Context.Err when the context's
|
||||
// deadline passes.
|
||||
var DeadlineExceeded = context.DeadlineExceeded
|
||||
|
||||
// WithCancel returns a copy of parent with a new Done channel. The returned
|
||||
// context's Done channel is closed when the returned cancel function is called
|
||||
// or when the parent context's Done channel is closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
||||
ctx, f := context.WithCancel(parent)
|
||||
return ctx, f
|
||||
}
|
||||
|
||||
// WithDeadline returns a copy of the parent context with the deadline adjusted
|
||||
// to be no later than d. If the parent's deadline is already earlier than d,
|
||||
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
|
||||
// context's Done channel is closed when the deadline expires, when the returned
|
||||
// cancel function is called, or when the parent context's Done channel is
|
||||
// closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
|
||||
ctx, f := context.WithDeadline(parent, deadline)
|
||||
return ctx, f
|
||||
}
|
||||
|
||||
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete:
|
||||
//
|
||||
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
|
||||
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
// defer cancel() // releases resources if slowOperation completes before timeout elapses
|
||||
// return slowOperation(ctx)
|
||||
// }
|
||||
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
|
||||
return WithDeadline(parent, time.Now().Add(timeout))
|
||||
}
|
||||
|
||||
// WithValue returns a copy of parent in which the value associated with key is
|
||||
// val.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
// APIs, not for passing optional parameters to functions.
|
||||
func WithValue(parent Context, key interface{}, val interface{}) Context {
|
||||
return context.WithValue(parent, key, val)
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.9
|
||||
|
||||
package context
|
||||
|
||||
import "context" // standard library's context, as of Go 1.7
|
||||
|
||||
// A Context carries a deadline, a cancelation signal, and other values across
|
||||
// API boundaries.
|
||||
//
|
||||
// Context's methods may be called by multiple goroutines simultaneously.
|
||||
type Context = context.Context
|
||||
|
||||
// A CancelFunc tells an operation to abandon its work.
|
||||
// A CancelFunc does not wait for the work to stop.
|
||||
// After the first call, subsequent calls to a CancelFunc do nothing.
|
||||
type CancelFunc = context.CancelFunc
|
|
@ -1,300 +0,0 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.7
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
|
||||
// struct{}, since vars of this type must have distinct addresses.
|
||||
type emptyCtx int
|
||||
|
||||
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func (*emptyCtx) Done() <-chan struct{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Err() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Value(key interface{}) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *emptyCtx) String() string {
|
||||
switch e {
|
||||
case background:
|
||||
return "context.Background"
|
||||
case todo:
|
||||
return "context.TODO"
|
||||
}
|
||||
return "unknown empty Context"
|
||||
}
|
||||
|
||||
var (
|
||||
background = new(emptyCtx)
|
||||
todo = new(emptyCtx)
|
||||
)
|
||||
|
||||
// Canceled is the error returned by Context.Err when the context is canceled.
|
||||
var Canceled = errors.New("context canceled")
|
||||
|
||||
// DeadlineExceeded is the error returned by Context.Err when the context's
|
||||
// deadline passes.
|
||||
var DeadlineExceeded = errors.New("context deadline exceeded")
|
||||
|
||||
// WithCancel returns a copy of parent with a new Done channel. The returned
|
||||
// context's Done channel is closed when the returned cancel function is called
|
||||
// or when the parent context's Done channel is closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
||||
c := newCancelCtx(parent)
|
||||
propagateCancel(parent, c)
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
|
||||
// newCancelCtx returns an initialized cancelCtx.
|
||||
func newCancelCtx(parent Context) *cancelCtx {
|
||||
return &cancelCtx{
|
||||
Context: parent,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// propagateCancel arranges for child to be canceled when parent is.
|
||||
func propagateCancel(parent Context, child canceler) {
|
||||
if parent.Done() == nil {
|
||||
return // parent is never canceled
|
||||
}
|
||||
if p, ok := parentCancelCtx(parent); ok {
|
||||
p.mu.Lock()
|
||||
if p.err != nil {
|
||||
// parent has already been canceled
|
||||
child.cancel(false, p.err)
|
||||
} else {
|
||||
if p.children == nil {
|
||||
p.children = make(map[canceler]bool)
|
||||
}
|
||||
p.children[child] = true
|
||||
}
|
||||
p.mu.Unlock()
|
||||
} else {
|
||||
go func() {
|
||||
select {
|
||||
case <-parent.Done():
|
||||
child.cancel(false, parent.Err())
|
||||
case <-child.Done():
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// parentCancelCtx follows a chain of parent references until it finds a
|
||||
// *cancelCtx. This function understands how each of the concrete types in this
|
||||
// package represents its parent.
|
||||
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
|
||||
for {
|
||||
switch c := parent.(type) {
|
||||
case *cancelCtx:
|
||||
return c, true
|
||||
case *timerCtx:
|
||||
return c.cancelCtx, true
|
||||
case *valueCtx:
|
||||
parent = c.Context
|
||||
default:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// removeChild removes a context from its parent.
|
||||
func removeChild(parent Context, child canceler) {
|
||||
p, ok := parentCancelCtx(parent)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
p.mu.Lock()
|
||||
if p.children != nil {
|
||||
delete(p.children, child)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// A canceler is a context type that can be canceled directly. The
|
||||
// implementations are *cancelCtx and *timerCtx.
|
||||
type canceler interface {
|
||||
cancel(removeFromParent bool, err error)
|
||||
Done() <-chan struct{}
|
||||
}
|
||||
|
||||
// A cancelCtx can be canceled. When canceled, it also cancels any children
|
||||
// that implement canceler.
|
||||
type cancelCtx struct {
|
||||
Context
|
||||
|
||||
done chan struct{} // closed by the first cancel call.
|
||||
|
||||
mu sync.Mutex
|
||||
children map[canceler]bool // set to nil by the first cancel call
|
||||
err error // set to non-nil by the first cancel call
|
||||
}
|
||||
|
||||
func (c *cancelCtx) Done() <-chan struct{} {
|
||||
return c.done
|
||||
}
|
||||
|
||||
func (c *cancelCtx) Err() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.err
|
||||
}
|
||||
|
||||
func (c *cancelCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithCancel", c.Context)
|
||||
}
|
||||
|
||||
// cancel closes c.done, cancels each of c's children, and, if
|
||||
// removeFromParent is true, removes c from its parent's children.
|
||||
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
|
||||
if err == nil {
|
||||
panic("context: internal error: missing cancel error")
|
||||
}
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return // already canceled
|
||||
}
|
||||
c.err = err
|
||||
close(c.done)
|
||||
for child := range c.children {
|
||||
// NOTE: acquiring the child's lock while holding parent's lock.
|
||||
child.cancel(false, err)
|
||||
}
|
||||
c.children = nil
|
||||
c.mu.Unlock()
|
||||
|
||||
if removeFromParent {
|
||||
removeChild(c.Context, c)
|
||||
}
|
||||
}
|
||||
|
||||
// WithDeadline returns a copy of the parent context with the deadline adjusted
|
||||
// to be no later than d. If the parent's deadline is already earlier than d,
|
||||
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
|
||||
// context's Done channel is closed when the deadline expires, when the returned
|
||||
// cancel function is called, or when the parent context's Done channel is
|
||||
// closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
|
||||
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
|
||||
// The current deadline is already sooner than the new one.
|
||||
return WithCancel(parent)
|
||||
}
|
||||
c := &timerCtx{
|
||||
cancelCtx: newCancelCtx(parent),
|
||||
deadline: deadline,
|
||||
}
|
||||
propagateCancel(parent, c)
|
||||
d := deadline.Sub(time.Now())
|
||||
if d <= 0 {
|
||||
c.cancel(true, DeadlineExceeded) // deadline has already passed
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.err == nil {
|
||||
c.timer = time.AfterFunc(d, func() {
|
||||
c.cancel(true, DeadlineExceeded)
|
||||
})
|
||||
}
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
|
||||
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
|
||||
// implement Done and Err. It implements cancel by stopping its timer then
|
||||
// delegating to cancelCtx.cancel.
|
||||
type timerCtx struct {
|
||||
*cancelCtx
|
||||
timer *time.Timer // Under cancelCtx.mu.
|
||||
|
||||
deadline time.Time
|
||||
}
|
||||
|
||||
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return c.deadline, true
|
||||
}
|
||||
|
||||
func (c *timerCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
|
||||
}
|
||||
|
||||
func (c *timerCtx) cancel(removeFromParent bool, err error) {
|
||||
c.cancelCtx.cancel(false, err)
|
||||
if removeFromParent {
|
||||
// Remove this timerCtx from its parent cancelCtx's children.
|
||||
removeChild(c.cancelCtx.Context, c)
|
||||
}
|
||||
c.mu.Lock()
|
||||
if c.timer != nil {
|
||||
c.timer.Stop()
|
||||
c.timer = nil
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete:
|
||||
//
|
||||
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
|
||||
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
// defer cancel() // releases resources if slowOperation completes before timeout elapses
|
||||
// return slowOperation(ctx)
|
||||
// }
|
||||
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
|
||||
return WithDeadline(parent, time.Now().Add(timeout))
|
||||
}
|
||||
|
||||
// WithValue returns a copy of parent in which the value associated with key is
|
||||
// val.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
// APIs, not for passing optional parameters to functions.
|
||||
func WithValue(parent Context, key interface{}, val interface{}) Context {
|
||||
return &valueCtx{parent, key, val}
|
||||
}
|
||||
|
||||
// A valueCtx carries a key-value pair. It implements Value for that key and
|
||||
// delegates all other calls to the embedded Context.
|
||||
type valueCtx struct {
|
||||
Context
|
||||
key, val interface{}
|
||||
}
|
||||
|
||||
func (c *valueCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
|
||||
}
|
||||
|
||||
func (c *valueCtx) Value(key interface{}) interface{} {
|
||||
if c.key == key {
|
||||
return c.val
|
||||
}
|
||||
return c.Context.Value(key)
|
||||
}
|
|
@ -1,109 +0,0 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.9
|
||||
|
||||
package context
|
||||
|
||||
import "time"
|
||||
|
||||
// A Context carries a deadline, a cancelation signal, and other values across
|
||||
// API boundaries.
|
||||
//
|
||||
// Context's methods may be called by multiple goroutines simultaneously.
|
||||
type Context interface {
|
||||
// Deadline returns the time when work done on behalf of this context
|
||||
// should be canceled. Deadline returns ok==false when no deadline is
|
||||
// set. Successive calls to Deadline return the same results.
|
||||
Deadline() (deadline time.Time, ok bool)
|
||||
|
||||
// Done returns a channel that's closed when work done on behalf of this
|
||||
// context should be canceled. Done may return nil if this context can
|
||||
// never be canceled. Successive calls to Done return the same value.
|
||||
//
|
||||
// WithCancel arranges for Done to be closed when cancel is called;
|
||||
// WithDeadline arranges for Done to be closed when the deadline
|
||||
// expires; WithTimeout arranges for Done to be closed when the timeout
|
||||
// elapses.
|
||||
//
|
||||
// Done is provided for use in select statements:
|
||||
//
|
||||
// // Stream generates values with DoSomething and sends them to out
|
||||
// // until DoSomething returns an error or ctx.Done is closed.
|
||||
// func Stream(ctx context.Context, out chan<- Value) error {
|
||||
// for {
|
||||
// v, err := DoSomething(ctx)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// return ctx.Err()
|
||||
// case out <- v:
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// See http://blog.golang.org/pipelines for more examples of how to use
|
||||
// a Done channel for cancelation.
|
||||
Done() <-chan struct{}
|
||||
|
||||
// Err returns a non-nil error value after Done is closed. Err returns
|
||||
// Canceled if the context was canceled or DeadlineExceeded if the
|
||||
// context's deadline passed. No other values for Err are defined.
|
||||
// After Done is closed, successive calls to Err return the same value.
|
||||
Err() error
|
||||
|
||||
// Value returns the value associated with this context for key, or nil
|
||||
// if no value is associated with key. Successive calls to Value with
|
||||
// the same key returns the same result.
|
||||
//
|
||||
// Use context values only for request-scoped data that transits
|
||||
// processes and API boundaries, not for passing optional parameters to
|
||||
// functions.
|
||||
//
|
||||
// A key identifies a specific value in a Context. Functions that wish
|
||||
// to store values in Context typically allocate a key in a global
|
||||
// variable then use that key as the argument to context.WithValue and
|
||||
// Context.Value. A key can be any type that supports equality;
|
||||
// packages should define keys as an unexported type to avoid
|
||||
// collisions.
|
||||
//
|
||||
// Packages that define a Context key should provide type-safe accessors
|
||||
// for the values stores using that key:
|
||||
//
|
||||
// // Package user defines a User type that's stored in Contexts.
|
||||
// package user
|
||||
//
|
||||
// import "golang.org/x/net/context"
|
||||
//
|
||||
// // User is the type of value stored in the Contexts.
|
||||
// type User struct {...}
|
||||
//
|
||||
// // key is an unexported type for keys defined in this package.
|
||||
// // This prevents collisions with keys defined in other packages.
|
||||
// type key int
|
||||
//
|
||||
// // userKey is the key for user.User values in Contexts. It is
|
||||
// // unexported; clients use user.NewContext and user.FromContext
|
||||
// // instead of using this key directly.
|
||||
// var userKey key = 0
|
||||
//
|
||||
// // NewContext returns a new Context that carries value u.
|
||||
// func NewContext(ctx context.Context, u *User) context.Context {
|
||||
// return context.WithValue(ctx, userKey, u)
|
||||
// }
|
||||
//
|
||||
// // FromContext returns the User value stored in ctx, if any.
|
||||
// func FromContext(ctx context.Context) (*User, bool) {
|
||||
// u, ok := ctx.Value(userKey).(*User)
|
||||
// return u, ok
|
||||
// }
|
||||
Value(key interface{}) interface{}
|
||||
}
|
||||
|
||||
// A CancelFunc tells an operation to abandon its work.
|
||||
// A CancelFunc does not wait for the work to stop.
|
||||
// After the first call, subsequent calls to a CancelFunc do nothing.
|
||||
type CancelFunc func()
|
|
@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
|
|||
return conf
|
||||
}
|
||||
|
||||
// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
|
||||
// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
|
||||
// (the net/http Transport).
|
||||
func configFromTransport(h2 *Transport) http2Config {
|
||||
conf := http2Config{
|
||||
|
|
|
@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
|
|||
fillNetHTTPConfig(conf, srv.HTTP2)
|
||||
}
|
||||
|
||||
// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
|
||||
// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
|
||||
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
|
||||
fillNetHTTPConfig(conf, tr.HTTP2)
|
||||
}
|
||||
|
|
|
@ -225,6 +225,11 @@ var fhBytes = sync.Pool{
|
|||
},
|
||||
}
|
||||
|
||||
func invalidHTTP1LookingFrameHeader() FrameHeader {
|
||||
fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 "))
|
||||
return fh
|
||||
}
|
||||
|
||||
// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
|
||||
// Most users should use Framer.ReadFrame instead.
|
||||
func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
|
||||
|
@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) {
|
|||
return nil, err
|
||||
}
|
||||
if fh.Length > fr.maxReadSize {
|
||||
if fh == invalidHTTP1LookingFrameHeader() {
|
||||
return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
|
||||
}
|
||||
return nil, ErrFrameTooLarge
|
||||
}
|
||||
payload := fr.getReadBuf(fh.Length)
|
||||
if _, err := io.ReadFull(fr.r, payload); err != nil {
|
||||
if fh == invalidHTTP1LookingFrameHeader() {
|
||||
return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
|
||||
|
|
|
@ -34,11 +34,19 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
VerboseLogs bool
|
||||
logFrameWrites bool
|
||||
logFrameReads bool
|
||||
inTests bool
|
||||
disableExtendedConnectProtocol bool
|
||||
VerboseLogs bool
|
||||
logFrameWrites bool
|
||||
logFrameReads bool
|
||||
inTests bool
|
||||
|
||||
// Enabling extended CONNECT by causes browsers to attempt to use
|
||||
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
|
||||
// package doesn't support extended CONNECT.
|
||||
//
|
||||
// Disable extended CONNECT by default for now.
|
||||
//
|
||||
// Issue #71128.
|
||||
disableExtendedConnectProtocol = true
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -51,8 +59,8 @@ func init() {
|
|||
logFrameWrites = true
|
||||
logFrameReads = true
|
||||
}
|
||||
if strings.Contains(e, "http2xconnect=0") {
|
||||
disableExtendedConnectProtocol = true
|
||||
if strings.Contains(e, "http2xconnect=1") {
|
||||
disableExtendedConnectProtocol = false
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) {
|
|||
s.v = save
|
||||
}
|
||||
|
||||
// validPseudoPath reports whether v is a valid :path pseudo-header
|
||||
// value. It must be either:
|
||||
//
|
||||
// - a non-empty string starting with '/'
|
||||
// - the string '*', for OPTIONS requests.
|
||||
//
|
||||
// For now this is only used a quick check for deciding when to clean
|
||||
// up Opaque URLs before sending requests from the Transport.
|
||||
// See golang.org/issue/16847
|
||||
//
|
||||
// We used to enforce that the path also didn't start with "//", but
|
||||
// Google's GFE accepts such paths and Chrome sends them, so ignore
|
||||
// that part of the spec. See golang.org/issue/19103.
|
||||
func validPseudoPath(v string) bool {
|
||||
return (len(v) > 0 && v[0] == '/') || v == "*"
|
||||
}
|
||||
|
||||
// incomparable is a zero-width, non-comparable type. Adding it to a struct
|
||||
// makes that struct also non-comparable, and generally doesn't add
|
||||
// any size (as long as it's first).
|
||||
|
|
|
@ -50,6 +50,7 @@ import (
|
|||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"golang.org/x/net/internal/httpcommon"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048
|
|||
|
||||
func (sc *serverConn) canonicalHeader(v string) string {
|
||||
sc.serveG.check()
|
||||
buildCommonHeaderMapsOnce()
|
||||
cv, ok := commonCanonHeader[v]
|
||||
cv, ok := httpcommon.CachedCanonicalHeader(v)
|
||||
if ok {
|
||||
return cv
|
||||
}
|
||||
|
@ -1068,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) {
|
|||
|
||||
func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
|
||||
if sc.pingSent {
|
||||
sc.vlogf("timeout waiting for PING response")
|
||||
sc.logf("timeout waiting for PING response")
|
||||
if f := sc.countErrorFunc; f != nil {
|
||||
f("conn_close_lost_ping")
|
||||
}
|
||||
sc.conn.Close()
|
||||
return
|
||||
}
|
||||
|
@ -2233,25 +2236,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
|
|||
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
|
||||
sc.serveG.check()
|
||||
|
||||
rp := requestParam{
|
||||
method: f.PseudoValue("method"),
|
||||
scheme: f.PseudoValue("scheme"),
|
||||
authority: f.PseudoValue("authority"),
|
||||
path: f.PseudoValue("path"),
|
||||
protocol: f.PseudoValue("protocol"),
|
||||
rp := httpcommon.ServerRequestParam{
|
||||
Method: f.PseudoValue("method"),
|
||||
Scheme: f.PseudoValue("scheme"),
|
||||
Authority: f.PseudoValue("authority"),
|
||||
Path: f.PseudoValue("path"),
|
||||
Protocol: f.PseudoValue("protocol"),
|
||||
}
|
||||
|
||||
// extended connect is disabled, so we should not see :protocol
|
||||
if disableExtendedConnectProtocol && rp.protocol != "" {
|
||||
if disableExtendedConnectProtocol && rp.Protocol != "" {
|
||||
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
|
||||
}
|
||||
|
||||
isConnect := rp.method == "CONNECT"
|
||||
isConnect := rp.Method == "CONNECT"
|
||||
if isConnect {
|
||||
if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") {
|
||||
if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
|
||||
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
|
||||
}
|
||||
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
|
||||
} else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
|
||||
// See 8.1.2.6 Malformed Requests and Responses:
|
||||
//
|
||||
// Malformed requests or responses that are detected
|
||||
|
@ -2265,15 +2268,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
|
|||
return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
|
||||
}
|
||||
|
||||
rp.header = make(http.Header)
|
||||
header := make(http.Header)
|
||||
rp.Header = header
|
||||
for _, hf := range f.RegularFields() {
|
||||
rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
|
||||
header.Add(sc.canonicalHeader(hf.Name), hf.Value)
|
||||
}
|
||||
if rp.authority == "" {
|
||||
rp.authority = rp.header.Get("Host")
|
||||
if rp.Authority == "" {
|
||||
rp.Authority = header.Get("Host")
|
||||
}
|
||||
if rp.protocol != "" {
|
||||
rp.header.Set(":protocol", rp.protocol)
|
||||
if rp.Protocol != "" {
|
||||
header.Set(":protocol", rp.Protocol)
|
||||
}
|
||||
|
||||
rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
|
||||
|
@ -2282,7 +2286,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
|
|||
}
|
||||
bodyOpen := !f.StreamEnded()
|
||||
if bodyOpen {
|
||||
if vv, ok := rp.header["Content-Length"]; ok {
|
||||
if vv, ok := rp.Header["Content-Length"]; ok {
|
||||
if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
|
||||
req.ContentLength = int64(cl)
|
||||
} else {
|
||||
|
@ -2298,84 +2302,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
|
|||
return rw, req, nil
|
||||
}
|
||||
|
||||
type requestParam struct {
|
||||
method string
|
||||
scheme, authority, path string
|
||||
protocol string
|
||||
header http.Header
|
||||
}
|
||||
|
||||
func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
|
||||
func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) {
|
||||
sc.serveG.check()
|
||||
|
||||
var tlsState *tls.ConnectionState // nil if not scheme https
|
||||
if rp.scheme == "https" {
|
||||
if rp.Scheme == "https" {
|
||||
tlsState = sc.tlsState
|
||||
}
|
||||
|
||||
needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
|
||||
if needsContinue {
|
||||
rp.header.Del("Expect")
|
||||
}
|
||||
// Merge Cookie headers into one "; "-delimited value.
|
||||
if cookies := rp.header["Cookie"]; len(cookies) > 1 {
|
||||
rp.header.Set("Cookie", strings.Join(cookies, "; "))
|
||||
}
|
||||
|
||||
// Setup Trailers
|
||||
var trailer http.Header
|
||||
for _, v := range rp.header["Trailer"] {
|
||||
for _, key := range strings.Split(v, ",") {
|
||||
key = http.CanonicalHeaderKey(textproto.TrimString(key))
|
||||
switch key {
|
||||
case "Transfer-Encoding", "Trailer", "Content-Length":
|
||||
// Bogus. (copy of http1 rules)
|
||||
// Ignore.
|
||||
default:
|
||||
if trailer == nil {
|
||||
trailer = make(http.Header)
|
||||
}
|
||||
trailer[key] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
delete(rp.header, "Trailer")
|
||||
|
||||
var url_ *url.URL
|
||||
var requestURI string
|
||||
if rp.method == "CONNECT" && rp.protocol == "" {
|
||||
url_ = &url.URL{Host: rp.authority}
|
||||
requestURI = rp.authority // mimic HTTP/1 server behavior
|
||||
} else {
|
||||
var err error
|
||||
url_, err = url.ParseRequestURI(rp.path)
|
||||
if err != nil {
|
||||
return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
|
||||
}
|
||||
requestURI = rp.path
|
||||
res := httpcommon.NewServerRequest(rp)
|
||||
if res.InvalidReason != "" {
|
||||
return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
|
||||
}
|
||||
|
||||
body := &requestBody{
|
||||
conn: sc,
|
||||
stream: st,
|
||||
needsContinue: needsContinue,
|
||||
needsContinue: res.NeedsContinue,
|
||||
}
|
||||
req := &http.Request{
|
||||
Method: rp.method,
|
||||
URL: url_,
|
||||
req := (&http.Request{
|
||||
Method: rp.Method,
|
||||
URL: res.URL,
|
||||
RemoteAddr: sc.remoteAddrStr,
|
||||
Header: rp.header,
|
||||
RequestURI: requestURI,
|
||||
Header: rp.Header,
|
||||
RequestURI: res.RequestURI,
|
||||
Proto: "HTTP/2.0",
|
||||
ProtoMajor: 2,
|
||||
ProtoMinor: 0,
|
||||
TLS: tlsState,
|
||||
Host: rp.authority,
|
||||
Host: rp.Authority,
|
||||
Body: body,
|
||||
Trailer: trailer,
|
||||
}
|
||||
req = req.WithContext(st.ctx)
|
||||
|
||||
Trailer: res.Trailer,
|
||||
}).WithContext(st.ctx)
|
||||
rw := sc.newResponseWriter(st, req)
|
||||
return rw, req, nil
|
||||
}
|
||||
|
@ -3270,12 +3228,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
|
|||
// we start in "half closed (remote)" for simplicity.
|
||||
// See further comments at the definition of stateHalfClosedRemote.
|
||||
promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
|
||||
rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
|
||||
method: msg.method,
|
||||
scheme: msg.url.Scheme,
|
||||
authority: msg.url.Host,
|
||||
path: msg.url.RequestURI(),
|
||||
header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
|
||||
rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
|
||||
Method: msg.method,
|
||||
Scheme: msg.url.Scheme,
|
||||
Authority: msg.url.Host,
|
||||
Path: msg.url.RequestURI(),
|
||||
Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
|
||||
})
|
||||
if err != nil {
|
||||
// Should not happen, since we've already validated msg.url.
|
||||
|
|
|
@ -25,7 +25,6 @@ import (
|
|||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/textproto"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -35,6 +34,7 @@ import (
|
|||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"golang.org/x/net/idna"
|
||||
"golang.org/x/net/internal/httpcommon"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -375,6 +375,7 @@ type ClientConn struct {
|
|||
doNotReuse bool // whether conn is marked to not be reused for any future requests
|
||||
closing bool
|
||||
closed bool
|
||||
closedOnIdle bool // true if conn was closed for idleness
|
||||
seenSettings bool // true if we've seen a settings frame, false otherwise
|
||||
seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails
|
||||
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
|
||||
|
@ -1089,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
|
|||
|
||||
// If this connection has never been used for a request and is closed,
|
||||
// then let it take a request (which will fail).
|
||||
// If the conn was closed for idleness, we're racing the idle timer;
|
||||
// don't try to use the conn. (Issue #70515.)
|
||||
//
|
||||
// This avoids a situation where an error early in a connection's lifetime
|
||||
// goes unreported.
|
||||
if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed {
|
||||
if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle {
|
||||
st.canTakeNewRequest = true
|
||||
}
|
||||
|
||||
|
@ -1155,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() {
|
|||
return
|
||||
}
|
||||
cc.closed = true
|
||||
cc.closedOnIdle = true
|
||||
nextID := cc.nextStreamID
|
||||
// TODO: do clients send GOAWAY too? maybe? Just Close:
|
||||
cc.mu.Unlock()
|
||||
|
@ -1271,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() {
|
|||
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
|
||||
var errRequestCanceled = errors.New("net/http: request canceled")
|
||||
|
||||
func commaSeparatedTrailers(req *http.Request) (string, error) {
|
||||
keys := make([]string, 0, len(req.Trailer))
|
||||
for k := range req.Trailer {
|
||||
k = canonicalHeader(k)
|
||||
switch k {
|
||||
case "Transfer-Encoding", "Trailer", "Content-Length":
|
||||
return "", fmt.Errorf("invalid Trailer key %q", k)
|
||||
}
|
||||
keys = append(keys, k)
|
||||
}
|
||||
if len(keys) > 0 {
|
||||
sort.Strings(keys)
|
||||
return strings.Join(keys, ","), nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (cc *ClientConn) responseHeaderTimeout() time.Duration {
|
||||
if cc.t.t1 != nil {
|
||||
return cc.t.t1.ResponseHeaderTimeout
|
||||
|
@ -1299,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
|
|||
return 0
|
||||
}
|
||||
|
||||
// checkConnHeaders checks whether req has any invalid connection-level headers.
|
||||
// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
|
||||
// Certain headers are special-cased as okay but not transmitted later.
|
||||
func checkConnHeaders(req *http.Request) error {
|
||||
if v := req.Header.Get("Upgrade"); v != "" {
|
||||
return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
|
||||
}
|
||||
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
|
||||
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
|
||||
}
|
||||
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
|
||||
return fmt.Errorf("http2: invalid Connection request header: %q", vv)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// actualContentLength returns a sanitized version of
|
||||
// req.ContentLength, where 0 actually means zero (not unknown) and -1
|
||||
// means unknown.
|
||||
|
@@ -1360,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
donec: make(chan struct{}),
}

// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
if !cc.t.disableCompression() &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
!cs.isHead {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: https://zlib.net/zlib_faq.html#faq39
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// http://trac.nginx.org/nginx/ticket/358
// https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
cs.requestedGzip = true
}
cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression())

go cs.doRequest(req, streamf)
@@ -1492,10 +1445,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
cc := cs.cc
ctx := cs.ctx

if err := checkConnHeaders(req); err != nil {
return err
}

// wait for setting frames to be received, a server can change this value later,
// but we just wait for the first settings frame
var isExtendedConnect bool
@@ -1659,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
// sent by writeRequestBody below, along with any Trailers,
// again in form HEADERS{1}, CONTINUATION{0,})
trailers, err := commaSeparatedTrailers(req)
cc.hbuf.Reset()
res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) {
cc.writeHeader(name, value)
})
if err != nil {
return err
}
hasTrailers := trailers != ""
contentLen := actualContentLength(req)
hasBody := contentLen != 0
hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
if err != nil {
return err
return fmt.Errorf("http2: %w", err)
}
hdrs := cc.hbuf.Bytes()

// Write the request.
endStream := !hasBody && !hasTrailers
endStream := !res.HasBody && !res.HasTrailers
cs.sentHeaders = true
err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
traceWroteHeaders(cs.trace)
return err
}

func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) {
return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{
Request: httpcommon.Request{
Header: req.Header,
Trailer: req.Trailer,
URL: req.URL,
Host: req.Host,
Method: req.Method,
ActualContentLength: actualContentLength(req),
},
AddGzipHeader: addGzipHeader,
PeerMaxHeaderListSize: peerMaxHeaderListSize,
DefaultUserAgent: defaultUserAgent,
}, headerf)
}
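The new helper above hands every validated (name, value) pair to a caller-supplied callback, and the transport's callback simply appends each pair to the connection's HPACK buffer via cc.writeHeader. Since internal/httpcommon cannot be imported outside x/net, here is a minimal sketch of the same callback-to-HPACK wiring using only the public hpack package; writeHeaderBlock and the pseudo-header values are illustrative, not part of this change.

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// writeHeaderBlock mimics the callback wiring above: a producer emits
// (name, value) pairs and the consumer HPACK-encodes them into a buffer.
func writeHeaderBlock(emit func(f func(name, value string))) []byte {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	emit(func(name, value string) {
		// WriteField only fails if the underlying writer fails;
		// bytes.Buffer writes cannot fail, so the error is ignored here.
		enc.WriteField(hpack.HeaderField{Name: name, Value: value})
	})
	return buf.Bytes()
}

func main() {
	hdrs := writeHeaderBlock(func(f func(name, value string)) {
		f(":method", "GET")
		f(":scheme", "https")
		f(":authority", "example.com")
		f(":path", "/")
	})
	fmt.Printf("HEADERS block: %d bytes\n", len(hdrs))
}
```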
// cleanupWriteRequest performs post-request tasks.
|
||||
//
|
||||
// If err (the result of writeRequest) is non-nil and the stream is not closed,
|
||||
|
@ -2066,218 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
|
|||
}
|
||||
}
|
||||
|
||||
func validateHeaders(hdrs http.Header) string {
|
||||
for k, vv := range hdrs {
|
||||
if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
|
||||
return fmt.Sprintf("name %q", k)
|
||||
}
|
||||
for _, v := range vv {
|
||||
if !httpguts.ValidHeaderFieldValue(v) {
|
||||
// Don't include the value in the error,
|
||||
// because it may be sensitive.
|
||||
return fmt.Sprintf("value for header %q", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var errNilRequestURL = errors.New("http2: Request.URI is nil")
|
||||
|
||||
func isNormalConnect(req *http.Request) bool {
|
||||
return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
|
||||
}
|
||||
|
||||
// requires cc.wmu be held.
|
||||
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
|
||||
cc.hbuf.Reset()
|
||||
if req.URL == nil {
|
||||
return nil, errNilRequestURL
|
||||
}
|
||||
|
||||
host := req.Host
|
||||
if host == "" {
|
||||
host = req.URL.Host
|
||||
}
|
||||
host, err := httpguts.PunycodeHostPort(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !httpguts.ValidHostHeader(host) {
|
||||
return nil, errors.New("http2: invalid Host header")
|
||||
}
|
||||
|
||||
var path string
|
||||
if !isNormalConnect(req) {
|
||||
path = req.URL.RequestURI()
|
||||
if !validPseudoPath(path) {
|
||||
orig := path
|
||||
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
|
||||
if !validPseudoPath(path) {
|
||||
if req.URL.Opaque != "" {
|
||||
return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid request :path %q", orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for any invalid headers+trailers and return an error before we
|
||||
// potentially pollute our hpack state. (We want to be able to
|
||||
// continue to reuse the hpack encoder for future requests)
|
||||
if err := validateHeaders(req.Header); err != "" {
|
||||
return nil, fmt.Errorf("invalid HTTP header %s", err)
|
||||
}
|
||||
if err := validateHeaders(req.Trailer); err != "" {
|
||||
return nil, fmt.Errorf("invalid HTTP trailer %s", err)
|
||||
}
|
||||
|
||||
enumerateHeaders := func(f func(name, value string)) {
|
||||
// 8.1.2.3 Request Pseudo-Header Fields
|
||||
// The :path pseudo-header field includes the path and query parts of the
|
||||
// target URI (the path-absolute production and optionally a '?' character
|
||||
// followed by the query production, see Sections 3.3 and 3.4 of
|
||||
// [RFC3986]).
|
||||
f(":authority", host)
|
||||
m := req.Method
|
||||
if m == "" {
|
||||
m = http.MethodGet
|
||||
}
|
||||
f(":method", m)
|
||||
if !isNormalConnect(req) {
|
||||
f(":path", path)
|
||||
f(":scheme", req.URL.Scheme)
|
||||
}
|
||||
if trailers != "" {
|
||||
f("trailer", trailers)
|
||||
}
|
||||
|
||||
var didUA bool
|
||||
for k, vv := range req.Header {
|
||||
if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
|
||||
// Host is :authority, already sent.
|
||||
// Content-Length is automatic, set below.
|
||||
continue
|
||||
} else if asciiEqualFold(k, "connection") ||
|
||||
asciiEqualFold(k, "proxy-connection") ||
|
||||
asciiEqualFold(k, "transfer-encoding") ||
|
||||
asciiEqualFold(k, "upgrade") ||
|
||||
asciiEqualFold(k, "keep-alive") {
|
||||
// Per 8.1.2.2 Connection-Specific Header
|
||||
// Fields, don't send connection-specific
|
||||
// fields. We have already checked if any
|
||||
// are error-worthy so just ignore the rest.
|
||||
continue
|
||||
} else if asciiEqualFold(k, "user-agent") {
|
||||
// Match Go's http1 behavior: at most one
|
||||
// User-Agent. If set to nil or empty string,
|
||||
// then omit it. Otherwise if not mentioned,
|
||||
// include the default (below).
|
||||
didUA = true
|
||||
if len(vv) < 1 {
|
||||
continue
|
||||
}
|
||||
vv = vv[:1]
|
||||
if vv[0] == "" {
|
||||
continue
|
||||
}
|
||||
} else if asciiEqualFold(k, "cookie") {
|
||||
// Per 8.1.2.5 To allow for better compression efficiency, the
|
||||
// Cookie header field MAY be split into separate header fields,
|
||||
// each with one or more cookie-pairs.
|
||||
for _, v := range vv {
|
||||
for {
|
||||
p := strings.IndexByte(v, ';')
|
||||
if p < 0 {
|
||||
break
|
||||
}
|
||||
f("cookie", v[:p])
|
||||
p++
|
||||
// strip space after semicolon if any.
|
||||
for p+1 <= len(v) && v[p] == ' ' {
|
||||
p++
|
||||
}
|
||||
v = v[p:]
|
||||
}
|
||||
if len(v) > 0 {
|
||||
f("cookie", v)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
for _, v := range vv {
|
||||
f(k, v)
|
||||
}
|
||||
}
|
||||
if shouldSendReqContentLength(req.Method, contentLength) {
|
||||
f("content-length", strconv.FormatInt(contentLength, 10))
|
||||
}
|
||||
if addGzipHeader {
|
||||
f("accept-encoding", "gzip")
|
||||
}
|
||||
if !didUA {
|
||||
f("user-agent", defaultUserAgent)
|
||||
}
|
||||
}
|
||||
|
||||
// Do a first pass over the headers counting bytes to ensure
|
||||
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
|
||||
// separate pass before encoding the headers to prevent
|
||||
// modifying the hpack state.
|
||||
hlSize := uint64(0)
|
||||
enumerateHeaders(func(name, value string) {
|
||||
hf := hpack.HeaderField{Name: name, Value: value}
|
||||
hlSize += uint64(hf.Size())
|
||||
})
|
||||
|
||||
if hlSize > cc.peerMaxHeaderListSize {
|
||||
return nil, errRequestHeaderListSize
|
||||
}
|
||||
|
||||
trace := httptrace.ContextClientTrace(req.Context())
|
||||
traceHeaders := traceHasWroteHeaderField(trace)
|
||||
|
||||
// Header list size is ok. Write the headers.
|
||||
enumerateHeaders(func(name, value string) {
|
||||
name, ascii := lowerHeader(name)
|
||||
if !ascii {
|
||||
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
|
||||
// field names have to be ASCII characters (just as in HTTP/1.x).
|
||||
return
|
||||
}
|
||||
cc.writeHeader(name, value)
|
||||
if traceHeaders {
|
||||
traceWroteHeaderField(trace, name, value)
|
||||
}
|
||||
})
|
||||
|
||||
return cc.hbuf.Bytes(), nil
|
||||
}
|
||||
|
||||
// shouldSendReqContentLength reports whether the http2.Transport should send
|
||||
// a "content-length" request header. This logic is basically a copy of the net/http
|
||||
// transferWriter.shouldSendContentLength.
|
||||
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
|
||||
// -1 means unknown.
|
||||
func shouldSendReqContentLength(method string, contentLength int64) bool {
|
||||
if contentLength > 0 {
|
||||
return true
|
||||
}
|
||||
if contentLength < 0 {
|
||||
return false
|
||||
}
|
||||
// For zero bodies, whether we send a content-length depends on the method.
|
||||
// It also kinda doesn't matter for http2 either way, with END_STREAM.
|
||||
switch method {
|
||||
case "POST", "PUT", "PATCH":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// requires cc.wmu be held.
|
||||
func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
|
||||
cc.hbuf.Reset()
|
||||
|
@@ -2294,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
}

for k, vv := range trailer {
lowKey, ascii := lowerHeader(k)
lowKey, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
@@ -2434,9 +2184,12 @@ func (rl *clientConnReadLoop) cleanup() {
// This avoids a situation where new connections are constantly created,
// added to the pool, fail, and are removed from the pool, without any error
// being surfaced to the user.
const unusedWaitTime = 5 * time.Second
unusedWaitTime := 5 * time.Second
if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
unusedWaitTime = cc.idleTimeout
}
idleTime := cc.t.now().Sub(cc.lastActive)
if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime {
if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
cc.t.connPool().MarkDead(cc)
})

@@ -2457,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() {
}
cc.cond.Broadcast()
cc.mu.Unlock()

if !cc.seenSettings {
// If we have a pending request that wants extended CONNECT,
// let it continue and fail with the connection error.
cc.extendedConnectAllowed = true
close(cc.seenSettingsChan)
}
}

// countReadFrameError calls Transport.CountError with a string

@@ -2549,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
if !cc.seenSettings {
close(cc.seenSettingsChan)
}
return err
}
}
@@ -2646,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
Status: status + " " + http.StatusText(statusCode),
}
for _, hf := range regularFields {
key := canonicalHeader(hf.Name)
key := httpcommon.CanonicalHeader(hf.Name)
if key == "Trailer" {
t := res.Trailer
if t == nil {

@@ -2654,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
res.Trailer = t
}
foreachHeaderElement(hf.Value, func(v string) {
t[canonicalHeader(v)] = nil
t[httpcommon.CanonicalHeader(v)] = nil
})
} else {
vv := header[key]

@@ -2778,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr

trailer := make(http.Header)
for _, hf := range f.RegularFields() {
key := canonicalHeader(hf.Name)
key := httpcommon.CanonicalHeader(hf.Name)
trailer[key] = append(trailer[key], hf.Value)
}
cs.trailer = trailer
@@ -3324,7 +3081,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool,

var (
errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize
)

func (cc *ClientConn) logf(format string, args ...interface{}) {
@@ -3508,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
}
}

func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}

func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}

func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
@@ -13,6 +13,7 @@ import (

"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/internal/httpcommon"
)

// writeFramer is implemented by any type that is used to write frames.

@@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
}
for _, k := range keys {
vv := h[k]
k, ascii := lowerHeader(k)
k, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
@@ -0,0 +1,53 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package httpcommon

import "strings"

// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
// contains helper functions which may use Unicode-aware functions which would
// otherwise be unsafe and could introduce vulnerabilities if used improperly.

// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
func asciiEqualFold(s, t string) bool {
if len(s) != len(t) {
return false
}
for i := 0; i < len(s); i++ {
if lower(s[i]) != lower(t[i]) {
return false
}
}
return true
}

// lower returns the ASCII lowercase version of b.
func lower(b byte) byte {
if 'A' <= b && b <= 'Z' {
return b + ('a' - 'A')
}
return b
}

// isASCIIPrint returns whether s is ASCII and printable according to
// https://tools.ietf.org/html/rfc20#section-4.2.
func isASCIIPrint(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] < ' ' || s[i] > '~' {
return false
}
}
return true
}

// asciiToLower returns the lowercase version of s if s is ASCII and printable,
// and whether or not it was.
func asciiToLower(s string) (lower string, ok bool) {
if !isASCIIPrint(s) {
return "", false
}
return strings.ToLower(s), true
}
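The point of these ASCII-only helpers is that Unicode-aware folding is unsafe for header names: strings.EqualFold, for example, treats U+212A (the Kelvin sign) as equal to 'k'. A small self-contained sketch, with lower and asciiEqualFold copied from the file above, illustrates the difference; the header value is made up.

```go
package main

import (
	"fmt"
	"strings"
)

// lower and asciiEqualFold are copied from the new httpcommon/ascii.go above.
func lower(b byte) byte {
	if 'A' <= b && b <= 'Z' {
		return b + ('a' - 'A')
	}
	return b
}

func asciiEqualFold(s, t string) bool {
	if len(s) != len(t) {
		return false
	}
	for i := 0; i < len(s); i++ {
		if lower(s[i]) != lower(t[i]) {
			return false
		}
	}
	return true
}

func main() {
	// U+212A KELVIN SIGN folds to 'k' under Unicode rules but is not an
	// ASCII header character.
	kelvin := "\u212Aeep-alive"
	fmt.Println(strings.EqualFold(kelvin, "keep-alive")) // true  (Unicode fold)
	fmt.Println(asciiEqualFold(kelvin, "keep-alive"))    // false (ASCII only)
}
```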
@@ -1,11 +1,11 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2
package httpcommon

import (
"net/http"
"net/textproto"
"sync"
)

@@ -82,13 +82,15 @@ func buildCommonHeaderMaps() {
commonLowerHeader = make(map[string]string, len(common))
commonCanonHeader = make(map[string]string, len(common))
for _, v := range common {
chk := http.CanonicalHeaderKey(v)
chk := textproto.CanonicalMIMEHeaderKey(v)
commonLowerHeader[chk] = v
commonCanonHeader[v] = chk
}
}

func lowerHeader(v string) (lower string, ascii bool) {
// LowerHeader returns the lowercase form of a header name,
// used on the wire for HTTP/2 and HTTP/3 requests.
func LowerHeader(v string) (lower string, ascii bool) {
buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s, true

@@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) {
return asciiToLower(v)
}

func canonicalHeader(v string) string {
// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
func CanonicalHeader(v string) string {
buildCommonHeaderMapsOnce()
if s, ok := commonCanonHeader[v]; ok {
return s
}
return http.CanonicalHeaderKey(v)
return textproto.CanonicalMIMEHeaderKey(v)
}

// CachedCanonicalHeader returns the canonical form of a well-known header name.
func CachedCanonicalHeader(v string) (string, bool) {
buildCommonHeaderMapsOnce()
s, ok := commonCanonHeader[v]
return s, ok
}
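The switch from http.CanonicalHeaderKey to textproto.CanonicalMIMEHeaderKey removes the package's net/http dependency without changing results for valid keys. The lookup-with-fallback pattern used by LowerHeader and CanonicalHeader can be sketched with the standard library alone; the names and the common-header list below are illustrative, not the package's actual table.

```go
package main

import (
	"fmt"
	"net/textproto"
	"sync"
)

// A minimal version of the pattern above: common keys are precomputed once,
// everything else falls back to the general (slower, allocating) conversion.
var (
	buildOnce   sync.Once
	canonCommon map[string]string
)

func buildCommon() {
	common := []string{"content-type", "content-length", "user-agent"} // illustrative subset
	canonCommon = make(map[string]string, len(common))
	for _, v := range common {
		canonCommon[v] = textproto.CanonicalMIMEHeaderKey(v)
	}
}

func canonicalHeader(v string) string {
	buildOnce.Do(buildCommon)
	if s, ok := canonCommon[v]; ok {
		return s // cheap map hit for well-known names
	}
	return textproto.CanonicalMIMEHeaderKey(v)
}

func main() {
	fmt.Println(canonicalHeader("content-type")) // Content-Type (cached)
	fmt.Println(canonicalHeader("x-request-id")) // X-Request-Id (fallback path)
}
```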
@ -0,0 +1,467 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package httpcommon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http/httptrace"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
|
||||
)
|
||||
|
||||
// Request is a subset of http.Request.
|
||||
// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http
|
||||
// without creating a dependency cycle.
|
||||
type Request struct {
|
||||
URL *url.URL
|
||||
Method string
|
||||
Host string
|
||||
Header map[string][]string
|
||||
Trailer map[string][]string
|
||||
ActualContentLength int64 // 0 means 0, -1 means unknown
|
||||
}
|
||||
|
||||
// EncodeHeadersParam is parameters to EncodeHeaders.
|
||||
type EncodeHeadersParam struct {
|
||||
Request Request
|
||||
|
||||
// AddGzipHeader indicates that an "accept-encoding: gzip" header should be
|
||||
// added to the request.
|
||||
AddGzipHeader bool
|
||||
|
||||
// PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
|
||||
PeerMaxHeaderListSize uint64
|
||||
|
||||
// DefaultUserAgent is the User-Agent header to send when the request
|
||||
// neither contains a User-Agent nor disables it.
|
||||
DefaultUserAgent string
|
||||
}
|
||||
|
||||
// EncodeHeadersResult is the result of EncodeHeaders.
|
||||
type EncodeHeadersResult struct {
|
||||
HasBody bool
|
||||
HasTrailers bool
|
||||
}
|
||||
|
||||
// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
|
||||
// It validates a request and calls headerf with each pseudo-header and header
|
||||
// for the request.
|
||||
// The headerf function is called with the validated, canonicalized header name.
|
||||
func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
|
||||
req := param.Request
|
||||
|
||||
// Check for invalid connection-level headers.
|
||||
if err := checkConnHeaders(req.Header); err != nil {
|
||||
return res, err
|
||||
}
|
||||
|
||||
if req.URL == nil {
|
||||
return res, errors.New("Request.URL is nil")
|
||||
}
|
||||
|
||||
host := req.Host
|
||||
if host == "" {
|
||||
host = req.URL.Host
|
||||
}
|
||||
host, err := httpguts.PunycodeHostPort(host)
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
if !httpguts.ValidHostHeader(host) {
|
||||
return res, errors.New("invalid Host header")
|
||||
}
|
||||
|
||||
// isNormalConnect is true if this is a non-extended CONNECT request.
|
||||
isNormalConnect := false
|
||||
var protocol string
|
||||
if vv := req.Header[":protocol"]; len(vv) > 0 {
|
||||
protocol = vv[0]
|
||||
}
|
||||
if req.Method == "CONNECT" && protocol == "" {
|
||||
isNormalConnect = true
|
||||
} else if protocol != "" && req.Method != "CONNECT" {
|
||||
return res, errors.New("invalid :protocol header in non-CONNECT request")
|
||||
}
|
||||
|
||||
// Validate the path, except for non-extended CONNECT requests which have no path.
|
||||
var path string
|
||||
if !isNormalConnect {
|
||||
path = req.URL.RequestURI()
|
||||
if !validPseudoPath(path) {
|
||||
orig := path
|
||||
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
|
||||
if !validPseudoPath(path) {
|
||||
if req.URL.Opaque != "" {
|
||||
return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
|
||||
} else {
|
||||
return res, fmt.Errorf("invalid request :path %q", orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for any invalid headers+trailers and return an error before we
|
||||
// potentially pollute our hpack state. (We want to be able to
|
||||
// continue to reuse the hpack encoder for future requests)
|
||||
if err := validateHeaders(req.Header); err != "" {
|
||||
return res, fmt.Errorf("invalid HTTP header %s", err)
|
||||
}
|
||||
if err := validateHeaders(req.Trailer); err != "" {
|
||||
return res, fmt.Errorf("invalid HTTP trailer %s", err)
|
||||
}
|
||||
|
||||
trailers, err := commaSeparatedTrailers(req.Trailer)
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
|
||||
enumerateHeaders := func(f func(name, value string)) {
|
||||
// 8.1.2.3 Request Pseudo-Header Fields
|
||||
// The :path pseudo-header field includes the path and query parts of the
|
||||
// target URI (the path-absolute production and optionally a '?' character
|
||||
// followed by the query production, see Sections 3.3 and 3.4 of
|
||||
// [RFC3986]).
|
||||
f(":authority", host)
|
||||
m := req.Method
|
||||
if m == "" {
|
||||
m = "GET"
|
||||
}
|
||||
f(":method", m)
|
||||
if !isNormalConnect {
|
||||
f(":path", path)
|
||||
f(":scheme", req.URL.Scheme)
|
||||
}
|
||||
if protocol != "" {
|
||||
f(":protocol", protocol)
|
||||
}
|
||||
if trailers != "" {
|
||||
f("trailer", trailers)
|
||||
}
|
||||
|
||||
var didUA bool
|
||||
for k, vv := range req.Header {
|
||||
if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
|
||||
// Host is :authority, already sent.
|
||||
// Content-Length is automatic, set below.
|
||||
continue
|
||||
} else if asciiEqualFold(k, "connection") ||
|
||||
asciiEqualFold(k, "proxy-connection") ||
|
||||
asciiEqualFold(k, "transfer-encoding") ||
|
||||
asciiEqualFold(k, "upgrade") ||
|
||||
asciiEqualFold(k, "keep-alive") {
|
||||
// Per 8.1.2.2 Connection-Specific Header
|
||||
// Fields, don't send connection-specific
|
||||
// fields. We have already checked if any
|
||||
// are error-worthy so just ignore the rest.
|
||||
continue
|
||||
} else if asciiEqualFold(k, "user-agent") {
|
||||
// Match Go's http1 behavior: at most one
|
||||
// User-Agent. If set to nil or empty string,
|
||||
// then omit it. Otherwise if not mentioned,
|
||||
// include the default (below).
|
||||
didUA = true
|
||||
if len(vv) < 1 {
|
||||
continue
|
||||
}
|
||||
vv = vv[:1]
|
||||
if vv[0] == "" {
|
||||
continue
|
||||
}
|
||||
} else if asciiEqualFold(k, "cookie") {
|
||||
// Per 8.1.2.5 To allow for better compression efficiency, the
|
||||
// Cookie header field MAY be split into separate header fields,
|
||||
// each with one or more cookie-pairs.
|
||||
for _, v := range vv {
|
||||
for {
|
||||
p := strings.IndexByte(v, ';')
|
||||
if p < 0 {
|
||||
break
|
||||
}
|
||||
f("cookie", v[:p])
|
||||
p++
|
||||
// strip space after semicolon if any.
|
||||
for p+1 <= len(v) && v[p] == ' ' {
|
||||
p++
|
||||
}
|
||||
v = v[p:]
|
||||
}
|
||||
if len(v) > 0 {
|
||||
f("cookie", v)
|
||||
}
|
||||
}
|
||||
continue
|
||||
} else if k == ":protocol" {
|
||||
// :protocol pseudo-header was already sent above.
|
||||
continue
|
||||
}
|
||||
|
||||
for _, v := range vv {
|
||||
f(k, v)
|
||||
}
|
||||
}
|
||||
if shouldSendReqContentLength(req.Method, req.ActualContentLength) {
|
||||
f("content-length", strconv.FormatInt(req.ActualContentLength, 10))
|
||||
}
|
||||
if param.AddGzipHeader {
|
||||
f("accept-encoding", "gzip")
|
||||
}
|
||||
if !didUA {
|
||||
f("user-agent", param.DefaultUserAgent)
|
||||
}
|
||||
}
|
||||
|
||||
// Do a first pass over the headers counting bytes to ensure
|
||||
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
|
||||
// separate pass before encoding the headers to prevent
|
||||
// modifying the hpack state.
|
||||
if param.PeerMaxHeaderListSize > 0 {
|
||||
hlSize := uint64(0)
|
||||
enumerateHeaders(func(name, value string) {
|
||||
hf := hpack.HeaderField{Name: name, Value: value}
|
||||
hlSize += uint64(hf.Size())
|
||||
})
|
||||
|
||||
if hlSize > param.PeerMaxHeaderListSize {
|
||||
return res, ErrRequestHeaderListSize
|
||||
}
|
||||
}
|
||||
|
||||
trace := httptrace.ContextClientTrace(ctx)
|
||||
|
||||
// Header list size is ok. Write the headers.
|
||||
enumerateHeaders(func(name, value string) {
|
||||
name, ascii := LowerHeader(name)
|
||||
if !ascii {
|
||||
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
|
||||
// field names have to be ASCII characters (just as in HTTP/1.x).
|
||||
return
|
||||
}
|
||||
|
||||
headerf(name, value)
|
||||
|
||||
if trace != nil && trace.WroteHeaderField != nil {
|
||||
trace.WroteHeaderField(name, []string{value})
|
||||
}
|
||||
})
|
||||
|
||||
res.HasBody = req.ActualContentLength != 0
|
||||
res.HasTrailers = trailers != ""
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
|
||||
// for a request.
|
||||
func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool {
|
||||
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
|
||||
if !disableCompression &&
|
||||
len(header["Accept-Encoding"]) == 0 &&
|
||||
len(header["Range"]) == 0 &&
|
||||
method != "HEAD" {
|
||||
// Request gzip only, not deflate. Deflate is ambiguous and
|
||||
// not as universally supported anyway.
|
||||
// See: https://zlib.net/zlib_faq.html#faq39
|
||||
//
|
||||
// Note that we don't request this for HEAD requests,
|
||||
// due to a bug in nginx:
|
||||
// http://trac.nginx.org/nginx/ticket/358
|
||||
// https://golang.org/issue/5522
|
||||
//
|
||||
// We don't request gzip if the request is for a range, since
|
||||
// auto-decoding a portion of a gzipped document will just fail
|
||||
// anyway. See https://golang.org/issue/8923
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkConnHeaders checks whether req has any invalid connection-level headers.
|
||||
//
|
||||
// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
|
||||
// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
|
||||
//
|
||||
// Certain headers are special-cased as okay but not transmitted later.
|
||||
// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
|
||||
func checkConnHeaders(h map[string][]string) error {
|
||||
if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") {
|
||||
return fmt.Errorf("invalid Upgrade request header: %q", vv)
|
||||
}
|
||||
if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
|
||||
return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
|
||||
}
|
||||
if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
|
||||
return fmt.Errorf("invalid Connection request header: %q", vv)
|
||||
}
|
||||
return nil
|
||||
}
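A sketch of how a test inside the package might exercise the rules above; the header values are made up. "Connection: keep-alive" and "Transfer-Encoding: chunked" are tolerated (and dropped later when encoding), while other connection-specific headers are rejected.

```go
package httpcommon

import "testing"

func TestCheckConnHeadersSketch(t *testing.T) {
	ok := []map[string][]string{
		{"Connection": {"keep-alive"}},     // special-cased, dropped when encoding
		{"Transfer-Encoding": {"chunked"}}, // special-cased, dropped when encoding
	}
	bad := []map[string][]string{
		{"Upgrade": {"websocket"}},              // connection-specific, rejected
		{"Connection": {"close", "keep-alive"}}, // more than one value, rejected
	}
	for _, h := range ok {
		if err := checkConnHeaders(h); err != nil {
			t.Errorf("checkConnHeaders(%v) = %v, want nil", h, err)
		}
	}
	for _, h := range bad {
		if err := checkConnHeaders(h); err == nil {
			t.Errorf("checkConnHeaders(%v) = nil, want error", h)
		}
	}
}
```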
|
||||
|
||||
func commaSeparatedTrailers(trailer map[string][]string) (string, error) {
|
||||
keys := make([]string, 0, len(trailer))
|
||||
for k := range trailer {
|
||||
k = CanonicalHeader(k)
|
||||
switch k {
|
||||
case "Transfer-Encoding", "Trailer", "Content-Length":
|
||||
return "", fmt.Errorf("invalid Trailer key %q", k)
|
||||
}
|
||||
keys = append(keys, k)
|
||||
}
|
||||
if len(keys) > 0 {
|
||||
sort.Strings(keys)
|
||||
return strings.Join(keys, ","), nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// validPseudoPath reports whether v is a valid :path pseudo-header
|
||||
// value. It must be either:
|
||||
//
|
||||
// - a non-empty string starting with '/'
|
||||
// - the string '*', for OPTIONS requests.
|
||||
//
|
||||
// For now this is only used as a quick check for deciding when to clean
|
||||
// up Opaque URLs before sending requests from the Transport.
|
||||
// See golang.org/issue/16847
|
||||
//
|
||||
// We used to enforce that the path also didn't start with "//", but
|
||||
// Google's GFE accepts such paths and Chrome sends them, so ignore
|
||||
// that part of the spec. See golang.org/issue/19103.
|
||||
func validPseudoPath(v string) bool {
|
||||
return (len(v) > 0 && v[0] == '/') || v == "*"
|
||||
}
|
||||
|
||||
func validateHeaders(hdrs map[string][]string) string {
|
||||
for k, vv := range hdrs {
|
||||
if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
|
||||
return fmt.Sprintf("name %q", k)
|
||||
}
|
||||
for _, v := range vv {
|
||||
if !httpguts.ValidHeaderFieldValue(v) {
|
||||
// Don't include the value in the error,
|
||||
// because it may be sensitive.
|
||||
return fmt.Sprintf("value for header %q", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// shouldSendReqContentLength reports whether we should send
|
||||
// a "content-length" request header. This logic is basically a copy of the net/http
|
||||
// transferWriter.shouldSendContentLength.
|
||||
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
|
||||
// -1 means unknown.
|
||||
func shouldSendReqContentLength(method string, contentLength int64) bool {
|
||||
if contentLength > 0 {
|
||||
return true
|
||||
}
|
||||
if contentLength < 0 {
|
||||
return false
|
||||
}
|
||||
// For zero bodies, whether we send a content-length depends on the method.
|
||||
// It also kinda doesn't matter for http2 either way, with END_STREAM.
|
||||
switch method {
|
||||
case "POST", "PUT", "PATCH":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// ServerRequestParam is parameters to NewServerRequest.
|
||||
type ServerRequestParam struct {
|
||||
Method string
|
||||
Scheme, Authority, Path string
|
||||
Protocol string
|
||||
Header map[string][]string
|
||||
}
|
||||
|
||||
// ServerRequestResult is the result of NewServerRequest.
|
||||
type ServerRequestResult struct {
|
||||
// Various http.Request fields.
|
||||
URL *url.URL
|
||||
RequestURI string
|
||||
Trailer map[string][]string
|
||||
|
||||
NeedsContinue bool // client provided an "Expect: 100-continue" header
|
||||
|
||||
// If the request should be rejected, this is a short string suitable for passing
|
||||
// to the http2 package's CountError function.
|
||||
// It might be a bit odd to return errors this way rather than returning an error,
|
||||
// but this ensures we don't forget to include a CountError reason.
|
||||
InvalidReason string
|
||||
}
|
||||
|
||||
func NewServerRequest(rp ServerRequestParam) ServerRequestResult {
|
||||
needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue")
|
||||
if needsContinue {
|
||||
delete(rp.Header, "Expect")
|
||||
}
|
||||
// Merge Cookie headers into one "; "-delimited value.
|
||||
if cookies := rp.Header["Cookie"]; len(cookies) > 1 {
|
||||
rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")}
|
||||
}
|
||||
|
||||
// Setup Trailers
|
||||
var trailer map[string][]string
|
||||
for _, v := range rp.Header["Trailer"] {
|
||||
for _, key := range strings.Split(v, ",") {
|
||||
key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key))
|
||||
switch key {
|
||||
case "Transfer-Encoding", "Trailer", "Content-Length":
|
||||
// Bogus. (copy of http1 rules)
|
||||
// Ignore.
|
||||
default:
|
||||
if trailer == nil {
|
||||
trailer = make(map[string][]string)
|
||||
}
|
||||
trailer[key] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
delete(rp.Header, "Trailer")
|
||||
|
||||
// "':authority' MUST NOT include the deprecated userinfo subcomponent
|
||||
// for "http" or "https" schemed URIs."
|
||||
// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8
|
||||
if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") {
|
||||
return ServerRequestResult{
|
||||
InvalidReason: "userinfo_in_authority",
|
||||
}
|
||||
}
|
||||
|
||||
var url_ *url.URL
|
||||
var requestURI string
|
||||
if rp.Method == "CONNECT" && rp.Protocol == "" {
|
||||
url_ = &url.URL{Host: rp.Authority}
|
||||
requestURI = rp.Authority // mimic HTTP/1 server behavior
|
||||
} else {
|
||||
var err error
|
||||
url_, err = url.ParseRequestURI(rp.Path)
|
||||
if err != nil {
|
||||
return ServerRequestResult{
|
||||
InvalidReason: "bad_path",
|
||||
}
|
||||
}
|
||||
requestURI = rp.Path
|
||||
}
|
||||
|
||||
return ServerRequestResult{
|
||||
URL: url_,
|
||||
NeedsContinue: needsContinue,
|
||||
RequestURI: requestURI,
|
||||
Trailer: trailer,
|
||||
}
|
||||
}
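A sketch of how a test inside the package might drive NewServerRequest; the request values are made up. An "Expect: 100-continue" header is consumed and surfaced as NeedsContinue, and the path is parsed into URL and RequestURI.

```go
package httpcommon

import "testing"

func TestNewServerRequestSketch(t *testing.T) {
	res := NewServerRequest(ServerRequestParam{
		Method:    "GET",
		Scheme:    "https",
		Authority: "example.com",
		Path:      "/search?q=go",
		Header:    map[string][]string{"Expect": {"100-continue"}},
	})
	if res.InvalidReason != "" {
		t.Fatalf("unexpected invalid reason %q", res.InvalidReason)
	}
	if !res.NeedsContinue {
		t.Errorf("expected NeedsContinue to be set")
	}
	if res.RequestURI != "/search?q=go" {
		t.Errorf("RequestURI = %q", res.RequestURI)
	}
}
```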
|
|
@@ -5,15 +5,6 @@

oauth2 package contains a client implementation for OAuth 2.0 spec.

## Installation

~~~~
go get golang.org/x/oauth2
~~~~

Or you can manually git clone the repository to
`$(go env GOPATH)/src/golang.org/x/oauth2`.

See pkg.go.dev for further documentation and examples.

* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)

@@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at
https://github.com/golang/oauth2/issues.

This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html. In particular:
this repository, see https://go.dev/doc/contribute.

The git repository is https://go.googlesource.com/oauth2.

Note:

* Excluding trivial changes, all contributions should be connected to an existing issue.
* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted.
@@ -56,7 +56,7 @@ type Config struct {
// the OAuth flow, after the resource owner's URLs.
RedirectURL string

// Scope specifies optional requested permissions.
// Scopes specifies optional requested permissions.
Scopes []string

// authStyleCache caches which auth style to use when Endpoint.AuthStyle is

@@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) {
if tf.refreshToken != tk.RefreshToken {
tf.refreshToken = tk.RefreshToken
}
return tk, err
return tk, nil
}

// reuseTokenSource is a TokenSource that holds a single token in memory

@@ -356,11 +356,15 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
if src == nil {
return internal.ContextClient(ctx)
}
cc := internal.ContextClient(ctx)
return &http.Client{
Transport: &Transport{
Base: internal.ContextClient(ctx).Transport,
Base: cc.Transport,
Source: ReuseTokenSource(nil, src),
},
CheckRedirect: cc.CheckRedirect,
Jar: cc.Jar,
Timeout: cc.Timeout,
}
}
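With this change, NewClient copies CheckRedirect, Jar, and Timeout from the client stored in the context instead of reusing only its Transport. A hedged sketch of how a caller supplies that base client via oauth2.HTTPClient; the token value is a placeholder.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Base client with settings the returned client should now inherit.
	base := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, base)

	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"}) // placeholder token
	client := oauth2.NewClient(ctx, src)

	// On this branch, the Timeout (and Jar/CheckRedirect, if set)
	// carry over from the base client.
	fmt.Println(client.Timeout) // 10s
}
```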
@@ -21,7 +21,7 @@ const (
//
// A fresh verifier should be generated for each authorization.
// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange
// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
// (or Config.DeviceAccessToken).
func GenerateVerifier() string {
// "RECOMMENDED that the output of a suitable random number generator be

@@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string {
}

// S256ChallengeOption derives a PKCE code challenge derived from verifier with
// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess
// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
// only.
func S256ChallengeOption(verifier string) AuthCodeOption {
return challengeOption{
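The doc fixes above point S256ChallengeOption at Config.AuthCodeURL/Config.DeviceAuth and VerifierOption at Config.Exchange/Config.DeviceAccessToken. A minimal sketch of that PKCE flow; all endpoint, credential, and code strings are placeholders and no real round trip is attempted here.

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:    "client-id",              // placeholder
		RedirectURL: "https://example.com/cb", // placeholder
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example.com/authorize", // placeholder
			TokenURL: "https://auth.example.com/token",     // placeholder
		},
	}

	// One fresh verifier per authorization.
	verifier := oauth2.GenerateVerifier()

	// Send the S256 challenge with the authorization request...
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println("visit:", url)

	// ...and the verifier itself when exchanging the returned code.
	code := "code-from-callback" // placeholder
	tok, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
	_ = tok
	_ = err
}
```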
@@ -0,0 +1,36 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)

package unix

import (
"syscall"
"unsafe"
)

//go:linkname runtime_getAuxv runtime.getAuxv
func runtime_getAuxv() []uintptr

// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs.
// The returned slice is always a fresh copy, owned by the caller.
// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed,
// which happens in some locked-down environments and build modes.
func Auxv() ([][2]uintptr, error) {
vec := runtime_getAuxv()
vecLen := len(vec)

if vecLen == 0 {
return nil, syscall.ENOENT
}

if vecLen%2 != 0 {
return nil, syscall.EINVAL
}

result := make([]uintptr, vecLen)
copy(result, vec)
return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil
}
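Each returned entry is a {key, value} pair keyed by the ELF AT_* tags. A small Linux-only sketch that reads the page size from the vector; AT_PAGESZ is declared locally (value 6, from <elf.h>) rather than assuming the package exports it.

```go
//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// AT_PAGESZ is the auxv tag for the system page size (6 in <elf.h>);
// declared here rather than assumed to be exported by the package.
const AT_PAGESZ = 6

func main() {
	auxv, err := unix.Auxv()
	if err != nil {
		fmt.Println("auxv not available:", err)
		return
	}
	for _, kv := range auxv {
		if kv[0] == AT_PAGESZ {
			fmt.Println("page size:", kv[1])
		}
	}
}
```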
@@ -0,0 +1,13 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)

package unix

import "syscall"

func Auxv() ([][2]uintptr, error) {
return nil, syscall.ENOTSUP
}
@@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
return sendfile(outfd, infd, offset, count)
}

func Dup3(oldfd, newfd, flags int) error {
if oldfd == newfd || flags&^O_CLOEXEC != 0 {
return EINVAL
}
how := F_DUP2FD
if flags&O_CLOEXEC != 0 {
how = F_DUP2FD_CLOEXEC
}
_, err := fcntl(oldfd, how, newfd)
return err
}

/*
* Exposed directly
*/
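Dup3 emulates dup3(2) on Solaris with fcntl's F_DUP2FD/F_DUP2FD_CLOEXEC: it rejects equal descriptors and accepts only O_CLOEXEC in flags. A sketch of typical use; the file path and target descriptor number are arbitrary.

```go
//go:build solaris

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/passwd") // any readable file
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// Duplicate onto descriptor 10 and set close-on-exec atomically.
	const target = 10
	if err := unix.Dup3(int(f.Fd()), target, unix.O_CLOEXEC); err != nil {
		fmt.Println("dup3:", err)
		return
	}
	defer unix.Close(target)

	// Passing equal descriptors is rejected, unlike dup2(2).
	fmt.Println(unix.Dup3(target, target, 0)) // invalid argument (EINVAL)
}
```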
@@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) {
func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) {
return ioctlPtrRet(fd, req, unsafe.Pointer(s))
}

// Ucred Helpers
// See ucred(3c) and getpeerucred(3c)

//sys getpeerucred(fd uintptr, ucred *uintptr) (err error)
//sys ucredFree(ucred uintptr) = ucred_free
//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get
//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid
//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid
//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid
//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid
//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid
//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid
//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid

// Ucred is an opaque struct that holds user credentials.
type Ucred struct {
ucred uintptr
}

// We need to ensure that ucredFree is called on the underlying ucred
// when the Ucred is garbage collected.
func ucredFinalizer(u *Ucred) {
ucredFree(u.ucred)
}

func GetPeerUcred(fd uintptr) (*Ucred, error) {
var ucred uintptr
err := getpeerucred(fd, &ucred)
if err != nil {
return nil, err
}
result := &Ucred{
ucred: ucred,
}
// set the finalizer on the result so that the ucred will be freed
runtime.SetFinalizer(result, ucredFinalizer)
return result, nil
}

func UcredGet(pid int) (*Ucred, error) {
ucred, err := ucredGet(pid)
if err != nil {
return nil, err
}
result := &Ucred{
ucred: ucred,
}
// set the finalizer on the result so that the ucred will be freed
runtime.SetFinalizer(result, ucredFinalizer)
return result, nil
}

func (u *Ucred) Geteuid() int {
defer runtime.KeepAlive(u)
return ucredGeteuid(u.ucred)
}

func (u *Ucred) Getruid() int {
defer runtime.KeepAlive(u)
return ucredGetruid(u.ucred)
}

func (u *Ucred) Getsuid() int {
defer runtime.KeepAlive(u)
return ucredGetsuid(u.ucred)
}

func (u *Ucred) Getegid() int {
defer runtime.KeepAlive(u)
return ucredGetegid(u.ucred)
}

func (u *Ucred) Getrgid() int {
defer runtime.KeepAlive(u)
return ucredGetrgid(u.ucred)
}

func (u *Ucred) Getsgid() int {
defer runtime.KeepAlive(u)
return ucredGetsgid(u.ucred)
}

func (u *Ucred) Getpid() int {
defer runtime.KeepAlive(u)
return ucredGetpid(u.ucred)
}
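GetPeerUcred wraps getpeerucred(3C) and relies on the finalizer above to release the underlying ucred_t. A sketch of querying the peer of a connected UNIX-domain socket; the socket path and helper name are made up.

```go
//go:build solaris

package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

// logPeer reports the peer credentials of a connected UNIX-domain socket.
func logPeer(c *net.UnixConn) error {
	raw, err := c.SyscallConn()
	if err != nil {
		return err
	}
	return raw.Control(func(fd uintptr) {
		uc, err := unix.GetPeerUcred(fd)
		if err != nil {
			fmt.Println("getpeerucred:", err)
			return
		}
		fmt.Printf("peer uid=%d gid=%d pid=%d\n", uc.Geteuid(), uc.Getegid(), uc.Getpid())
	})
}

func main() {
	// Assumes something is already listening on this (hypothetical) path.
	conn, err := net.Dial("unix", "/var/run/example.sock")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()
	_ = logPeer(conn.(*net.UnixConn))
}
```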
@ -1245,6 +1245,7 @@ const (
|
|||
FAN_REPORT_DFID_NAME = 0xc00
|
||||
FAN_REPORT_DFID_NAME_TARGET = 0x1e00
|
||||
FAN_REPORT_DIR_FID = 0x400
|
||||
FAN_REPORT_FD_ERROR = 0x2000
|
||||
FAN_REPORT_FID = 0x200
|
||||
FAN_REPORT_NAME = 0x800
|
||||
FAN_REPORT_PIDFD = 0x80
|
||||
|
@ -1330,8 +1331,10 @@ const (
|
|||
FUSE_SUPER_MAGIC = 0x65735546
|
||||
FUTEXFS_SUPER_MAGIC = 0xbad1dea
|
||||
F_ADD_SEALS = 0x409
|
||||
F_CREATED_QUERY = 0x404
|
||||
F_DUPFD = 0x0
|
||||
F_DUPFD_CLOEXEC = 0x406
|
||||
F_DUPFD_QUERY = 0x403
|
||||
F_EXLCK = 0x4
|
||||
F_GETFD = 0x1
|
||||
F_GETFL = 0x3
|
||||
|
@ -1551,6 +1554,7 @@ const (
|
|||
IPPROTO_ROUTING = 0x2b
|
||||
IPPROTO_RSVP = 0x2e
|
||||
IPPROTO_SCTP = 0x84
|
||||
IPPROTO_SMC = 0x100
|
||||
IPPROTO_TCP = 0x6
|
||||
IPPROTO_TP = 0x1d
|
||||
IPPROTO_UDP = 0x11
|
||||
|
@ -1623,6 +1627,8 @@ const (
|
|||
IPV6_UNICAST_IF = 0x4c
|
||||
IPV6_USER_FLOW = 0xe
|
||||
IPV6_V6ONLY = 0x1a
|
||||
IPV6_VERSION = 0x60
|
||||
IPV6_VERSION_MASK = 0xf0
|
||||
IPV6_XFRM_POLICY = 0x23
|
||||
IP_ADD_MEMBERSHIP = 0x23
|
||||
IP_ADD_SOURCE_MEMBERSHIP = 0x27
|
||||
|
@ -1867,6 +1873,7 @@ const (
|
|||
MADV_UNMERGEABLE = 0xd
|
||||
MADV_WILLNEED = 0x3
|
||||
MADV_WIPEONFORK = 0x12
|
||||
MAP_DROPPABLE = 0x8
|
||||
MAP_FILE = 0x0
|
||||
MAP_FIXED = 0x10
|
||||
MAP_FIXED_NOREPLACE = 0x100000
|
||||
|
@ -1967,6 +1974,7 @@ const (
|
|||
MSG_PEEK = 0x2
|
||||
MSG_PROXY = 0x10
|
||||
MSG_RST = 0x1000
|
||||
MSG_SOCK_DEVMEM = 0x2000000
|
||||
MSG_SYN = 0x400
|
||||
MSG_TRUNC = 0x20
|
||||
MSG_TRYHARD = 0x4
|
||||
|
@ -2083,6 +2091,7 @@ const (
|
|||
NFC_ATR_REQ_MAXSIZE = 0x40
|
||||
NFC_ATR_RES_GB_MAXSIZE = 0x2f
|
||||
NFC_ATR_RES_MAXSIZE = 0x40
|
||||
NFC_ATS_MAXSIZE = 0x14
|
||||
NFC_COMM_ACTIVE = 0x0
|
||||
NFC_COMM_PASSIVE = 0x1
|
||||
NFC_DEVICE_NAME_MAXSIZE = 0x8
|
||||
|
@ -2163,6 +2172,7 @@ const (
|
|||
NFNL_SUBSYS_QUEUE = 0x3
|
||||
NFNL_SUBSYS_ULOG = 0x4
|
||||
NFS_SUPER_MAGIC = 0x6969
|
||||
NFT_BITWISE_BOOL = 0x0
|
||||
NFT_CHAIN_FLAGS = 0x7
|
||||
NFT_CHAIN_MAXNAMELEN = 0x100
|
||||
NFT_CT_MAX = 0x17
|
||||
|
@ -2491,6 +2501,7 @@ const (
|
|||
PR_GET_PDEATHSIG = 0x2
|
||||
PR_GET_SECCOMP = 0x15
|
||||
PR_GET_SECUREBITS = 0x1b
|
||||
PR_GET_SHADOW_STACK_STATUS = 0x4a
|
||||
PR_GET_SPECULATION_CTRL = 0x34
|
||||
PR_GET_TAGGED_ADDR_CTRL = 0x38
|
||||
PR_GET_THP_DISABLE = 0x2a
|
||||
|
@ -2499,6 +2510,7 @@ const (
|
|||
PR_GET_TIMING = 0xd
|
||||
PR_GET_TSC = 0x19
|
||||
PR_GET_UNALIGN = 0x5
|
||||
PR_LOCK_SHADOW_STACK_STATUS = 0x4c
|
||||
PR_MCE_KILL = 0x21
|
||||
PR_MCE_KILL_CLEAR = 0x0
|
||||
PR_MCE_KILL_DEFAULT = 0x2
|
||||
|
@ -2525,6 +2537,8 @@ const (
|
|||
PR_PAC_GET_ENABLED_KEYS = 0x3d
|
||||
PR_PAC_RESET_KEYS = 0x36
|
||||
PR_PAC_SET_ENABLED_KEYS = 0x3c
|
||||
PR_PMLEN_MASK = 0x7f000000
|
||||
PR_PMLEN_SHIFT = 0x18
|
||||
PR_PPC_DEXCR_CTRL_CLEAR = 0x4
|
||||
PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10
|
||||
PR_PPC_DEXCR_CTRL_EDITABLE = 0x1
|
||||
|
@ -2592,6 +2606,7 @@ const (
|
|||
PR_SET_PTRACER = 0x59616d61
|
||||
PR_SET_SECCOMP = 0x16
|
||||
PR_SET_SECUREBITS = 0x1c
|
||||
PR_SET_SHADOW_STACK_STATUS = 0x4b
|
||||
PR_SET_SPECULATION_CTRL = 0x35
|
||||
PR_SET_SYSCALL_USER_DISPATCH = 0x3b
|
||||
PR_SET_TAGGED_ADDR_CTRL = 0x37
|
||||
|
@ -2602,6 +2617,9 @@ const (
|
|||
PR_SET_UNALIGN = 0x6
|
||||
PR_SET_VMA = 0x53564d41
|
||||
PR_SET_VMA_ANON_NAME = 0x0
|
||||
PR_SHADOW_STACK_ENABLE = 0x1
|
||||
PR_SHADOW_STACK_PUSH = 0x4
|
||||
PR_SHADOW_STACK_WRITE = 0x2
|
||||
PR_SME_GET_VL = 0x40
|
||||
PR_SME_SET_VL = 0x3f
|
||||
PR_SME_SET_VL_ONEXEC = 0x40000
|
||||
|
@ -2911,7 +2929,6 @@ const (
|
|||
RTM_NEWNEXTHOP = 0x68
|
||||
RTM_NEWNEXTHOPBUCKET = 0x74
|
||||
RTM_NEWNSID = 0x58
|
||||
RTM_NEWNVLAN = 0x70
|
||||
RTM_NEWPREFIX = 0x34
|
||||
RTM_NEWQDISC = 0x24
|
||||
RTM_NEWROUTE = 0x18
|
||||
|
@ -2920,6 +2937,7 @@ const (
|
|||
RTM_NEWTCLASS = 0x28
|
||||
RTM_NEWTFILTER = 0x2c
|
||||
RTM_NEWTUNNEL = 0x78
|
||||
RTM_NEWVLAN = 0x70
|
||||
RTM_NR_FAMILIES = 0x1b
|
||||
RTM_NR_MSGTYPES = 0x6c
|
||||
RTM_SETDCB = 0x4f
|
||||
|
|
|
@ -116,6 +116,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x800
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
|
||||
IPV6_FLOWINFO_MASK = 0xffffff0f
|
||||
IPV6_FLOWLABEL_MASK = 0xffff0f00
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -304,6 +306,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
|
||||
|
|
|
@ -116,6 +116,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x800
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
|
||||
IPV6_FLOWINFO_MASK = 0xffffff0f
|
||||
IPV6_FLOWLABEL_MASK = 0xffff0f00
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -305,6 +307,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
|
||||
|
|
|
@ -115,6 +115,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x800
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
|
||||
IPV6_FLOWINFO_MASK = 0xffffff0f
|
||||
IPV6_FLOWLABEL_MASK = 0xffff0f00
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -310,6 +312,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
|
||||
|
|
|
@ -109,6 +109,7 @@ const (
|
|||
F_SETOWN = 0x8
|
||||
F_UNLCK = 0x2
|
||||
F_WRLCK = 0x1
|
||||
GCS_MAGIC = 0x47435300
|
||||
HIDIOCGRAWINFO = 0x80084803
|
||||
HIDIOCGRDESC = 0x90044802
|
||||
HIDIOCGRDESCSIZE = 0x80044801
|
||||
|
@ -119,6 +120,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x800
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
|
||||
IPV6_FLOWINFO_MASK = 0xffffff0f
|
||||
IPV6_FLOWLABEL_MASK = 0xffff0f00
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -302,6 +305,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
|
||||
|
|
|
@ -116,6 +116,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x800
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
|
||||
IPV6_FLOWINFO_MASK = 0xffffff0f
|
||||
IPV6_FLOWLABEL_MASK = 0xffff0f00
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -297,6 +299,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
|
||||
|
|
|
@ -115,6 +115,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x80
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
|
||||
IPV6_FLOWINFO_MASK = 0xfffffff
|
||||
IPV6_FLOWLABEL_MASK = 0xfffff
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -303,6 +305,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
|
||||
|
|
|
@ -115,6 +115,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x80
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
|
||||
IPV6_FLOWINFO_MASK = 0xfffffff
|
||||
IPV6_FLOWLABEL_MASK = 0xfffff
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -303,6 +305,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
|
||||
|
|
|
@ -115,6 +115,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x80
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
|
||||
IPV6_FLOWINFO_MASK = 0xffffff0f
|
||||
IPV6_FLOWLABEL_MASK = 0xffff0f00
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -303,6 +305,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
|
||||
|
|
|
@ -115,6 +115,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x80
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
|
||||
IPV6_FLOWINFO_MASK = 0xffffff0f
|
||||
IPV6_FLOWLABEL_MASK = 0xffff0f00
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -303,6 +305,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
|
||||
|
|
|
@ -115,6 +115,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x800
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
|
||||
IPV6_FLOWINFO_MASK = 0xfffffff
|
||||
IPV6_FLOWLABEL_MASK = 0xfffff
|
||||
ISIG = 0x80
|
||||
IUCLC = 0x1000
|
||||
IXOFF = 0x400
|
||||
|
@ -358,6 +360,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
|
||||
|
|
|
@ -115,6 +115,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x800
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
|
||||
IPV6_FLOWINFO_MASK = 0xfffffff
|
||||
IPV6_FLOWLABEL_MASK = 0xfffff
|
||||
ISIG = 0x80
|
||||
IUCLC = 0x1000
|
||||
IXOFF = 0x400
|
||||
|
@ -362,6 +364,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
|
||||
|
|
|
@ -115,6 +115,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x800
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
|
||||
IPV6_FLOWINFO_MASK = 0xffffff0f
|
||||
IPV6_FLOWLABEL_MASK = 0xffff0f00
|
||||
ISIG = 0x80
|
||||
IUCLC = 0x1000
|
||||
IXOFF = 0x400
|
||||
|
@ -362,6 +364,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
|
||||
|
|
|
@ -115,6 +115,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x800
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
|
||||
IPV6_FLOWINFO_MASK = 0xffffff0f
|
||||
IPV6_FLOWLABEL_MASK = 0xffff0f00
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -294,6 +296,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
|
||||
|
|
|
@ -115,6 +115,8 @@ const (
|
|||
IN_CLOEXEC = 0x80000
|
||||
IN_NONBLOCK = 0x800
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
|
||||
IPV6_FLOWINFO_MASK = 0xfffffff
|
||||
IPV6_FLOWLABEL_MASK = 0xfffff
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -366,6 +368,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x36
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3a
|
||||
SCM_TIMESTAMPNS = 0x23
|
||||
SCM_TS_OPT_ID = 0x51
|
||||
SCM_TXTIME = 0x3d
|
||||
SCM_WIFI_STATUS = 0x29
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
|
||||
|
|
|
@ -119,6 +119,8 @@ const (
|
|||
IN_CLOEXEC = 0x400000
|
||||
IN_NONBLOCK = 0x4000
|
||||
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
|
||||
IPV6_FLOWINFO_MASK = 0xfffffff
|
||||
IPV6_FLOWLABEL_MASK = 0xfffff
|
||||
ISIG = 0x1
|
||||
IUCLC = 0x200
|
||||
IXOFF = 0x1000
|
||||
|
@ -357,6 +359,7 @@ const (
|
|||
SCM_TIMESTAMPING_OPT_STATS = 0x38
|
||||
SCM_TIMESTAMPING_PKTINFO = 0x3c
|
||||
SCM_TIMESTAMPNS = 0x21
|
||||
SCM_TS_OPT_ID = 0x5a
|
||||
SCM_TXTIME = 0x3f
|
||||
SCM_WIFI_STATUS = 0x25
|
||||
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
|
||||
|
|
|
@ -141,6 +141,16 @@ import (
//go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
//go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so"
//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so"
//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so"
//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so"
//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so"
//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so"
//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so"
//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so"
//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so"
//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so"
//go:cgo_import_dynamic libc_port_create port_create "libc.so"
//go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"

@ -280,6 +290,16 @@ import (
//go:linkname procgetpeername libc_getpeername
//go:linkname procsetsockopt libc_setsockopt
//go:linkname procrecvfrom libc_recvfrom
//go:linkname procgetpeerucred libc_getpeerucred
//go:linkname procucred_get libc_ucred_get
//go:linkname procucred_geteuid libc_ucred_geteuid
//go:linkname procucred_getegid libc_ucred_getegid
//go:linkname procucred_getruid libc_ucred_getruid
//go:linkname procucred_getrgid libc_ucred_getrgid
//go:linkname procucred_getsuid libc_ucred_getsuid
//go:linkname procucred_getsgid libc_ucred_getsgid
//go:linkname procucred_getpid libc_ucred_getpid
//go:linkname procucred_free libc_ucred_free
//go:linkname procport_create libc_port_create
//go:linkname procport_associate libc_port_associate
//go:linkname procport_dissociate libc_port_dissociate

@ -420,6 +440,16 @@ var (
	procgetpeername,
	procsetsockopt,
	procrecvfrom,
	procgetpeerucred,
	procucred_get,
	procucred_geteuid,
	procucred_getegid,
	procucred_getruid,
	procucred_getrgid,
	procucred_getsuid,
	procucred_getsgid,
	procucred_getpid,
	procucred_free,
	procport_create,
	procport_associate,
	procport_dissociate,
@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func getpeerucred(fd uintptr, ucred *uintptr) (err error) {
	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func ucredGet(pid int) (ucred uintptr, err error) {
	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0)
	ucred = uintptr(r0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func ucredGeteuid(ucred uintptr) (uid int) {
	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
	uid = int(r0)
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func ucredGetegid(ucred uintptr) (gid int) {
	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
	gid = int(r0)
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func ucredGetruid(ucred uintptr) (uid int) {
	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
	uid = int(r0)
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func ucredGetrgid(ucred uintptr) (gid int) {
	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
	gid = int(r0)
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func ucredGetsuid(ucred uintptr) (uid int) {
	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
	uid = int(r0)
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func ucredGetsgid(ucred uintptr) (gid int) {
	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
	gid = int(r0)
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func ucredGetpid(ucred uintptr) (pid int) {
	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
	pid = int(r0)
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func ucredFree(ucred uintptr) {
	sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func port_create() (n int, err error) {
	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0)
	n = int(r0)
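The Solaris/illumos additions above follow the package's usual three-part wiring: a //go:cgo_import_dynamic pragma binds the libc symbol, //go:linkname ties it to a proc variable, and a generated stub invokes it through sysvicall6. As a hedged, hypothetical illustration only (peerEUID is an invented name; the exported API this change ultimately feeds may differ), an in-package helper chaining the new stubs could look like this:

```go
package unix

// peerEUID is a hypothetical in-package helper: it asks the kernel for the
// peer credentials of a connected socket and extracts the effective UID.
func peerEUID(fd int) (int, error) {
	var uc uintptr
	// getpeerucred allocates a ucred_t; the caller must release it.
	if err := getpeerucred(uintptr(fd), &uc); err != nil {
		return 0, err
	}
	defer ucredFree(uc)
	return ucredGeteuid(uc), nil
}
```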
@ -458,4 +458,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)

@ -381,4 +381,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)

@ -422,4 +422,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)

@ -325,4 +325,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)

@ -321,4 +321,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)

@ -442,4 +442,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 4460
	SYS_LSM_LIST_MODULES  = 4461
	SYS_MSEAL             = 4462
	SYS_SETXATTRAT        = 4463
	SYS_GETXATTRAT        = 4464
	SYS_LISTXATTRAT       = 4465
	SYS_REMOVEXATTRAT     = 4466
)

@ -372,4 +372,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 5460
	SYS_LSM_LIST_MODULES  = 5461
	SYS_MSEAL             = 5462
	SYS_SETXATTRAT        = 5463
	SYS_GETXATTRAT        = 5464
	SYS_LISTXATTRAT       = 5465
	SYS_REMOVEXATTRAT     = 5466
)

@ -372,4 +372,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 5460
	SYS_LSM_LIST_MODULES  = 5461
	SYS_MSEAL             = 5462
	SYS_SETXATTRAT        = 5463
	SYS_GETXATTRAT        = 5464
	SYS_LISTXATTRAT       = 5465
	SYS_REMOVEXATTRAT     = 5466
)

@ -442,4 +442,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 4460
	SYS_LSM_LIST_MODULES  = 4461
	SYS_MSEAL             = 4462
	SYS_SETXATTRAT        = 4463
	SYS_GETXATTRAT        = 4464
	SYS_LISTXATTRAT       = 4465
	SYS_REMOVEXATTRAT     = 4466
)

@ -449,4 +449,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)

@ -421,4 +421,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)

@ -421,4 +421,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)

@ -326,4 +326,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)

@ -387,4 +387,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)

@ -400,4 +400,8 @@ const (
	SYS_LSM_SET_SELF_ATTR = 460
	SYS_LSM_LIST_MODULES  = 461
	SYS_MSEAL             = 462
	SYS_SETXATTRAT        = 463
	SYS_GETXATTRAT        = 464
	SYS_LISTXATTRAT       = 465
	SYS_REMOVEXATTRAT     = 466
)
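All of these zsysnum hunks register the same new syscall numbers (mseal and the *xattrat family), offset per ABI (4xxx for mips O32, 5xxx for mips64 N64). The sketch below is not part of the diff: it assumes a Linux 6.10+ kernel and shows one plausible way to invoke the newly numbered SYS_MSEAL directly until a typed wrapper exists.

```go
package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// sealMapping marks a mapping as sealed so its protections and layout can no
// longer be changed. Sketch only: requires a kernel implementing mseal(2);
// the flags argument must currently be zero.
func sealMapping(b []byte) error {
	_, _, errno := unix.Syscall(unix.SYS_MSEAL,
		uintptr(unsafe.Pointer(&b[0])), // page-aligned start of the mapping
		uintptr(len(b)),                // length in bytes
		0)                              // flags
	if errno != 0 {
		return errno
	}
	return nil
}

func main() {
	// Map one anonymous page, then seal it.
	page, err := unix.Mmap(-1, 0, 4096, unix.PROT_READ, unix.MAP_PRIVATE|unix.MAP_ANON)
	if err != nil {
		panic(err)
	}
	if err := sealMapping(page); err != nil {
		fmt.Println("mseal failed (older kernel?):", err)
	}
}
```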
@ -4747,7 +4747,7 @@ const (
	NL80211_ATTR_MAC_HINT               = 0xc8
	NL80211_ATTR_MAC_MASK               = 0xd7
	NL80211_ATTR_MAX_AP_ASSOC_STA       = 0xca
	NL80211_ATTR_MAX                    = 0x14c
	NL80211_ATTR_MAX                    = 0x14d
	NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
	NL80211_ATTR_MAX_CSA_COUNTERS       = 0xce
	NL80211_ATTR_MAX_MATCH_SETS         = 0x85

@ -5519,7 +5519,7 @@ const (
	NL80211_MNTR_FLAG_CONTROL     = 0x3
	NL80211_MNTR_FLAG_COOK_FRAMES = 0x5
	NL80211_MNTR_FLAG_FCSFAIL     = 0x1
	NL80211_MNTR_FLAG_MAX         = 0x6
	NL80211_MNTR_FLAG_MAX         = 0x7
	NL80211_MNTR_FLAG_OTHER_BSS   = 0x4
	NL80211_MNTR_FLAG_PLCPFAIL    = 0x2
	NL80211_MPATH_FLAG_ACTIVE     = 0x1

@ -6174,3 +6174,5 @@ type SockDiagReq struct {
	Family   uint8
	Protocol uint8
}

const RTM_NEWNVLAN = 0x70
@ -43,8 +43,8 @@ type DLL struct {
// LoadDLL loads DLL file into memory.
//
// Warning: using LoadDLL without an absolute path name is subject to
// DLL preloading attacks. To safely load a system DLL, use LazyDLL
// with System set to true, or use LoadLibraryEx directly.
// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL],
// or use [LoadLibraryEx] directly.
func LoadDLL(name string) (dll *DLL, err error) {
	namep, err := UTF16PtrFromString(name)
	if err != nil {

@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc {
}

// NewLazyDLL creates new LazyDLL associated with DLL file.
//
// Warning: using NewLazyDLL without an absolute path name is subject to
// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL].
func NewLazyDLL(name string) *LazyDLL {
	return &LazyDLL{Name: name}
}

@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) {
	}
	return &DLL{Name: name, Handle: h}, nil
}

type errString string

func (s errString) Error() string { return string(s) }
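The reworked doc comments steer callers toward [NewLazySystemDLL] for operating-system libraries, since a bare name passed to NewLazyDLL is resolved through the normal search path and is therefore exposed to DLL preloading attacks. A short, hedged example of the recommended pattern (kernel32.dll and GetTickCount64 are only illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// NewLazySystemDLL only searches the Windows system directory, which is
	// the safe way to bind an OS-provided DLL by its bare name.
	kernel32 := windows.NewLazySystemDLL("kernel32.dll")
	getTickCount64 := kernel32.NewProc("GetTickCount64")

	// GetTickCount64 cannot fail, so the returned error is ignored here.
	ticks, _, _ := getTickCount64.Call()
	fmt.Println("milliseconds since boot:", ticks)
}
```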
@ -1,4 +1,4 @@
// Copyright 2024 Google LLC
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -1,73 +1,102 @@
|
|||
# How to contribute
|
||||
|
||||
We definitely welcome your patches and contributions to gRPC! Please read the gRPC
|
||||
organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
|
||||
and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
|
||||
We welcome your patches and contributions to gRPC! Please read the gRPC
|
||||
organization's [governance
|
||||
rules](https://github.com/grpc/grpc-community/blob/master/governance.md) before
|
||||
proceeding.
|
||||
|
||||
If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
|
||||
|
||||
## Legal requirements
|
||||
|
||||
In order to protect both you and ourselves, you will need to sign the
|
||||
[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
|
||||
[Contributor License
|
||||
Agreement](https://identity.linuxfoundation.org/projects/cncf). When you create
|
||||
your first PR, a link will be added as a comment that contains the steps needed
|
||||
to complete this process.
|
||||
|
||||
## Getting Started
|
||||
|
||||
A great way to start is by searching through our open issues. [Unassigned issues
|
||||
labeled as "help
|
||||
wanted"](https://github.com/grpc/grpc-go/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22Status%3A%20Help%20Wanted%22%20no%3Aassignee)
|
||||
are especially nice for first-time contributors, as they should be well-defined
|
||||
problems that already have agreed-upon solutions.
|
||||
|
||||
## Code Style
|
||||
|
||||
We follow [Google's published Go style
|
||||
guide](https://google.github.io/styleguide/go/). Note that there are three
|
||||
primary documents that make up this style guide; please follow them as closely
|
||||
as possible. If a reviewer recommends something that contradicts those
|
||||
guidelines, there may be valid reasons to do so, but it should be rare.
|
||||
|
||||
## Guidelines for Pull Requests
|
||||
How to get your contributions merged smoothly and quickly.
|
||||
|
||||
How to get your contributions merged smoothly and quickly:
|
||||
|
||||
- Create **small PRs** that are narrowly focused on **addressing a single
|
||||
concern**. We often times receive PRs that are trying to fix several things at
|
||||
a time, but only one fix is considered acceptable, nothing gets merged and
|
||||
both author's & review's time is wasted. Create more PRs to address different
|
||||
concerns and everyone will be happy.
|
||||
concern**. We often receive PRs that attempt to fix several things at the same
|
||||
time, and if one part of the PR has a problem, that will hold up the entire
|
||||
PR.
|
||||
|
||||
- If you are searching for features to work on, issues labeled [Status: Help
|
||||
Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
|
||||
is a great place to start. These issues are well-documented and usually can be
|
||||
resolved with a single pull request.
|
||||
- For **speculative changes**, consider opening an issue and discussing it
|
||||
first. If you are suggesting a behavioral or API change, consider starting
|
||||
with a [gRFC proposal](https://github.com/grpc/proposal). Many new features
|
||||
that are not bug fixes will require cross-language agreement.
|
||||
|
||||
- If you are adding a new file, make sure it has the copyright message template
|
||||
at the top as a comment. You can copy over the message from an existing file
|
||||
and update the year.
|
||||
- If you want to fix **formatting or style**, consider whether your changes are
|
||||
an obvious improvement or might be considered a personal preference. If a
|
||||
style change is based on preference, it likely will not be accepted. If it
|
||||
corrects widely agreed-upon anti-patterns, then please do create a PR and
|
||||
explain the benefits of the change.
|
||||
|
||||
- The grpc package should only depend on standard Go packages and a small number
|
||||
of exceptions. If your contribution introduces new dependencies which are NOT
|
||||
in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
|
||||
discussion with gRPC-Go authors and consultants.
|
||||
|
||||
- For speculative changes, consider opening an issue and discussing it first. If
|
||||
you are suggesting a behavioral or API change, consider starting with a [gRFC
|
||||
proposal](https://github.com/grpc/proposal).
|
||||
- For correcting **misspellings**, please be aware that we use some terms that
|
||||
are sometimes flagged by spell checkers. As an example, "if an only if" is
|
||||
often written as "iff". Please do not make spelling correction changes unless
|
||||
you are certain they are misspellings.
|
||||
|
||||
- Provide a good **PR description** as a record of **what** change is being made
|
||||
and **why** it was made. Link to a GitHub issue if it exists.
|
||||
|
||||
- If you want to fix formatting or style, consider whether your changes are an
|
||||
obvious improvement or might be considered a personal preference. If a style
|
||||
change is based on preference, it likely will not be accepted. If it corrects
|
||||
widely agreed-upon anti-patterns, then please do create a PR and explain the
|
||||
benefits of the change.
|
||||
|
||||
- Unless your PR is trivial, you should expect there will be reviewer comments
|
||||
that you'll need to address before merging. We'll mark it as `Status: Requires
|
||||
Reporter Clarification` if we expect you to respond to these comments in a
|
||||
timely manner. If the PR remains inactive for 6 days, it will be marked as
|
||||
`stale` and automatically close 7 days after that if we don't hear back from
|
||||
you.
|
||||
|
||||
- Maintain **clean commit history** and use **meaningful commit messages**. PRs
|
||||
with messy commit history are difficult to review and won't be merged. Use
|
||||
`rebase -i upstream/master` to curate your commit history and/or to bring in
|
||||
latest changes from master (but avoid rebasing in the middle of a code
|
||||
review).
|
||||
|
||||
- Keep your PR up to date with upstream/master (if there are merge conflicts, we
|
||||
can't really merge your change).
|
||||
- Maintain a **clean commit history** and use **meaningful commit messages**.
|
||||
PRs with messy commit histories are difficult to review and won't be merged.
|
||||
Before sending your PR, ensure your changes are based on top of the latest
|
||||
`upstream/master` commits, and avoid rebasing in the middle of a code review.
|
||||
You should **never use `git push -f`** unless absolutely necessary during a
|
||||
review, as it can interfere with GitHub's tracking of comments.
|
||||
|
||||
- **All tests need to be passing** before your change can be merged. We
|
||||
recommend you **run tests locally** before creating your PR to catch breakages
|
||||
early on.
|
||||
- `./scripts/vet.sh` to catch vet errors
|
||||
- `go test -cpu 1,4 -timeout 7m ./...` to run the tests
|
||||
- `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
|
||||
recommend you run tests locally before creating your PR to catch breakages
|
||||
early on:
|
||||
|
||||
- Exceptions to the rules can be made if there's a compelling reason for doing so.
|
||||
- `./scripts/vet.sh` to catch vet errors.
|
||||
- `go test -cpu 1,4 -timeout 7m ./...` to run the tests.
|
||||
- `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode.
|
||||
|
||||
Note that we have a multi-module repo, so `go test` commands may need to be
|
||||
run from the root of each module in order to cause all tests to run.
|
||||
|
||||
*Alternatively*, you may find it easier to push your changes to your fork on
|
||||
GitHub, which will trigger a GitHub Actions run that you can use to verify
|
||||
everything is passing.
|
||||
|
||||
- If you are adding a new file, make sure it has the **copyright message**
|
||||
template at the top as a comment. You can copy the message from an existing
|
||||
file and update the year.
|
||||
|
||||
- The grpc package should only depend on standard Go packages and a small number
|
||||
of exceptions. **If your contribution introduces new dependencies**, you will
|
||||
need a discussion with gRPC-Go maintainers. A GitHub action check will run on
|
||||
every PR, and will flag any transitive dependency changes from any public
|
||||
package.
|
||||
|
||||
- Unless your PR is trivial, you should **expect reviewer comments** that you
|
||||
will need to address before merging. We'll label the PR as `Status: Requires
|
||||
Reporter Clarification` if we expect you to respond to these comments in a
|
||||
timely manner. If the PR remains inactive for 6 days, it will be marked as
|
||||
`stale`, and we will automatically close it after 7 days if we don't hear back
|
||||
from you. Please feel free to ping issues or bugs if you do not get a response
|
||||
within a week.
|
||||
|
||||
- Exceptions to the rules can be made if there's a compelling reason to do so.
|
||||
|
|
|
@ -32,6 +32,7 @@ import "google.golang.org/grpc"
|
|||
- [Low-level technical docs](Documentation) from this repository
|
||||
- [Performance benchmark][]
|
||||
- [Examples](examples)
|
||||
- [Contribution guidelines](CONTRIBUTING.md)
|
||||
|
||||
## FAQ
|
||||
|
||||
|
|
|
@ -129,6 +129,13 @@ type State struct {
|
|||
// brand new implementation of this interface. For the situations like
|
||||
// testing, the new implementation should embed this interface. This allows
|
||||
// gRPC to add new methods to this interface.
|
||||
//
|
||||
// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
|
||||
// by custom load balancing polices. Users should not need their own complete
|
||||
// implementation of this interface -- they should always delegate to a
|
||||
// ClientConn passed to Builder.Build() by embedding it in their
|
||||
// implementations. An embedded ClientConn must never be nil, or runtime panics
|
||||
// will occur.
|
||||
type ClientConn interface {
|
||||
// NewSubConn is called by balancer to create a new SubConn.
|
||||
// It doesn't block and wait for the connections to be established.
|
||||
|
@ -167,6 +174,17 @@ type ClientConn interface {
|
|||
//
|
||||
// Deprecated: Use the Target field in the BuildOptions instead.
|
||||
Target() string
|
||||
|
||||
// MetricsRecorder provides the metrics recorder that balancers can use to
|
||||
// record metrics. Balancer implementations which do not register metrics on
|
||||
// metrics registry and record on them can ignore this method. The returned
|
||||
// MetricsRecorder is guaranteed to never be nil.
|
||||
MetricsRecorder() estats.MetricsRecorder
|
||||
|
||||
// EnforceClientConnEmbedding is included to force implementers to embed
|
||||
// another implementation of this interface, allowing gRPC to add methods
|
||||
// without breaking users.
|
||||
internal.EnforceClientConnEmbedding
|
||||
}
|
||||
|
||||
// BuildOptions contains additional information for Build.
|
||||
|
@ -198,10 +216,6 @@ type BuildOptions struct {
|
|||
// same resolver.Target as passed to the resolver. See the documentation for
|
||||
// the resolver.Target type for details about what it contains.
|
||||
Target resolver.Target
|
||||
// MetricsRecorder is the metrics recorder that balancers can use to record
|
||||
// metrics. Balancer implementations which do not register metrics on
|
||||
// metrics registry and record on them can ignore this field.
|
||||
MetricsRecorder estats.MetricsRecorder
|
||||
}
|
||||
|
||||
// Builder creates a balancer.
|
||||
|
|
|
@ -41,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) ba
|
|||
cc: cc,
|
||||
pickerBuilder: bb.pickerBuilder,
|
||||
|
||||
subConns: resolver.NewAddressMap(),
|
||||
subConns: resolver.NewAddressMapV2[balancer.SubConn](),
|
||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
config: bb.config,
|
||||
|
@ -65,7 +65,7 @@ type baseBalancer struct {
|
|||
csEvltr *balancer.ConnectivityStateEvaluator
|
||||
state connectivity.State
|
||||
|
||||
subConns *resolver.AddressMap
|
||||
subConns *resolver.AddressMapV2[balancer.SubConn]
|
||||
scStates map[balancer.SubConn]connectivity.State
|
||||
picker balancer.Picker
|
||||
config Config
|
||||
|
@ -100,7 +100,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
|||
// Successful resolution; clear resolver error and ensure we return nil.
|
||||
b.resolverErr = nil
|
||||
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
|
||||
addrsSet := resolver.NewAddressMap()
|
||||
addrsSet := resolver.NewAddressMapV2[any]()
|
||||
for _, a := range s.ResolverState.Addresses {
|
||||
addrsSet.Set(a, nil)
|
||||
if _, ok := b.subConns.Get(a); !ok {
|
||||
|
@ -122,8 +122,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
|||
}
|
||||
}
|
||||
for _, a := range b.subConns.Keys() {
|
||||
sci, _ := b.subConns.Get(a)
|
||||
sc := sci.(balancer.SubConn)
|
||||
sc, _ := b.subConns.Get(a)
|
||||
// a was removed by resolver.
|
||||
if _, ok := addrsSet.Get(a); !ok {
|
||||
sc.Shutdown()
|
||||
|
@ -173,8 +172,7 @@ func (b *baseBalancer) regeneratePicker() {
|
|||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for _, addr := range b.subConns.Keys() {
|
||||
sci, _ := b.subConns.Get(addr)
|
||||
sc := sci.(balancer.SubConn)
|
||||
sc, _ := b.subConns.Get(addr)
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[sc] = SubConnInfo{Address: addr}
|
||||
}
|
||||
|
|
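The base-balancer hunks above migrate from the untyped resolver.AddressMap, whose Get returned an any that had to be asserted with sc.(balancer.SubConn), to the generic resolver.NewAddressMapV2[T]. A small, hedged sketch of the resulting pattern, using only the methods visible in this diff (Keys, Get, Delete, SubConn.Shutdown); pruneSubConns is an invented helper name:

```go
package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// pruneSubConns shuts down and removes every SubConn whose address is no
// longer present in the wanted set. Both maps are the generic AddressMapV2,
// so Get returns typed values and no type assertion is needed.
func pruneSubConns(subConns *resolver.AddressMapV2[balancer.SubConn], wanted *resolver.AddressMapV2[bool]) {
	for _, a := range subConns.Keys() {
		if _, ok := wanted.Get(a); ok {
			continue // still referenced by the latest resolver update
		}
		sc, _ := subConns.Get(a)
		sc.Shutdown()
		subConns.Delete(a)
	}
}
```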
vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go (generated, vendored, new file, 356 lines)
@ -0,0 +1,356 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2024 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package endpointsharding implements a load balancing policy that manages
|
||||
// homogeneous child policies each owning a single endpoint.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
package endpointsharding
|
||||
|
||||
import (
|
||||
"errors"
|
||||
rand "math/rand/v2"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/balancer/base"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// ChildState is the balancer state of a child along with the endpoint which
|
||||
// identifies the child balancer.
|
||||
type ChildState struct {
|
||||
Endpoint resolver.Endpoint
|
||||
State balancer.State
|
||||
|
||||
// Balancer exposes only the ExitIdler interface of the child LB policy.
|
||||
// Other methods of the child policy are called only by endpointsharding.
|
||||
Balancer balancer.ExitIdler
|
||||
}
|
||||
|
||||
// Options are the options to configure the behaviour of the
|
||||
// endpointsharding balancer.
|
||||
type Options struct {
|
||||
// DisableAutoReconnect allows the balancer to keep child balancer in the
|
||||
// IDLE state until they are explicitly triggered to exit using the
|
||||
// ChildState obtained from the endpointsharding picker. When set to false,
|
||||
// the endpointsharding balancer will automatically call ExitIdle on child
|
||||
// connections that report IDLE.
|
||||
DisableAutoReconnect bool
|
||||
}
|
||||
|
||||
// ChildBuilderFunc creates a new balancer with the ClientConn. It has the same
|
||||
// type as the balancer.Builder.Build method.
|
||||
type ChildBuilderFunc func(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer
|
||||
|
||||
// NewBalancer returns a load balancing policy that manages homogeneous child
|
||||
// policies each owning a single endpoint. The endpointsharding balancer
|
||||
// forwards the LoadBalancingConfig in ClientConn state updates to its children.
|
||||
func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder ChildBuilderFunc, esOpts Options) balancer.Balancer {
|
||||
es := &endpointSharding{
|
||||
cc: cc,
|
||||
bOpts: opts,
|
||||
esOpts: esOpts,
|
||||
childBuilder: childBuilder,
|
||||
}
|
||||
es.children.Store(resolver.NewEndpointMap[*balancerWrapper]())
|
||||
return es
|
||||
}
|
||||
|
||||
// endpointSharding is a balancer that wraps child balancers. It creates a child
|
||||
// balancer with child config for every unique Endpoint received. It updates the
|
||||
// child states on any update from parent or child.
|
||||
type endpointSharding struct {
|
||||
cc balancer.ClientConn
|
||||
bOpts balancer.BuildOptions
|
||||
esOpts Options
|
||||
childBuilder ChildBuilderFunc
|
||||
|
||||
// childMu synchronizes calls to any single child. It must be held for all
|
||||
// calls into a child. To avoid deadlocks, do not acquire childMu while
|
||||
// holding mu.
|
||||
childMu sync.Mutex
|
||||
children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]]
|
||||
|
||||
// inhibitChildUpdates is set during UpdateClientConnState/ResolverError
|
||||
// calls (calls to children will each produce an update, only want one
|
||||
// update).
|
||||
inhibitChildUpdates atomic.Bool
|
||||
|
||||
// mu synchronizes access to the state stored in balancerWrappers in the
|
||||
// children field. mu must not be held during calls into a child since
|
||||
// synchronous calls back from the child may require taking mu, causing a
|
||||
// deadlock. To avoid deadlocks, do not acquire childMu while holding mu.
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// UpdateClientConnState creates a child for new endpoints and deletes children
|
||||
// for endpoints that are no longer present. It also updates all the children,
|
||||
// and sends a single synchronous update of the childrens' aggregated state at
|
||||
// the end of the UpdateClientConnState operation. If any endpoint has no
|
||||
// addresses it will ignore that endpoint. Otherwise, returns first error found
|
||||
// from a child, but fully processes the new update.
|
||||
func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error {
|
||||
es.childMu.Lock()
|
||||
defer es.childMu.Unlock()
|
||||
|
||||
es.inhibitChildUpdates.Store(true)
|
||||
defer func() {
|
||||
es.inhibitChildUpdates.Store(false)
|
||||
es.updateState()
|
||||
}()
|
||||
var ret error
|
||||
|
||||
children := es.children.Load()
|
||||
newChildren := resolver.NewEndpointMap[*balancerWrapper]()
|
||||
|
||||
// Update/Create new children.
|
||||
for _, endpoint := range state.ResolverState.Endpoints {
|
||||
if _, ok := newChildren.Get(endpoint); ok {
|
||||
// Endpoint child was already created, continue to avoid duplicate
|
||||
// update.
|
||||
continue
|
||||
}
|
||||
childBalancer, ok := children.Get(endpoint)
|
||||
if ok {
|
||||
// Endpoint attributes may have changed, update the stored endpoint.
|
||||
es.mu.Lock()
|
||||
childBalancer.childState.Endpoint = endpoint
|
||||
es.mu.Unlock()
|
||||
} else {
|
||||
childBalancer = &balancerWrapper{
|
||||
childState: ChildState{Endpoint: endpoint},
|
||||
ClientConn: es.cc,
|
||||
es: es,
|
||||
}
|
||||
childBalancer.childState.Balancer = childBalancer
|
||||
childBalancer.child = es.childBuilder(childBalancer, es.bOpts)
|
||||
}
|
||||
newChildren.Set(endpoint, childBalancer)
|
||||
if err := childBalancer.updateClientConnStateLocked(balancer.ClientConnState{
|
||||
BalancerConfig: state.BalancerConfig,
|
||||
ResolverState: resolver.State{
|
||||
Endpoints: []resolver.Endpoint{endpoint},
|
||||
Attributes: state.ResolverState.Attributes,
|
||||
},
|
||||
}); err != nil && ret == nil {
|
||||
// Return first error found, and always commit full processing of
|
||||
// updating children. If desired to process more specific errors
|
||||
// across all endpoints, caller should make these specific
|
||||
// validations, this is a current limitation for simplicity sake.
|
||||
ret = err
|
||||
}
|
||||
}
|
||||
// Delete old children that are no longer present.
|
||||
for _, e := range children.Keys() {
|
||||
child, _ := children.Get(e)
|
||||
if _, ok := newChildren.Get(e); !ok {
|
||||
child.closeLocked()
|
||||
}
|
||||
}
|
||||
es.children.Store(newChildren)
|
||||
if newChildren.Len() == 0 {
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// ResolverError forwards the resolver error to all of the endpointSharding's
|
||||
// children and sends a single synchronous update of the childStates at the end
|
||||
// of the ResolverError operation.
|
||||
func (es *endpointSharding) ResolverError(err error) {
|
||||
es.childMu.Lock()
|
||||
defer es.childMu.Unlock()
|
||||
es.inhibitChildUpdates.Store(true)
|
||||
defer func() {
|
||||
es.inhibitChildUpdates.Store(false)
|
||||
es.updateState()
|
||||
}()
|
||||
children := es.children.Load()
|
||||
for _, child := range children.Values() {
|
||||
child.resolverErrorLocked(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
|
||||
// UpdateSubConnState is deprecated.
|
||||
}
|
||||
|
||||
func (es *endpointSharding) Close() {
|
||||
es.childMu.Lock()
|
||||
defer es.childMu.Unlock()
|
||||
children := es.children.Load()
|
||||
for _, child := range children.Values() {
|
||||
child.closeLocked()
|
||||
}
|
||||
}
|
||||
|
||||
// updateState updates this component's state. It sends the aggregated state,
|
||||
// and a picker with round robin behavior with all the child states present if
|
||||
// needed.
|
||||
func (es *endpointSharding) updateState() {
|
||||
if es.inhibitChildUpdates.Load() {
|
||||
return
|
||||
}
|
||||
var readyPickers, connectingPickers, idlePickers, transientFailurePickers []balancer.Picker
|
||||
|
||||
es.mu.Lock()
|
||||
defer es.mu.Unlock()
|
||||
|
||||
children := es.children.Load()
|
||||
childStates := make([]ChildState, 0, children.Len())
|
||||
|
||||
for _, child := range children.Values() {
|
||||
childState := child.childState
|
||||
childStates = append(childStates, childState)
|
||||
childPicker := childState.State.Picker
|
||||
switch childState.State.ConnectivityState {
|
||||
case connectivity.Ready:
|
||||
readyPickers = append(readyPickers, childPicker)
|
||||
case connectivity.Connecting:
|
||||
connectingPickers = append(connectingPickers, childPicker)
|
||||
case connectivity.Idle:
|
||||
idlePickers = append(idlePickers, childPicker)
|
||||
case connectivity.TransientFailure:
|
||||
transientFailurePickers = append(transientFailurePickers, childPicker)
|
||||
// connectivity.Shutdown shouldn't appear.
|
||||
}
|
||||
}
|
||||
|
||||
// Construct the round robin picker based off the aggregated state. Whatever
|
||||
// the aggregated state, use the pickers present that are currently in that
|
||||
// state only.
|
||||
var aggState connectivity.State
|
||||
var pickers []balancer.Picker
|
||||
if len(readyPickers) >= 1 {
|
||||
aggState = connectivity.Ready
|
||||
pickers = readyPickers
|
||||
} else if len(connectingPickers) >= 1 {
|
||||
aggState = connectivity.Connecting
|
||||
pickers = connectingPickers
|
||||
} else if len(idlePickers) >= 1 {
|
||||
aggState = connectivity.Idle
|
||||
pickers = idlePickers
|
||||
} else if len(transientFailurePickers) >= 1 {
|
||||
aggState = connectivity.TransientFailure
|
||||
pickers = transientFailurePickers
|
||||
} else {
|
||||
aggState = connectivity.TransientFailure
|
||||
pickers = []balancer.Picker{base.NewErrPicker(errors.New("no children to pick from"))}
|
||||
} // No children (resolver error before valid update).
|
||||
p := &pickerWithChildStates{
|
||||
pickers: pickers,
|
||||
childStates: childStates,
|
||||
next: uint32(rand.IntN(len(pickers))),
|
||||
}
|
||||
es.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: aggState,
|
||||
Picker: p,
|
||||
})
|
||||
}
|
||||
|
||||
// pickerWithChildStates delegates to the pickers it holds in a round robin
|
||||
// fashion. It also contains the childStates of all the endpointSharding's
|
||||
// children.
|
||||
type pickerWithChildStates struct {
|
||||
pickers []balancer.Picker
|
||||
childStates []ChildState
|
||||
next uint32
|
||||
}
|
||||
|
||||
func (p *pickerWithChildStates) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||
nextIndex := atomic.AddUint32(&p.next, 1)
|
||||
picker := p.pickers[nextIndex%uint32(len(p.pickers))]
|
||||
return picker.Pick(info)
|
||||
}
|
||||
|
||||
// ChildStatesFromPicker returns the state of all the children managed by the
|
||||
// endpoint sharding balancer that created this picker.
|
||||
func ChildStatesFromPicker(picker balancer.Picker) []ChildState {
|
||||
p, ok := picker.(*pickerWithChildStates)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return p.childStates
|
||||
}
|
||||
|
||||
// balancerWrapper is a wrapper of a balancer. It ID's a child balancer by
|
||||
// endpoint, and persists recent child balancer state.
|
||||
type balancerWrapper struct {
|
||||
// The following fields are initialized at build time and read-only after
|
||||
// that and therefore do not need to be guarded by a mutex.
|
||||
|
||||
// child contains the wrapped balancer. Access its methods only through
|
||||
// methods on balancerWrapper to ensure proper synchronization
|
||||
child balancer.Balancer
|
||||
balancer.ClientConn // embed to intercept UpdateState, doesn't deal with SubConns
|
||||
|
||||
es *endpointSharding
|
||||
|
||||
// Access to the following fields is guarded by es.mu.
|
||||
|
||||
childState ChildState
|
||||
isClosed bool
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) UpdateState(state balancer.State) {
|
||||
bw.es.mu.Lock()
|
||||
bw.childState.State = state
|
||||
bw.es.mu.Unlock()
|
||||
if state.ConnectivityState == connectivity.Idle && !bw.es.esOpts.DisableAutoReconnect {
|
||||
bw.ExitIdle()
|
||||
}
|
||||
bw.es.updateState()
|
||||
}
|
||||
|
||||
// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to
|
||||
// avoid deadlocks due to synchronous balancer state updates.
|
||||
func (bw *balancerWrapper) ExitIdle() {
|
||||
if ei, ok := bw.child.(balancer.ExitIdler); ok {
|
||||
go func() {
|
||||
bw.es.childMu.Lock()
|
||||
if !bw.isClosed {
|
||||
ei.ExitIdle()
|
||||
}
|
||||
bw.es.childMu.Unlock()
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// updateClientConnStateLocked delivers the ClientConnState to the child
|
||||
// balancer. Callers must hold the child mutex of the parent endpointsharding
|
||||
// balancer.
|
||||
func (bw *balancerWrapper) updateClientConnStateLocked(ccs balancer.ClientConnState) error {
|
||||
return bw.child.UpdateClientConnState(ccs)
|
||||
}
|
||||
|
||||
// closeLocked closes the child balancer. Callers must hold the child mutex of
|
||||
// the parent endpointsharding balancer.
|
||||
func (bw *balancerWrapper) closeLocked() {
|
||||
bw.child.Close()
|
||||
bw.isClosed = true
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) resolverErrorLocked(err error) {
|
||||
bw.child.ResolverError(err)
|
||||
}
|
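The new endpointsharding package is the building block for the round_robin rewrite further down: a parent policy hands it endpoints, it runs one child policy per endpoint, and its picker exposes the aggregated child states. A hedged sketch of a hypothetical parent policy built only from the APIs shown above (myPolicy and build are invented names, and the Builder registration boilerplate is omitted):

```go
package mypolicy

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/endpointsharding"
	"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
)

// myPolicy delegates per-endpoint connection management to endpointsharding,
// using pick_first as the child policy for every endpoint.
type myPolicy struct {
	balancer.Balancer // the endpointsharding balancer
}

func build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	child := balancer.Get(pickfirstleaf.Name).Build
	return &myPolicy{
		Balancer: endpointsharding.NewBalancer(cc, opts, child, endpointsharding.Options{}),
	}
}

// Later, a custom picker wrapper can inspect child states:
//
//	for _, cs := range endpointsharding.ChildStatesFromPicker(p) {
//	    _ = cs.Endpoint // which endpoint this child owns
//	    _ = cs.State    // its latest balancer.State
//	}
```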
vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go (generated, vendored, 72 changed lines)
@ -54,9 +54,18 @@ func init() {
|
|||
balancer.Register(pickfirstBuilder{})
|
||||
}
|
||||
|
||||
// enableHealthListenerKeyType is a unique key type used in resolver attributes
|
||||
// to indicate whether the health listener usage is enabled.
|
||||
type enableHealthListenerKeyType struct{}
|
||||
type (
|
||||
// enableHealthListenerKeyType is a unique key type used in resolver
|
||||
// attributes to indicate whether the health listener usage is enabled.
|
||||
enableHealthListenerKeyType struct{}
|
||||
// managedByPickfirstKeyType is an attribute key type to inform Outlier
|
||||
// Detection that the generic health listener is being used.
|
||||
// TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when
|
||||
// implementing the dualstack design. This is a hack. Once Dualstack is
|
||||
// completed, outlier detection will stop sending ejection updates through
|
||||
// the connectivity listener.
|
||||
managedByPickfirstKeyType struct{}
|
||||
)
|
||||
|
||||
var (
|
||||
logger = grpclog.Component("pick-first-leaf-lb")
|
||||
|
@ -111,9 +120,9 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions)
|
|||
b := &pickfirstBalancer{
|
||||
cc: cc,
|
||||
target: bo.Target.String(),
|
||||
metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder.
|
||||
metricsRecorder: cc.MetricsRecorder(),
|
||||
|
||||
subConns: resolver.NewAddressMap(),
|
||||
subConns: resolver.NewAddressMapV2[*scData](),
|
||||
state: connectivity.Connecting,
|
||||
cancelConnectionTimer: func() {},
|
||||
}
|
||||
|
@ -140,6 +149,17 @@ func EnableHealthListener(state resolver.State) resolver.State {
|
|||
return state
|
||||
}
|
||||
|
||||
// IsManagedByPickfirst returns whether an address belongs to a SubConn
|
||||
// managed by the pickfirst LB policy.
|
||||
// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable
|
||||
// outlier_detection via the connectivity listener when using pick_first.
|
||||
// Once Dualstack changes are complete, all SubConns will be created by
|
||||
// pick_first and outlier detection will only use the health listener for
|
||||
// ejection. This hack can then be removed.
|
||||
func IsManagedByPickfirst(addr resolver.Address) bool {
|
||||
return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil
|
||||
}
|
||||
|
||||
type pfConfig struct {
|
||||
serviceconfig.LoadBalancingConfig `json:"-"`
|
||||
|
||||
|
@ -166,6 +186,7 @@ type scData struct {
|
|||
}
|
||||
|
||||
func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
|
||||
addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true)
|
||||
sd := &scData{
|
||||
rawConnectivityState: connectivity.Idle,
|
||||
effectiveState: connectivity.Idle,
|
||||
|
@ -199,7 +220,7 @@ type pickfirstBalancer struct {
|
|||
// updates.
|
||||
state connectivity.State
|
||||
// scData for active subconns mapped by address.
|
||||
subConns *resolver.AddressMap
|
||||
subConns *resolver.AddressMapV2[*scData]
|
||||
addressList addressList
|
||||
firstPass bool
|
||||
numTF int
|
||||
|
@ -298,7 +319,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
|
|||
prevAddr := b.addressList.currentAddress()
|
||||
prevSCData, found := b.subConns.Get(prevAddr)
|
||||
prevAddrsCount := b.addressList.size()
|
||||
isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready
|
||||
isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
|
||||
b.addressList.updateAddrs(newAddrs)
|
||||
|
||||
// If the previous ready SubConn exists in new address list,
|
||||
|
@ -360,21 +381,21 @@ func (b *pickfirstBalancer) startFirstPassLocked() {
|
|||
b.numTF = 0
|
||||
// Reset the connection attempt record for existing SubConns.
|
||||
for _, sd := range b.subConns.Values() {
|
||||
sd.(*scData).connectionFailedInFirstPass = false
|
||||
sd.connectionFailedInFirstPass = false
|
||||
}
|
||||
b.requestConnectionLocked()
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) closeSubConnsLocked() {
|
||||
for _, sd := range b.subConns.Values() {
|
||||
sd.(*scData).subConn.Shutdown()
|
||||
sd.subConn.Shutdown()
|
||||
}
|
||||
b.subConns = resolver.NewAddressMap()
|
||||
b.subConns = resolver.NewAddressMapV2[*scData]()
|
||||
}
|
||||
|
||||
// deDupAddresses ensures that each address appears only once in the slice.
|
||||
func deDupAddresses(addrs []resolver.Address) []resolver.Address {
|
||||
seenAddrs := resolver.NewAddressMap()
|
||||
seenAddrs := resolver.NewAddressMapV2[*scData]()
|
||||
retAddrs := []resolver.Address{}
|
||||
|
||||
for _, addr := range addrs {
|
||||
|
@ -460,7 +481,7 @@ func addressFamily(address string) ipAddrFamily {
|
|||
// This ensures that the subchannel map accurately reflects the current set of
|
||||
// addresses received from the name resolver.
|
||||
func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
|
||||
newAddrsMap := resolver.NewAddressMap()
|
||||
newAddrsMap := resolver.NewAddressMapV2[bool]()
|
||||
for _, addr := range newAddrs {
|
||||
newAddrsMap.Set(addr, true)
|
||||
}
|
||||
|
@ -470,7 +491,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address)
|
|||
continue
|
||||
}
|
||||
val, _ := b.subConns.Get(oldAddr)
|
||||
val.(*scData).subConn.Shutdown()
|
||||
val.subConn.Shutdown()
|
||||
b.subConns.Delete(oldAddr)
|
||||
}
|
||||
}
|
||||
|
@ -479,13 +500,12 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address)
|
|||
// becomes ready, which means that all other subConn must be shutdown.
|
||||
func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
|
||||
b.cancelConnectionTimer()
|
||||
for _, v := range b.subConns.Values() {
|
||||
sd := v.(*scData)
|
||||
for _, sd := range b.subConns.Values() {
|
||||
if sd.subConn != selected.subConn {
|
||||
sd.subConn.Shutdown()
|
||||
}
|
||||
}
|
||||
b.subConns = resolver.NewAddressMap()
|
||||
b.subConns = resolver.NewAddressMapV2[*scData]()
|
||||
b.subConns.Set(selected.addr, selected)
|
||||
}
|
||||
|
||||
|
@ -518,18 +538,17 @@ func (b *pickfirstBalancer) requestConnectionLocked() {
|
|||
b.subConns.Set(curAddr, sd)
|
||||
}
|
||||
|
||||
scd := sd.(*scData)
|
||||
switch scd.rawConnectivityState {
|
||||
switch sd.rawConnectivityState {
|
||||
case connectivity.Idle:
|
||||
scd.subConn.Connect()
|
||||
sd.subConn.Connect()
|
||||
b.scheduleNextConnectionLocked()
|
||||
return
|
||||
case connectivity.TransientFailure:
|
||||
// The SubConn is being re-used and failed during a previous pass
|
||||
// over the addressList. It has not completed backoff yet.
|
||||
// Mark it as having failed and try the next address.
|
||||
scd.connectionFailedInFirstPass = true
|
||||
lastErr = scd.lastErr
|
||||
sd.connectionFailedInFirstPass = true
|
||||
lastErr = sd.lastErr
|
||||
continue
|
||||
case connectivity.Connecting:
|
||||
// Wait for the connection attempt to complete or the timer to fire
|
||||
|
@ -537,7 +556,7 @@ func (b *pickfirstBalancer) requestConnectionLocked() {
|
|||
b.scheduleNextConnectionLocked()
|
||||
return
|
||||
default:
|
||||
b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState)
|
||||
b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
|
||||
return
|
||||
|
||||
}
|
||||
|
@ -732,8 +751,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
|
|||
}
|
||||
// Connect() has been called on all the SubConns. The first pass can be
|
||||
// ended if all the SubConns have reported a failure.
|
||||
for _, v := range b.subConns.Values() {
|
||||
sd := v.(*scData)
|
||||
for _, sd := range b.subConns.Values() {
|
||||
if !sd.connectionFailedInFirstPass {
|
||||
return
|
||||
}
|
||||
|
@ -744,8 +762,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
|
|||
Picker: &picker{err: lastErr},
|
||||
})
|
||||
// Start re-connecting all the SubConns that are already in IDLE.
|
||||
for _, v := range b.subConns.Values() {
|
||||
sd := v.(*scData)
|
||||
for _, sd := range b.subConns.Values() {
|
||||
if sd.rawConnectivityState == connectivity.Idle {
|
||||
sd.subConn.Connect()
|
||||
}
|
||||
|
@ -906,6 +923,5 @@ func (al *addressList) hasNext() bool {
|
|||
// fields that are meaningful to the SubConn.
|
||||
func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
|
||||
return a.Addr == b.Addr && a.ServerName == b.ServerName &&
|
||||
a.Attributes.Equal(b.Attributes) &&
|
||||
a.Metadata == b.Metadata
|
||||
a.Attributes.Equal(b.Attributes)
|
||||
}
|
||||
|
|
|
@ -22,12 +22,13 @@
|
|||
package roundrobin
|
||||
|
||||
import (
|
||||
rand "math/rand/v2"
|
||||
"sync/atomic"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/balancer/base"
|
||||
"google.golang.org/grpc/balancer/endpointsharding"
|
||||
"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
internalgrpclog "google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
// Name is the name of round_robin balancer.
|
||||
|
@ -35,47 +36,44 @@ const Name = "round_robin"
|
|||
|
||||
var logger = grpclog.Component("roundrobin")
|
||||
|
||||
// newBuilder creates a new roundrobin balancer builder.
|
||||
func newBuilder() balancer.Builder {
|
||||
return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
|
||||
}
|
||||
|
||||
func init() {
|
||||
balancer.Register(newBuilder())
|
||||
balancer.Register(builder{})
|
||||
}
|
||||
|
||||
type rrPickerBuilder struct{}
|
||||
type builder struct{}
|
||||
|
||||
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
|
||||
logger.Infof("roundrobinPicker: Build called with info: %v", info)
|
||||
if len(info.ReadySCs) == 0 {
|
||||
return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
|
||||
func (bb builder) Name() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
|
||||
childBuilder := balancer.Get(pickfirstleaf.Name).Build
|
||||
bal := &rrBalancer{
|
||||
cc: cc,
|
||||
Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
|
||||
}
|
||||
scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
|
||||
for sc := range info.ReadySCs {
|
||||
scs = append(scs, sc)
|
||||
}
|
||||
return &rrPicker{
|
||||
subConns: scs,
|
||||
// Start at a random index, as the same RR balancer rebuilds a new
|
||||
// picker when SubConn states change, and we don't want to apply excess
|
||||
// load to the first server in the list.
|
||||
next: uint32(rand.IntN(len(scs))),
|
||||
bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal))
|
||||
bal.logger.Infof("Created")
|
||||
return bal
|
||||
}
|
||||
|
||||
type rrBalancer struct {
|
||||
balancer.Balancer
|
||||
cc balancer.ClientConn
|
||||
logger *internalgrpclog.PrefixLogger
|
||||
}
|
||||
|
||||
func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
|
||||
return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
|
||||
// Enable the health listener in pickfirst children for client side health
|
||||
// checks and outlier detection, if configured.
|
||||
ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
|
||||
})
|
||||
}
|
||||
|
||||
func (b *rrBalancer) ExitIdle() {
|
||||
// Should always be ok, as child is endpoint sharding.
|
||||
if ei, ok := b.Balancer.(balancer.ExitIdler); ok {
|
||||
ei.ExitIdle()
|
||||
}
|
||||
}
|
||||
|
||||
type rrPicker struct {
|
||||
// subConns is the snapshot of the roundrobin balancer when this picker was
|
||||
// created. The slice is immutable. Each Get() will do a round robin
|
||||
// selection from it and return the selected SubConn.
|
||||
subConns []balancer.SubConn
|
||||
next uint32
|
||||
}
|
||||
|
||||
func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
|
||||
subConnsLen := uint32(len(p.subConns))
|
||||
nextIndex := atomic.AddUint32(&p.next, 1)
|
||||
|
||||
sc := p.subConns[nextIndex%subConnsLen]
|
||||
return balancer.PickResult{SubConn: sc}, nil
|
||||
}
|
||||
|
|
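After this rewrite, round_robin is a thin wrapper: it delegates per-endpoint connection management to endpointsharding with pick_first children and simply enables their health listeners. Application code selects the policy the same way as before; a minimal, hedged sketch (the target address is illustrative):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("dns:///example.test:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Service config selects the round_robin policy registered above.
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```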
|
@ -44,7 +44,7 @@ import (
// should only use a single address.
//
// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
// by custom load balancing poilices. Users should not need their own complete
// by custom load balancing polices. Users should not need their own complete
// implementation of this interface -- they should always delegate to a SubConn
// returned by ClientConn.NewSubConn() by embedding it in their implementations.
// An embedded SubConn must never be nil, or runtime panics will occur.
|
@ -26,6 +26,7 @@ import (
|
|||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/experimental/stats"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/balancer/gracefulswitch"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
|
@ -34,7 +35,15 @@ import (
|
|||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
|
||||
var (
|
||||
setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
|
||||
// noOpRegisterHealthListenerFn is used when client side health checking is
|
||||
// disabled. It sends a single READY update on the registered listener.
|
||||
noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() {
|
||||
listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
|
||||
return func() {}
|
||||
}
|
||||
)
|
||||
|
||||
// ccBalancerWrapper sits between the ClientConn and the Balancer.
|
||||
//
|
||||
|
@ -51,6 +60,7 @@ var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnSt
|
|||
// It uses the gracefulswitch.Balancer internally to ensure that balancer
|
||||
// switches happen in a graceful manner.
|
||||
type ccBalancerWrapper struct {
|
||||
internal.EnforceClientConnEmbedding
|
||||
// The following fields are initialized when the wrapper is created and are
|
||||
// read-only afterwards, and therefore can be accessed without a mutex.
|
||||
cc *ClientConn
|
||||
|
@ -84,7 +94,6 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
|
|||
CustomUserAgent: cc.dopts.copts.UserAgent,
|
||||
ChannelzParent: cc.channelz,
|
||||
Target: cc.parsedTarget,
|
||||
MetricsRecorder: cc.metricsRecorderList,
|
||||
},
|
||||
serializer: grpcsync.NewCallbackSerializer(ctx),
|
||||
serializerCancel: cancel,
|
||||
|
@ -93,6 +102,10 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
|
|||
return ccb
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder {
|
||||
return ccb.cc.metricsRecorderList
|
||||
}
|
||||
|
||||
// updateClientConnState is invoked by grpc to push a ClientConnState update to
|
||||
// the underlying balancer. This is always executed from the serializer, so
|
||||
// it is safe to call into the balancer here.
|
||||
|
@ -277,10 +290,17 @@ type healthData struct {
|
|||
// to the LB policy. This is stored to avoid sending updates when the
|
||||
// SubConn has already exited connectivity state READY.
|
||||
connectivityState connectivity.State
|
||||
// closeHealthProducer stores function to close the ref counted health
|
||||
// producer. The health producer is automatically closed when the SubConn
|
||||
// state changes.
|
||||
closeHealthProducer func()
|
||||
}
|
||||
|
||||
func newHealthData(s connectivity.State) *healthData {
|
||||
return &healthData{connectivityState: s}
|
||||
return &healthData{
|
||||
connectivityState: s,
|
||||
closeHealthProducer: func() {},
|
||||
}
|
||||
}
|
||||
|
||||
// updateState is invoked by grpc to push a subConn state update to the
|
||||
|
@ -400,7 +420,7 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (
|
|||
}
|
||||
acbw.producersMu.Unlock()
|
||||
}
|
||||
return pData.producer, grpcsync.OnceFunc(unref)
|
||||
return pData.producer, sync.OnceFunc(unref)
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) closeProducers() {
|
||||
|
@@ -413,6 +433,37 @@ func (acbw *acBalancerWrapper) closeProducers() {
	}
}

// healthProducerRegisterFn is a type alias for the health producer's function
// for registering listeners.
type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func()

// healthListenerRegFn returns a function to register a listener for health
// updates. If client side health checks are disabled, the registered listener
// will get a single READY (raw connectivity state) update.
//
// Client side health checking is enabled when all the following
// conditions are satisfied:
//  1. Health checking is not disabled using the dial option.
//  2. The health package is imported.
//  3. The health check config is present in the service config.
func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() {
	if acbw.ccb.cc.dopts.disableHealthCheck {
		return noOpRegisterHealthListenerFn
	}
	regHealthLisFn := internal.RegisterClientHealthCheckListener
	if regHealthLisFn == nil {
		// The health package is not imported.
		return noOpRegisterHealthListenerFn
	}
	cfg := acbw.ac.cc.healthCheckConfig()
	if cfg == nil {
		return noOpRegisterHealthListenerFn
	}
	return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
		return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener)
	}
}
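The three conditions listed above map directly onto application-level configuration. A minimal sketch (not part of this diff; the service name and target are placeholders) of how a client typically satisfies them:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	_ "google.golang.org/grpc/health" // condition 2: import the health package
)

func main() {
	// Condition 3: the health check config is present in the service config.
	serviceConfig := `{"healthCheckConfig": {"serviceName": "example.Service"}}`

	// Condition 1 is satisfied by simply not passing grpc.WithDisableHealthCheck().
	cc, err := grpc.NewClient(
		"dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(serviceConfig),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer cc.Close()
}
```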
|
||||
|
||||
// RegisterHealthListener accepts a health listener from the LB policy. It sends
|
||||
// updates to the health listener as long as the SubConn's connectivity state
|
||||
// doesn't change and a new health listener is not registered. To invalidate
|
||||
|
@ -421,6 +472,7 @@ func (acbw *acBalancerWrapper) closeProducers() {
|
|||
func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) {
|
||||
acbw.healthMu.Lock()
|
||||
defer acbw.healthMu.Unlock()
|
||||
acbw.healthData.closeHealthProducer()
|
||||
// listeners should not be registered when the connectivity state
|
||||
// isn't Ready. This may happen when the balancer registers a listener
|
||||
// after the connectivityState is updated, but before it is notified
|
||||
|
@ -436,6 +488,7 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub
|
|||
return
|
||||
}
|
||||
|
||||
registerFn := acbw.healthListenerRegFn()
|
||||
acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
|
||||
if ctx.Err() != nil || acbw.ccb.balancer == nil {
|
||||
return
|
||||
|
@ -443,10 +496,25 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub
|
|||
// Don't send updates if a new listener is registered.
|
||||
acbw.healthMu.Lock()
|
||||
defer acbw.healthMu.Unlock()
|
||||
curHD := acbw.healthData
|
||||
if curHD != hd {
|
||||
if acbw.healthData != hd {
|
||||
return
|
||||
}
|
||||
listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
|
||||
// Serialize the health updates from the health producer with
|
||||
// other calls into the LB policy.
|
||||
listenerWrapper := func(scs balancer.SubConnState) {
|
||||
acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
|
||||
if ctx.Err() != nil || acbw.ccb.balancer == nil {
|
||||
return
|
||||
}
|
||||
acbw.healthMu.Lock()
|
||||
defer acbw.healthMu.Unlock()
|
||||
if acbw.healthData != hd {
|
||||
return
|
||||
}
|
||||
listener(scs)
|
||||
})
|
||||
}
|
||||
|
||||
hd.closeHealthProducer = registerFn(ctx, listenerWrapper)
|
||||
})
|
||||
}
|
||||

@@ -18,7 +18,7 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.35.1
// 	protoc-gen-go v1.36.6
// 	protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
|
||||
|
||||
|
@ -31,6 +31,7 @@ import (
|
|||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -233,10 +234,7 @@ func (Address_Type) EnumDescriptor() ([]byte, []int) {
|
|||
|
||||
// Log entry we store in binary logs
|
||||
type GrpcLogEntry struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
// The timestamp of the binary log message
|
||||
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
// Uniquely identifies a call. The value must not be 0 in order to disambiguate
|
||||
|
@ -255,7 +253,7 @@ type GrpcLogEntry struct {
|
|||
// The logger uses one of the following fields to record the payload,
|
||||
// according to the type of the log entry.
|
||||
//
|
||||
// Types that are assignable to Payload:
|
||||
// Types that are valid to be assigned to Payload:
|
||||
//
|
||||
// *GrpcLogEntry_ClientHeader
|
||||
// *GrpcLogEntry_ServerHeader
|
||||
|
@ -269,7 +267,9 @@ type GrpcLogEntry struct {
|
|||
// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
|
||||
// the case of trailers-only. On server side, peer is always
|
||||
// logged on EVENT_TYPE_CLIENT_HEADER.
|
||||
Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
|
||||
Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *GrpcLogEntry) Reset() {
|
||||
|
@ -337,37 +337,45 @@ func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
|
|||
return GrpcLogEntry_LOGGER_UNKNOWN
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
func (x *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
|
||||
if x != nil {
|
||||
return x.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
|
||||
if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
|
||||
return x.ClientHeader
|
||||
if x != nil {
|
||||
if x, ok := x.Payload.(*GrpcLogEntry_ClientHeader); ok {
|
||||
return x.ClientHeader
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
|
||||
if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
|
||||
return x.ServerHeader
|
||||
if x != nil {
|
||||
if x, ok := x.Payload.(*GrpcLogEntry_ServerHeader); ok {
|
||||
return x.ServerHeader
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *GrpcLogEntry) GetMessage() *Message {
|
||||
if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
|
||||
return x.Message
|
||||
if x != nil {
|
||||
if x, ok := x.Payload.(*GrpcLogEntry_Message); ok {
|
||||
return x.Message
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *GrpcLogEntry) GetTrailer() *Trailer {
|
||||
if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
|
||||
return x.Trailer
|
||||
if x != nil {
|
||||
if x, ok := x.Payload.(*GrpcLogEntry_Trailer); ok {
|
||||
return x.Trailer
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -416,10 +424,7 @@ func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
|
|||
func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
|
||||
|
||||
type ClientHeader struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
// This contains only the metadata from the application.
|
||||
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||
// The name of the RPC method, which looks something like:
|
||||
|
@ -433,7 +438,9 @@ type ClientHeader struct {
|
|||
// <host> or <host>:<port> .
|
||||
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
|
||||
// the RPC timeout
|
||||
Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
|
||||
Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ClientHeader) Reset() {
|
||||
|
@ -495,12 +502,11 @@ func (x *ClientHeader) GetTimeout() *durationpb.Duration {
|
|||
}
|
||||
|
||||
type ServerHeader struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
// This contains only the metadata from the application.
|
||||
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *ServerHeader) Reset() {
|
||||
|
@ -541,10 +547,7 @@ func (x *ServerHeader) GetMetadata() *Metadata {
|
|||
}
|
||||
|
||||
type Trailer struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
// This contains only the metadata from the application.
|
||||
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||
// The gRPC status code.
|
||||
|
@ -555,6 +558,8 @@ type Trailer struct {
|
|||
// The value of the 'grpc-status-details-bin' metadata key. If
|
||||
// present, this is always an encoded 'google.rpc.Status' message.
|
||||
StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Trailer) Reset() {
|
||||
|
@ -617,15 +622,14 @@ func (x *Trailer) GetStatusDetails() []byte {
|
|||
|
||||
// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
|
||||
type Message struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
// Length of the message. It may not be the same as the length of the
|
||||
// data field, as the logging payload can be truncated or omitted.
|
||||
Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
|
||||
// May be truncated or omitted.
|
||||
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
|
||||
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Message) Reset() {
|
||||
|
@ -694,11 +698,10 @@ func (x *Message) GetData() []byte {
|
|||
// header is just a normal metadata key.
|
||||
// The pair will not count towards the size limit.
|
||||
type Metadata struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Metadata) Reset() {
|
||||
|
@ -740,12 +743,11 @@ func (x *Metadata) GetEntry() []*MetadataEntry {
|
|||
|
||||
// A metadata key value pair
|
||||
type MetadataEntry struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *MetadataEntry) Reset() {
|
||||
|
@ -794,14 +796,13 @@ func (x *MetadataEntry) GetValue() []byte {
|
|||
|
||||
// Address information
|
||||
type Address struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
|
||||
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
|
||||
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
|
||||
// only for TYPE_IPV4 and TYPE_IPV6
|
||||
IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
|
||||
IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *Address) Reset() {
|
||||
|
@ -857,142 +858,77 @@ func (x *Address) GetIpPort() uint32 {
|
|||
|
||||
var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{
|
||||
0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
|
||||
0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67,
|
||||
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
|
||||
0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
|
||||
0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
|
||||
0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
|
||||
0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75,
|
||||
0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63,
|
||||
0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65,
|
||||
0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12,
|
||||
0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e,
|
||||
0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45,
|
||||
0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e,
|
||||
0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
|
||||
0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e,
|
||||
0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46,
|
||||
0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18,
|
||||
0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
|
||||
0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
|
||||
0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
|
||||
0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
|
||||
0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
|
||||
0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00,
|
||||
0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36,
|
||||
0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65,
|
||||
0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
|
||||
0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69,
|
||||
0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b,
|
||||
0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61,
|
||||
0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f,
|
||||
0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70,
|
||||
0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63,
|
||||
0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64,
|
||||
0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09,
|
||||
0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45,
|
||||
0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
|
||||
0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
|
||||
0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12,
|
||||
0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45,
|
||||
0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a,
|
||||
0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45,
|
||||
0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19,
|
||||
0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45,
|
||||
0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45,
|
||||
0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54,
|
||||
0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a,
|
||||
0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
|
||||
0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11,
|
||||
0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45,
|
||||
0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a,
|
||||
0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
|
||||
0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45,
|
||||
0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53,
|
||||
0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f,
|
||||
0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61,
|
||||
0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
|
||||
0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
|
||||
0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b,
|
||||
0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a,
|
||||
0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
|
||||
0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
|
||||
0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
|
||||
0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79,
|
||||
0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
|
||||
0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72,
|
||||
0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
|
||||
0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
|
||||
0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61,
|
||||
0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f,
|
||||
0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12,
|
||||
0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d,
|
||||
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a,
|
||||
0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
|
||||
0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
|
||||
0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
|
||||
0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
|
||||
0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
|
||||
0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
|
||||
0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61,
|
||||
0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a,
|
||||
0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72,
|
||||
0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79,
|
||||
0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07,
|
||||
0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69,
|
||||
0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a,
|
||||
0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
|
||||
0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d,
|
||||
0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a,
|
||||
0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14,
|
||||
0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f,
|
||||
0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
|
||||
0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62,
|
||||
0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69,
|
||||
0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
const file_grpc_binlog_v1_binarylog_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"\x1egrpc/binlog/v1/binarylog.proto\x12\x11grpc.binarylog.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\a\n" +
|
||||
"\fGrpcLogEntry\x128\n" +
|
||||
"\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x17\n" +
|
||||
"\acall_id\x18\x02 \x01(\x04R\x06callId\x125\n" +
|
||||
"\x17sequence_id_within_call\x18\x03 \x01(\x04R\x14sequenceIdWithinCall\x12=\n" +
|
||||
"\x04type\x18\x04 \x01(\x0e2).grpc.binarylog.v1.GrpcLogEntry.EventTypeR\x04type\x12>\n" +
|
||||
"\x06logger\x18\x05 \x01(\x0e2&.grpc.binarylog.v1.GrpcLogEntry.LoggerR\x06logger\x12F\n" +
|
||||
"\rclient_header\x18\x06 \x01(\v2\x1f.grpc.binarylog.v1.ClientHeaderH\x00R\fclientHeader\x12F\n" +
|
||||
"\rserver_header\x18\a \x01(\v2\x1f.grpc.binarylog.v1.ServerHeaderH\x00R\fserverHeader\x126\n" +
|
||||
"\amessage\x18\b \x01(\v2\x1a.grpc.binarylog.v1.MessageH\x00R\amessage\x126\n" +
|
||||
"\atrailer\x18\t \x01(\v2\x1a.grpc.binarylog.v1.TrailerH\x00R\atrailer\x12+\n" +
|
||||
"\x11payload_truncated\x18\n" +
|
||||
" \x01(\bR\x10payloadTruncated\x12.\n" +
|
||||
"\x04peer\x18\v \x01(\v2\x1a.grpc.binarylog.v1.AddressR\x04peer\"\xf5\x01\n" +
|
||||
"\tEventType\x12\x16\n" +
|
||||
"\x12EVENT_TYPE_UNKNOWN\x10\x00\x12\x1c\n" +
|
||||
"\x18EVENT_TYPE_CLIENT_HEADER\x10\x01\x12\x1c\n" +
|
||||
"\x18EVENT_TYPE_SERVER_HEADER\x10\x02\x12\x1d\n" +
|
||||
"\x19EVENT_TYPE_CLIENT_MESSAGE\x10\x03\x12\x1d\n" +
|
||||
"\x19EVENT_TYPE_SERVER_MESSAGE\x10\x04\x12 \n" +
|
||||
"\x1cEVENT_TYPE_CLIENT_HALF_CLOSE\x10\x05\x12\x1d\n" +
|
||||
"\x19EVENT_TYPE_SERVER_TRAILER\x10\x06\x12\x15\n" +
|
||||
"\x11EVENT_TYPE_CANCEL\x10\a\"B\n" +
|
||||
"\x06Logger\x12\x12\n" +
|
||||
"\x0eLOGGER_UNKNOWN\x10\x00\x12\x11\n" +
|
||||
"\rLOGGER_CLIENT\x10\x01\x12\x11\n" +
|
||||
"\rLOGGER_SERVER\x10\x02B\t\n" +
|
||||
"\apayload\"\xbb\x01\n" +
|
||||
"\fClientHeader\x127\n" +
|
||||
"\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" +
|
||||
"\vmethod_name\x18\x02 \x01(\tR\n" +
|
||||
"methodName\x12\x1c\n" +
|
||||
"\tauthority\x18\x03 \x01(\tR\tauthority\x123\n" +
|
||||
"\atimeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\atimeout\"G\n" +
|
||||
"\fServerHeader\x127\n" +
|
||||
"\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\"\xb1\x01\n" +
|
||||
"\aTrailer\x127\n" +
|
||||
"\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" +
|
||||
"\vstatus_code\x18\x02 \x01(\rR\n" +
|
||||
"statusCode\x12%\n" +
|
||||
"\x0estatus_message\x18\x03 \x01(\tR\rstatusMessage\x12%\n" +
|
||||
"\x0estatus_details\x18\x04 \x01(\fR\rstatusDetails\"5\n" +
|
||||
"\aMessage\x12\x16\n" +
|
||||
"\x06length\x18\x01 \x01(\rR\x06length\x12\x12\n" +
|
||||
"\x04data\x18\x02 \x01(\fR\x04data\"B\n" +
|
||||
"\bMetadata\x126\n" +
|
||||
"\x05entry\x18\x01 \x03(\v2 .grpc.binarylog.v1.MetadataEntryR\x05entry\"7\n" +
|
||||
"\rMetadataEntry\x12\x10\n" +
|
||||
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
|
||||
"\x05value\x18\x02 \x01(\fR\x05value\"\xb8\x01\n" +
|
||||
"\aAddress\x123\n" +
|
||||
"\x04type\x18\x01 \x01(\x0e2\x1f.grpc.binarylog.v1.Address.TypeR\x04type\x12\x18\n" +
|
||||
"\aaddress\x18\x02 \x01(\tR\aaddress\x12\x17\n" +
|
||||
"\aip_port\x18\x03 \x01(\rR\x06ipPort\"E\n" +
|
||||
"\x04Type\x12\x10\n" +
|
||||
"\fTYPE_UNKNOWN\x10\x00\x12\r\n" +
|
||||
"\tTYPE_IPV4\x10\x01\x12\r\n" +
|
||||
"\tTYPE_IPV6\x10\x02\x12\r\n" +
|
||||
"\tTYPE_UNIX\x10\x03B\\\n" +
|
||||
"\x14io.grpc.binarylog.v1B\x0eBinaryLogProtoP\x01Z2google.golang.org/grpc/binarylog/grpc_binarylog_v1b\x06proto3"
|
||||
|
||||
var (
	file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
	file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc
	file_grpc_binlog_v1_binarylog_proto_rawDescData []byte
)

func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
	file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() {
		file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData)
		file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)))
	})
	return file_grpc_binlog_v1_binarylog_proto_rawDescData
}
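The regenerated file now stores the raw descriptor as a string constant and converts it to bytes without copying via unsafe. A standalone illustration of that conversion pattern (not taken from this diff; the descriptor prefix is truncated for brevity):

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	const rawDesc = "\n\x1bgrpc/health/v1/health.proto" // truncated example descriptor prefix

	// Zero-copy view of the string's bytes: the slice aliases the string's
	// backing memory and must therefore never be written to.
	b := unsafe.Slice(unsafe.StringData(rawDesc), len(rawDesc))
	fmt.Println(len(b), b[0]) // same length and bytes as rawDesc, without copying
}
```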
|
||||
|
@ -1051,7 +987,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
|
|||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc,
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)),
|
||||
NumEnums: 3,
|
||||
NumMessages: 8,
|
||||
NumExtensions: 0,
|
||||
|
@ -1063,7 +999,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
|
|||
MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes,
|
||||
}.Build()
|
||||
File_grpc_binlog_v1_binarylog_proto = out.File
|
||||
file_grpc_binlog_v1_binarylog_proto_rawDesc = nil
|
||||
file_grpc_binlog_v1_binarylog_proto_goTypes = nil
|
||||
file_grpc_binlog_v1_binarylog_proto_depIdxs = nil
|
||||
}
|
||||
|
|
|
@@ -118,12 +118,26 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires

// NewClient creates a new gRPC "channel" for the target URI provided. No I/O
// is performed. Use of the ClientConn for RPCs will automatically cause it to
// connect. Connect may be used to manually create a connection, but for most
// users this is unnecessary.
// connect. The Connect method may be called to manually create a connection,
// but for most users this should be unnecessary.
//
// The target name syntax is defined in
// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns
// resolver, a "dns:///" prefix should be applied to the target.
// https://github.com/grpc/grpc/blob/master/doc/naming.md. E.g. to use the dns
// name resolver, a "dns:///" prefix may be applied to the target. The default
// name resolver will be used if no scheme is detected, or if the parsed scheme
// is not a registered name resolver. The default resolver is "dns" but can be
// overridden using the resolver package's SetDefaultScheme.
//
// Examples:
//
//   - "foo.googleapis.com:8080"
//   - "dns:///foo.googleapis.com:8080"
//   - "dns:///foo.googleapis.com"
//   - "dns:///10.0.0.213:8080"
//   - "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443"
//   - "dns://8.8.8.8/foo.googleapis.com:8080"
//   - "dns://8.8.8.8/foo.googleapis.com"
//   - "zookeeper://zk.example.com:9900/example_service"
//
// The DialOptions returned by WithBlock, WithTimeout,
// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
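The example targets above can be exercised directly. A minimal sketch (endpoints are placeholders) that prints the canonical form NewClient derives, including the default "dns" scheme applied when no scheme is given:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	for _, target := range []string{
		"foo.googleapis.com:8080",          // no scheme: the default resolver ("dns") is used
		"dns:///foo.googleapis.com:8080",   // explicit dns resolver
		"dns://8.8.8.8/foo.googleapis.com", // dns resolver with a specific authority
	} {
		cc, err := grpc.NewClient(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
		if err != nil {
			log.Fatalf("grpc.NewClient(%q): %v", target, err)
		}
		fmt.Printf("%-35s -> %s\n", target, cc.CanonicalTarget())
		cc.Close()
	}
}
```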
|
||||
|
@ -181,7 +195,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
|
|||
}
|
||||
cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
|
||||
}
|
||||
cc.mkp = cc.dopts.copts.KeepaliveParams
|
||||
cc.keepaliveParams = cc.dopts.copts.KeepaliveParams
|
||||
|
||||
if err = cc.initAuthority(); err != nil {
|
||||
return nil, err
|
||||
|
@@ -225,7 +239,12 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
	// At the end of this method, we kick the channel out of idle, rather than
	// waiting for the first rpc.
	opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...)
	//
	// The WithLocalDNSResolution dial option in grpc.Dial preserves the
	// existing behavior: with the default "passthrough" scheme, hostname
	// resolution is skipped; when "dns" is used, resolution is performed on
	// the client.
	opts = append([]DialOption{withDefaultScheme("passthrough"), WithLocalDNSResolution()}, opts...)
	cc, err := NewClient(target, opts...)
	if err != nil {
		return nil, err
|
||||
|
@ -618,7 +637,7 @@ type ClientConn struct {
|
|||
balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close.
|
||||
sc *ServiceConfig // Latest service config received from the resolver.
|
||||
conns map[*addrConn]struct{} // Set to nil on close.
|
||||
mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
|
||||
keepaliveParams keepalive.ClientParameters // May be updated upon receipt of a GoAway.
|
||||
// firstResolveEvent is used to track whether the name resolver sent us at
|
||||
// least one update. RPCs block on this event. May be accessed without mu
|
||||
// if we know we cannot be asked to enter idle mode while accessing it (e.g.
|
||||
|
@ -670,22 +689,31 @@ func (cc *ClientConn) Connect() {
|
|||
cc.mu.Unlock()
|
||||
}
|
||||
|
||||
// waitForResolvedAddrs blocks until the resolver has provided addresses or the
|
||||
// context expires. Returns nil unless the context expires first; otherwise
|
||||
// returns a status error based on the context.
|
||||
func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
|
||||
// waitForResolvedAddrs blocks until the resolver provides addresses or the
|
||||
// context expires, whichever happens first.
|
||||
//
|
||||
// Error is nil unless the context expires first; otherwise returns a status
|
||||
// error based on the context.
|
||||
//
|
||||
// The returned boolean indicates whether the call blocked. If resolution has
// already happened once before, it returns false without blocking. Otherwise,
// it waits for the resolution and returns true if resolution succeeded, or
// false along with an error if it failed.
|
||||
func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) (bool, error) {
|
||||
// This is on the RPC path, so we use a fast path to avoid the
|
||||
// more-expensive "select" below after the resolver has returned once.
|
||||
if cc.firstResolveEvent.HasFired() {
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
internal.NewStreamWaitingForResolver()
|
||||
select {
|
||||
case <-cc.firstResolveEvent.Done():
|
||||
return nil
|
||||
return true, nil
|
||||
case <-ctx.Done():
|
||||
return status.FromContextError(ctx.Err()).Err()
|
||||
return false, status.FromContextError(ctx.Err()).Err()
|
||||
case <-cc.ctx.Done():
|
||||
return ErrClientConnClosing
|
||||
return false, ErrClientConnClosing
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -867,7 +895,13 @@ func (cc *ClientConn) Target() string {
|
|||
return cc.target
|
||||
}
|
||||
|
||||
// CanonicalTarget returns the canonical target string of the ClientConn.
|
||||
// CanonicalTarget returns the canonical target string used when creating cc.
|
||||
//
|
||||
// This always has the form "<scheme>://[authority]/<endpoint>". For example:
|
||||
//
|
||||
// - "dns:///example.com:42"
|
||||
// - "dns://8.8.8.8/example.com:42"
|
||||
// - "unix:///path/to/socket"
|
||||
func (cc *ClientConn) CanonicalTarget() string {
|
||||
return cc.parsedTarget.String()
|
||||
}
|
||||
|
@@ -1206,12 +1240,11 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
// adjustParams updates parameters used to create transports upon
// receiving a GoAway.
func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
	switch r {
	case transport.GoAwayTooManyPings:
	if r == transport.GoAwayTooManyPings {
		v := 2 * ac.dopts.copts.KeepaliveParams.Time
		ac.cc.mu.Lock()
		if v > ac.cc.mkp.Time {
			ac.cc.mkp.Time = v
		if v > ac.cc.keepaliveParams.Time {
			ac.cc.keepaliveParams.Time = v
		}
		ac.cc.mu.Unlock()
	}
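For context, a hedged sketch (target and durations are placeholders) of the client-side keepalive settings this bookkeeping adjusts; when the server answers with a "too many pings" GoAway, the code above doubles the effective keepalive Time used for subsequent transports on this connection:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	cc, err := grpc.NewClient(
		"dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    30 * time.Second, // ping after 30s without activity
			Timeout: 10 * time.Second, // wait 10s for the ping ack
		}),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer cc.Close()
}
```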
|
||||
|
@ -1307,7 +1340,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c
|
|||
ac.mu.Lock()
|
||||
|
||||
ac.cc.mu.RLock()
|
||||
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
|
||||
ac.dopts.copts.KeepaliveParams = ac.cc.keepaliveParams
|
||||
ac.cc.mu.RUnlock()
|
||||
|
||||
copts := ac.dopts.copts
|
||||
|
|
|
@ -120,6 +120,20 @@ type AuthInfo interface {
|
|||
AuthType() string
|
||||
}
|
||||
|
||||
// AuthorityValidator validates the authority used to override the `:authority`
// header. This is an optional interface that implementations of AuthInfo can
// implement if they support per-RPC authority overrides. It is invoked when the
// application attempts to override the HTTP/2 `:authority` header using the
// CallAuthority call option.
type AuthorityValidator interface {
	// ValidateAuthority checks the authority value used to override the
	// `:authority` header. The authority parameter is the override value
	// provided by the application via the CallAuthority option. This value
	// typically corresponds to the server hostname or endpoint the RPC is
	// targeting. It returns a non-nil error if the validation fails.
	ValidateAuthority(authority string) error
}
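A hedged sketch (not part of this diff) of custom transport credentials opting into per-RPC authority overrides by implementing this optional interface; the type name and allow-list are illustrative only. With such an AuthInfo in place, the experimental CallAuthority call option referenced above would succeed only for permitted values.

```go
package authexample

import (
	"fmt"

	"google.golang.org/grpc/credentials"
)

// allowListAuthInfo is a hypothetical AuthInfo that also opts into per-RPC
// authority overrides by implementing AuthorityValidator.
type allowListAuthInfo struct {
	credentials.CommonAuthInfo
	allowed map[string]bool
}

// AuthType satisfies credentials.AuthInfo.
func (allowListAuthInfo) AuthType() string { return "allowlist" }

// ValidateAuthority satisfies credentials.AuthorityValidator; it is consulted
// when an RPC overrides ":authority" via the CallAuthority call option.
func (a allowListAuthInfo) ValidateAuthority(authority string) error {
	if a.allowed[authority] {
		return nil
	}
	return fmt.Errorf("authority %q is not permitted", authority)
}

// Compile-time check that the optional interface is implemented.
var _ credentials.AuthorityValidator = allowListAuthInfo{}
```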
|
||||
|
||||
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
|
||||
// and the caller should not close rawConn.
|
||||
var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
|
||||
|
@ -207,14 +221,32 @@ type RequestInfo struct {
|
|||
AuthInfo AuthInfo
|
||||
}
|
||||
|
||||
// requestInfoKey is a struct to be used as the key to store RequestInfo in a
// context.
type requestInfoKey struct{}

// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
//
// This API is experimental.
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
	ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
	ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
	return ri, ok
}

// NewContextWithRequestInfo creates a new context from ctx and attaches ri to it.
//
// This RequestInfo will be accessible via RequestInfoFromContext.
//
// Intended to be used from tests for PerRPCCredentials implementations (which
// often need to check the connection's SecurityLevel). Should not be used from
// non-test code: the gRPC client already prepares a context with the correct
// RequestInfo attached when calling PerRPCCredentials.GetRequestMetadata.
//
// This API is experimental.
func NewContextWithRequestInfo(ctx context.Context, ri RequestInfo) context.Context {
	return context.WithValue(ctx, requestInfoKey{}, ri)
}
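Following the intent stated above, a hedged test sketch (staticTokenCreds and all values are hypothetical) showing how a PerRPCCredentials implementation can be exercised without a real connection:

```go
package creds_test

import (
	"context"
	"errors"
	"testing"

	"google.golang.org/grpc/credentials"
)

// staticTokenCreds is a stand-in PerRPCCredentials used only for this sketch.
type staticTokenCreds struct{ token string }

func (c *staticTokenCreds) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
	// A real implementation typically inspects the RequestInfo, e.g. to demand
	// a minimum security level before attaching the token.
	if _, ok := credentials.RequestInfoFromContext(ctx); !ok {
		return nil, errors.New("no RequestInfo in context")
	}
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

func (c *staticTokenCreds) RequireTransportSecurity() bool { return false }

func TestGetRequestMetadata(t *testing.T) {
	ctx := credentials.NewContextWithRequestInfo(context.Background(), credentials.RequestInfo{
		Method:   "/example.Service/Method",
		AuthInfo: credentials.TLSInfo{}, // any AuthInfo; tests often check its SecurityLevel
	})
	md, err := (&staticTokenCreds{token: "abc"}).GetRequestMetadata(ctx, "https://example.com/example.Service")
	if err != nil || md["authorization"] != "Bearer abc" {
		t.Fatalf("GetRequestMetadata = %v, %v", md, err)
	}
}
```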
|
||||
|
||||
// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
|
||||
// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
|
||||
// balancer etc. Individual credential implementations control the actual
|
||||
|
|
|
@ -30,7 +30,7 @@ import (
|
|||
// NewCredentials returns a credentials which disables transport security.
|
||||
//
|
||||
// Note that using this credentials with per-RPC credentials which require
|
||||
// transport security is incompatible and will cause grpc.Dial() to fail.
|
||||
// transport security is incompatible and will cause RPCs to fail.
|
||||
func NewCredentials() credentials.TransportCredentials {
|
||||
return insecureTC{}
|
||||
}
|
||||
|
@ -71,6 +71,12 @@ func (info) AuthType() string {
|
|||
return "insecure"
|
||||
}
|
||||
|
||||
// ValidateAuthority allows any value to be overridden for the :authority
|
||||
// header.
|
||||
func (info) ValidateAuthority(string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// insecureBundle implements an insecure bundle.
|
||||
// An insecure bundle provides a thin wrapper around insecureTC to support
|
||||
// the credentials.Bundle interface.
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
|
@ -32,6 +33,8 @@ import (
|
|||
"google.golang.org/grpc/internal/envconfig"
|
||||
)
|
||||
|
||||
const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434"
|
||||
|
||||
var logger = grpclog.Component("credentials")
|
||||
|
||||
// TLSInfo contains the auth information for a TLS authenticated connection.
|
||||
|
@ -48,6 +51,21 @@ func (t TLSInfo) AuthType() string {
|
|||
return "tls"
|
||||
}
|
||||
|
||||
// ValidateAuthority validates the provided authority being used to override the
|
||||
// :authority header by verifying it against the peer certificates. It returns a
|
||||
// non-nil error if the validation fails.
|
||||
func (t TLSInfo) ValidateAuthority(authority string) error {
|
||||
var errs []error
|
||||
for _, cert := range t.State.PeerCertificates {
|
||||
var err error
|
||||
if err = cert.VerifyHostname(authority); err == nil {
|
||||
return nil
|
||||
}
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...))
|
||||
}
|
||||
|
||||
// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
|
||||
func cipherSuiteLookup(cipherSuiteID uint16) string {
|
||||
for _, s := range tls.CipherSuites() {
|
||||
|
@ -128,7 +146,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
|
|||
if np == "" {
|
||||
if envconfig.EnforceALPNEnabled {
|
||||
conn.Close()
|
||||
return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
|
||||
return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
|
||||
}
|
||||
logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
|
||||
}
|
||||
|
@ -158,7 +176,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
|
|||
if cs.NegotiatedProtocol == "" {
|
||||
if envconfig.EnforceALPNEnabled {
|
||||
conn.Close()
|
||||
return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
|
||||
return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
|
||||
} else if logger.V(2) {
|
||||
logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
|
||||
}
|
||||
|
|
|
@ -73,7 +73,7 @@ type dialOptions struct {
|
|||
chainUnaryInts []UnaryClientInterceptor
|
||||
chainStreamInts []StreamClientInterceptor
|
||||
|
||||
cp Compressor
|
||||
compressorV0 Compressor
|
||||
dc Decompressor
|
||||
bs internalbackoff.Strategy
|
||||
block bool
|
||||
|
@ -94,6 +94,8 @@ type dialOptions struct {
|
|||
idleTimeout time.Duration
|
||||
defaultScheme string
|
||||
maxCallAttempts int
|
||||
enableLocalDNSResolution bool // Specifies if target hostnames should be resolved when proxying is enabled.
|
||||
useProxy bool // Specifies if a server should be connected via proxy.
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
|
@ -256,7 +258,7 @@ func WithCodec(c Codec) DialOption {
|
|||
// Deprecated: use UseCompressor instead. Will be supported throughout 1.x.
|
||||
func WithCompressor(cp Compressor) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.cp = cp
|
||||
o.compressorV0 = cp
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -358,7 +360,7 @@ func WithReturnConnectionError() DialOption {
|
|||
//
|
||||
// Note that using this DialOption with per-RPC credentials (through
|
||||
// WithCredentialsBundle or WithPerRPCCredentials) which require transport
|
||||
// security is incompatible and will cause grpc.Dial() to fail.
|
||||
// security is incompatible and will cause RPCs to fail.
|
||||
//
|
||||
// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
|
||||
// instead. Will be supported throughout 1.x.
|
||||
|
@ -377,7 +379,22 @@ func WithInsecure() DialOption {
|
|||
// later release.
|
||||
func WithNoProxy() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.UseProxy = false
|
||||
o.useProxy = false
|
||||
})
|
||||
}
|
||||
|
||||
// WithLocalDNSResolution forces local DNS name resolution even when a proxy is
// specified in the environment. By default, the server name is provided
// directly to the proxy as part of the CONNECT handshake. This is ignored if
// WithNoProxy is used.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithLocalDNSResolution() DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.enableLocalDNSResolution = true
	})
}
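A minimal usage sketch, assuming an HTTPS_PROXY is configured in the environment; the target is a placeholder. With this option the client resolves the hostname itself and hands the proxy an already-resolved address rather than the hostname in the CONNECT request:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	cc, err := grpc.NewClient(
		"dns:///internal.example.com:443",
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
		grpc.WithLocalDNSResolution(), // experimental, per the notice above
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer cc.Close()
}
```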
|
||||
|
||||
|
@ -428,6 +445,11 @@ func WithTimeout(d time.Duration) DialOption {
|
|||
// returned by f, gRPC checks the error's Temporary() method to decide if it
|
||||
// should try to reconnect to the network address.
|
||||
//
|
||||
// Note that gRPC by default performs name resolution on the target passed to
|
||||
// NewClient. To bypass name resolution and cause the target string to be
|
||||
// passed directly to the dialer here instead, use the "passthrough" resolver
|
||||
// by specifying it in the target string, e.g. "passthrough:target".
|
||||
//
|
||||
// Note: All supported releases of Go (as of December 2023) override the OS
|
||||
// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
|
||||
// with OS defaults for keepalive time and interval, use a net.Dialer that sets
|
||||
|
@ -662,14 +684,15 @@ func defaultDialOptions() dialOptions {
|
|||
copts: transport.ConnectOptions{
|
||||
ReadBufferSize: defaultReadBufSize,
|
||||
WriteBufferSize: defaultWriteBufSize,
|
||||
UseProxy: true,
|
||||
UserAgent: grpcUA,
|
||||
BufferPool: mem.DefaultBufferPool(),
|
||||
},
|
||||
bs: internalbackoff.DefaultExponential,
|
||||
idleTimeout: 30 * time.Minute,
|
||||
defaultScheme: "dns",
|
||||
maxCallAttempts: defaultMaxCallAttempts,
|
||||
bs: internalbackoff.DefaultExponential,
|
||||
idleTimeout: 30 * time.Minute,
|
||||
defaultScheme: "dns",
|
||||
maxCallAttempts: defaultMaxCallAttempts,
|
||||
useProxy: true,
|
||||
enableLocalDNSResolution: false,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -19,6 +19,8 @@
|
|||
// Package stats contains experimental metrics/stats API's.
|
||||
package stats
|
||||
|
||||
import "google.golang.org/grpc/stats"
|
||||
|
||||
// MetricsRecorder records on metrics derived from metric registry.
|
||||
type MetricsRecorder interface {
|
||||
// RecordInt64Count records the measurement alongside labels on the int
|
||||
|
@ -37,3 +39,16 @@ type MetricsRecorder interface {
|
|||
// gauge associated with the provided handle.
|
||||
RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
|
||||
}
|
||||
|
||||
// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
// Metrics will be deleted in a future release.
type Metrics = stats.MetricSet

// Metric was replaced by direct usage of strings.
type Metric = string

// NewMetrics is an experimental legacy alias of the now-stable
// stats.NewMetricSet. NewMetrics will be deleted in a future release.
func NewMetrics(metrics ...Metric) *Metrics {
	return stats.NewMetricSet(metrics...)
}
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.1
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc v5.27.1
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
|
@ -28,6 +28,7 @@ import (
|
|||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
unsafe "unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -90,11 +91,10 @@ func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
|
|||
}
|
||||
|
||||
type HealthCheckRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *HealthCheckRequest) Reset() {
|
||||
|
@ -135,11 +135,10 @@ func (x *HealthCheckRequest) GetService() string {
|
|||
}
|
||||
|
||||
type HealthCheckResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *HealthCheckResponse) Reset() {
|
||||
|
@ -179,76 +178,150 @@ func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
|
|||
return HealthCheckResponse_UNKNOWN
|
||||
}
|
||||
|
||||
type HealthListRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *HealthListRequest) Reset() {
|
||||
*x = HealthListRequest{}
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *HealthListRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HealthListRequest) ProtoMessage() {}
|
||||
|
||||
func (x *HealthListRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[2]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HealthListRequest.ProtoReflect.Descriptor instead.
|
||||
func (*HealthListRequest) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
type HealthListResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
// statuses contains all the services and their respective status.
|
||||
Statuses map[string]*HealthCheckResponse `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *HealthListResponse) Reset() {
|
||||
*x = HealthListResponse{}
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *HealthListResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HealthListResponse) ProtoMessage() {}
|
||||
|
||||
func (x *HealthListResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[3]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HealthListResponse.ProtoReflect.Descriptor instead.
|
||||
func (*HealthListResponse) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse {
|
||||
if x != nil {
|
||||
return x.Statuses
|
||||
}
|
||||
return nil
|
||||
}
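The regenerated health proto adds a List method alongside Check and Watch. A hedged sketch of consuming it from the generated stubs (this assumes the regenerated grpc_health_v1 client exposes a List method, which is not shown in this hunk; target and timeout are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	cc, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// List returns the serving status of every registered service in one call.
	resp, err := healthpb.NewHealthClient(cc).List(ctx, &healthpb.HealthListRequest{})
	if err != nil {
		log.Fatalf("Health.List: %v", err)
	}
	for service, status := range resp.GetStatuses() {
		fmt.Printf("%s: %s\n", service, status.GetStatus())
	}
}
```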
|
||||
|
||||
var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_grpc_health_v1_health_proto_rawDesc = []byte{
	0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31,
	0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67,
	0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a,
	0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
	0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01,
	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01,
	0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
	0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
	0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
	0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
	0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
	0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75,
	0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b,
	0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e,
	0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
	0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05,
	0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
	0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
	0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63,
	0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
	0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52,
	0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68,
	0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
	0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72,
	0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61,
	0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
	0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65,
	0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50,
	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68,
	0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74,
	0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c,
	0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
const file_grpc_health_v1_health_proto_rawDesc = "" +
	"\n" +
	"\x1bgrpc/health/v1/health.proto\x12\x0egrpc.health.v1\".\n" +
	"\x12HealthCheckRequest\x12\x18\n" +
	"\aservice\x18\x01 \x01(\tR\aservice\"\xb1\x01\n" +
	"\x13HealthCheckResponse\x12I\n" +
	"\x06status\x18\x01 \x01(\x0e21.grpc.health.v1.HealthCheckResponse.ServingStatusR\x06status\"O\n" +
	"\rServingStatus\x12\v\n" +
	"\aUNKNOWN\x10\x00\x12\v\n" +
	"\aSERVING\x10\x01\x12\x0f\n" +
	"\vNOT_SERVING\x10\x02\x12\x13\n" +
	"\x0fSERVICE_UNKNOWN\x10\x03\"\x13\n" +
	"\x11HealthListRequest\"\xc4\x01\n" +
	"\x12HealthListResponse\x12L\n" +
	"\bstatuses\x18\x01 \x03(\v20.grpc.health.v1.HealthListResponse.StatusesEntryR\bstatuses\x1a`\n" +
	"\rStatusesEntry\x12\x10\n" +
	"\x03key\x18\x01 \x01(\tR\x03key\x129\n" +
	"\x05value\x18\x02 \x01(\v2#.grpc.health.v1.HealthCheckResponseR\x05value:\x028\x012\xfd\x01\n" +
	"\x06Health\x12P\n" +
	"\x05Check\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse\x12M\n" +
	"\x04List\x12!.grpc.health.v1.HealthListRequest\x1a\".grpc.health.v1.HealthListResponse\x12R\n" +
	"\x05Watch\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse0\x01Bp\n" +
	"\x11io.grpc.health.v1B\vHealthProtoP\x01Z,google.golang.org/grpc/health/grpc_health_v1\xa2\x02\fGrpcHealthV1\xaa\x02\x0eGrpc.Health.V1b\x06proto3"

var (
	file_grpc_health_v1_health_proto_rawDescOnce sync.Once
	file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc
	file_grpc_health_v1_health_proto_rawDescData []byte
)

func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
	file_grpc_health_v1_health_proto_rawDescOnce.Do(func() {
		file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData)
		file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)))
	})
	return file_grpc_health_v1_health_proto_rawDescData
}

var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_grpc_health_v1_health_proto_goTypes = []any{
	(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
	(*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
	(*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
	(*HealthListRequest)(nil), // 3: grpc.health.v1.HealthListRequest
	(*HealthListResponse)(nil), // 4: grpc.health.v1.HealthListResponse
	nil, // 5: grpc.health.v1.HealthListResponse.StatusesEntry
}
var file_grpc_health_v1_health_proto_depIdxs = []int32{
	0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus
	1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
	1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
	2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
	2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
	3, // [3:5] is the sub-list for method output_type
	1, // [1:3] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
	1, // [1:1] is the sub-list for extension extendee
	0, // [0:1] is the sub-list for field type_name
	5, // 1: grpc.health.v1.HealthListResponse.statuses:type_name -> grpc.health.v1.HealthListResponse.StatusesEntry
	2, // 2: grpc.health.v1.HealthListResponse.StatusesEntry.value:type_name -> grpc.health.v1.HealthCheckResponse
	1, // 3: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
	3, // 4: grpc.health.v1.Health.List:input_type -> grpc.health.v1.HealthListRequest
	1, // 5: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
	2, // 6: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
	4, // 7: grpc.health.v1.Health.List:output_type -> grpc.health.v1.HealthListResponse
	2, // 8: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
	6, // [6:9] is the sub-list for method output_type
	3, // [3:6] is the sub-list for method input_type
	3, // [3:3] is the sub-list for extension type_name
	3, // [3:3] is the sub-list for extension extendee
	0, // [0:3] is the sub-list for field type_name
}

func init() { file_grpc_health_v1_health_proto_init() }

@ -260,9 +333,9 @@ func file_grpc_health_v1_health_proto_init() {
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_grpc_health_v1_health_proto_rawDesc,
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)),
			NumEnums: 1,
			NumMessages: 2,
			NumMessages: 5,
			NumExtensions: 0,
			NumServices: 1,
		},
@ -272,7 +345,6 @@ func file_grpc_health_v1_health_proto_init() {
		MessageInfos: file_grpc_health_v1_health_proto_msgTypes,
	}.Build()
	File_grpc_health_v1_health_proto = out.File
	file_grpc_health_v1_health_proto_rawDesc = nil
	file_grpc_health_v1_health_proto_goTypes = nil
	file_grpc_health_v1_health_proto_depIdxs = nil
}

@ -37,6 +37,7 @@ const _ = grpc.SupportPackageIsVersion9

const (
	Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
	Health_List_FullMethodName = "/grpc.health.v1.Health/List"
	Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch"
)

@ -55,9 +56,19 @@ type HealthClient interface {
	//
	// Clients should set a deadline when calling Check, and can declare the
	// server unhealthy if they do not receive a timely response.
	//
	// Check implementations should be idempotent and side effect free.
	Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
	// List provides a non-atomic snapshot of the health of all the available
	// services.
	//
	// The server may respond with a RESOURCE_EXHAUSTED error if too many services
	// exist.
	//
	// Clients should set a deadline when calling List, and can declare the server
	// unhealthy if they do not receive a timely response.
	//
	// Clients should keep in mind that the list of health services exposed by an
	// application can change over the lifetime of the process.
	List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error)
	// Performs a watch for the serving status of the requested service.
	// The server will immediately send back a message indicating the current
	// serving status. It will then subsequently send a new message whenever
@ -94,6 +105,16 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
	return out, nil
}

func (c *healthClient) List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(HealthListResponse)
	err := c.cc.Invoke(ctx, Health_List_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
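
The generated `List` client method above is an ordinary unary stub, so callers use it like any other gRPC method. A minimal sketch of querying it, assuming a hypothetical server at `localhost:50051` reachable over plaintext (both the address and the insecure credentials are assumptions of this example):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Hypothetical target; substitute the real deployment's address and credentials.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()

	// Per the generated doc comment, set a deadline and treat a timeout as unhealthy.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// List returns a non-atomic snapshot of every registered service's status.
	resp, err := healthpb.NewHealthClient(conn).List(ctx, &healthpb.HealthListRequest{})
	if err != nil {
		log.Fatalf("List: %v", err)
	}
	for svc, st := range resp.GetStatuses() {
		fmt.Printf("%q: %s\n", svc, st.GetStatus())
	}
}
```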

func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
@ -128,9 +149,19 @@ type HealthServer interface {
	//
	// Clients should set a deadline when calling Check, and can declare the
	// server unhealthy if they do not receive a timely response.
	//
	// Check implementations should be idempotent and side effect free.
	Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
	// List provides a non-atomic snapshot of the health of all the available
	// services.
	//
	// The server may respond with a RESOURCE_EXHAUSTED error if too many services
	// exist.
	//
	// Clients should set a deadline when calling List, and can declare the server
	// unhealthy if they do not receive a timely response.
	//
	// Clients should keep in mind that the list of health services exposed by an
	// application can change over the lifetime of the process.
	List(context.Context, *HealthListRequest) (*HealthListResponse, error)
	// Performs a watch for the serving status of the requested service.
	// The server will immediately send back a message indicating the current
	// serving status. It will then subsequently send a new message whenever
@ -159,6 +190,9 @@ type UnimplementedHealthServer struct{}
func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
}
func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
}
func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
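
Most applications use the ready-made health server from `google.golang.org/grpc/health` rather than implementing `HealthServer` by hand, but the new `List` contract is simple to satisfy directly: return a map of service names to `HealthCheckResponse`. A rough sketch, embedding `UnimplementedHealthServer` for forward compatibility; the `healthService` type, its `statuses` field, and its locking are assumptions of this example, not part of the generated code:

```go
package healthdemo

import (
	"context"
	"sync"

	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// healthService is a hypothetical HealthServer implementation used only for illustration.
type healthService struct {
	healthpb.UnimplementedHealthServer // keeps the server forward compatible

	mu       sync.RWMutex
	statuses map[string]healthpb.HealthCheckResponse_ServingStatus
}

func (s *healthService) List(ctx context.Context, _ *healthpb.HealthListRequest) (*healthpb.HealthListResponse, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// List is documented as a non-atomic snapshot, so copying the current
	// view under a read lock is sufficient here.
	out := make(map[string]*healthpb.HealthCheckResponse, len(s.statuses))
	for name, st := range s.statuses {
		out[name] = &healthpb.HealthCheckResponse{Status: st}
	}
	return &healthpb.HealthListResponse{Statuses: out}, nil
}
```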
@ -200,6 +234,24 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf
	return interceptor(ctx, in, info, handler)
}

func _Health_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(HealthListRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(HealthServer).List(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server: srv,
		FullMethod: Health_List_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(HealthServer).List(ctx, req.(*HealthListRequest))
	}
	return interceptor(ctx, in, info, handler)
}
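
As the generated handler above shows, a registered unary interceptor receives the decoded request together with `info.FullMethod` set to `Health_List_FullMethodName` and the closure that ultimately calls `HealthServer.List`. A minimal sketch of such an interceptor, assuming simple log output is the desired behaviour (the function name and logging are illustrative, not part of the library):

```go
package healthdemo

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingUnaryInterceptor is an illustrative grpc.UnaryServerInterceptor; it
// runs for every unary call, including Health/List, before the generated
// handler invokes the application's HealthServer.
func loggingUnaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	start := time.Now()
	resp, err := handler(ctx, req) // for Health/List, this calls srv.(HealthServer).List
	log.Printf("%s took %s (err=%v)", info.FullMethod, time.Since(start), err)
	return resp, err
}

// Registration sketch:
//
//	srv := grpc.NewServer(grpc.UnaryInterceptor(loggingUnaryInterceptor))
```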

func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(HealthCheckRequest)
	if err := stream.RecvMsg(m); err != nil {
@ -222,6 +274,10 @@ var Health_ServiceDesc = grpc.ServiceDesc{
			MethodName: "Check",
			Handler: _Health_Check_Handler,
		},
		{
			MethodName: "List",
			Handler: _Health_List_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{

10 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go generated vendored
@ -109,8 +109,9 @@ func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error
		return nil, errBalancerClosed
	}
	bw := &balancerWrapper{
		builder: builder,
		gsb: gsb,
		ClientConn: gsb.cc,
		builder: builder,
		gsb: gsb,
		lastState: balancer.State{
			ConnectivityState: connectivity.Connecting,
			Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
@ -293,6 +294,7 @@ func (gsb *Balancer) Close() {
// State updates from the wrapped balancer can result in invocation of the
// graceful switch logic.
type balancerWrapper struct {
	balancer.ClientConn
	balancer.Balancer
	gsb *Balancer
	builder balancer.Builder
@ -413,7 +415,3 @@ func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver
	bw.gsb.mu.Unlock()
	bw.gsb.cc.UpdateAddresses(sc, addrs)
}

func (bw *balancerWrapper) Target() string {
	return bw.gsb.cc.Target()
}
Some files were not shown because too many files have changed in this diff.