Introducing Conduit, the ultralight service mesh

We’ve built Conduit from the ground up to be the fastest, lightest,
simplest, and most secure service mesh in the world. It features an
incredibly fast and safe data plane written in Rust, a simple yet
powerful control plane written in Go, and a design that’s focused on
performance, security, and usability. Most importantly, Conduit
incorporates the many lessons we’ve learned from over 18 months of
production service mesh experience with Linkerd.

This repository contains a few tightly related components (a rough usage sketch follows the list):
- `proxy` -- an HTTP/2 proxy written in Rust;
- `controller` -- a control plane written in Go with gRPC;
- `web` -- a UI written in React, served by Go.
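To give a sense of how these pieces fit together, here is a rough workflow sketch based on the CLI commands and CI steps included in this commit; the version tag and the app.yml manifest are placeholders, not files in the repo:

    :; conduit install --version=v0.1.0 | kubectl apply -f -   # render and deploy the control plane
    :; conduit inject app.yml | kubectl apply -f -              # add the proxy sidecar to an app manifest
    :; conduit dashboard                                        # open the web UI via `kubectl proxy`
    :; conduit get pods                                         # list pods via the public API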
Commit b104bd0676 by Oliver Gould, 2017-12-05 00:17:02 +00:00
276 changed files with 42,058 additions and 0 deletions

.dockerignore Normal file

@@ -0,0 +1,15 @@
.travis.yml
.git
**/.idea
**/cmake-*
**/CMakeLists.txt
*.iml
bin
**/Dockerfile*
Dockerfile*
**/target
target
!target/docker-build
web/app/node_modules
web/app/dist
vendor

.editorconfig Normal file

@@ -0,0 +1,18 @@
# top-most EditorConfig file
root = true
[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8
indent_style = space
indent_size = 4
# Go code uses tabs. Display with 4 space indentation in editors and on GitHub
# (see https://github.com/isaacs/github/issues/170#issuecomment-150489692).
[*.go]
indent_style = tab
[*.yml]
indent_size = 2

.gcp.json.enc Normal file

Binary file not shown.

.gitattributes vendored Normal file

@@ -0,0 +1,4 @@
# By default, collapse these files in GitHub reviews.
Cargo.lock linguist-generated=true
Gopkg.lock linguist-generated=true
controller/gen/**/*.pb.go linguist-generated=true

.gitignore vendored Normal file

@@ -0,0 +1,13 @@
**/disco
target
tmp.discovery
**/.idea
**/cmake-*
**/CMakeLists.txt
*.iml
vendor
web/web
web/app/node_modules
web/app/dist
.protoc
.gorun

.prometheus.dev.yml Normal file

@@ -0,0 +1,33 @@
# used in docker-compose configurations
global:
scrape_interval: 10s
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
- job_name: 'web'
static_configs:
- targets: ['web:9994']
- job_name: 'public-api'
static_configs:
- targets: ['public-api:9995']
- job_name: 'proxy-api'
static_configs:
- targets: ['proxy-api:9996']
- job_name: 'telemetry'
static_configs:
- targets: ['telemetry:9997']
- job_name: 'tap'
static_configs:
- targets: ['tap:9998']
- job_name: 'destination'
static_configs:
- targets: ['destination:9999']

.rustfmt.toml Normal file

@@ -0,0 +1,84 @@
# https://github.com/rust-lang-nursery/rustfmt/blob/master/Configurations.md
verbose = false
disable_all_formatting = false
skip_children = false
max_width = 100
error_on_line_overflow = false
error_on_line_overflow_comments = false
tab_spaces = 4
fn_call_width = 60
struct_lit_width = 18
struct_variant_width = 35
force_explicit_abi = true
newline_style = "Unix"
fn_brace_style = "SameLineWhere"
item_brace_style = "SameLineWhere"
control_style = "Rfc"
control_brace_style = "AlwaysSameLine"
impl_empty_single_line = true
trailing_comma = "Vertical"
trailing_semicolon = true
fn_empty_single_line = true
fn_single_line = false
fn_return_indent = "WithArgs"
fn_args_paren_newline = false
fn_args_density = "Tall"
fn_args_layout = "Block"
array_layout = "Block"
array_width = 60
array_horizontal_layout_threshold = 0
type_punctuation_density = "Wide"
where_style = "Rfc"
where_density = "CompressedIfEmpty"
where_layout = "Vertical"
where_pred_indent = "Visual"
generics_indent = "Block"
struct_lit_style = "Block"
struct_lit_multiline_style = "ForceMulti"
fn_call_style = "Block"
report_todo = "Never"
report_fixme = "Never"
chain_indent = "Block"
chain_one_line_max = 60
chain_split_single_child = false
imports_indent = "Block"
imports_layout = "HorizontalVertical"
reorder_extern_crates = true
reorder_extern_crates_in_group = true
reorder_imports = true
reorder_imports_in_group = true
reorder_imported_names = true
single_line_if_else_max_width = 50
format_strings = true
force_format_strings = false
take_source_hints = false
hard_tabs = false
wrap_comments = false
comment_width = 80
normalize_comments = false
wrap_match_arms = true
match_block_trailing_comma = true
indent_match_arms = true
match_pattern_separator_break_point = "Back"
closure_block_indent_threshold = 0
space_before_type_annotation = false
space_after_type_annotation_colon = true
space_before_struct_lit_field_colon = false
space_after_struct_lit_field_colon = true
space_before_bound = false
space_after_bound_colon = true
spaces_around_ranges = false
spaces_within_angle_brackets = false
spaces_within_square_brackets = false
spaces_within_parens = false
use_try_shorthand = true
write_mode = "Overwrite"
condense_wildcard_suffixes = false
combine_control_expr = true
struct_field_align_threshold = 0
remove_blank_lines_at_start_or_end_of_block = true
attributes_on_same_line_as_field = true
attributes_on_same_line_as_variant = true
multiline_closure_forces_block = false
multiline_match_arm_forces_block = false
merge_derives = true

.travis.yml Normal file

@@ -0,0 +1,109 @@
---
dist: trusty
sudo: false
# We do not test pushes to branches, since they are redundant with the pull_request build
# for each branch. Take that, Big CI!
branches:
only:
- master
stages:
- name: test
- name: docker-deploy
if: branch = master AND type != pull_request
jobs:
include:
# Compile the application and run tests.
- stage: test
language: rust
rust: stable
cache:
cargo: true
script:
- cargo check
- cargo test
- language: go
go: 1.9
go_import_path: github.com/runconduit/conduit
cache:
directories:
- vendor
before_install:
- go get -u github.com/golang/dep/cmd/dep
install:
- dep ensure
script:
# TODO decide whether protoc should be committed or not. If so, we shouldn't do
# this or we should error if it dirties the repo.
- ./bin/protoc-go.sh
- go test ./... --run "^(integration_test)"
# Push container images to Google Container Registry.
- stage: docker-deploy
language: generic
services:
- docker
cache:
directories:
- "$HOME/google-cloud-sdk/"
before_install:
- |
# Install docker.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update
sudo apt-get -y install docker-ce
- docker version
- |
# Install gcloud and kubectl.
dir="${CLOUDSDK_INSTALL_DIR:-${HOME}}/google-cloud-sdk"
export CLOUDSDK_CORE_DISABLE_PROMPTS=1
if [ -d "$dir/bin" ]; then
. "$dir/path.bash.inc"
gcloud components update
else
rm -rf "$dir"
curl https://sdk.cloud.google.com | bash
. "$dir/path.bash.inc"
fi
gcloud components install kubectl
- |
# Configure gcloud with a service account and a k8s context.
openssl aes-256-cbc \
-K "$encrypted_72832b647e2b_key" -iv "$encrypted_72832b647e2b_iv" \
-in .gcp.json.enc -out .gcp.json -d
gcloud auth activate-service-account --key-file .gcp.json
(. bin/_gcp.sh ; gcp_configure "$GCP_PROJECT" "$GCP_ZONE" "$GKE_CLUSTER")
- gcloud version
- kubectl version --short
before_script:
- gcloud docker --authorize-only
- bin/docker-pull-deps
- |
export CONDUIT_TAG=$(. bin/_tag.sh ; clean_head_root_tag)
echo "CONDUIT_TAG=${CONDUIT_TAG}"
- export PROXY_RELEASE=1 BUILD_DEBUG=1 DOCKER_TRACE=1
script:
- bin/docker-build $CONDUIT_TAG
after_success:
- bin/docker-push-deps
- bin/docker-push $CONDUIT_TAG
- bin/docker-retag-all $CONDUIT_TAG master && bin/docker-push master
- target/cli/linux/conduit install --version=$CONDUIT_TAG |tee conduit.yml
- kubectl -n conduit apply -f conduit.yml --prune --selector='conduit.io/plane=control'
notifications:
email:
on_success: never

Cargo.lock generated Normal file

File diff suppressed because it is too large

Cargo.toml Normal file

@@ -0,0 +1,11 @@
[workspace]
members = [
"codegen",
"futures-mpsc-lossy",
"proxy",
"tower-router",
"tower-grpc",
"tower-grpc-build",
"tower-grpc-examples",
"tower-h2",
]

Dockerfile-base Normal file

@@ -0,0 +1,15 @@
# A base image for runtimes.
#
# This means that all Conduit containers share a common set of tools, and, because they
# build on the same base layer, their images are highly cacheable.
FROM debian:jessie-slim
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
curl \
dnsutils \
iptables \
jq \
nghttp2 \
&& rm -rf /var/lib/apt/lists/*

Dockerfile-go-deps Normal file

@@ -0,0 +1,14 @@
# A base image including all vendored dependencies, for building go projects
FROM golang:1.9.1
# get dep
RUN curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v0.3.1/dep-linux-amd64 && chmod +x /usr/local/bin/dep
# ensure all dependencies are vendored
WORKDIR /go/src/github.com/runconduit/conduit
COPY . .
RUN dep ensure && dep prune
# remove everything except the Gopkg files and the vendor directory
RUN find . -not -name 'Gopkg*' -not -path './vendor*' -delete

Gopkg.lock generated Normal file

@@ -0,0 +1,330 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "5a9e19d4e1e41a734154e44a2132b358afb49a03"
version = "v0.13.0"
[[projects]]
name = "github.com/PuerkitoBio/purell"
packages = ["."]
revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/PuerkitoBio/urlesc"
packages = ["."]
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
[[projects]]
branch = "master"
name = "github.com/beorn7/perks"
packages = ["quantile"]
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
[[projects]]
name = "github.com/blang/semver"
packages = ["."]
revision = "2ee87856327ba09384cabd113bc6b5d174e9ec0f"
version = "v3.5.1"
[[projects]]
branch = "master"
name = "github.com/coreos/go-oidc"
packages = ["http","jose","key","oauth2","oidc"]
revision = "a4973d9a4225417aecf5d450a9522f00c1f7130f"
[[projects]]
name = "github.com/coreos/pkg"
packages = ["health","httputil","timeutil"]
revision = "3ac0863d7acf3bc44daf49afef8919af12f704ef"
version = "v3"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/docker/distribution"
packages = ["digest","reference"]
revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
version = "v2.6.2"
[[projects]]
name = "github.com/emicklei/go-restful"
packages = [".","log","swagger"]
revision = "777bb3f19bcafe2575ffb2a3e46af92509ae9594"
version = "v1.2"
[[projects]]
name = "github.com/ghodss/yaml"
packages = ["."]
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/go-openapi/jsonpointer"
packages = ["."]
revision = "779f45308c19820f1a69e9a4cd965f496e0da10f"
[[projects]]
branch = "master"
name = "github.com/go-openapi/jsonreference"
packages = ["."]
revision = "36d33bfe519efae5632669801b180bf1a245da3b"
[[projects]]
branch = "master"
name = "github.com/go-openapi/spec"
packages = ["."]
revision = "3faa0055dbbf2110abc1f3b4e3adbb22721e96e7"
[[projects]]
branch = "master"
name = "github.com/go-openapi/swag"
packages = ["."]
revision = "f3f9494671f93fcff853e3c6e9e948b3eb71e590"
[[projects]]
name = "github.com/gogo/protobuf"
packages = ["proto","sortkeys"]
revision = "100ba4e885062801d56799d78530b73b178a78f3"
version = "v0.4"
[[projects]]
branch = "master"
name = "github.com/golang/glog"
packages = ["."]
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["jsonpb","proto","protoc-gen-go","protoc-gen-go/descriptor","protoc-gen-go/generator","protoc-gen-go/grpc","protoc-gen-go/plugin","ptypes","ptypes/any","ptypes/duration","ptypes/struct","ptypes/timestamp"]
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
[[projects]]
branch = "master"
name = "github.com/google/gofuzz"
packages = ["."]
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
name = "github.com/grpc-ecosystem/go-grpc-prometheus"
packages = ["."]
revision = "6b7015e65d366bf3f19b2b2a000a831940f0f7e0"
version = "v1.1"
[[projects]]
branch = "master"
name = "github.com/howeyc/gopass"
packages = ["."]
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
[[projects]]
name = "github.com/imdario/mergo"
packages = ["."]
revision = "3e95a51e0639b4cf372f2ccf74c86749d747fbdc"
version = "0.2.2"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
name = "github.com/jonboulle/clockwork"
packages = ["."]
revision = "2eee05ed794112d45db504eb05aa693efd2b8b09"
version = "v0.1.0"
[[projects]]
branch = "master"
name = "github.com/juju/ratelimit"
packages = ["."]
revision = "5b9ff866471762aa2ab2dced63c9fb6f53921342"
[[projects]]
name = "github.com/julienschmidt/httprouter"
packages = ["."]
revision = "8c199fb6259ffc1af525cc3ad52ee60ba8359669"
version = "v1.1"
[[projects]]
branch = "master"
name = "github.com/mailru/easyjson"
packages = ["buffer","jlexer","jwriter"]
revision = "2a92e673c9a6302dd05c3a691ae1f24aef46457d"
[[projects]]
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
version = "v1.0.0"
[[projects]]
name = "github.com/pborman/uuid"
packages = ["."]
revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53"
version = "v1.1"
[[projects]]
name = "github.com/pkg/browser"
packages = ["."]
revision = "c90ca0c84f15f81c982e32665bffd8d7aac8f097"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/prometheus/client_golang"
packages = ["api","api/prometheus/v1","prometheus","prometheus/promhttp"]
revision = "967789050ba94deca04a5e84cce8ad472ce313c1"
version = "v0.9.0-pre1"
[[projects]]
branch = "master"
name = "github.com/prometheus/client_model"
packages = ["go"]
revision = "6f3806018612930941127f2a7c6c453ba2c527d2"
[[projects]]
branch = "master"
name = "github.com/prometheus/common"
packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"]
revision = "1bab55dd05dbff384524a6a1c99006d9eb5f139b"
[[projects]]
branch = "master"
name = "github.com/prometheus/procfs"
packages = [".","xfs"]
revision = "a6e9df898b1336106c743392c48ee0b71f5c4efa"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
version = "v1.1.0"
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
version = "v1.0.3"
[[projects]]
name = "github.com/spf13/cobra"
packages = ["."]
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
version = "v1.0.0"
[[projects]]
name = "github.com/stretchr/testify"
packages = ["assert"]
revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
version = "v1.1.4"
[[projects]]
branch = "master"
name = "github.com/ugorji/go"
packages = ["codec"]
revision = "8c0409fcbb70099c748d71f714529204975f6c3f"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
revision = "faadfbdc035307d901e69eea569f5dda451a3ee3"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
revision = "b129b8e0fbeb39c8358e51a07ab6c50ad415e72e"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "13449ad91cb26cb47661c1b080790392170385fd"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix","windows"]
revision = "062cd7e4e68206d8bab9b18396626e855c992658"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
revision = "acd49d43e9b95df46670431bf5c7ae59586daad8"
[[projects]]
name = "google.golang.org/appengine"
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "595979c8a7bf586b2d293fb42246bf91a0b893d9"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","balancer","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","stats","status","tap","transport"]
revision = "61d37c5d657a47e4404fd6823bd598341a2595de"
version = "v1.7.1"
[[projects]]
name = "gopkg.in/inf.v0"
packages = ["."]
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
version = "v0.9.0"
[[projects]]
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
[[projects]]
branch = "master"
name = "k8s.io/apimachinery"
packages = ["pkg/api/resource","pkg/apis/meta/v1","pkg/conversion","pkg/conversion/queryparams","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/selection","pkg/types","pkg/util/errors","pkg/util/intstr","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/watch","third_party/forked/golang/reflect"]
revision = "18a564baac720819100827c16fdebcadb05b2d0d"
[[projects]]
name = "k8s.io/client-go"
packages = ["discovery","kubernetes","kubernetes/typed/apps/v1beta1","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authorization/v1beta1","kubernetes/typed/autoscaling/v1","kubernetes/typed/batch/v1","kubernetes/typed/batch/v2alpha1","kubernetes/typed/certificates/v1alpha1","kubernetes/typed/core/v1","kubernetes/typed/extensions/v1beta1","kubernetes/typed/policy/v1beta1","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/storage/v1beta1","pkg/api","pkg/api/errors","pkg/api/install","pkg/api/meta","pkg/api/meta/metatypes","pkg/api/resource","pkg/api/unversioned","pkg/api/v1","pkg/api/validation/path","pkg/apimachinery","pkg/apimachinery/announced","pkg/apimachinery/registered","pkg/apis/apps","pkg/apis/apps/install","pkg/apis/apps/v1beta1","pkg/apis/authentication","pkg/apis/authentication/install","pkg/apis/authentication/v1beta1","pkg/apis/authorization","pkg/apis/authorization/install","pkg/apis/authorization/v1beta1","pkg/apis/autoscaling","pkg/apis/autoscaling/install","pkg/apis/autoscaling/v1","pkg/apis/batch","pkg/apis/batch/install","pkg/apis/batch/v1","pkg/apis/batch/v2alpha1","pkg/apis/certificates","pkg/apis/certificates/install","pkg/apis/certificates/v1alpha1","pkg/apis/extensions","pkg/apis/extensions/install","pkg/apis/extensions/v1beta1","pkg/apis/policy","pkg/apis/policy/install","pkg/apis/policy/v1beta1","pkg/apis/rbac","pkg/apis/rbac/install","pkg/apis/rbac/v1alpha1","pkg/apis/storage","pkg/apis/storage/install","pkg/apis/storage/v1beta1","pkg/auth/user","pkg/conversion","pkg/conversion/queryparams","pkg/fields","pkg/genericapiserver/openapi/common","pkg/labels","pkg/runtime","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/third_party/forked/golang/reflect","pkg/third_party/forked/golang/template","pkg/types","pkg/util","pkg/util/cert","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/flowcontrol","pkg/util/framer","pkg/util/homedir","pkg/util/integer","pkg/util/intstr","pkg/util/json","pkg/util/jsonpath","pkg/util/labels","pkg/util/net","pkg/util/parsers","pkg/util/rand","pkg/util/runtime","pkg/util/sets","pkg/util/uuid","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","pkg/watch/versioned","plugin/pkg/client/auth","plugin/pkg/client/auth/gcp","plugin/pkg/client/auth/oidc","rest","tools/auth","tools/cache","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/metrics","transport"]
revision = "e121606b0d09b2e1c467183ee46217fa85a6b672"
version = "v2.0.0"
[[projects]]
branch = "master"
name = "k8s.io/kube-openapi"
packages = ["pkg/common"]
revision = "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "f97c917e7e838d8679fc347563a68c30343cd0349aee583e48eefd33ba0d0d1b"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml Normal file

@@ -0,0 +1,25 @@
required = ["github.com/golang/protobuf/protoc-gen-go"]
[[constraint]]
name = "google.golang.org/grpc"
version = "1.7.0"
[[constraint]]
name = "github.com/spf13/cobra"
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" # cobra has no release tags at time of writing
[[constraint]]
name = "k8s.io/client-go"
version = "2.0.0"
[[constraint]]
name = "github.com/sirupsen/logrus"
version = "v1.0.3"
[[constraint]]
name = "github.com/pkg/browser"
revision = "c90ca0c84f15f81c982e32665bffd8d7aac8f097" # browser has no release tags at time of writing
[[constraint]]
name = "github.com/golang/protobuf"
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845" # protobuf has no release tags at time of writing

LICENSE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

bin/_docker.sh Normal file

@@ -0,0 +1,127 @@
#!/bin/sh
#
# docker
#
set -eu
. bin/_log.sh
# TODO this should be set to the canonical public docker registry; we can override this
# docker registry in, for instance, CI.
export DOCKER_REGISTRY="${DOCKER_REGISTRY:-gcr.io/runconduit}"
# When set, causes docker's build output to be emitted to stderr.
export DOCKER_TRACE="${DOCKER_TRACE:-}"
docker_repo() {
repo="$1"
name="$repo"
if [ -n "${DOCKER_REGISTRY:-}" ]; then
name="$DOCKER_REGISTRY/$name"
fi
echo "$name"
}
docker_tags() {
image="$1"
docker image ls "${image}" | sed 1d | awk '{print $2}'
}
docker_build() {
dir="$1"
shift
repo="$1"
shift
tag="$1"
shift
file="$1"
shift
extra="$@"
output="/dev/null"
if [ -n "$DOCKER_TRACE" ]; then
output="/dev/stderr"
fi
# Even when we haven't built an image locally, we can try to use a known prior version
# of the image to prevent rebuilding layers.
if [ -n "${DOCKER_BUILD_CACHE_FROM_TAG:-}" ]; then
if [ -n "$extra" ]; then
extra="$extra "
fi
extra="${extra}--cache-from='$repo:${DOCKER_BUILD_CACHE_FROM_TAG}'"
fi
log_debug " :; docker build $dir -t $repo:$tag -f $file $extra"
docker build "$dir" \
-t "$repo:$tag" \
-f "$file" \
$extra \
> "$output"
echo "$repo:$tag"
}
# Builds a docker image if it doesn't exist and/or can't be found.
#
# If the `tag` is 'latest', an image will always be built.
docker_maybe_build() {
dir="$1"
shift
repo="$1"
shift
tag="$1"
shift
file="$1"
shift
extra="$@"
if [ -z "${DOCKER_FORCE_BUILD:-}" ]; then
docker pull "${repo}:${tag}" >/dev/null 2>&1 || true
for t in $(docker_tags "${repo}:${tag}") ; do
if [ "$t" = "$tag" ]; then
docker tag "${repo}:${tag}" "${repo}:latest" >/dev/null
echo "${repo}:${tag}"
return 0
fi
done
fi
docker_build "$dir" "$repo" "$tag" "$file" $extra
}
docker_pull() {
repo=$(docker_repo "$1")
tag="$2"
log_debug " :; docker pull $repo:$tag"
docker pull "$repo:$tag"
}
docker_push() {
repo=$(docker_repo "$1")
tag="$2"
log_debug " :; docker push $repo:$tag"
docker push "$repo:$tag"
}
docker_retag() {
repo=$(docker_repo "$1")
from="$2"
to="$3"
log_debug " :; docker tag $repo:$from $repo:$to"
docker tag "$repo:$from" "$repo:$to"
echo "$repo:$to"
}
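As a hedged illustration of the caching knobs described above (the tags below are made up), DOCKER_BUILD_CACHE_FROM_TAG adds a --cache-from argument to the underlying docker build, and DOCKER_FORCE_BUILD disables the pull-and-reuse shortcut in docker_maybe_build:

    :; DOCKER_BUILD_CACHE_FROM_TAG=master bin/docker-build-web git-1a2b3c4d
    # docker_build then runs, roughly:
    #   docker build . -t gcr.io/runconduit/web:git-1a2b3c4d -f web/Dockerfile \
    #     --cache-from='gcr.io/runconduit/web:master'
    # Set DOCKER_FORCE_BUILD=1 to force a rebuild even when the tagged image can be pulled.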

bin/_gcp.sh Normal file

@@ -0,0 +1,27 @@
#!/bin/sh
#
# gcp -- mostly for CI
#
set -eu
. bin/_log.sh
gcp_configure() {
project="$1"
zone="$2"
cluster="$3"
gcloud config set core/project "$project"
gcloud config set compute/zone "$zone"
gcloud config set container/cluster "$cluster"
for c in $(kubectl config get-clusters |sed 1d) ; do
if [ "$c" = "gke_${project}_${zone}_${cluster}" ]; then
return 0
fi
done
log_debug " :; gcloud container clusters get-credentials $cluster"
gcloud container clusters get-credentials "$cluster"
}

bin/_log.sh Normal file

@@ -0,0 +1,17 @@
#!/bin/sh
set -eu
# debug logging is enabled by setting BUILD_DEBUG (e.g. BUILD_DEBUG=1) and is disabled otherwise
#export BUILD_DEBUG="${BUILD_DEBUG:-}"
export TRACE="${TRACE:-}"
if [ -n "$TRACE" ]; then
set -x
fi
log_debug() {
if [ -z "$TRACE" ] && [ -n "${BUILD_DEBUG:-}" ]; then
echo "$@" >&2
fi
}

bin/_tag.sh Normal file

@@ -0,0 +1,33 @@
#!/bin/sh
set -eu
git_sha() {
git rev-parse "$1" | cut -c 1-8
}
cargo_sha() {
shasum Cargo.lock | awk '{print $1}' |cut -c 1-8
}
gopkg_sha() {
shasum Gopkg.lock | awk '{print $1}' |cut -c 1-8
}
dir_tag() {
dir="$1"
echo "git-$(git log -n 1 --format="%h" "$dir")"
}
clean_head_root_tag() {
if git diff-index --quiet HEAD -- ; then
echo "git-$(git_sha HEAD)"
else
echo "Commit unstaged changes or set an explicit build tag." >&2
exit 3
fi
}
master_root_tag() {
echo "git-$(git_sha master)"
}
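For illustration (the SHA is hypothetical), clean_head_root_tag is what bin/root-tag and the docker-build scripts below use to tag images built from a clean checkout:

    :; bin/root-tag
    git-1a2b3c4d
    # With uncommitted changes it prints an error and exits with status 3;
    # the go-deps and proxy-deps images are instead tagged with gopkg_sha / cargo_sha.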

bin/docker-build Executable file

@@ -0,0 +1,21 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
if [ $# -eq 0 ]; then
tag="$(clean_head_root_tag)"
elif [ $# -eq 1 ]; then
tag="${1:-}"
else
echo "usage: $(basename $0) [tag]" >&2
exit 64
fi
bin/docker-build-controller "$tag"
bin/docker-build-web "$tag"
bin/docker-build-proxy "$tag"
bin/docker-build-proxy-init "$tag"
bin/docker-build-cli "$tag"

bin/docker-build-base Executable file

@@ -0,0 +1,14 @@
#!/bin/sh
# Builds our base runtime docker image.
set -eu
. bin/_docker.sh
tag="${1:-2017-10-30.01}"
docker_maybe_build . \
"$(docker_repo base)" \
"${tag}" \
Dockerfile-base

bin/docker-build-cli Executable file

@@ -0,0 +1,24 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
if [ $# -eq 0 ]; then
tag="$(clean_head_root_tag)"
elif [ $# -eq 1 ]; then
tag="${1:-}"
else
echo "usage: $(basename $0) [tag]" >&2
exit 64
fi
# Build gcr.io/runconduit/cli-bin, which is used by Dockerfile-cli.
bin/docker-build-cli-bin "${tag}" >/dev/null
docker_maybe_build . \
"$(docker_repo cli)" \
"${tag}" \
cli/Dockerfile

bin/docker-build-cli-bin Executable file

@@ -0,0 +1,42 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
if [ $# -eq 0 ]; then
tag="$(clean_head_root_tag)"
elif [ $# -eq 1 ]; then
tag="${1:-}"
else
echo "usage: $(basename $0) [tag]" >&2
exit 64
fi
(
unset DOCKER_FORCE_BUILD
bin/docker-build-base
bin/docker-build-go-deps
) >/dev/null
IMG=$(docker_maybe_build . \
"$(docker_repo cli-bin)" \
"${tag}" \
cli/Dockerfile-bin)
ID=$(docker create "$IMG")
for OS in linux macos ; do
DIR="target/cli/${OS}"
mkdir -p "$DIR"
if docker cp "$ID:/out/conduit-${OS}" "$DIR/conduit" ; then
echo "$DIR/conduit"
else
docker rm "$ID" >/dev/null
exit 1
fi
done
docker rm "$ID" >/dev/null

bin/docker-build-controller Executable file

@@ -0,0 +1,26 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
if [ $# -eq 0 ]; then
tag="$(clean_head_root_tag)"
elif [ $# -eq 1 ]; then
tag="${1:-}"
else
echo "usage: $(basename $0) [tag]" >&2
exit 64
fi
(
unset DOCKER_FORCE_BUILD
bin/docker-build-base
bin/docker-build-go-deps
) >/dev/null
docker_maybe_build . \
"$(docker_repo controller)" \
"${tag}" \
controller/Dockerfile

bin/docker-build-go-deps Executable file

@@ -0,0 +1,13 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
image=$(docker_maybe_build . \
"$(docker_repo go-deps)" \
"$(gopkg_sha)" \
Dockerfile-go-deps)
echo "$image"

bin/docker-build-proxy Executable file

@@ -0,0 +1,28 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
if [ $# -eq 0 ]; then
tag="$(clean_head_root_tag)"
elif [ $# -eq 1 ]; then
tag="${1:-}"
else
echo "usage: $(basename $0) [tag]" >&2
exit 64
fi
(
unset DOCKER_FORCE_BUILD
bin/docker-build-base
bin/docker-build-proxy-deps
) >/dev/null
# Build release images by default
docker_build . \
"$(docker_repo proxy)" \
"${tag}" \
proxy/Dockerfile \
--build-arg="RELEASE=${PROXY_RELEASE:-1}"

bin/docker-build-proxy-deps Executable file

@@ -0,0 +1,13 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
image=$(docker_maybe_build . \
"$(docker_repo proxy-deps)" \
"$(cargo_sha)" \
proxy/Dockerfile-deps)
echo "$image"

bin/docker-build-proxy-init Executable file

@@ -0,0 +1,26 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
if [ $# -eq 0 ]; then
tag="$(clean_head_root_tag)"
elif [ $# -eq 1 ]; then
tag="${1:-}"
else
echo "usage: $(basename $0) [tag]" >&2
exit 64
fi
(
unset DOCKER_FORCE_BUILD
bin/docker-build-base
bin/docker-build-go-deps
) >/dev/null
docker_maybe_build . \
"$(docker_repo proxy-init)" \
"${tag}" \
proxy-init/Dockerfile

bin/docker-build-web Executable file

@@ -0,0 +1,26 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
if [ $# -eq 0 ]; then
tag="$(clean_head_root_tag)"
elif [ $# -eq 1 ]; then
tag="${1:-}"
else
echo "usage: $(basename $0) [tag]" >&2
exit 64
fi
(
unset DOCKER_FORCE_BUILD
bin/docker-build-base
bin/docker-build-go-deps
) >/dev/null
docker_maybe_build . \
"$(docker_repo web)" \
"${tag}" \
web/Dockerfile

bin/docker-images Executable file

@@ -0,0 +1,32 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
if [ $# -eq 0 ]; then
tag="$(clean_head_root_tag)"
elif [ $# -eq 1 ]; then
tag="${1:-}"
else
echo "usage: $(basename $0) [tag]" >&2
exit 64
fi
docker_image() {
repo="$(docker_repo "$1")"
docker image ls \
--format "{{printf \"%-16s %-10s\" \"$1\" \"${2}\"}} {{.Size | printf \"%6s\"}} {{.ID}} {{.CreatedAt}}" \
"${repo}:${2}"
}
docker_image controller "${tag}"
docker_image proxy "${tag}"
docker_image proxy-init "${tag}"
docker_image web "${tag}"
docker_image cli "${tag}"
docker_image cli-bin "${tag}"
docker_image go-deps "$(gopkg_sha)"
docker_image proxy-deps "$(cargo_sha)"

bin/docker-pull Executable file

@@ -0,0 +1,23 @@
#!/bin/sh
set -eu
. bin/_tag.sh
if [ $# -eq 0 ]; then
tag="$(clean_head_root_tag)"
elif [ $# -eq 1 ]; then
tag="${1:-}"
else
echo "usage: $(basename $0) [tag]" >&2
exit 64
fi
. bin/_docker.sh
docker_pull proxy "${tag}" || true
docker_pull proxy-init "${tag}" || true
docker_pull controller "${tag}" || true
docker_pull web "${tag}" || true
docker_pull cli "${tag}" || true
docker_pull cli-bin "${tag}" || true

bin/docker-pull-deps Executable file

@@ -0,0 +1,10 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
docker_pull base 2017-10-30.01 || true
docker_pull go-deps "$(gopkg_sha)" || true
docker_pull proxy-deps "$(cargo_sha)" || true

bin/docker-push Executable file

@@ -0,0 +1,23 @@
#!/bin/sh
set -eu
. bin/_tag.sh
if [ $# -eq 0 ]; then
tag="$(clean_head_root_tag)"
elif [ $# -eq 1 ]; then
tag="${1:-}"
else
echo "usage: $(basename $0) [tag]" >&2
exit 64
fi
. bin/_docker.sh
docker_push proxy "${tag}"
docker_push proxy-init "${tag}"
docker_push controller "${tag}"
docker_push web "${tag}"
docker_push cli "${tag}"
docker_push cli-bin "${tag}"

bin/docker-push-deps Executable file

@@ -0,0 +1,10 @@
#!/bin/sh
set -eu
. bin/_docker.sh
. bin/_tag.sh
docker_push base 2017-10-30.01
docker_push go-deps "$(gopkg_sha)"
docker_push proxy-deps "$(cargo_sha)"

bin/docker-retag-all Executable file

@@ -0,0 +1,19 @@
#!/bin/sh
set -eu
if [ $# -ne 2 ]; then
echo "usage: $0 from-tag to-tag" >&2
exit 64
fi
from="${1}"
to="${2}"
. bin/_docker.sh
docker_retag proxy "$from" "$to"
docker_retag proxy-init "$from" "$to"
docker_retag controller "$from" "$to"
docker_retag web "$from" "$to"
docker_retag cli "$from" "$to"
docker_retag cli-bin "$from" "$to"

bin/go-run Executable file

@@ -0,0 +1,12 @@
#!/bin/sh
set -eu
if [ "$#" -eq 0 ]; then
echo "Usage: bin/go-run path/to/main [args]" >&2
exit 1
fi
go build -v -i -race -o .gorun ./"$1"
shift
exec ./.gorun "$@"

@@ -0,0 +1,8 @@
REM Starts minikube on Windows 10 using Hyper-V.
REM
REM Windows 10 version 1709 (Fall Creators Update) or later is required for the
REM "Default Switch" (NAT with automatic DHCP).
REM
REM Hyper-V must be enabled in "Turn Windows features on or off."
minikube start --kubernetes-version="v1.8.0" --vm-driver="hyperv" --disk-size=30G --memory=8192 --cpus=4 --hyperv-virtual-switch="Default Switch" --v=7 --alsologtostderr

bin/mkube Executable file

@@ -0,0 +1,22 @@
#!/bin/sh
# A wrapper for interacting with minikube.
#
# Example:
# :; bin/mkube docker image ls
# If we're running under WSL then we have to use the Windows native Minikube
# as the Linux version doesn't work in WSL. Assume WSL is Microsoft's only
# Linux distro.
uname -r | grep "Microsoft" > /dev/null
if [ $? -ne 0 ]; then
MINIKUBE_EXE=minikube
else
# This is where minikube-installer.exe puts it.
MINIKUBE_EXE="${MINIKUBE_EXE:-/mnt/c/Program Files (x86)/Kubernetes/Minikube/minikube.exe}"
fi
# Rewrite Windows style paths "C:\\whatever\\whatever" to "/mnt/c/whatever/whatever".
eval $("${MINIKUBE_EXE}" docker-env --shell=bash | sed "s|C:\\\|/mnt/c/|g" | sed "s|\\\|/|g")
exec $@
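To make the path rewriting above concrete, here is a hedged illustration (the Windows path is hypothetical) of how one docker-env line is translated for WSL before the eval:

    # minikube docker-env emits lines such as:
    #   export DOCKER_CERT_PATH="C:\Users\me\.minikube\certs"
    # which the sed pipeline rewrites to the WSL mount:
    #   export DOCKER_CERT_PATH="/mnt/c/Users/me/.minikube/certs"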

bin/protoc Executable file

@@ -0,0 +1,28 @@
#!/bin/sh
set -eu
if [ "$(uname -s)" = "Darwin" ]; then
os=osx
else
os=linux
fi
arch=$(uname -m)
protocbin=.protoc
protocversion=3.4.0
protocurl="https://github.com/google/protobuf/releases/download/v${protocversion}/protoc-${protocversion}-${os}-${arch}.zip"
if [ ! -f "$protocbin" ]; then
tmp=$(mktemp -d -t protoc.XXX)
(
cd $tmp
curl -L --silent --fail -o "$protocbin.zip" "$protocurl"
jar xf "$protocbin.zip"
chmod +x bin/protoc
)
mv "$tmp/bin/protoc" "$protocbin"
rm -rf "$tmp"
fi
./$protocbin "$@"

bin/protoc-go.sh Executable file

@@ -0,0 +1,23 @@
#!/bin/sh
set -eu
go install ./vendor/github.com/golang/protobuf/protoc-gen-go
rm -rf controller/gen
mkdir controller/gen
bin/protoc -I proto --go_out=plugins=grpc:controller/gen proto/public/api.proto
bin/protoc -I proto --go_out=plugins=grpc:controller/gen proto/common/common.proto
bin/protoc -I proto --go_out=plugins=grpc:controller/gen proto/proxy/telemetry/telemetry.proto
bin/protoc -I proto --go_out=plugins=grpc:controller/gen proto/proxy/destination/destination.proto
bin/protoc -I proto --go_out=plugins=grpc:controller/gen proto/proxy/tap/tap.proto
bin/protoc -I proto --go_out=plugins=grpc:controller/gen proto/controller/telemetry/telemetry.proto
bin/protoc -I proto --go_out=plugins=grpc:controller/gen proto/controller/tap/tap.proto
# Manually fix imports
find controller/gen -type f -exec sed -i.bak 's:"common":"github.com\/runconduit\/conduit\/controller\/gen\/common":g' {} +
find controller/gen -type f -exec sed -i.bak 's:"proxy/tap":"github.com\/runconduit\/conduit\/controller\/gen\/proxy\/tap":g' {} +
find controller/gen -type f -exec sed -i.bak 's:"controller/tap":"github.com\/runconduit\/conduit\/controller\/gen\/controller\/tap":g' {} +
find controller/gen -type f -exec sed -i.bak 's:"public":"github.com\/runconduit\/conduit\/controller\/gen\/public":g' {} +
find controller/gen -name '*.bak' -delete
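To make the import fix-up concrete, here is a hedged sketch (the input line is a made-up example of a generated import) of what the first sed expression does:

    :; echo 'import common "common"' \
         | sed 's:"common":"github.com/runconduit/conduit/controller/gen/common":g'
    import common "github.com/runconduit/conduit/controller/gen/common"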

bin/root-tag Executable file

@@ -0,0 +1,5 @@
#!/bin/sh
. bin/_tag.sh
clean_head_root_tag

cli/Dockerfile Normal file

@@ -0,0 +1,11 @@
# The bin image contains binaries for all platforms.
FROM gcr.io/runconduit/cli-bin:latest as build
# We create a new minimal image with the linux binary installed.
FROM gcr.io/runconduit/base:2017-10-30.01
RUN mkdir -p /usr/local/bin
COPY --from=build /out/conduit-linux /usr/local/bin/conduit
ENV PATH=$PATH:/usr/local/bin
ENTRYPOINT ["/usr/local/bin/conduit"]

cli/Dockerfile-bin Normal file

@@ -0,0 +1,13 @@
## compile binaries
FROM gcr.io/runconduit/go-deps:4cf1f22f as golang
WORKDIR /go/src/github.com/runconduit/conduit
COPY cli cli
COPY controller controller
RUN mkdir -p /out
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o /out/conduit-linux ./cli
RUN CGO_ENABLED=0 GOOS=darwin go build -a -installsuffix cgo -o /out/conduit-macos ./cli
## export without sources & dependencies
FROM gcr.io/runconduit/base:2017-10-30.01
COPY --from=golang /out /out
WORKDIR /out

cli/cmd/dashboard.go Normal file

@@ -0,0 +1,73 @@
package cmd
import (
"bufio"
"fmt"
"log"
"os/exec"
"github.com/pkg/browser"
"github.com/spf13/cobra"
)
var (
proxyPort int32 = -1
)
var dashboardCmd = &cobra.Command{
Use: "dashboard [flags]",
Short: "Open the Conduit dashboard in a web browser",
Long: "Open the Conduit dashboard in a web browser.",
RunE: func(cmd *cobra.Command, args []string) error {
if proxyPort <= 0 {
log.Fatalf("port must be positive, was %d", proxyPort)
}
portArg := fmt.Sprintf("--port=%d", proxyPort)
kubectl := exec.Command("kubectl", "proxy", portArg)
kubeCtlStdOut, err := kubectl.StdoutPipe()
if err != nil {
log.Fatalf("Failed to set up pipe for kubectl output: %v", err)
}
fmt.Printf("Running `kubectl proxy %s`\n", portArg)
go func() {
// Wait for `kubectl proxy` to output one line, which indicates that the proxy has been set up.
kubeCtlStdOutLines := bufio.NewReader(kubeCtlStdOut)
firstLine, err := kubeCtlStdOutLines.ReadString('\n')
if err != nil {
log.Fatalf("Failed to read output from kubectl proxy: %v", err)
}
fmt.Printf("%s", firstLine)
// Use "127.0.0.1" instead of "localhost" in case "localhost" resolves to "[::1]" (IPv6) or another
// unexpected address.
url := fmt.Sprintf("http://127.0.0.1:%d/api/v1/namespaces/%s/services/web:http/proxy/", proxyPort, controlPlaneNamespace)
fmt.Printf("Opening %v in the default browser\n", url)
err = browser.OpenURL(url)
if err != nil {
log.Fatalf("failed to open URL %s in the default browser: %v", url, err)
}
}()
err = kubectl.Run()
if err != nil {
log.Fatalf("Failed to run %v: %v", kubectl, err)
}
return nil
},
}
func init() {
RootCmd.AddCommand(dashboardCmd)
dashboardCmd.Args = cobra.NoArgs
// This is identical to what `kubectl proxy --help` reports, except
// `kubectl proxy` allows `--port=0` to indicate a random port; That's
// inconvenient to support so it isn't supported.
dashboardCmd.PersistentFlags().Int32VarP(&proxyPort, "port", "p", 8001, "The port on which to run the proxy, which must not be 0.")
}

cli/cmd/get.go Normal file

@@ -0,0 +1,52 @@
package cmd
import (
"context"
"errors"
"fmt"
pb "github.com/runconduit/conduit/controller/gen/public"
"github.com/spf13/cobra"
)
var getCmd = &cobra.Command{
Use: "get [flags] RESOURCE",
Short: "Display one or many mesh resources",
Long: `Display one or many mesh resources.
Valid resource types include:
* pods (aka pod, po)`,
RunE: func(cmd *cobra.Command, args []string) error {
switch len(args) {
case 1:
resourceType := args[0]
switch resourceType {
case "pod", "pods", "po":
client, err := newApiClient()
if err != nil {
return err
}
resp, err := client.ListPods(context.Background(), &pb.Empty{})
if err != nil {
return err
}
for _, pod := range resp.GetPods() {
fmt.Println(pod.Name)
}
default:
return errors.New("invalid resource type")
}
return nil
default:
return errors.New("please specify a resource type")
}
},
}
func init() {
RootCmd.AddCommand(getCmd)
addControlPlaneNetworkingArgs(getCmd)
}

cli/cmd/inject.go Normal file

@@ -0,0 +1,368 @@
package cmd
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
"github.com/runconduit/conduit/controller"
"github.com/ghodss/yaml"
"k8s.io/client-go/pkg/apis/extensions/v1beta1"
"github.com/spf13/cobra"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/pkg/api/v1"
yamlDecoder "k8s.io/client-go/pkg/util/yaml"
)
var (
initImage string
proxyImage string
proxyUID int64
inboundPort uint
outboundPort uint
ignoreInboundPorts []uint
proxyControlPort uint
proxyAPIPort uint
conduitCreatedByAnnotation = "conduit.io/created-by"
conduitProxyVersionAnnotation = "conduit.io/proxy-version"
conduitControlLabel = "conduit.io/controller"
conduitPlaneLabel = "conduit.io/plane"
)
var injectCmd = &cobra.Command{
Use: "inject [flags] CONFIG-FILE",
Short: "Add the Conduit proxy to a Kubernetes config",
Long: `Add the Conduit proxy to a Kubernetes config.
You can use a config file from stdin by using the '-' argument
with 'conduit inject'. e.g. curl http://url.to/yml | conduit inject -
`,
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("please specify a deployment file")
}
var in io.Reader
var err error
if args[0] == "-" {
in = os.Stdin
} else {
if in, err = os.Open(args[0]); err != nil {
return err
}
}
reader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(in, 4096))
// Iterate over all YAML objects in the input
for {
// Read a single YAML object
bytes, err := reader.Read()
if err == io.EOF {
break
}
if err != nil {
return err
}
// Unmarshal the object enough to read the Kind field
var meta meta_v1.TypeMeta
if err := yaml.Unmarshal(bytes, &meta); err != nil {
return err
}
var injected interface{} = nil
switch meta.Kind {
case "Deployment":
injected, err = injectDeployment(bytes)
case "ReplicationController":
injected, err = injectReplicationController(bytes)
case "ReplicaSet":
injected, err = injectReplicaSet(bytes)
case "Job":
injected, err = injectJob(bytes)
case "DaemonSet":
injected, err = injectDaemonSet(bytes)
}
output := bytes
if injected != nil {
output, err = yaml.Marshal(injected)
if err != nil {
return err
}
}
os.Stdout.Write(output)
fmt.Println("---")
}
return nil
},
}
/* Given a byte slice representing a deployment, unmarshal the deployment and
* return a new deployment with the sidecar and init-container injected.
*/
func injectDeployment(bytes []byte) (interface{}, error) {
var deployment v1beta1.Deployment
err := yaml.Unmarshal(bytes, &deployment)
if err != nil {
return nil, err
}
podTemplateSpec := injectPodTemplateSpec(&deployment.Spec.Template)
return enhancedDeployment{
&deployment,
enhancedDeploymentSpec{
&deployment.Spec,
podTemplateSpec,
},
}, nil
}
/* Given a byte slice representing a replication controller, unmarshal the
* replication controller and return a new replication controller with the
* sidecar and init-container injected.
*/
func injectReplicationController(bytes []byte) (interface{}, error) {
var rc v1.ReplicationController
err := yaml.Unmarshal(bytes, &rc)
if err != nil {
return nil, err
}
podTemplateSpec := injectPodTemplateSpec(rc.Spec.Template)
return enhancedReplicationController{
&rc,
enhancedReplicationControllerSpec{
&rc.Spec,
podTemplateSpec,
},
}, nil
}
/* Given a byte slice representing a replica set, unmarshal the replica set and
* return a new replica set with the sidecar and init-container injected.
*/
func injectReplicaSet(bytes []byte) (interface{}, error) {
var rs v1beta1.ReplicaSet
err := yaml.Unmarshal(bytes, &rs)
if err != nil {
return nil, err
}
podTemplateSpec := injectPodTemplateSpec(&rs.Spec.Template)
return enhancedReplicaSet{
&rs,
enhancedReplicaSetSpec{
&rs.Spec,
podTemplateSpec,
},
}, nil
}
/* Given a byte slice representing a job, unmarshal the job and return a new job
* with the sidecar and init-container injected.
*/
func injectJob(bytes []byte) (interface{}, error) {
var job v1beta1.Job
err := yaml.Unmarshal(bytes, &job)
if err != nil {
return nil, err
}
podTemplateSpec := injectPodTemplateSpec(&job.Spec.Template)
return enhancedJob{
&job,
enhancedJobSpec{
&job.Spec,
podTemplateSpec,
},
}, nil
}
/* Given a byte slice representing a daemonset, unmarshal the daemonset and
* return a new daemonset with the sidecar and init-container injected.
*/
func injectDaemonSet(bytes []byte) (interface{}, error) {
var ds v1beta1.DaemonSet
err := yaml.Unmarshal(bytes, &ds)
if err != nil {
return nil, err
}
podTemplateSpec := injectPodTemplateSpec(&ds.Spec.Template)
return enhancedDaemonSet{
&ds,
enhancedDaemonSetSpec{
&ds.Spec,
podTemplateSpec,
},
}, nil
}
/* Given a PodTemplateSpec, return a new PodTemplateSpec with the sidecar
* and init-container injected.
*/
func injectPodTemplateSpec(t *v1.PodTemplateSpec) enhancedPodTemplateSpec {
f := false
skipPorts := append(ignoreInboundPorts, proxyControlPort)
skipPortsStr := make([]string, len(skipPorts))
for i, p := range skipPorts {
skipPortsStr[i] = strconv.Itoa(int(p))
}
initContainer := v1.Container{
Name: "conduit-init",
Image: fmt.Sprintf("%s:%s", initImage, version),
ImagePullPolicy: v1.PullPolicy(imagePullPolicy),
Args: []string{
"-p", fmt.Sprintf("%d", inboundPort),
"-o", fmt.Sprintf("%d", outboundPort),
"-i", fmt.Sprintf("%s", strings.Join(skipPortsStr, ",")),
"-u", fmt.Sprintf("%d", proxyUID),
},
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{v1.Capability("NET_ADMIN")},
},
Privileged: &f,
},
}
sidecar := v1.Container{
Name: "conduit-proxy",
Image: fmt.Sprintf("%s:%s", proxyImage, version),
ImagePullPolicy: v1.PullPolicy(imagePullPolicy),
SecurityContext: &v1.SecurityContext{
RunAsUser: &proxyUID,
},
Ports: []v1.ContainerPort{
v1.ContainerPort{
Name: "conduit-proxy",
ContainerPort: int32(inboundPort),
},
},
Env: []v1.EnvVar{
v1.EnvVar{Name: "CONDUIT_PROXY_LOG", Value: "trace,h2=debug,mio=info,tokio_core=info"},
v1.EnvVar{
Name: "CONDUIT_PROXY_CONTROL_URL",
Value: fmt.Sprintf("tcp://proxy-api.%s.svc.cluster.local:%d", controlPlaneNamespace, proxyAPIPort),
},
v1.EnvVar{Name: "CONDUIT_PROXY_CONTROL_LISTENER", Value: fmt.Sprintf("tcp://0.0.0.0:%d", proxyControlPort)},
v1.EnvVar{Name: "CONDUIT_PROXY_PRIVATE_LISTENER", Value: fmt.Sprintf("tcp://127.0.0.1:%d", outboundPort)},
v1.EnvVar{Name: "CONDUIT_PROXY_PUBLIC_LISTENER", Value: fmt.Sprintf("tcp://0.0.0.0:%d", inboundPort)},
v1.EnvVar{
Name: "CONDUIT_PROXY_NODE_NAME",
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "spec.nodeName"}},
},
v1.EnvVar{
Name: "CONDUIT_PROXY_POD_NAME",
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"}},
},
v1.EnvVar{
Name: "CONDUIT_PROXY_POD_NAMESPACE",
ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},
},
},
}
if t.Annotations == nil {
t.Annotations = make(map[string]string)
}
t.Annotations[conduitCreatedByAnnotation] = fmt.Sprintf("conduit/cli %s", controller.Version)
t.Annotations[conduitProxyVersionAnnotation] = version
if t.Labels == nil {
t.Labels = make(map[string]string)
}
t.Labels[conduitControlLabel] = controlPlaneNamespace
t.Labels[conduitPlaneLabel] = "data"
t.Spec.Containers = append(t.Spec.Containers, sidecar)
return enhancedPodTemplateSpec{
t,
enhancedPodSpec{
&t.Spec,
append(t.Spec.InitContainers, initContainer),
},
}
}
/* The v1.PodSpec struct contains a field annotation that causes the
* InitContainers field to be omitted when serializing the struct as json.
* Since we wish for this field to be included, we have to define our own
* enhancedPodSpec struct with a different annotation on this field. We then
* must define our own structs to use this struct, and so on.
*/
type enhancedPodSpec struct {
*v1.PodSpec
InitContainers []v1.Container `json:"initContainers"`
}
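// A minimal sketch of the shadowing trick these types rely on (hypothetical
// names, not part of this package): encoding/json prefers the field at the
// shallower depth, so redeclaring a field on the outer struct overrides the
// embedded type's tag.
//
//	type inner struct {
//	    Hidden []string `json:"-"`
//	}
//	type outer struct {
//	    *inner
//	    Hidden []string `json:"hidden"`
//	}
//
// json.Marshal(outer{...}) emits "hidden" even though inner alone would drop it.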
type enhancedPodTemplateSpec struct {
*v1.PodTemplateSpec
Spec enhancedPodSpec `json:"spec,omitempty"`
}
type enhancedDeploymentSpec struct {
*v1beta1.DeploymentSpec
Template enhancedPodTemplateSpec `json:"template,omitempty"`
}
type enhancedDeployment struct {
*v1beta1.Deployment
Spec enhancedDeploymentSpec `json:"spec,omitempty"`
}
type enhancedReplicationControllerSpec struct {
*v1.ReplicationControllerSpec
Template enhancedPodTemplateSpec `json:"template,omitempty"`
}
type enhancedReplicationController struct {
*v1.ReplicationController
Spec enhancedReplicationControllerSpec `json:"spec,omitempty"`
}
type enhancedReplicaSetSpec struct {
*v1beta1.ReplicaSetSpec
Template enhancedPodTemplateSpec `json:"template,omitempty"`
}
type enhancedReplicaSet struct {
*v1beta1.ReplicaSet
Spec enhancedReplicaSetSpec `json:"spec,omitempty"`
}
type enhancedJobSpec struct {
*v1beta1.JobSpec
Template enhancedPodTemplateSpec `json:"template,omitempty"`
}
type enhancedJob struct {
*v1beta1.Job
Spec enhancedJobSpec `json:"spec,omitempty"`
}
type enhancedDaemonSetSpec struct {
*v1beta1.DaemonSetSpec
Template enhancedPodTemplateSpec `json:"template,omitempty"`
}
type enhancedDaemonSet struct {
*v1beta1.DaemonSet
Spec enhancedDaemonSetSpec `json:"spec,omitempty"`
}
func init() {
RootCmd.AddCommand(injectCmd)
injectCmd.PersistentFlags().StringVarP(&version, "conduit-version", "v", "latest", "tag to be used for conduit images")
injectCmd.PersistentFlags().StringVar(&initImage, "init-image", "gcr.io/runconduit/proxy-init", "Conduit init container image name")
injectCmd.PersistentFlags().StringVar(&proxyImage, "proxy-image", "gcr.io/runconduit/proxy", "Conduit proxy container image name")
injectCmd.PersistentFlags().StringVar(&imagePullPolicy, "image-pull-policy", "IfNotPresent", "Docker image pull policy")
injectCmd.PersistentFlags().Int64Var(&proxyUID, "proxy-uid", 2102, "Run the proxy under this user ID")
injectCmd.PersistentFlags().UintVar(&inboundPort, "inbound-port", 4143, "proxy port to use for inbound traffic")
injectCmd.PersistentFlags().UintVar(&outboundPort, "outbound-port", 4140, "proxy port to use for outbound traffic")
injectCmd.PersistentFlags().UintSliceVar(&ignoreInboundPorts, "skip-inbound-ports", nil, "ports that should bypass the proxy and be sent directly to the application")
injectCmd.PersistentFlags().UintVar(&proxyControlPort, "control-port", 4190, "proxy port to use for control")
injectCmd.PersistentFlags().UintVar(&proxyAPIPort, "api-port", 8086, "port where the Conduit controller is running")
}

398
cli/cmd/install.go Normal file
View File

@ -0,0 +1,398 @@
package cmd
import (
"fmt"
"log"
"os"
"regexp"
"text/template"
"github.com/runconduit/conduit/controller"
uuid "github.com/satori/go.uuid"
"github.com/spf13/cobra"
)
var conduitTemplate = `### Namespace ###
kind: Namespace
apiVersion: v1
metadata:
name: {{.Namespace}}
### Controller ###
---
kind: Service
apiVersion: v1
metadata:
name: api
namespace: {{.Namespace}}
labels:
app: controller
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
spec:
type: ClusterIP
selector:
app: controller
ports:
- name: http
port: 8085
targetPort: 8085
---
kind: Service
apiVersion: v1
metadata:
name: proxy-api
namespace: {{.Namespace}}
labels:
app: controller
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
spec:
type: ClusterIP
selector:
app: controller
ports:
- name: grpc
port: 8086
targetPort: 8086
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: controller
namespace: {{.Namespace}}
labels:
app: controller
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
spec:
replicas: {{.ControllerReplicas}}
template:
metadata:
labels:
app: controller
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
spec:
containers:
- name: public-api
ports:
- name: http
containerPort: 8085
- name: admin-http
containerPort: 9995
image: {{.ControllerImage}}
imagePullPolicy: {{.ImagePullPolicy}}
args:
- "public-api"
- "-addr=:8085"
- "-metrics-addr=:9995"
- "-telemetry-addr=127.0.0.1:8087"
- "-tap-addr=127.0.0.1:8088"
- name: destination
ports:
- name: grpc
containerPort: 8089
- name: admin-http
containerPort: 9999
image: {{.ControllerImage}}
imagePullPolicy: {{.ImagePullPolicy}}
args:
- "destination"
- "-addr=:8089"
- "-metrics-addr=:9999"
- name: proxy-api
ports:
- name: grpc
containerPort: 8086
- name: admin-http
containerPort: 9996
image: {{.ControllerImage}}
imagePullPolicy: {{.ImagePullPolicy}}
args:
- "proxy-api"
- "-addr=:8086"
- "-metrics-addr=:9996"
- "-destination-addr=:8089"
- "-telemetry-addr=:8087"
- name: tap
ports:
- name: grpc
containerPort: 8088
- name: admin-http
containerPort: 9998
image: {{.ControllerImage}}
imagePullPolicy: {{.ImagePullPolicy}}
args:
- "tap"
- "-addr=:8088"
- "-metrics-addr=:9998"
- name: telemetry
ports:
- name: grpc
containerPort: 8087
- name: admin-http
containerPort: 9997
image: {{.ControllerImage}}
imagePullPolicy: {{.ImagePullPolicy}}
args:
- "telemetry"
- "-addr=:8087"
- "-metrics-addr=:9997"
- "-ignore-namespaces=kube-system"
- "-prometheus-url=http://prometheus:9090"
### Web ###
---
kind: Service
apiVersion: v1
metadata:
name: web
namespace: {{.Namespace}}
labels:
app: web
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
spec:
type: ClusterIP
selector:
app: web
ports:
- name: http
port: 8084
targetPort: 8084
- name: admin-http
port: 9994
targetPort: 9994
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: web
namespace: {{.Namespace}}
labels:
app: web
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
spec:
replicas: {{.WebReplicas}}
template:
metadata:
labels:
app: web
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
spec:
containers:
- name: web
ports:
- name: http
containerPort: 8084
- name: admin-http
containerPort: 9994
image: {{.WebImage}}
imagePullPolicy: {{.ImagePullPolicy}}
args:
- "-addr=:8084"
- "-metrics-addr=:9994"
- "-api-addr=api:8085"
- "-static-dir=/dist"
- "-template-dir=/templates"
- "-uuid={{.UUID}}"
- "-namespace={{.Namespace}}"
### Prometheus ###
---
kind: Service
apiVersion: v1
metadata:
name: prometheus
namespace: {{.Namespace}}
labels:
app: prometheus
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
spec:
type: ClusterIP
selector:
app: prometheus
ports:
- name: http
port: 9090
targetPort: 9090
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: prometheus
namespace: {{.Namespace}}
labels:
app: prometheus
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
spec:
replicas: {{.PrometheusReplicas}}
template:
metadata:
labels:
app: prometheus
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
spec:
volumes:
- name: prometheus-config
configMap:
name: prometheus-config
containers:
- name: prometheus
ports:
- name: http
containerPort: 9090
volumeMounts:
- name: prometheus-config
mountPath: /etc/prometheus
readOnly: true
image: {{.PrometheusImage}}
imagePullPolicy: {{.ImagePullPolicy}}
args:
- "-storage.local.retention=6h"
- "-storage.local.memory-chunks=500000"
- "-config.file=/etc/prometheus/prometheus.yml"
# TODO remove/replace?
- name: kubectl
image: buoyantio/kubectl:v1.6.2
args: ["proxy", "-p", "8001"]
---
kind: ConfigMap
apiVersion: v1
metadata:
name: prometheus-config
namespace: {{.Namespace}}
labels:
app: prometheus
conduit.io/plane: control
annotations:
conduit.io/created-by: "{{.CliVersion}}"
data:
prometheus.yml: |-
global:
scrape_interval: 10s
evaluation_interval: 10s
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
- job_name: 'controller'
kubernetes_sd_configs:
- role: pod
namespaces:
names: ['{{.Namespace}}']
relabel_configs:
- source_labels: [__meta_kubernetes_pod_container_port_name]
action: keep
regex: ^admin-http$
- source_labels: [__meta_kubernetes_pod_container_name]
action: replace
target_label: job
`
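// Note on the scrape config above: the relabel rules keep only pod targets
// whose container port is named "admin-http", then copy the container name
// into the job label, so each control-plane container appears as its own
// Prometheus job.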
type installConfig struct {
Namespace string
ControllerImage string
WebImage string
PrometheusImage string
ControllerReplicas uint
WebReplicas uint
PrometheusReplicas uint
ImagePullPolicy string
UUID string
CliVersion string
}
var (
version string
dockerRegistry string
controllerReplicas uint
webReplicas uint
prometheusReplicas uint
imagePullPolicy string
)
var installCmd = &cobra.Command{
Use: "install [flags]",
Short: "Output Kubernetes configs to install Conduit",
Long: "Output Kubernetes configs to install Conduit.",
RunE: func(cmd *cobra.Command, args []string) error {
if err := validate(); err != nil {
log.Fatal(err.Error())
}
tmpl, err := template.New("conduit").Parse(conduitTemplate)
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, installConfig{
Namespace: controlPlaneNamespace,
ControllerImage: fmt.Sprintf("%s/controller:%s", dockerRegistry, version),
WebImage: fmt.Sprintf("%s/web:%s", dockerRegistry, version),
PrometheusImage: "prom/prometheus:v1.8.1",
ControllerReplicas: controllerReplicas,
WebReplicas: webReplicas,
PrometheusReplicas: prometheusReplicas,
ImagePullPolicy: imagePullPolicy,
UUID: uuid.NewV4().String(),
CliVersion: fmt.Sprintf("conduit/cli %s", controller.Version),
})
},
}
var alphaNumDash = regexp.MustCompile("^[a-zA-Z0-9-]+$")
var alphaNumDashDot = regexp.MustCompile("^[\\.a-zA-Z0-9-]+$")
var alphaNumDashDotSlash = regexp.MustCompile("^[\\./a-zA-Z0-9-]+$")
func validate() error {
// These regexes are not as strict as they could be, but are a quick and dirty
// sanity check against illegal characters.
if !alphaNumDash.MatchString(controlPlaneNamespace) {
return fmt.Errorf("%s is not a valid namespace", controlPlaneNamespace)
}
if !alphaNumDashDot.MatchString(version) {
return fmt.Errorf("%s is not a valid verison", version)
}
if !alphaNumDashDotSlash.MatchString(dockerRegistry) {
return fmt.Errorf("%s is not a valid Docker registry", dockerRegistry)
}
if imagePullPolicy != "Always" && imagePullPolicy != "IfNotPresent" && imagePullPolicy != "Never" {
return fmt.Errorf("imagePullPolicy must be one of Always, IfNotPresent, or Never")
}
return nil
}
func init() {
RootCmd.AddCommand(installCmd)
installCmd.PersistentFlags().StringVarP(&version, "version", "v", "latest", "Conduit version to install")
installCmd.PersistentFlags().StringVarP(&dockerRegistry, "registry", "r", "gcr.io/runconduit", "Docker registry to pull images from")
installCmd.PersistentFlags().UintVar(&controllerReplicas, "controller-replicas", 1, "replicas of the controller to deploy")
installCmd.PersistentFlags().UintVar(&webReplicas, "web-replicas", 1, "replicas of the web server to deploy")
installCmd.PersistentFlags().UintVar(&prometheusReplicas, "prometheus-replicas", 1, "replicas of prometheus to deploy")
installCmd.PersistentFlags().StringVar(&imagePullPolicy, "image-pull-policy", "IfNotPresent", "Docker image pull policy")
}

118
cli/cmd/root.go Normal file
View File

@ -0,0 +1,118 @@
package cmd
import (
"errors"
"fmt"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"github.com/runconduit/conduit/controller/api/public"
pb "github.com/runconduit/conduit/controller/gen/public"
"github.com/spf13/cobra"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var cfgFile string
var controlPlaneNamespace string
var apiAddr string // An empty value means "use the Kubernetes configuration"
var kubeconfigPath string
var RootCmd = &cobra.Command{
Use: "conduit",
Short: "conduit manages the Conduit service mesh",
Long: `conduit manages the Conduit service mesh.`,
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := RootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(-1)
}
}
func init() {
RootCmd.PersistentFlags().StringVarP(&controlPlaneNamespace, "conduit-namespace", "n", "conduit", "namespace in which Conduit is installed")
}
// TODO: decide if we want to use viper
func addControlPlaneNetworkingArgs(cmd *cobra.Command) {
// See https://github.com/kubernetes/client-go/blob/master/examples/out-of-cluster-client-configuration/main.go
kubeconfigDefaultPath := ""
var homeEnvVar string
if runtime.GOOS == "windows" {
homeEnvVar = "USERPROFILE"
} else {
homeEnvVar = "HOME"
}
homeDir := os.Getenv(homeEnvVar)
if homeDir != "" {
kubeconfigDefaultPath = filepath.Join(homeDir, ".kube", "config")
}
// Use the same argument name as `kubectl` (see the output of `kubectl options`).
cmd.PersistentFlags().StringVar(&kubeconfigPath, "kubeconfig", kubeconfigDefaultPath, "Path to the kubeconfig file to use for CLI requests")
cmd.PersistentFlags().StringVar(&apiAddr, "api-addr", "", "Override kubeconfig and communicate directly with the control plane at host:port (mostly for testing)")
}
func newApiClient() (pb.ApiClient, error) {
var serverURL *url.URL
var transport http.RoundTripper
if apiAddr != "" {
// TODO: Standalone local testing should be done over HTTPS too.
serverURL = &url.URL{
Scheme: "http",
Host: apiAddr,
Path: "/",
}
transport = http.DefaultTransport
} else {
kubeConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return nil, err
}
serverURLBase, err := url.Parse(kubeConfig.Host)
if err != nil {
return nil, fmt.Errorf("invalid host in kubernetes config: %s", kubeConfig.Host)
}
proxyURLRef := url.URL{
Path: fmt.Sprintf("api/v1/namespaces/%s/services/http:api:http/proxy/", controlPlaneNamespace),
}
serverURL = serverURLBase.ResolveReference(&proxyURLRef)
transport, err = rest.TransportFor(kubeConfig)
if err != nil {
return nil, err
}
}
apiConfig := &public.Config{
ServerURL: serverURL,
}
return public.NewClient(apiConfig, transport)
}
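// For illustration (an assumed example, not a recorded value): with a
// kubeconfig host of https://10.0.0.1:6443 and the default "conduit"
// namespace, newApiClient resolves serverURL to
// https://10.0.0.1:6443/api/v1/namespaces/conduit/services/http:api:http/proxy/
// so the public API is reached through the Kubernetes API server's service proxy.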
// Exit with non-zero exit status without printing the command line usage and
// without printing the error message.
//
// When a `RunE` command returns an error, Cobra will print the usage message
// so the `RunE` function needs to handle any non-usage errors itself without
// returning an error. `exitSilentlyOnError` can be used as the `Run` (not
// `RunE`) function to help with this.
//
// TODO: This is used by the `version` command now; it should be used by other commands too.
func exitSilentlyOnError(f func(cmd *cobra.Command, args []string) error) func(cmd *cobra.Command, args []string) {
return func(cmd *cobra.Command, args []string) {
if err := f(cmd, args); err != nil {
os.Exit(2) // Reserve 1 for usage errors.
}
}
}
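// Usage sketch (hypothetical command, shown only to illustrate the wrapper):
//
//	var fooCmd = &cobra.Command{
//	    Use: "foo",
//	    Run: exitSilentlyOnError(func(cmd *cobra.Command, args []string) error {
//	        return doSomethingThatMayFail()
//	    }),
//	}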

188
cli/cmd/stat.go Normal file
View File

@ -0,0 +1,188 @@
package cmd
import (
"context"
"errors"
"fmt"
"os"
"sort"
"strings"
"text/tabwriter"
"github.com/runconduit/conduit/controller/api/util"
pb "github.com/runconduit/conduit/controller/gen/public"
"github.com/spf13/cobra"
)
const padding = 3
type row struct {
requestRate float64
successRate float64
latencyP50 int64
latencyP99 int64
}
var target string
var timeWindow string
var watch bool
var watchOnly bool
var statCmd = &cobra.Command{
Use: "stat [flags] RESOURCE [TARGET]",
Short: "Display runtime statistics about mesh resources",
Long: `Display runtime statistics about mesh resources.
Valid resource types include:
* pods (aka pod, po)
* deployments (aka deployment, deploy)
The optional [TARGET] argument can be the name of a deployment or pod.`,
RunE: func(cmd *cobra.Command, args []string) error {
var resourceType string
switch len(args) {
case 1:
resourceType = args[0]
case 2:
resourceType = args[0]
target = args[1]
default:
return errors.New("please specify a resource type: pods or deployments")
}
switch resourceType {
case "pods", "pod", "po":
return makeStatsRequest(pb.AggregationType_TARGET_POD)
case "deployments", "deployment", "deploy":
return makeStatsRequest(pb.AggregationType_TARGET_DEPLOY)
default:
return errors.New("invalid resource type")
}
},
}
func makeStatsRequest(aggType pb.AggregationType) error {
client, err := newApiClient()
if err != nil {
return err
}
req, err := buildMetricRequest(aggType)
if err != nil {
return err
}
resp, err := client.Stat(context.Background(), req)
if err != nil {
return err
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, padding, ' ', 0)
displayStats(resp, w)
w.Flush()
return nil
}
func sortStatsKeys(stats map[string]*row) []string {
var sortedKeys []string
for key := range stats {
sortedKeys = append(sortedKeys, key)
}
sort.Strings(sortedKeys)
return sortedKeys
}
func displayStats(resp *pb.MetricResponse, w *tabwriter.Writer) {
fmt.Fprintln(w, strings.Join([]string{
"NAME",
"REQUEST_RATE",
"SUCCESS_RATE",
"P50_LATENCY",
"P99_LATENCY",
}, "\t"))
stats := make(map[string]*row)
for _, metric := range resp.Metrics {
if len(metric.Datapoints) == 0 {
continue
}
metadata := *metric.Metadata
var name string
if metadata.TargetPod != "" {
name = metadata.TargetPod
} else if metadata.TargetDeploy != "" {
name = metadata.TargetDeploy
}
if _, ok := stats[name]; !ok {
stats[name] = &row{}
}
switch metric.Name {
case pb.MetricName_REQUEST_RATE:
stats[name].requestRate = metric.Datapoints[0].Value.GetGauge()
case pb.MetricName_SUCCESS_RATE:
stats[name].successRate = metric.Datapoints[0].Value.GetGauge()
case pb.MetricName_LATENCY:
for _, v := range metric.Datapoints[0].Value.GetHistogram().Values {
switch v.Label {
case pb.HistogramLabel_P50:
stats[name].latencyP50 = v.Value
case pb.HistogramLabel_P99:
stats[name].latencyP99 = v.Value
}
}
}
}
sortedNames := sortStatsKeys(stats)
for _, name := range sortedNames {
fmt.Fprintf(
w,
"%s\t%frps\t%f%%\t%dms\t%dms\n",
name,
stats[name].requestRate,
stats[name].successRate*100,
stats[name].latencyP50,
stats[name].latencyP99,
)
}
}
func buildMetricRequest(aggregationType pb.AggregationType) (*pb.MetricRequest, error) {
var filterBy pb.MetricMetadata
window, err := util.GetWindow(timeWindow)
if err != nil {
return nil, err
}
if target != "all" && aggregationType == pb.AggregationType_TARGET_POD {
filterBy.TargetPod = target
}
if target != "all" && aggregationType == pb.AggregationType_TARGET_DEPLOY {
filterBy.TargetDeploy = target
}
return &pb.MetricRequest{
Metrics: []pb.MetricName{
pb.MetricName_REQUEST_RATE,
pb.MetricName_SUCCESS_RATE,
pb.MetricName_LATENCY,
},
Window: window,
FilterBy: &filterBy,
GroupBy: aggregationType,
Summarize: true,
}, nil
}
func init() {
RootCmd.AddCommand(statCmd)
addControlPlaneNetworkingArgs(statCmd)
statCmd.PersistentFlags().StringVarP(&timeWindow, "time-window", "t", "1m", "Stat window. One of: '10s', '1m', '10m', '1h', '6h', '24h'.")
statCmd.PersistentFlags().BoolVarP(&watch, "watch", "w", false, "After listing/getting the requested object, watch for changes.")
statCmd.PersistentFlags().BoolVar(&watchOnly, "watch-only", false, "Watch for changes to the requested object(s), without listing/getting first.")
}

26
cli/cmd/stat_test.go Normal file
View File

@ -0,0 +1,26 @@
package cmd
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSortStatsKeys(t *testing.T) {
unsorted := map[string]*row{
"kube-system/heapster-v1.4.3": {0.008091, 24.137931, 516666, 990333},
"test/backend4": {0.066121, 38.818565, 494553, 989891},
"test/hello10": {0.000000, 0.000000, 0, 0},
"test/world-deploy1": {0.051893, 33.870968, 510526, 990210},
"test/world-deploy2": {2.504800, 33.749165, 497249, 989944},
"kube-system/kubernetes-dashboard": {0.017856, 39.062500, 520000, 990400},
"other/grafana": {0.060557, 35.944212, 518960, 990379},
"kube-system/l7-default-backend": {0.020371, 31.508049, 516923, 990338},
}
expected := []string{"other/grafana", "kube-system/heapster-v1.4.3", "kube-system/kubernetes-dashboard",
"kube-system/l7-default-backend", "test/backend4", "test/hello10", "test/world-deploy1", "test/world-deploy2"}
sorted := sortStatsKeys(unsorted)
assert.Equal(t, expected, sorted, "Not Sorted!")
}

141
cli/cmd/tap.go Normal file
View File

@ -0,0 +1,141 @@
package cmd
import (
"context"
"errors"
"fmt"
"io"
"strings"
common "github.com/runconduit/conduit/controller/gen/common"
pb "github.com/runconduit/conduit/controller/gen/public"
"github.com/runconduit/conduit/controller/util"
"github.com/spf13/cobra"
)
var (
maxRps float32
toPort uint32
toIP string
fromPort uint32
fromIP string
scheme string
method string
authority string
path string
)
var tapCmd = &cobra.Command{
Use: "tap [flags] (deployment|pod) TARGET",
Short: "Listen to a traffic stream",
Long: `Listen to a traffic stream.
Valid targets include:
* Pods (default/hello-world-h4fb2)
* Deployments (default/hello-world)`,
RunE: func(cmd *cobra.Command, args []string) error {
switch len(args) {
case 2:
resourceType := strings.ToLower(args[0])
// We don't validate inputs because they are validated on the server.
req := &pb.TapRequest{
MaxRps: maxRps,
ToPort: toPort,
ToIP: toIP,
FromPort: fromPort,
FromIP: fromIP,
Scheme: scheme,
Method: method,
Authority: authority,
Path: path,
}
switch resourceType {
case "deploy", "deployment", "deployments":
req.Target = &pb.TapRequest_Deployment{
Deployment: args[1],
}
case "po", "pod", "pods":
req.Target = &pb.TapRequest_Pod{
Pod: args[1],
}
default:
return errors.New("invalid target type")
}
client, err := newApiClient()
if err != nil {
return err
}
rsp, err := client.Tap(context.Background(), req)
if err != nil {
return err
}
print(rsp)
return nil
default:
return errors.New("please specify a target")
}
},
}
func init() {
RootCmd.AddCommand(tapCmd)
addControlPlaneNetworkingArgs(tapCmd)
tapCmd.PersistentFlags().Float32Var(&maxRps, "max-rps", 1.0, "Maximum requests per second to tap.")
tapCmd.PersistentFlags().Uint32Var(&toPort, "to-port", 0, "Display requests to this port")
tapCmd.PersistentFlags().StringVar(&toIP, "to-ip", "", "Display requests to this IP")
tapCmd.PersistentFlags().Uint32Var(&fromPort, "from-port", 0, "Display requests from this port")
tapCmd.PersistentFlags().StringVar(&fromIP, "from-ip", "", "Display requests from this IP")
tapCmd.PersistentFlags().StringVar(&scheme, "scheme", "", "Display requests with this scheme")
tapCmd.PersistentFlags().StringVar(&method, "method", "", "Display requests with this HTTP method")
tapCmd.PersistentFlags().StringVar(&authority, "authority", "", "Display requests with this :authority")
tapCmd.PersistentFlags().StringVar(&path, "path", "", "Display requests with paths that start with this prefix")
}
func print(rsp pb.Api_TapClient) {
for {
event, err := rsp.Recv()
if err == io.EOF {
break
}
if err != nil {
fmt.Println(err)
break
}
fmt.Printf("[%s -> %s]\n", util.AddressToString(event.GetSource()), util.AddressToString(event.GetTarget()))
switch ev := event.GetHttp().Event.(type) {
case *common.TapEvent_Http_RequestInit_:
fmt.Printf("HTTP Request\n")
fmt.Printf("Stream ID: (%d, %d)\n", ev.RequestInit.Id.Base, ev.RequestInit.Id.Stream)
fmt.Printf("%s %s %s%s\n",
ev.RequestInit.Scheme.GetRegistered().String(),
ev.RequestInit.Method.GetRegistered().String(),
ev.RequestInit.Authority,
ev.RequestInit.Path,
)
fmt.Println()
case *common.TapEvent_Http_ResponseInit_:
fmt.Printf("HTTP Response\n")
fmt.Printf("Stream ID: (%d, %d)\n", ev.ResponseInit.Id.Base, ev.ResponseInit.Id.Stream)
fmt.Printf("Status: %d\nLatency (us): %d\n",
ev.ResponseInit.GetHttpStatus(),
ev.ResponseInit.GetSinceRequestInit().Nanos/1000,
)
fmt.Println()
case *common.TapEvent_Http_ResponseEnd_:
fmt.Printf("HTTP Response End\n")
fmt.Printf("Stream ID: (%d, %d)\n", ev.ResponseEnd.Id.Base, ev.ResponseEnd.Id.Stream)
fmt.Printf("Grpc-Status: %d\nDuration (us): %d\nBytes: %d\n",
ev.ResponseEnd.GetGrpcStatus(),
ev.ResponseEnd.GetSinceResponseInit().Nanos/1000,
ev.ResponseEnd.GetResponseBytes(),
)
fmt.Println()
}
}
}

45
cli/cmd/version.go Normal file
View File

@ -0,0 +1,45 @@
package cmd
import (
"context"
"fmt"
"github.com/runconduit/conduit/controller"
pb "github.com/runconduit/conduit/controller/gen/public"
"github.com/spf13/cobra"
)
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the client and server version information",
Long: "Print the client and server version information.",
Args: cobra.NoArgs,
Run: exitSilentlyOnError(func(cmd *cobra.Command, args []string) error {
fmt.Println("Client version: " + controller.Version)
serverVersion, err := getVersion()
if err != nil {
serverVersion = "unavailable"
}
fmt.Println("Server version: " + serverVersion)
return err
}),
}
func init() {
RootCmd.AddCommand(versionCmd)
addControlPlaneNetworkingArgs(versionCmd)
}
func getVersion() (string, error) {
client, err := newApiClient()
if err != nil {
return "", err
}
resp, err := client.Version(context.Background(), &pb.Empty{})
if err != nil {
return "", err
}
return resp.GetReleaseVersion(), nil
}

18
cli/main.go Normal file
View File

@ -0,0 +1,18 @@
package main
import (
"fmt"
"os"
"github.com/runconduit/conduit/cli/cmd"
// Load all the auth plugins for the cloud providers.
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
func main() {
if err := cmd.RootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}

7
codegen/Cargo.toml Normal file
View File

@ -0,0 +1,7 @@
[package]
name = "codegen"
version = "0.1.0"
authors = ["Carl Lerche <me@carllerche.com>"]
[dependencies]
ordermap = "0.3.0"

1284
codegen/src/lib.rs Normal file

File diff suppressed because it is too large Load Diff

240
codegen/tests/codegen.rs Normal file
View File

@ -0,0 +1,240 @@
extern crate codegen;
use codegen::Scope;
#[test]
fn empty_scope() {
let scope = Scope::new();
assert_eq!(scope.to_string(), "");
}
#[test]
fn single_struct() {
let mut scope = Scope::new();
scope.structure("Foo")
.field("one", "usize")
.field("two", "String");
let expect = r#"
struct Foo {
one: usize,
two: String,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn two_structs() {
let mut scope = Scope::new();
scope.structure("Foo")
.field("one", "usize")
.field("two", "String");
scope.structure("Bar")
.field("hello", "World");
let expect = r#"
struct Foo {
one: usize,
two: String,
}
struct Bar {
hello: World,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_derive() {
let mut scope = Scope::new();
scope.structure("Foo")
.derive("Debug").derive("Clone")
.field("one", "usize")
.field("two", "String");
let expect = r#"
#[derive(Debug, Clone)]
struct Foo {
one: usize,
two: String,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_generics_1() {
let mut scope = Scope::new();
scope.structure("Foo")
.generic("T")
.generic("U")
.field("one", "T")
.field("two", "U");
let expect = r#"
struct Foo<T, U> {
one: T,
two: U,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_generics_2() {
let mut scope = Scope::new();
scope.structure("Foo")
.generic("T, U")
.field("one", "T")
.field("two", "U");
let expect = r#"
struct Foo<T, U> {
one: T,
two: U,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_generics_3() {
let mut scope = Scope::new();
scope.structure("Foo")
.generic("T: Win, U")
.field("one", "T")
.field("two", "U");
let expect = r#"
struct Foo<T: Win, U> {
one: T,
two: U,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_where_clause_1() {
let mut scope = Scope::new();
scope.structure("Foo")
.generic("T")
.bound("T", "Foo")
.field("one", "T");
let expect = r#"
struct Foo<T>
where T: Foo,
{
one: T,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_where_clause_2() {
let mut scope = Scope::new();
scope.structure("Foo")
.generic("T, U")
.bound("T", "Foo")
.bound("U", "Baz")
.field("one", "T")
.field("two", "U");
let expect = r#"
struct Foo<T, U>
where T: Foo,
U: Baz,
{
one: T,
two: U,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_doc() {
let mut scope = Scope::new();
scope.structure("Foo")
.doc("Hello, this is a doc string\n\
that continues on another line.")
.field("one", "T");
let expect = r#"
/// Hello, this is a doc string
/// that continues on another line.
struct Foo {
one: T,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_in_mod() {
let mut scope = Scope::new();
{
let module = scope.module("foo");
module.structure("Foo")
.doc("Hello some docs")
.derive("Debug")
.generic("T, U")
.bound("T", "SomeBound")
.bound("U", "SomeOtherBound")
.field("one", "T")
.field("two", "U")
;
}
let expect = r#"
mod foo {
/// Hello some docs
#[derive(Debug)]
struct Foo<T, U>
where T: SomeBound,
U: SomeOtherBound,
{
one: T,
two: U,
}
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_mod_import() {
let mut scope = Scope::new();
scope.module("foo")
.import("bar", "Bar")
.structure("Foo")
.field("bar", "Bar")
;
let expect = r#"
mod foo {
use bar::Bar;
struct Foo {
bar: Bar,
}
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}

12
controller/Dockerfile Normal file
View File

@ -0,0 +1,12 @@
## compile controller services
FROM gcr.io/runconduit/go-deps:4cf1f22f as golang
WORKDIR /go/src/github.com/runconduit/conduit
COPY controller controller
# use `install` so that we produce multiple binaries
RUN CGO_ENABLED=0 GOOS=linux go install -a -installsuffix cgo ./controller/cmd/...
## package runtime
FROM gcr.io/runconduit/base:2017-10-30.01
RUN mkdir /go
ENV PATH=$PATH:/go/bin
COPY --from=golang /go/bin /go/bin

View File

@ -0,0 +1,15 @@
package proxy
import (
pb "github.com/runconduit/conduit/controller/gen/proxy/telemetry"
"google.golang.org/grpc"
)
func NewTelemetryClient(addr string) (pb.TelemetryClient, *grpc.ClientConn, error) {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, nil, err
}
return pb.NewTelemetryClient(conn), conn, nil
}

View File

@ -0,0 +1,82 @@
package proxy
import (
"context"
"io"
"net"
common "github.com/runconduit/conduit/controller/gen/common"
destination "github.com/runconduit/conduit/controller/gen/proxy/destination"
telemetry "github.com/runconduit/conduit/controller/gen/proxy/telemetry"
"github.com/runconduit/conduit/controller/util"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
)
type (
server struct {
telemetryClient telemetry.TelemetryClient
destinationClient destination.DestinationClient
}
)
func (s *server) Report(ctx context.Context, req *telemetry.ReportRequest) (*telemetry.ReportResponse, error) {
log.Debug("Report")
resp, err := s.telemetryClient.Report(ctx, req)
if err != nil {
log.Errorf("Report: %v", err)
return nil, err
}
return resp, nil
}
func (s *server) Get(dest *common.Destination, stream destination.Destination_GetServer) error {
log := log.WithFields(
log.Fields{
"scheme": dest.Scheme,
"path": dest.Path,
})
log.Debug("Get")
rsp, err := s.destinationClient.Get(stream.Context(), dest)
if err != nil {
log.Error(err)
return err
}
for {
update, err := rsp.Recv()
if err == io.EOF {
break
}
if err != nil {
log.Error(err)
return err
}
log.Debug("Get update: %v", update)
stream.Send(update)
}
log.Debug("Get complete")
return nil
}
/*
* The Proxy-API server accepts requests from proxy instances and forwards those
* requests to the appropriate controller service.
*/
func NewServer(addr string, telemetryClient telemetry.TelemetryClient, destinationClient destination.DestinationClient) (*grpc.Server, net.Listener, error) {
lis, err := net.Listen("tcp", addr)
if err != nil {
return nil, nil, err
}
s := util.NewGrpcServer()
srv := server{telemetryClient: telemetryClient, destinationClient: destinationClient}
telemetry.RegisterTelemetryServer(s, &srv)
destination.RegisterDestinationServer(s, &srv)
return s, lis, nil
}

View File

@ -0,0 +1,142 @@
package public
import (
"bufio"
"bytes"
"encoding/binary"
"io"
"net/http"
"net/url"
"errors"
common "github.com/runconduit/conduit/controller/gen/common"
pb "github.com/runconduit/conduit/controller/gen/public"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
type (
Config struct {
ServerURL *url.URL
}
client struct {
serverURL *url.URL
client *http.Client
}
tapClient struct {
ctx context.Context
reader *bufio.Reader
}
)
func NewClient(config *Config, transport http.RoundTripper) (pb.ApiClient, error) {
if !config.ServerURL.IsAbs() {
return nil, errors.New("server URL must be absolute")
}
return &client{
serverURL: config.ServerURL.ResolveReference(&url.URL{Path: apiPrefix}),
client: &http.Client {
Transport: transport,
},
}, nil
}
func (c *client) Stat(ctx context.Context, req *pb.MetricRequest, _ ...grpc.CallOption) (*pb.MetricResponse, error) {
var msg pb.MetricResponse
err := c.apiRequest(ctx, "Stat", req, &msg)
return &msg, err
}
func (c *client) Version(ctx context.Context, req *pb.Empty, _ ...grpc.CallOption) (*pb.VersionInfo, error) {
var msg pb.VersionInfo
err := c.apiRequest(ctx, "Version", req, &msg)
return &msg, err
}
func (c *client) ListPods(ctx context.Context, req *pb.Empty, _ ...grpc.CallOption) (*pb.ListPodsResponse, error) {
var msg pb.ListPodsResponse
err := c.apiRequest(ctx, "ListPods", req, &msg)
return &msg, err
}
func (c *client) Tap(ctx context.Context, req *pb.TapRequest, _ ...grpc.CallOption) (pb.Api_TapClient, error) {
rsp, err := c.post(ctx, "Tap", req)
if err != nil {
return nil, err
}
go func() {
<-ctx.Done()
rsp.Body.Close()
}()
return &tapClient{ctx: ctx, reader: bufio.NewReader(rsp.Body)}, nil
}
func (c *client) apiRequest(ctx context.Context, endpoint string, req proto.Message, rsp proto.Message) error {
httpRsp, err := c.post(ctx, endpoint, req)
if err != nil {
return err
}
defer httpRsp.Body.Close()
return clientUnmarshal(bufio.NewReader(httpRsp.Body), rsp)
}
func (c *client) post(ctx context.Context, endpoint string, req proto.Message) (*http.Response, error) {
endpointURL := c.serverURL.ResolveReference(&url.URL{Path: endpoint})
reqBytes, err := proto.Marshal(req)
if err != nil {
return nil, err
}
httpReq, err := http.NewRequest(
http.MethodPost,
endpointURL.String(),
bytes.NewReader(reqBytes),
)
if err != nil {
return nil, err
}
return c.client.Do(httpReq.WithContext(ctx))
}
func clientUnmarshal(r *bufio.Reader, msg proto.Message) error {
byteSize := make([]byte, 4)
_, err := io.ReadFull(r, byteSize)
if err != nil {
return err
}
size := binary.LittleEndian.Uint32(byteSize)
msgBytes := make([]byte, size)
_, err = io.ReadFull(r, msgBytes)
if err != nil {
return err
}
return proto.Unmarshal(msgBytes, msg)
}
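// Framing sketch: each message on the wire is a 4-byte little-endian length
// prefix followed by that many bytes of protobuf. For example, a 10-byte
// payload is framed as
//
//	0x0a 0x00 0x00 0x00 <10 payload bytes>
//
// which matches what serverMarshal writes on the server side.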
func (c tapClient) Recv() (*common.TapEvent, error) {
var msg common.TapEvent
err := clientUnmarshal(c.reader, &msg)
return &msg, err
}
// satisfy the pb.Api_TapClient interface
func (c tapClient) Header() (metadata.MD, error) { return nil, nil }
func (c tapClient) Trailer() metadata.MD { return nil }
func (c tapClient) CloseSend() error { return nil }
func (c tapClient) Context() context.Context { return c.ctx }
func (c tapClient) SendMsg(interface{}) error { return nil }
func (c tapClient) RecvMsg(interface{}) error { return nil }

View File

@ -0,0 +1,68 @@
package public
import (
"bufio"
"bytes"
"encoding/binary"
"testing"
"time"
pb "github.com/runconduit/conduit/controller/gen/public"
"github.com/golang/protobuf/proto"
)
func TestClientUnmarshal(t *testing.T) {
versionInfo := pb.VersionInfo{
GoVersion: "1.9.1",
BuildDate: "2017.11.17",
ReleaseVersion: "0.0.1",
}
var unmarshaled pb.VersionInfo
reader := bufferedReader(t, &versionInfo)
err := clientUnmarshal(reader, &unmarshaled)
if err != nil {
t.Fatal(err.Error())
}
if unmarshaled != versionInfo {
t.Fatalf("mismatch, %+v != %+v", unmarshaled, versionInfo)
}
}
func TestClientUnmarshalLargeMessage(t *testing.T) {
series := pb.MetricSeries{
Name: pb.MetricName_REQUEST_RATE,
Metadata: &pb.MetricMetadata{},
Datapoints: make([]*pb.MetricDatapoint, 0),
}
for i := float64(0); i < 1000; i++ {
datapoint := pb.MetricDatapoint{
Value: &pb.MetricValue{Value: &pb.MetricValue_Gauge{Gauge: i}},
TimestampMs: time.Now().UnixNano() / int64(time.Millisecond),
}
series.Datapoints = append(series.Datapoints, &datapoint)
}
var unmarshaled pb.MetricSeries
reader := bufferedReader(t, &series)
err := clientUnmarshal(reader, &unmarshaled)
if err != nil {
t.Fatal(err.Error())
}
if len(unmarshaled.Datapoints) != 1000 {
t.Fatal("missing datapoints")
}
}
func bufferedReader(t *testing.T, msg proto.Message) *bufio.Reader {
msgBytes, err := proto.Marshal(msg)
if err != nil {
t.Fatal(err.Error())
}
sizeBytes := make([]byte, 4)
binary.LittleEndian.PutUint32(sizeBytes, uint32(len(msgBytes)))
return bufio.NewReader(bytes.NewReader(append(sizeBytes, msgBytes...)))
}

View File

@ -0,0 +1,576 @@
package public
import (
"fmt"
"math"
"runtime"
"sort"
"strings"
"time"
"github.com/runconduit/conduit/controller"
"github.com/runconduit/conduit/controller/api/util"
tapPb "github.com/runconduit/conduit/controller/gen/controller/tap"
telemPb "github.com/runconduit/conduit/controller/gen/controller/telemetry"
pb "github.com/runconduit/conduit/controller/gen/public"
"golang.org/x/net/context"
)
type (
grpcServer struct {
telemetryClient telemPb.TelemetryClient
tapClient tapPb.TapClient
}
successRate struct {
success float64
failure float64
}
// sortable slice of unix ms timestamps
timestamps []int64
)
const (
countQuery = "sum(irate(responses_total{%s}[%s])) by (%s)"
countHttpQuery = "sum(irate(http_requests_total{%s}[%s])) by (%s)"
countGrpcQuery = "sum(irate(grpc_server_handled_total{%s}[%s])) by (%s)"
latencyQuery = "sum(irate(response_latency_ms_bucket{%s}[%s])) by (%s)"
quantileQuery = "histogram_quantile(%s, %s)"
defaultVectorRange = "1m"
targetPodLabel = "target"
targetDeployLabel = "target_deployment"
sourcePodLabel = "source"
sourceDeployLabel = "source_deployment"
jobLabel = "job"
)
var (
quantileMap = map[string]pb.HistogramLabel{
"0.5": pb.HistogramLabel_P50,
"0.95": pb.HistogramLabel_P95,
"0.99": pb.HistogramLabel_P99,
}
stepMap = map[pb.TimeWindow]string{
pb.TimeWindow_TEN_SEC: "10s",
pb.TimeWindow_ONE_MIN: "10s",
pb.TimeWindow_TEN_MIN: "10s",
pb.TimeWindow_ONE_HOUR: "1m",
}
aggregationMap = map[pb.AggregationType]string{
pb.AggregationType_TARGET_POD: targetPodLabel,
pb.AggregationType_TARGET_DEPLOY: targetDeployLabel,
pb.AggregationType_SOURCE_POD: sourcePodLabel,
pb.AggregationType_SOURCE_DEPLOY: sourceDeployLabel,
pb.AggregationType_MESH: jobLabel,
}
emptyMetadata = pb.MetricMetadata{}
)
func newGrpcServer(telemetryClient telemPb.TelemetryClient, tapClient tapPb.TapClient) *grpcServer {
return &grpcServer{telemetryClient: telemetryClient, tapClient: tapClient}
}
func (s *grpcServer) Stat(ctx context.Context, req *pb.MetricRequest) (*pb.MetricResponse, error) {
metrics := make([]*pb.MetricSeries, 0)
for _, metric := range req.Metrics {
var err error
var series []*pb.MetricSeries
switch metric {
case pb.MetricName_REQUEST_RATE:
if req.GroupBy == pb.AggregationType_MESH {
series, err = s.requestRateMesh(ctx, req)
} else {
series, err = s.requestRate(ctx, req)
}
case pb.MetricName_SUCCESS_RATE:
if req.GroupBy == pb.AggregationType_MESH {
series, err = s.successRateMesh(ctx, req)
} else {
series, err = s.successRate(ctx, req)
}
case pb.MetricName_LATENCY:
if req.GroupBy == pb.AggregationType_MESH {
return nil, fmt.Errorf("latency not supported for MESH queries")
} else {
series, err = s.latency(ctx, req)
}
default:
return nil, fmt.Errorf("unsupported metric: %s", metric)
}
if err != nil {
return nil, err
}
metrics = append(metrics, series...)
}
return &pb.MetricResponse{Metrics: metrics}, nil
}
func (_ *grpcServer) Version(ctx context.Context, req *pb.Empty) (*pb.VersionInfo, error) {
return &pb.VersionInfo{GoVersion: runtime.Version(), ReleaseVersion: controller.Version, BuildDate: "1970-01-01T00:00:00Z"}, nil
}
func (s *grpcServer) ListPods(ctx context.Context, req *pb.Empty) (*pb.ListPodsResponse, error) {
resp, err := s.telemetryClient.ListPods(ctx, &telemPb.ListPodsRequest{})
if err != nil {
return nil, err
}
return resp, nil
}
// Pass through to tap service
func (s *grpcServer) Tap(req *pb.TapRequest, stream pb.Api_TapServer) error {
tapStream := stream.(tapServer)
rsp, err := s.tapClient.Tap(tapStream.Context(), req)
if err != nil {
return err
}
for {
select {
case <-tapStream.Context().Done():
return nil
default:
event, err := rsp.Recv()
if err != nil {
return err
}
tapStream.Send(event)
}
}
}
func (s *grpcServer) requestRate(ctx context.Context, req *pb.MetricRequest) ([]*pb.MetricSeries, error) {
queryRsp, err := s.queryCount(ctx, req, countQuery, "")
if err != nil {
return nil, err
}
return processRequestRate(queryRsp.Metrics, extractMetadata)
}
func (s *grpcServer) requestRateMesh(ctx context.Context, req *pb.MetricRequest) ([]*pb.MetricSeries, error) {
httpQueryRsp, err := s.queryCount(ctx, req, countHttpQuery, "")
if err != nil {
return nil, err
}
grpcQueryRsp, err := s.queryCount(ctx, req, countGrpcQuery, "")
if err != nil {
return nil, err
}
metrics := append(httpQueryRsp.Metrics, grpcQueryRsp.Metrics...)
return processRequestRate(metrics, extractMetadataMesh)
}
func (s *grpcServer) successRate(ctx context.Context, req *pb.MetricRequest) ([]*pb.MetricSeries, error) {
queryRsp, err := s.queryCount(ctx, req, countQuery, "classification")
if err != nil {
return nil, err
}
return processSuccessRate(queryRsp.Metrics, extractMetadata, isSuccess)
}
func (s *grpcServer) successRateMesh(ctx context.Context, req *pb.MetricRequest) ([]*pb.MetricSeries, error) {
httpQueryRsp, err := s.queryCount(ctx, req, countHttpQuery, "code")
if err != nil {
return nil, err
}
grpcQueryRsp, err := s.queryCount(ctx, req, countGrpcQuery, "grpc_code")
if err != nil {
return nil, err
}
metrics := append(httpQueryRsp.Metrics, grpcQueryRsp.Metrics...)
return processSuccessRate(metrics, extractMetadataMesh, isSuccessMesh)
}
func (s *grpcServer) latency(ctx context.Context, req *pb.MetricRequest) ([]*pb.MetricSeries, error) {
timestamps := make(map[int64]struct{})
latencies := make(map[pb.MetricMetadata]map[int64][]*pb.HistogramValue)
series := make([]*pb.MetricSeries, 0)
queryRsps, err := s.queryLatency(ctx, req)
if err != nil {
return nil, err
}
for label, queryRsp := range queryRsps {
for _, metric := range queryRsp.Metrics {
if len(metric.Values) == 0 {
continue
}
metadata := extractMetadata(metric)
if metadata == emptyMetadata {
continue
}
if _, ok := latencies[metadata]; !ok {
latencies[metadata] = make(map[int64][]*pb.HistogramValue)
}
for _, value := range metric.Values {
if math.IsNaN(value.Value) {
continue
}
timestamp := value.TimestampMs
timestamps[timestamp] = struct{}{}
if _, ok := latencies[metadata][timestamp]; !ok {
latencies[metadata][timestamp] = make([]*pb.HistogramValue, 0)
}
hv := &pb.HistogramValue{
Label: label,
Value: int64(value.Value),
}
latencies[metadata][timestamp] = append(latencies[metadata][timestamp], hv)
}
}
}
sortedTimestamps := sortTimestamps(timestamps)
for metadata, latenciesByTime := range latencies {
m := metadata
datapoints := make([]*pb.MetricDatapoint, 0)
for _, ts := range sortedTimestamps {
if histogram, ok := latenciesByTime[ts]; ok {
datapoint := &pb.MetricDatapoint{
Value: &pb.MetricValue{
Value: &pb.MetricValue_Histogram{
Histogram: &pb.Histogram{Values: histogram},
},
},
TimestampMs: ts,
}
datapoints = append(datapoints, datapoint)
}
}
s := &pb.MetricSeries{
Name: pb.MetricName_LATENCY,
Metadata: &m,
Datapoints: datapoints,
}
series = append(series, s)
}
return series, nil
}
func (s *grpcServer) queryCount(ctx context.Context, req *pb.MetricRequest, rawQuery, sumBy string) (*telemPb.QueryResponse, error) {
query, err := formatQuery(rawQuery, req, sumBy)
if err != nil {
return nil, err
}
start, end, step, err := queryParams(req)
if err != nil {
return nil, err
}
queryReq := &telemPb.QueryRequest{
Query: query,
StartMs: start,
EndMs: end,
Step: step,
}
queryRsp, err := s.telemetryClient.Query(ctx, queryReq)
if err != nil {
return nil, err
}
if req.Summarize {
filterQueryRsp(queryRsp, end)
}
return queryRsp, nil
}
// TODO: make these requests in parallel
func (s *grpcServer) queryLatency(ctx context.Context, req *pb.MetricRequest) (map[pb.HistogramLabel]*telemPb.QueryResponse, error) {
queryRsps := make(map[pb.HistogramLabel]*telemPb.QueryResponse)
query, err := formatQuery(latencyQuery, req, "le")
if err != nil {
return nil, err
}
start, end, step, err := queryParams(req)
if err != nil {
return nil, err
}
for quantile, label := range quantileMap {
q := fmt.Sprintf(quantileQuery, quantile, query)
queryReq := &telemPb.QueryRequest{
Query: q,
StartMs: start,
EndMs: end,
Step: step,
}
queryRsp, err := s.telemetryClient.Query(ctx, queryReq)
if err != nil {
return nil, err
}
if req.Summarize {
filterQueryRsp(queryRsp, end)
}
queryRsps[label] = queryRsp
}
return queryRsps, nil
}
func formatQuery(query string, req *pb.MetricRequest, sumBy string) (string, error) {
sumLabels := make([]string, 0)
filterLabels := make([]string, 0)
if str, ok := aggregationMap[req.GroupBy]; ok {
sumLabels = append(sumLabels, str)
} else {
return "", fmt.Errorf("unsupported AggregationType")
}
if sumBy != "" {
sumLabels = append(sumLabels, sumBy)
}
if metadata := req.FilterBy; metadata != nil {
if metadata.TargetPod != "" {
filterLabels = append(filterLabels, fmt.Sprintf("%s=\"%s\"", targetPodLabel, metadata.TargetPod))
sumLabels = append(sumLabels, targetPodLabel)
}
if metadata.TargetDeploy != "" {
filterLabels = append(filterLabels, fmt.Sprintf("%s=\"%s\"", targetDeployLabel, metadata.TargetDeploy))
sumLabels = append(sumLabels, targetDeployLabel)
}
if metadata.SourcePod != "" {
filterLabels = append(filterLabels, fmt.Sprintf("%s=\"%s\"", sourcePodLabel, metadata.SourcePod))
sumLabels = append(sumLabels, sourcePodLabel)
}
if metadata.SourceDeploy != "" {
filterLabels = append(filterLabels, fmt.Sprintf("%s=\"%s\"", sourceDeployLabel, metadata.SourceDeploy))
sumLabels = append(sumLabels, sourceDeployLabel)
}
if metadata.Component != "" {
filterLabels = append(filterLabels, fmt.Sprintf("%s=\"%s\"", jobLabel, metadata.Component))
sumLabels = append(sumLabels, jobLabel)
}
}
duration := defaultVectorRange
if req.Summarize {
durationStr, err := util.GetWindowString(req.Window)
if err != nil {
return "", err
}
duration = durationStr
}
return fmt.Sprintf(
query,
strings.Join(filterLabels, ","),
duration,
strings.Join(sumLabels, ","),
), nil
}
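// For example (an illustrative rendering, not captured output): a summarized
// request-rate query grouped by TARGET_DEPLOY over a one-minute window, with
// no filter, formats countQuery as
//
//	sum(irate(responses_total{}[1m])) by (target_deployment)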
func queryParams(req *pb.MetricRequest) (int64, int64, string, error) {
durationStr, err := util.GetWindowString(req.Window)
if err != nil {
return 0, 0, "", err
}
duration, err := time.ParseDuration(durationStr)
if err != nil {
return 0, 0, "", err
}
end := time.Now()
start := end.Add(-1 * duration)
step, ok := stepMap[req.Window]
if !ok {
return 0, 0, "", fmt.Errorf("unsupported Window")
}
ms := int64(time.Millisecond)
return start.UnixNano() / ms, end.UnixNano() / ms, step, nil
}
func filterQueryRsp(rsp *telemPb.QueryResponse, end int64) {
for _, metric := range rsp.Metrics {
values := make([]*telemPb.SampleValue, 0)
for _, v := range metric.Values {
if v.TimestampMs == end {
values = append(values, v)
}
}
metric.Values = values
}
}
func extractMetadata(metric *telemPb.Sample) pb.MetricMetadata {
return pb.MetricMetadata{
TargetPod: metric.Labels[targetPodLabel],
TargetDeploy: metric.Labels[targetDeployLabel],
SourcePod: metric.Labels[sourcePodLabel],
SourceDeploy: metric.Labels[sourceDeployLabel],
}
}
func extractMetadataMesh(metric *telemPb.Sample) pb.MetricMetadata {
return pb.MetricMetadata{
Component: metric.Labels[jobLabel],
}
}
func isSuccess(labels map[string]string) bool {
return labels["classification"] == "success"
}
func isSuccessMesh(labels map[string]string) (success bool) {
// check to see if the http status code is anything but a 5xx error
if v, ok := labels["code"]; ok && !strings.HasPrefix(v, "5") {
success = true
}
// or check to see if the grpc status code is OK
if v, ok := labels["grpc_code"]; ok && v == "OK" {
success = true
}
return
}
func processRequestRate(
metrics []*telemPb.Sample,
metadataFn func(*telemPb.Sample) pb.MetricMetadata,
) ([]*pb.MetricSeries, error) {
series := make([]*pb.MetricSeries, 0)
for _, metric := range metrics {
if len(metric.Values) == 0 {
continue
}
datapoints := make([]*pb.MetricDatapoint, 0)
for _, value := range metric.Values {
datapoint := pb.MetricDatapoint{
Value: &pb.MetricValue{
Value: &pb.MetricValue_Gauge{Gauge: value.Value},
},
TimestampMs: value.TimestampMs,
}
datapoints = append(datapoints, &datapoint)
}
metadata := metadataFn(metric)
if metadata == emptyMetadata {
continue
}
s := &pb.MetricSeries{
Name: pb.MetricName_REQUEST_RATE,
Metadata: &metadata,
Datapoints: datapoints,
}
series = append(series, s)
}
return series, nil
}
func processSuccessRate(
metrics []*telemPb.Sample,
metadataFn func(*telemPb.Sample) pb.MetricMetadata,
successRateFn func(map[string]string) bool,
) ([]*pb.MetricSeries, error) {
timestamps := make(map[int64]struct{})
successRates := make(map[pb.MetricMetadata]map[int64]*successRate)
series := make([]*pb.MetricSeries, 0)
for _, metric := range metrics {
if len(metric.Values) == 0 {
continue
}
isSuccess := successRateFn(metric.Labels)
metadata := metadataFn(metric)
if metadata == emptyMetadata {
continue
}
if _, ok := successRates[metadata]; !ok {
successRates[metadata] = make(map[int64]*successRate)
}
for _, value := range metric.Values {
timestamp := value.TimestampMs
timestamps[timestamp] = struct{}{}
if _, ok := successRates[metadata][timestamp]; !ok {
successRates[metadata][timestamp] = &successRate{}
}
if isSuccess {
successRates[metadata][timestamp].success += value.Value
} else {
successRates[metadata][timestamp].failure += value.Value
}
}
}
sortedTimestamps := sortTimestamps(timestamps)
for metadata, successRateByTime := range successRates {
m := metadata
datapoints := make([]*pb.MetricDatapoint, 0)
for _, ts := range sortedTimestamps {
if sr, ok := successRateByTime[ts]; ok {
if requests := sr.success + sr.failure; requests > 0 {
datapoint := &pb.MetricDatapoint{
Value: &pb.MetricValue{
Value: &pb.MetricValue_Gauge{Gauge: sr.success / requests},
},
TimestampMs: ts,
}
datapoints = append(datapoints, datapoint)
}
}
}
s := &pb.MetricSeries{
Name: pb.MetricName_SUCCESS_RATE,
Metadata: &m,
Datapoints: datapoints,
}
series = append(series, s)
}
return series, nil
}
func (a timestamps) Len() int { return len(a) }
func (a timestamps) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a timestamps) Less(i, j int) bool { return a[i] < a[j] }
func sortTimestamps(timestampMap map[int64]struct{}) timestamps {
sorted := make(timestamps, 0, len(timestampMap))
for t := range timestampMap {
sorted = append(sorted, t)
}
sort.Sort(sorted)
return sorted
}

View File

@ -0,0 +1,235 @@
package public
import (
"encoding/binary"
"io/ioutil"
"net/http"
"github.com/prometheus/client_golang/prometheus/promhttp"
common "github.com/runconduit/conduit/controller/gen/common"
tapPb "github.com/runconduit/conduit/controller/gen/controller/tap"
telemPb "github.com/runconduit/conduit/controller/gen/controller/telemetry"
pb "github.com/runconduit/conduit/controller/gen/public"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
type (
handler struct {
grpcServer pb.ApiServer
}
tapServer struct {
w http.ResponseWriter
req *http.Request
}
)
const (
apiRoot = "/" // Must be absolute (with a leading slash).
apiVersion = "v1"
apiPrefix = "api/" + apiVersion + "/" // Must be relative (without a leading slash).
jsonContentType = "application/json"
protobufContentType = "application/octet-stream"
)
var (
jsonMarshaler = jsonpb.Marshaler{EmitDefaults: true}
jsonUnmarshaler = jsonpb.Unmarshaler{}
)
func NewServer(addr string, telemetryClient telemPb.TelemetryClient, tapClient tapPb.TapClient) *http.Server {
var baseHandler http.Handler
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "http_requests_total",
Help: "A counter for requests to the wrapped handler.",
},
[]string{"code", "method"},
)
prometheus.MustRegister(counter)
baseHandler = &handler{
grpcServer: newGrpcServer(telemetryClient, tapClient),
}
instrumentedHandler := promhttp.InstrumentHandlerCounter(counter, baseHandler)
return &http.Server{
Addr: addr,
Handler: instrumentedHandler,
}
}
func (h *handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// Validate request method
if req.Method != http.MethodPost {
http.Error(w, "POST required", http.StatusMethodNotAllowed)
return
}
// Validate request content type
switch req.Header.Get("Content-Type") {
case "", protobufContentType, jsonContentType:
default:
http.Error(w, "unsupported Content-Type", http.StatusUnsupportedMediaType)
return
}
// Serve request
switch req.URL.Path {
case apiRoot + apiPrefix + "Stat":
h.handleStat(w, req)
case apiRoot + apiPrefix + "Version":
h.handleVersion(w, req)
case apiRoot + apiPrefix + "ListPods":
h.handleListPods(w, req)
case apiRoot + apiPrefix + "Tap":
h.handleTap(w, req)
default:
http.NotFound(w, req)
}
}
func (h *handler) handleStat(w http.ResponseWriter, req *http.Request) {
var metricRequest pb.MetricRequest
err := serverUnmarshal(req, &metricRequest)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
rsp, err := h.grpcServer.Stat(req.Context(), &metricRequest)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = serverMarshal(w, req, rsp)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (h *handler) handleVersion(w http.ResponseWriter, req *http.Request) {
var emptyRequest pb.Empty
err := serverUnmarshal(req, &emptyRequest)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
rsp, err := h.grpcServer.Version(req.Context(), &emptyRequest)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = serverMarshal(w, req, rsp)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (h *handler) handleListPods(w http.ResponseWriter, req *http.Request) {
var emptyRequest pb.Empty
err := serverUnmarshal(req, &emptyRequest)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
rsp, err := h.grpcServer.ListPods(req.Context(), &emptyRequest)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = serverMarshal(w, req, rsp)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (h *handler) handleTap(w http.ResponseWriter, req *http.Request) {
var tapRequest pb.TapRequest
err := serverUnmarshal(req, &tapRequest)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if _, ok := w.(http.Flusher); !ok {
http.Error(w, "streaming not supported", http.StatusBadRequest)
return
}
w.Header().Set("Connection", "keep-alive")
w.Header().Set("Transfer-Encoding", "chunked")
server := tapServer{w: w, req: req}
err = h.grpcServer.Tap(&tapRequest, server)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func serverUnmarshal(req *http.Request, msg proto.Message) error {
switch req.Header.Get("Content-Type") {
case "", protobufContentType:
bytes, err := ioutil.ReadAll(req.Body)
if err != nil {
return err
}
return proto.Unmarshal(bytes, msg)
case jsonContentType:
return jsonUnmarshaler.Unmarshal(req.Body, msg)
}
return nil
}
func serverMarshal(w http.ResponseWriter, req *http.Request, msg proto.Message) error {
switch req.Header.Get("Content-Type") {
case "", protobufContentType:
bytes, err := proto.Marshal(msg)
if err != nil {
return err
}
byteSize := make([]byte, 4)
binary.LittleEndian.PutUint32(byteSize, uint32(len(bytes)))
_, err = w.Write(append(byteSize, bytes...))
return err
case jsonContentType:
str, err := jsonMarshaler.MarshalToString(msg)
if err != nil {
return err
}
_, err = w.Write(append([]byte(str), '\n'))
return err
}
return nil
}
func (s tapServer) Send(msg *common.TapEvent) error {
err := serverMarshal(s.w, s.req, msg)
if err != nil {
return err
}
s.w.(http.Flusher).Flush()
return nil
}
// satisfy the pb.Api_TapServer interface
func (s tapServer) SetHeader(metadata.MD) error { return nil }
func (s tapServer) SendHeader(metadata.MD) error { return nil }
func (s tapServer) SetTrailer(metadata.MD) { return }
func (s tapServer) Context() context.Context { return s.req.Context() }
func (s tapServer) SendMsg(interface{}) error { return nil }
func (s tapServer) RecvMsg(interface{}) error { return nil }

View File

@ -0,0 +1,67 @@
package util
import (
"errors"
pb "github.com/runconduit/conduit/controller/gen/public"
)
/*
Shared utilities for interacting with the controller public api
*/
func GetWindow(timeWindow string) (pb.TimeWindow, error) {
switch timeWindow {
case "10s":
return pb.TimeWindow_TEN_SEC, nil
case "1m":
return pb.TimeWindow_ONE_MIN, nil
case "10m":
return pb.TimeWindow_TEN_MIN, nil
case "1h":
return pb.TimeWindow_ONE_HOUR, nil
}
return pb.TimeWindow_ONE_MIN, errors.New("invalid time-window " + timeWindow)
}
func GetWindowString(timeWindow pb.TimeWindow) (string, error) {
switch timeWindow {
case pb.TimeWindow_TEN_SEC:
return "10s", nil
case pb.TimeWindow_ONE_MIN:
return "1m", nil
case pb.TimeWindow_TEN_MIN:
return "10m", nil
case pb.TimeWindow_ONE_HOUR:
return "1h", nil
}
return "", errors.New("invalid time-window " + timeWindow.String())
}
func GetMetricName(metricName string) (pb.MetricName, error) {
switch metricName {
case "requests":
return pb.MetricName_REQUEST_RATE, nil
case "latency":
return pb.MetricName_LATENCY, nil
case "successRate":
return pb.MetricName_SUCCESS_RATE, nil
}
return pb.MetricName_REQUEST_RATE, errors.New("invalid metric name " + metricName)
}
func GetAggregationType(aggregationType string) (pb.AggregationType, error) {
switch aggregationType {
case "target_pod":
return pb.AggregationType_TARGET_POD, nil
case "target_deploy":
return pb.AggregationType_TARGET_DEPLOY, nil
case "source_pod":
return pb.AggregationType_SOURCE_POD, nil
case "source_deploy":
return pb.AggregationType_SOURCE_DEPLOY, nil
case "mesh":
return pb.AggregationType_MESH, nil
}
return pb.AggregationType_TARGET_POD, errors.New("invalid aggregation type " + aggregationType)
}

View File

@ -0,0 +1,50 @@
package main
import (
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/runconduit/conduit/controller/destination"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
)
func main() {
addr := flag.String("addr", ":8089", "address to serve on")
metricsAddr := flag.String("metrics-addr", ":9999", "address to serve scrapable metrics on")
kubeConfigPath := flag.String("kubeconfig", "", "path to kube config")
flag.Parse()
log.SetLevel(log.DebugLevel) // TODO: make configurable
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
done := make(chan struct{})
server, lis, err := destination.NewServer(*addr, *kubeConfigPath, done)
if err != nil {
log.Fatal(err)
}
go func() {
log.Infof("starting gRPC server on %s\n", *addr)
server.Serve(lis)
}()
go func() {
fmt.Println("serving scrapable metrics on", *metricsAddr)
http.Handle("/metrics", promhttp.Handler())
http.ListenAndServe(*metricsAddr, nil)
}()
<-stop
log.Infof("shutting down gRPC server on %s\n", *addr)
done <- struct{}{}
server.GracefulStop()
}

View File

@ -0,0 +1,60 @@
package main
import (
"flag"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/runconduit/conduit/controller/api/proxy"
"github.com/runconduit/conduit/controller/destination"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
)
func main() {
addr := flag.String("addr", ":8086", "address to serve on")
metricsAddr := flag.String("metrics-addr", ":9996", "address to serve scrapable metrics on")
telemetryAddr := flag.String("telemetry-addr", ":8087", "address of telemetry service")
destinationAddr := flag.String("destination-addr", ":8089", "address of destination service")
flag.Parse()
log.SetLevel(log.DebugLevel) // TODO: make configurable
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
telemetryClient, conn, err := proxy.NewTelemetryClient(*telemetryAddr)
if err != nil {
log.Fatal(err)
}
defer conn.Close()
destinationClient, conn, err := destination.NewClient(*destinationAddr)
if err != nil {
log.Fatal(err)
}
defer conn.Close()
server, lis, err := proxy.NewServer(*addr, telemetryClient, destinationClient)
if err != nil {
log.Fatal(err)
}
go func() {
log.Infof("starting gRPC server on %s", *addr)
server.Serve(lis)
}()
go func() {
log.Infof("serving scrapable metrics on %s", *metricsAddr)
http.Handle("/metrics", promhttp.Handler())
http.ListenAndServe(*metricsAddr, nil)
}()
<-stop
log.Infof("shutting down gRPC server on %s", *addr)
server.GracefulStop()
}

View File

@ -0,0 +1,59 @@
package main
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"syscall"
log "github.com/sirupsen/logrus"
"github.com/runconduit/conduit/controller/api/public"
"github.com/runconduit/conduit/controller/tap"
"github.com/runconduit/conduit/controller/telemetry"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
func main() {
addr := flag.String("addr", ":8085", "address to serve on")
metricsAddr := flag.String("metrics-addr", ":9995", "address to serve scrapable metrics on")
telemetryAddr := flag.String("telemetry-addr", ":8087", "address of telemetry service")
tapAddr := flag.String("tap-addr", ":8088", "address of tap service")
flag.Parse()
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
telemetryClient, telemetryConn, err := telemetry.NewClient(*telemetryAddr)
if err != nil {
log.Fatal(err.Error())
}
defer telemetryConn.Close()
tapClient, tapConn, err := tap.NewClient(*tapAddr)
if err != nil {
log.Fatal(err.Error())
}
defer tapConn.Close()
server := public.NewServer(*addr, telemetryClient, tapClient)
go func() {
fmt.Println("starting HTTP server on", *addr)
server.ListenAndServe()
}()
go func() {
fmt.Println("serving scrapable metrics on", *metricsAddr)
http.Handle("/metrics", promhttp.Handler())
http.ListenAndServe(*metricsAddr, nil)
}()
<-stop
fmt.Println("shutting down HTTP server on", *addr)
server.Shutdown(context.Background())
}

View File

@ -0,0 +1,47 @@
package main
import (
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/runconduit/conduit/controller/tap"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
)
func main() {
addr := flag.String("addr", ":8088", "address to serve on")
metricsAddr := flag.String("metrics-addr", ":9998", "address to serve scrapable metrics on")
kubeConfigPath := flag.String("kubeconfig", "", "path to kube config")
tapPort := flag.Uint("tap-port", 4190, "proxy tap port to connect to")
flag.Parse()
log.SetLevel(log.DebugLevel) // TODO: make configurable
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
server, lis, err := tap.NewServer(*addr, *tapPort, *kubeConfigPath)
if err != nil {
log.Fatal(err.Error())
}
go func() {
log.Println("starting gRPC server on", *addr)
server.Serve(lis)
}()
go func() {
fmt.Println("serving scrapable metrics on", *metricsAddr)
http.Handle("/metrics", promhttp.Handler())
http.ListenAndServe(*metricsAddr, nil)
}()
<-stop
log.Println("shutting down gRPC server on", *addr)
server.GracefulStop()
}

View File

@ -0,0 +1,48 @@
package main
import (
"flag"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"github.com/runconduit/conduit/controller/telemetry"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
)
func main() {
addr := flag.String("addr", ":8087", "address to serve on")
metricsAddr := flag.String("metrics-addr", ":9997", "address to serve scrapable metrics on")
prometheusUrl := flag.String("prometheus-url", "http://127.0.0.1:9090", "prometheus url")
ignoredNamespaces := flag.String("ignore-namespaces", "", "comma separated list of namespaces to not list pods from")
kubeConfigPath := flag.String("kubeconfig", "", "path to kube config")
flag.Parse()
log.SetLevel(log.DebugLevel) // TODO: make configurable
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
server, lis, err := telemetry.NewServer(*addr, *prometheusUrl, strings.Split(*ignoredNamespaces, ","), *kubeConfigPath)
if err != nil {
log.Fatal(err.Error())
}
go func() {
log.Println("starting gRPC server on", *addr)
server.Serve(lis)
}()
go func() {
log.Info("serving scrapable metrics on", *metricsAddr)
http.Handle("/metrics", promhttp.Handler())
http.ListenAndServe(*metricsAddr, nil)
}()
<-stop
log.Println("shutting down gRPC server on", *addr)
server.GracefulStop()
}

View File

@ -0,0 +1,15 @@
package destination
import (
pb "github.com/runconduit/conduit/controller/gen/proxy/destination"
"google.golang.org/grpc"
)
func NewClient(addr string) (pb.DestinationClient, *grpc.ClientConn, error) {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, nil, err
}
return pb.NewDestinationClient(conn), conn, nil
}

View File

@ -0,0 +1,147 @@
package destination
import (
"fmt"
"net"
"strconv"
"strings"
common "github.com/runconduit/conduit/controller/gen/common"
pb "github.com/runconduit/conduit/controller/gen/proxy/destination"
"github.com/runconduit/conduit/controller/k8s"
"github.com/runconduit/conduit/controller/util"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
)
type (
server struct {
endpoints *k8s.EndpointsWatcher
}
)
// The Destination service serves service discovery information to the proxy.
// This implementation supports the "k8s" destination scheme and expects
// destination paths to be of the form:
// <service>.<namespace>.svc.cluster.local:<port>
//
// If the port is omitted, 80 is used as a default. If the namespace is
// omitted, "default" is used as a default.append
//
// Addresses for the given destination are fetched from the Kubernetes Endpoints
// API.
func NewServer(addr, kubeconfig string, done chan struct{}) (*grpc.Server, net.Listener, error) {
clientSet, err := k8s.NewClientSet(kubeconfig)
if err != nil {
return nil, nil, err
}
endpoints := k8s.NewEndpointsWatcher(clientSet)
go endpoints.Run()
srv := &server{
endpoints: endpoints,
}
lis, err := net.Listen("tcp", addr)
if err != nil {
return nil, nil, err
}
s := util.NewGrpcServer()
pb.RegisterDestinationServer(s, srv)
go func() {
<-done
endpoints.Stop()
}()
return s, lis, nil
}
func (s *server) Get(dest *common.Destination, stream pb.Destination_GetServer) error {
log.Debugf("Get %v", dest)
if dest.Scheme != "k8s" {
err := fmt.Errorf("Unsupported scheme %v", dest.Scheme)
log.Error(err)
return err
}
hostPort := strings.Split(dest.Path, ":")
if len(hostPort) > 2 {
err := fmt.Errorf("Invalid destination %s", dest.Path)
log.Error(err)
return err
}
host := hostPort[0]
port := 80
if len(hostPort) == 2 {
var err error
port, err = strconv.Atoi(hostPort[1])
if err != nil {
err := fmt.Errorf("Invalid port %s", hostPort[1])
log.Error(err)
return err
}
}
// service.namespace.svc.cluster.local
domains := strings.Split(host, ".")
service := domains[0]
namespace := "default"
if len(domains) > 1 {
namespace = domains[1]
}
id := namespace + "/" + service
listener := endpointListener{stream: stream}
s.endpoints.Subscribe(id, uint32(port), listener)
<-stream.Context().Done()
s.endpoints.Unsubscribe(id, uint32(port), listener)
return nil
}
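// Illustrative sketch (hypothetical helper, not part of the original commit):
// the destination-path resolution performed inline in Get above. For example,
// "web.emojivoto.svc.cluster.local:8080" yields the subscription id
// "emojivoto/web" on port 8080, while "web" alone yields "default/web" on
// port 80.
func parseK8sPath(path string) (id string, port int, err error) {
hostPort := strings.Split(path, ":")
if len(hostPort) > 2 {
return "", 0, fmt.Errorf("invalid destination %s", path)
}
port = 80
if len(hostPort) == 2 {
if port, err = strconv.Atoi(hostPort[1]); err != nil {
return "", 0, fmt.Errorf("invalid port %s", hostPort[1])
}
}
domains := strings.Split(hostPort[0], ".")
namespace := "default"
if len(domains) > 1 {
namespace = domains[1]
}
return namespace + "/" + domains[0], port, nil
}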
type endpointListener struct {
stream pb.Destination_GetServer
}
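// Update translates an add/remove diff from the endpoints watcher into
// Destination API updates streamed back to the proxy.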
func (listener endpointListener) Update(add []common.TcpAddress, remove []common.TcpAddress) {
if len(add) > 0 {
update := &pb.Update{
Update: &pb.Update_Add{
Add: toWeightedAddrSet(add),
},
}
listener.stream.Send(update)
}
if len(remove) > 0 {
update := &pb.Update{
Update: &pb.Update_Remove{
Remove: toAddrSet(remove),
},
}
listener.stream.Send(update)
}
}
func toWeightedAddrSet(endpoints []common.TcpAddress) *pb.WeightedAddrSet {
addrs := make([]*pb.WeightedAddr, 0)
for i := range endpoints {
addrs = append(addrs, &pb.WeightedAddr{
Addr: &endpoints[i],
Weight: 1,
})
}
return &pb.WeightedAddrSet{Addrs: addrs}
}
func toAddrSet(endpoints []common.TcpAddress) *pb.AddrSet {
addrs := make([]*common.TcpAddress, 0)
for i := range endpoints {
addrs = append(addrs, &endpoints[i])
}
return &pb.AddrSet{Addrs: addrs}
}

997
controller/gen/common/common.pb.go generated Normal file
View File

@ -0,0 +1,997 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: common/common.proto
/*
Package conduit_common is a generated protocol buffer package.
It is generated from these files:
common/common.proto
It has these top-level messages:
HttpMethod
Scheme
IPAddress
IPv6
TcpAddress
Destination
TapEvent
*/
package conduit_common
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/duration"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type HttpMethod_Registered int32
const (
HttpMethod_GET HttpMethod_Registered = 0
HttpMethod_POST HttpMethod_Registered = 1
HttpMethod_PUT HttpMethod_Registered = 2
HttpMethod_DELETE HttpMethod_Registered = 3
HttpMethod_PATCH HttpMethod_Registered = 4
HttpMethod_OPTIONS HttpMethod_Registered = 5
HttpMethod_CONNECT HttpMethod_Registered = 6
HttpMethod_HEAD HttpMethod_Registered = 7
HttpMethod_TRACE HttpMethod_Registered = 8
)
var HttpMethod_Registered_name = map[int32]string{
0: "GET",
1: "POST",
2: "PUT",
3: "DELETE",
4: "PATCH",
5: "OPTIONS",
6: "CONNECT",
7: "HEAD",
8: "TRACE",
}
var HttpMethod_Registered_value = map[string]int32{
"GET": 0,
"POST": 1,
"PUT": 2,
"DELETE": 3,
"PATCH": 4,
"OPTIONS": 5,
"CONNECT": 6,
"HEAD": 7,
"TRACE": 8,
}
func (x HttpMethod_Registered) String() string {
return proto.EnumName(HttpMethod_Registered_name, int32(x))
}
func (HttpMethod_Registered) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
type Scheme_Registered int32
const (
Scheme_HTTP Scheme_Registered = 0
Scheme_HTTPS Scheme_Registered = 1
)
var Scheme_Registered_name = map[int32]string{
0: "HTTP",
1: "HTTPS",
}
var Scheme_Registered_value = map[string]int32{
"HTTP": 0,
"HTTPS": 1,
}
func (x Scheme_Registered) String() string {
return proto.EnumName(Scheme_Registered_name, int32(x))
}
func (Scheme_Registered) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
type HttpMethod struct {
// Types that are valid to be assigned to Type:
// *HttpMethod_Registered_
// *HttpMethod_Unregistered
Type isHttpMethod_Type `protobuf_oneof:"type"`
}
func (m *HttpMethod) Reset() { *m = HttpMethod{} }
func (m *HttpMethod) String() string { return proto.CompactTextString(m) }
func (*HttpMethod) ProtoMessage() {}
func (*HttpMethod) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type isHttpMethod_Type interface {
isHttpMethod_Type()
}
type HttpMethod_Registered_ struct {
Registered HttpMethod_Registered `protobuf:"varint,1,opt,name=registered,enum=conduit.common.HttpMethod_Registered,oneof"`
}
type HttpMethod_Unregistered struct {
Unregistered string `protobuf:"bytes,2,opt,name=unregistered,oneof"`
}
func (*HttpMethod_Registered_) isHttpMethod_Type() {}
func (*HttpMethod_Unregistered) isHttpMethod_Type() {}
func (m *HttpMethod) GetType() isHttpMethod_Type {
if m != nil {
return m.Type
}
return nil
}
func (m *HttpMethod) GetRegistered() HttpMethod_Registered {
if x, ok := m.GetType().(*HttpMethod_Registered_); ok {
return x.Registered
}
return HttpMethod_GET
}
func (m *HttpMethod) GetUnregistered() string {
if x, ok := m.GetType().(*HttpMethod_Unregistered); ok {
return x.Unregistered
}
return ""
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*HttpMethod) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _HttpMethod_OneofMarshaler, _HttpMethod_OneofUnmarshaler, _HttpMethod_OneofSizer, []interface{}{
(*HttpMethod_Registered_)(nil),
(*HttpMethod_Unregistered)(nil),
}
}
func _HttpMethod_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*HttpMethod)
// type
switch x := m.Type.(type) {
case *HttpMethod_Registered_:
b.EncodeVarint(1<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.Registered))
case *HttpMethod_Unregistered:
b.EncodeVarint(2<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Unregistered)
case nil:
default:
return fmt.Errorf("HttpMethod.Type has unexpected type %T", x)
}
return nil
}
func _HttpMethod_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*HttpMethod)
switch tag {
case 1: // type.registered
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Type = &HttpMethod_Registered_{HttpMethod_Registered(x)}
return true, err
case 2: // type.unregistered
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Type = &HttpMethod_Unregistered{x}
return true, err
default:
return false, nil
}
}
func _HttpMethod_OneofSizer(msg proto.Message) (n int) {
m := msg.(*HttpMethod)
// type
switch x := m.Type.(type) {
case *HttpMethod_Registered_:
n += proto.SizeVarint(1<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.Registered))
case *HttpMethod_Unregistered:
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Unregistered)))
n += len(x.Unregistered)
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type Scheme struct {
// Types that are valid to be assigned to Type:
// *Scheme_Registered_
// *Scheme_Unregistered
Type isScheme_Type `protobuf_oneof:"type"`
}
func (m *Scheme) Reset() { *m = Scheme{} }
func (m *Scheme) String() string { return proto.CompactTextString(m) }
func (*Scheme) ProtoMessage() {}
func (*Scheme) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
type isScheme_Type interface {
isScheme_Type()
}
type Scheme_Registered_ struct {
Registered Scheme_Registered `protobuf:"varint,1,opt,name=registered,enum=conduit.common.Scheme_Registered,oneof"`
}
type Scheme_Unregistered struct {
Unregistered string `protobuf:"bytes,2,opt,name=unregistered,oneof"`
}
func (*Scheme_Registered_) isScheme_Type() {}
func (*Scheme_Unregistered) isScheme_Type() {}
func (m *Scheme) GetType() isScheme_Type {
if m != nil {
return m.Type
}
return nil
}
func (m *Scheme) GetRegistered() Scheme_Registered {
if x, ok := m.GetType().(*Scheme_Registered_); ok {
return x.Registered
}
return Scheme_HTTP
}
func (m *Scheme) GetUnregistered() string {
if x, ok := m.GetType().(*Scheme_Unregistered); ok {
return x.Unregistered
}
return ""
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Scheme) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Scheme_OneofMarshaler, _Scheme_OneofUnmarshaler, _Scheme_OneofSizer, []interface{}{
(*Scheme_Registered_)(nil),
(*Scheme_Unregistered)(nil),
}
}
func _Scheme_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Scheme)
// type
switch x := m.Type.(type) {
case *Scheme_Registered_:
b.EncodeVarint(1<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.Registered))
case *Scheme_Unregistered:
b.EncodeVarint(2<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Unregistered)
case nil:
default:
return fmt.Errorf("Scheme.Type has unexpected type %T", x)
}
return nil
}
func _Scheme_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Scheme)
switch tag {
case 1: // type.registered
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Type = &Scheme_Registered_{Scheme_Registered(x)}
return true, err
case 2: // type.unregistered
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Type = &Scheme_Unregistered{x}
return true, err
default:
return false, nil
}
}
func _Scheme_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Scheme)
// type
switch x := m.Type.(type) {
case *Scheme_Registered_:
n += proto.SizeVarint(1<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.Registered))
case *Scheme_Unregistered:
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Unregistered)))
n += len(x.Unregistered)
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type IPAddress struct {
// Types that are valid to be assigned to Ip:
// *IPAddress_Ipv4
// *IPAddress_Ipv6
Ip isIPAddress_Ip `protobuf_oneof:"ip"`
}
func (m *IPAddress) Reset() { *m = IPAddress{} }
func (m *IPAddress) String() string { return proto.CompactTextString(m) }
func (*IPAddress) ProtoMessage() {}
func (*IPAddress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
type isIPAddress_Ip interface {
isIPAddress_Ip()
}
type IPAddress_Ipv4 struct {
Ipv4 uint32 `protobuf:"fixed32,1,opt,name=ipv4,oneof"`
}
type IPAddress_Ipv6 struct {
Ipv6 *IPv6 `protobuf:"bytes,2,opt,name=ipv6,oneof"`
}
func (*IPAddress_Ipv4) isIPAddress_Ip() {}
func (*IPAddress_Ipv6) isIPAddress_Ip() {}
func (m *IPAddress) GetIp() isIPAddress_Ip {
if m != nil {
return m.Ip
}
return nil
}
func (m *IPAddress) GetIpv4() uint32 {
if x, ok := m.GetIp().(*IPAddress_Ipv4); ok {
return x.Ipv4
}
return 0
}
func (m *IPAddress) GetIpv6() *IPv6 {
if x, ok := m.GetIp().(*IPAddress_Ipv6); ok {
return x.Ipv6
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*IPAddress) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _IPAddress_OneofMarshaler, _IPAddress_OneofUnmarshaler, _IPAddress_OneofSizer, []interface{}{
(*IPAddress_Ipv4)(nil),
(*IPAddress_Ipv6)(nil),
}
}
func _IPAddress_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*IPAddress)
// ip
switch x := m.Ip.(type) {
case *IPAddress_Ipv4:
b.EncodeVarint(1<<3 | proto.WireFixed32)
b.EncodeFixed32(uint64(x.Ipv4))
case *IPAddress_Ipv6:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Ipv6); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("IPAddress.Ip has unexpected type %T", x)
}
return nil
}
func _IPAddress_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*IPAddress)
switch tag {
case 1: // ip.ipv4
if wire != proto.WireFixed32 {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeFixed32()
m.Ip = &IPAddress_Ipv4{uint32(x)}
return true, err
case 2: // ip.ipv6
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(IPv6)
err := b.DecodeMessage(msg)
m.Ip = &IPAddress_Ipv6{msg}
return true, err
default:
return false, nil
}
}
func _IPAddress_OneofSizer(msg proto.Message) (n int) {
m := msg.(*IPAddress)
// ip
switch x := m.Ip.(type) {
case *IPAddress_Ipv4:
n += proto.SizeVarint(1<<3 | proto.WireFixed32)
n += 4
case *IPAddress_Ipv6:
s := proto.Size(x.Ipv6)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type IPv6 struct {
First uint64 `protobuf:"fixed64,1,opt,name=first" json:"first,omitempty"`
Last uint64 `protobuf:"fixed64,2,opt,name=last" json:"last,omitempty"`
}
func (m *IPv6) Reset() { *m = IPv6{} }
func (m *IPv6) String() string { return proto.CompactTextString(m) }
func (*IPv6) ProtoMessage() {}
func (*IPv6) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *IPv6) GetFirst() uint64 {
if m != nil {
return m.First
}
return 0
}
func (m *IPv6) GetLast() uint64 {
if m != nil {
return m.Last
}
return 0
}
type TcpAddress struct {
Ip *IPAddress `protobuf:"bytes,1,opt,name=ip" json:"ip,omitempty"`
Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
}
func (m *TcpAddress) Reset() { *m = TcpAddress{} }
func (m *TcpAddress) String() string { return proto.CompactTextString(m) }
func (*TcpAddress) ProtoMessage() {}
func (*TcpAddress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *TcpAddress) GetIp() *IPAddress {
if m != nil {
return m.Ip
}
return nil
}
func (m *TcpAddress) GetPort() uint32 {
if m != nil {
return m.Port
}
return 0
}
type Destination struct {
Scheme string `protobuf:"bytes,1,opt,name=scheme" json:"scheme,omitempty"`
Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"`
}
func (m *Destination) Reset() { *m = Destination{} }
func (m *Destination) String() string { return proto.CompactTextString(m) }
func (*Destination) ProtoMessage() {}
func (*Destination) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *Destination) GetScheme() string {
if m != nil {
return m.Scheme
}
return ""
}
func (m *Destination) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
type TapEvent struct {
Source *TcpAddress `protobuf:"bytes,1,opt,name=source" json:"source,omitempty"`
Target *TcpAddress `protobuf:"bytes,2,opt,name=target" json:"target,omitempty"`
// Types that are valid to be assigned to Event:
// *TapEvent_Http_
Event isTapEvent_Event `protobuf_oneof:"event"`
}
func (m *TapEvent) Reset() { *m = TapEvent{} }
func (m *TapEvent) String() string { return proto.CompactTextString(m) }
func (*TapEvent) ProtoMessage() {}
func (*TapEvent) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type isTapEvent_Event interface {
isTapEvent_Event()
}
type TapEvent_Http_ struct {
Http *TapEvent_Http `protobuf:"bytes,3,opt,name=http,oneof"`
}
func (*TapEvent_Http_) isTapEvent_Event() {}
func (m *TapEvent) GetEvent() isTapEvent_Event {
if m != nil {
return m.Event
}
return nil
}
func (m *TapEvent) GetSource() *TcpAddress {
if m != nil {
return m.Source
}
return nil
}
func (m *TapEvent) GetTarget() *TcpAddress {
if m != nil {
return m.Target
}
return nil
}
func (m *TapEvent) GetHttp() *TapEvent_Http {
if x, ok := m.GetEvent().(*TapEvent_Http_); ok {
return x.Http
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*TapEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _TapEvent_OneofMarshaler, _TapEvent_OneofUnmarshaler, _TapEvent_OneofSizer, []interface{}{
(*TapEvent_Http_)(nil),
}
}
func _TapEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*TapEvent)
// event
switch x := m.Event.(type) {
case *TapEvent_Http_:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Http); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("TapEvent.Event has unexpected type %T", x)
}
return nil
}
func _TapEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*TapEvent)
switch tag {
case 3: // event.http
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(TapEvent_Http)
err := b.DecodeMessage(msg)
m.Event = &TapEvent_Http_{msg}
return true, err
default:
return false, nil
}
}
func _TapEvent_OneofSizer(msg proto.Message) (n int) {
m := msg.(*TapEvent)
// event
switch x := m.Event.(type) {
case *TapEvent_Http_:
s := proto.Size(x.Http)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type TapEvent_Http struct {
// Types that are valid to be assigned to Event:
// *TapEvent_Http_RequestInit_
// *TapEvent_Http_ResponseInit_
// *TapEvent_Http_ResponseEnd_
Event isTapEvent_Http_Event `protobuf_oneof:"event"`
}
func (m *TapEvent_Http) Reset() { *m = TapEvent_Http{} }
func (m *TapEvent_Http) String() string { return proto.CompactTextString(m) }
func (*TapEvent_Http) ProtoMessage() {}
func (*TapEvent_Http) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0} }
type isTapEvent_Http_Event interface {
isTapEvent_Http_Event()
}
type TapEvent_Http_RequestInit_ struct {
RequestInit *TapEvent_Http_RequestInit `protobuf:"bytes,1,opt,name=request_init,json=requestInit,oneof"`
}
type TapEvent_Http_ResponseInit_ struct {
ResponseInit *TapEvent_Http_ResponseInit `protobuf:"bytes,2,opt,name=response_init,json=responseInit,oneof"`
}
type TapEvent_Http_ResponseEnd_ struct {
ResponseEnd *TapEvent_Http_ResponseEnd `protobuf:"bytes,3,opt,name=response_end,json=responseEnd,oneof"`
}
func (*TapEvent_Http_RequestInit_) isTapEvent_Http_Event() {}
func (*TapEvent_Http_ResponseInit_) isTapEvent_Http_Event() {}
func (*TapEvent_Http_ResponseEnd_) isTapEvent_Http_Event() {}
func (m *TapEvent_Http) GetEvent() isTapEvent_Http_Event {
if m != nil {
return m.Event
}
return nil
}
func (m *TapEvent_Http) GetRequestInit() *TapEvent_Http_RequestInit {
if x, ok := m.GetEvent().(*TapEvent_Http_RequestInit_); ok {
return x.RequestInit
}
return nil
}
func (m *TapEvent_Http) GetResponseInit() *TapEvent_Http_ResponseInit {
if x, ok := m.GetEvent().(*TapEvent_Http_ResponseInit_); ok {
return x.ResponseInit
}
return nil
}
func (m *TapEvent_Http) GetResponseEnd() *TapEvent_Http_ResponseEnd {
if x, ok := m.GetEvent().(*TapEvent_Http_ResponseEnd_); ok {
return x.ResponseEnd
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*TapEvent_Http) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _TapEvent_Http_OneofMarshaler, _TapEvent_Http_OneofUnmarshaler, _TapEvent_Http_OneofSizer, []interface{}{
(*TapEvent_Http_RequestInit_)(nil),
(*TapEvent_Http_ResponseInit_)(nil),
(*TapEvent_Http_ResponseEnd_)(nil),
}
}
func _TapEvent_Http_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*TapEvent_Http)
// event
switch x := m.Event.(type) {
case *TapEvent_Http_RequestInit_:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.RequestInit); err != nil {
return err
}
case *TapEvent_Http_ResponseInit_:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ResponseInit); err != nil {
return err
}
case *TapEvent_Http_ResponseEnd_:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ResponseEnd); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("TapEvent_Http.Event has unexpected type %T", x)
}
return nil
}
func _TapEvent_Http_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*TapEvent_Http)
switch tag {
case 1: // event.request_init
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(TapEvent_Http_RequestInit)
err := b.DecodeMessage(msg)
m.Event = &TapEvent_Http_RequestInit_{msg}
return true, err
case 2: // event.response_init
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(TapEvent_Http_ResponseInit)
err := b.DecodeMessage(msg)
m.Event = &TapEvent_Http_ResponseInit_{msg}
return true, err
case 3: // event.response_end
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(TapEvent_Http_ResponseEnd)
err := b.DecodeMessage(msg)
m.Event = &TapEvent_Http_ResponseEnd_{msg}
return true, err
default:
return false, nil
}
}
func _TapEvent_Http_OneofSizer(msg proto.Message) (n int) {
m := msg.(*TapEvent_Http)
// event
switch x := m.Event.(type) {
case *TapEvent_Http_RequestInit_:
s := proto.Size(x.RequestInit)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *TapEvent_Http_ResponseInit_:
s := proto.Size(x.ResponseInit)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *TapEvent_Http_ResponseEnd_:
s := proto.Size(x.ResponseEnd)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type TapEvent_Http_StreamId struct {
// A randomized base (stable across a process's runtime)
Base uint32 `protobuf:"varint,1,opt,name=base" json:"base,omitempty"`
// A stream id unique within the lifetime of `base`.
Stream uint64 `protobuf:"varint,2,opt,name=stream" json:"stream,omitempty"`
}
func (m *TapEvent_Http_StreamId) Reset() { *m = TapEvent_Http_StreamId{} }
func (m *TapEvent_Http_StreamId) String() string { return proto.CompactTextString(m) }
func (*TapEvent_Http_StreamId) ProtoMessage() {}
func (*TapEvent_Http_StreamId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0, 0} }
func (m *TapEvent_Http_StreamId) GetBase() uint32 {
if m != nil {
return m.Base
}
return 0
}
func (m *TapEvent_Http_StreamId) GetStream() uint64 {
if m != nil {
return m.Stream
}
return 0
}
type TapEvent_Http_RequestInit struct {
Id *TapEvent_Http_StreamId `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Method *HttpMethod `protobuf:"bytes,2,opt,name=method" json:"method,omitempty"`
Scheme *Scheme `protobuf:"bytes,3,opt,name=scheme" json:"scheme,omitempty"`
Authority string `protobuf:"bytes,4,opt,name=authority" json:"authority,omitempty"`
Path string `protobuf:"bytes,5,opt,name=path" json:"path,omitempty"`
}
func (m *TapEvent_Http_RequestInit) Reset() { *m = TapEvent_Http_RequestInit{} }
func (m *TapEvent_Http_RequestInit) String() string { return proto.CompactTextString(m) }
func (*TapEvent_Http_RequestInit) ProtoMessage() {}
func (*TapEvent_Http_RequestInit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0, 1} }
func (m *TapEvent_Http_RequestInit) GetId() *TapEvent_Http_StreamId {
if m != nil {
return m.Id
}
return nil
}
func (m *TapEvent_Http_RequestInit) GetMethod() *HttpMethod {
if m != nil {
return m.Method
}
return nil
}
func (m *TapEvent_Http_RequestInit) GetScheme() *Scheme {
if m != nil {
return m.Scheme
}
return nil
}
func (m *TapEvent_Http_RequestInit) GetAuthority() string {
if m != nil {
return m.Authority
}
return ""
}
func (m *TapEvent_Http_RequestInit) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
type TapEvent_Http_ResponseInit struct {
Id *TapEvent_Http_StreamId `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
SinceRequestInit *google_protobuf.Duration `protobuf:"bytes,2,opt,name=since_request_init,json=sinceRequestInit" json:"since_request_init,omitempty"`
HttpStatus uint32 `protobuf:"varint,3,opt,name=http_status,json=httpStatus" json:"http_status,omitempty"`
}
func (m *TapEvent_Http_ResponseInit) Reset() { *m = TapEvent_Http_ResponseInit{} }
func (m *TapEvent_Http_ResponseInit) String() string { return proto.CompactTextString(m) }
func (*TapEvent_Http_ResponseInit) ProtoMessage() {}
func (*TapEvent_Http_ResponseInit) Descriptor() ([]byte, []int) {
return fileDescriptor0, []int{6, 0, 2}
}
func (m *TapEvent_Http_ResponseInit) GetId() *TapEvent_Http_StreamId {
if m != nil {
return m.Id
}
return nil
}
func (m *TapEvent_Http_ResponseInit) GetSinceRequestInit() *google_protobuf.Duration {
if m != nil {
return m.SinceRequestInit
}
return nil
}
func (m *TapEvent_Http_ResponseInit) GetHttpStatus() uint32 {
if m != nil {
return m.HttpStatus
}
return 0
}
type TapEvent_Http_ResponseEnd struct {
Id *TapEvent_Http_StreamId `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
SinceRequestInit *google_protobuf.Duration `protobuf:"bytes,2,opt,name=since_request_init,json=sinceRequestInit" json:"since_request_init,omitempty"`
SinceResponseInit *google_protobuf.Duration `protobuf:"bytes,3,opt,name=since_response_init,json=sinceResponseInit" json:"since_response_init,omitempty"`
ResponseBytes uint64 `protobuf:"varint,4,opt,name=response_bytes,json=responseBytes" json:"response_bytes,omitempty"`
GrpcStatus uint32 `protobuf:"varint,5,opt,name=grpc_status,json=grpcStatus" json:"grpc_status,omitempty"`
}
func (m *TapEvent_Http_ResponseEnd) Reset() { *m = TapEvent_Http_ResponseEnd{} }
func (m *TapEvent_Http_ResponseEnd) String() string { return proto.CompactTextString(m) }
func (*TapEvent_Http_ResponseEnd) ProtoMessage() {}
func (*TapEvent_Http_ResponseEnd) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6, 0, 3} }
func (m *TapEvent_Http_ResponseEnd) GetId() *TapEvent_Http_StreamId {
if m != nil {
return m.Id
}
return nil
}
func (m *TapEvent_Http_ResponseEnd) GetSinceRequestInit() *google_protobuf.Duration {
if m != nil {
return m.SinceRequestInit
}
return nil
}
func (m *TapEvent_Http_ResponseEnd) GetSinceResponseInit() *google_protobuf.Duration {
if m != nil {
return m.SinceResponseInit
}
return nil
}
func (m *TapEvent_Http_ResponseEnd) GetResponseBytes() uint64 {
if m != nil {
return m.ResponseBytes
}
return 0
}
func (m *TapEvent_Http_ResponseEnd) GetGrpcStatus() uint32 {
if m != nil {
return m.GrpcStatus
}
return 0
}
func init() {
proto.RegisterType((*HttpMethod)(nil), "conduit.common.HttpMethod")
proto.RegisterType((*Scheme)(nil), "conduit.common.Scheme")
proto.RegisterType((*IPAddress)(nil), "conduit.common.IPAddress")
proto.RegisterType((*IPv6)(nil), "conduit.common.IPv6")
proto.RegisterType((*TcpAddress)(nil), "conduit.common.TcpAddress")
proto.RegisterType((*Destination)(nil), "conduit.common.Destination")
proto.RegisterType((*TapEvent)(nil), "conduit.common.TapEvent")
proto.RegisterType((*TapEvent_Http)(nil), "conduit.common.TapEvent.Http")
proto.RegisterType((*TapEvent_Http_StreamId)(nil), "conduit.common.TapEvent.Http.StreamId")
proto.RegisterType((*TapEvent_Http_RequestInit)(nil), "conduit.common.TapEvent.Http.RequestInit")
proto.RegisterType((*TapEvent_Http_ResponseInit)(nil), "conduit.common.TapEvent.Http.ResponseInit")
proto.RegisterType((*TapEvent_Http_ResponseEnd)(nil), "conduit.common.TapEvent.Http.ResponseEnd")
proto.RegisterEnum("conduit.common.HttpMethod_Registered", HttpMethod_Registered_name, HttpMethod_Registered_value)
proto.RegisterEnum("conduit.common.Scheme_Registered", Scheme_Registered_name, Scheme_Registered_value)
}
func init() { proto.RegisterFile("common/common.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 789 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x54, 0xdb, 0x6e, 0xf2, 0x46,
0x10, 0xc6, 0xc6, 0x18, 0x18, 0x20, 0x72, 0xf7, 0x8f, 0x7e, 0xa5, 0xa8, 0x87, 0x3f, 0xa8, 0xa9,
0x92, 0x5c, 0x38, 0x15, 0xa9, 0x90, 0x7a, 0xc9, 0xc1, 0x0a, 0xa8, 0x2d, 0xd0, 0xc5, 0xbd, 0x8e,
0x0c, 0xde, 0x80, 0xa5, 0x60, 0xbb, 0xbb, 0xeb, 0x48, 0x79, 0x99, 0x4a, 0xed, 0x13, 0xf4, 0x7d,
0xfa, 0x10, 0xbd, 0xe8, 0x0b, 0x54, 0x7b, 0xc0, 0x98, 0x24, 0x4d, 0xaa, 0xf6, 0xa2, 0x57, 0x9e,
0x19, 0xcf, 0x7c, 0xfb, 0xcd, 0x37, 0xb3, 0x0b, 0xef, 0x56, 0xc9, 0x76, 0x9b, 0xc4, 0x57, 0xea,
0xe3, 0xa6, 0x34, 0xe1, 0x09, 0x3a, 0x5a, 0x25, 0x71, 0x98, 0x45, 0xdc, 0x55, 0xd1, 0xf6, 0x67,
0xeb, 0x24, 0x59, 0xdf, 0x93, 0x2b, 0xf9, 0x77, 0x99, 0xdd, 0x5d, 0x85, 0x19, 0x0d, 0x78, 0xb4,
0xcb, 0xef, 0xfc, 0x69, 0x00, 0x8c, 0x39, 0x4f, 0xbf, 0x27, 0x7c, 0x93, 0x84, 0xe8, 0x06, 0x80,
0x92, 0x75, 0xc4, 0x38, 0xa1, 0x24, 0x3c, 0x31, 0x3e, 0x18, 0xe7, 0x47, 0xdd, 0x33, 0xf7, 0x10,
0xd3, 0xdd, 0xe7, 0xbb, 0x38, 0x4f, 0x1e, 0x97, 0x70, 0xa1, 0x14, 0x7d, 0x01, 0xcd, 0x2c, 0x2e,
0x40, 0x99, 0x1f, 0x8c, 0xf3, 0xfa, 0xb8, 0x84, 0x0f, 0xa2, 0x9d, 0x18, 0x60, 0x8f, 0x80, 0xaa,
0x50, 0xbe, 0xf1, 0x7c, 0xa7, 0x84, 0x6a, 0x60, 0xcd, 0x67, 0x0b, 0xdf, 0x31, 0x44, 0x68, 0xfe,
0xa3, 0xef, 0x98, 0x08, 0xc0, 0x1e, 0x79, 0xdf, 0x79, 0xbe, 0xe7, 0x94, 0x51, 0x1d, 0x2a, 0xf3,
0xbe, 0x3f, 0x1c, 0x3b, 0x16, 0x6a, 0x40, 0x75, 0x36, 0xf7, 0x27, 0xb3, 0xe9, 0xc2, 0xa9, 0x08,
0x67, 0x38, 0x9b, 0x4e, 0xbd, 0xa1, 0xef, 0xd8, 0x02, 0x63, 0xec, 0xf5, 0x47, 0x4e, 0x55, 0xa4,
0xfb, 0xb8, 0x3f, 0xf4, 0x9c, 0xda, 0xc0, 0x06, 0x8b, 0x3f, 0xa6, 0xa4, 0xf3, 0xb3, 0x01, 0xf6,
0x62, 0xb5, 0x21, 0x5b, 0x82, 0x86, 0x2f, 0x74, 0x7c, 0xfa, 0xb4, 0x63, 0x95, 0xfb, 0x5f, 0xbb,
0x3d, 0x3d, 0xe8, 0x56, 0x10, 0xf4, 0xfd, 0xb9, 0x53, 0x12, 0x04, 0x85, 0xb5, 0x70, 0x8c, 0x9c,
0xe0, 0x02, 0xea, 0x93, 0x79, 0x3f, 0x0c, 0x29, 0x61, 0x0c, 0x1d, 0x83, 0x15, 0xa5, 0x0f, 0x5f,
0x4b, 0x72, 0xd5, 0x71, 0x09, 0x4b, 0x0f, 0x5d, 0xca, 0x68, 0x4f, 0x9e, 0xd5, 0xe8, 0x1e, 0x3f,
0xa5, 0x3c, 0x99, 0x3f, 0xf4, 0x74, 0x6e, 0x6f, 0x60, 0x81, 0x19, 0xa5, 0x9d, 0xaf, 0xc0, 0x12,
0x51, 0x74, 0x0c, 0x95, 0xbb, 0x88, 0x32, 0x2e, 0x01, 0x6d, 0xac, 0x1c, 0x84, 0xc0, 0xba, 0x0f,
0x18, 0x97, 0x78, 0x36, 0x96, 0x76, 0xe7, 0x5b, 0x00, 0x7f, 0x95, 0xee, 0x78, 0x5c, 0x08, 0x14,
0x59, 0xd4, 0xe8, 0x7e, 0xfc, 0xfc, 0x3c, 0x9d, 0x86, 0xcd, 0x28, 0x15, 0x60, 0x69, 0x42, 0x15,
0x58, 0x0b, 0x4b, 0xbb, 0xf3, 0x0d, 0x34, 0x46, 0x84, 0xf1, 0x28, 0x96, 0xfb, 0x87, 0xde, 0x83,
0xcd, 0xa4, 0xac, 0x12, 0xb1, 0x8e, 0xb5, 0x27, 0x4b, 0x03, 0xbe, 0x51, 0x1a, 0x62, 0x69, 0x77,
0x7e, 0xad, 0x43, 0xcd, 0x0f, 0x52, 0xef, 0x81, 0xc4, 0x1c, 0x75, 0xc1, 0x66, 0x49, 0x46, 0x57,
0x44, 0x53, 0x69, 0x3f, 0xa5, 0xb2, 0xa7, 0x8c, 0x75, 0xa6, 0xa8, 0xe1, 0x01, 0x5d, 0x13, 0xae,
0xe5, 0x7a, 0xb5, 0x46, 0x65, 0xa2, 0x6b, 0xb0, 0x36, 0x9c, 0xa7, 0x27, 0x65, 0x59, 0xf1, 0xe9,
0xb3, 0x0a, 0xcd, 0x47, 0x5e, 0x07, 0xa1, 0xb4, 0x48, 0x6e, 0xff, 0x51, 0x05, 0x4b, 0x04, 0xd0,
0x14, 0x9a, 0x94, 0xfc, 0x94, 0x11, 0xc6, 0x6f, 0xa3, 0x38, 0xe2, 0x9a, 0xeb, 0xc5, 0xab, 0x28,
0x2e, 0x56, 0x15, 0x93, 0x38, 0xe2, 0xe3, 0x12, 0x6e, 0xd0, 0xbd, 0x8b, 0x7e, 0x80, 0x16, 0x25,
0x2c, 0x4d, 0x62, 0x46, 0x14, 0xa0, 0x6a, 0xe4, 0xf2, 0x2d, 0x40, 0x55, 0xa2, 0x11, 0x9b, 0xb4,
0xe0, 0x2b, 0x8a, 0x1a, 0x92, 0xc4, 0xa1, 0x6e, 0xf4, 0xe2, 0x9f, 0x21, 0x7a, 0x71, 0xa8, 0x28,
0xe6, 0x6e, 0xbb, 0x07, 0xb5, 0x05, 0xa7, 0x24, 0xd8, 0x4e, 0x42, 0x31, 0xc5, 0x65, 0xc0, 0xd4,
0x88, 0x5a, 0x58, 0xda, 0x72, 0xe2, 0xf2, 0xbf, 0xe4, 0x6e, 0x61, 0xed, 0xb5, 0x7f, 0x37, 0xa0,
0x51, 0xe8, 0x1c, 0xf5, 0xc0, 0x8c, 0x42, 0x2d, 0xd8, 0x97, 0xaf, 0xb3, 0xd9, 0x9d, 0x87, 0xcd,
0x28, 0x14, 0x43, 0xde, 0xca, 0x67, 0xe9, 0xef, 0x86, 0xbc, 0x7f, 0xb8, 0xb0, 0xce, 0x44, 0x6e,
0xbe, 0x85, 0xaa, 0xfb, 0xf7, 0x2f, 0x5f, 0xfd, 0x7c, 0x3b, 0x3f, 0x81, 0x7a, 0x90, 0xf1, 0x4d,
0x42, 0x23, 0xfe, 0x78, 0x62, 0xc9, 0x15, 0xdd, 0x07, 0xf2, 0xdd, 0xad, 0xec, 0x77, 0xb7, 0xfd,
0x9b, 0x01, 0xcd, 0xe2, 0x18, 0xfe, 0x75, 0x7b, 0x37, 0x80, 0x58, 0x14, 0xaf, 0xc8, 0xed, 0xc1,
0x5e, 0x99, 0xfa, 0x3a, 0xaa, 0x77, 0xde, 0xdd, 0xbd, 0xf3, 0xee, 0x48, 0xbf, 0xf3, 0xd8, 0x91,
0x45, 0x45, 0x7d, 0x3f, 0x87, 0x86, 0xd8, 0xd5, 0x5b, 0xc6, 0x03, 0x9e, 0x31, 0xd9, 0x78, 0x0b,
0x83, 0x08, 0x2d, 0x64, 0xa4, 0xfd, 0x8b, 0x29, 0x06, 0x92, 0x0f, 0xf6, 0xff, 0x67, 0x3c, 0x81,
0x77, 0x3b, 0xa0, 0xe2, 0x15, 0x28, 0xbf, 0x85, 0xf4, 0x91, 0x46, 0x2a, 0xa8, 0x7f, 0x06, 0x47,
0x39, 0xc8, 0xf2, 0x91, 0x13, 0x26, 0xa7, 0x68, 0xe1, 0xfc, 0x76, 0x0d, 0x44, 0x50, 0x68, 0xb4,
0xa6, 0xe9, 0x6a, 0xa7, 0x51, 0x45, 0x69, 0x24, 0x42, 0x4a, 0xa3, 0x41, 0x15, 0x2a, 0x44, 0xb4,
0x9d, 0x1b, 0x4b, 0x5b, 0x9e, 0x7f, 0xfd, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd2, 0xa0, 0x18,
0x48, 0x98, 0x07, 0x00, 0x00,
}

148
controller/gen/controller/tap/tap.pb.go generated Normal file
View File

@ -0,0 +1,148 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: controller/tap/tap.proto
/*
Package conduit_controller_tap is a generated protocol buffer package.
It is generated from these files:
controller/tap/tap.proto
It has these top-level messages:
*/
package conduit_controller_tap
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import conduit_common "github.com/runconduit/conduit/controller/gen/common"
import conduit_public "github.com/runconduit/conduit/controller/gen/public"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Tap service
type TapClient interface {
Tap(ctx context.Context, in *conduit_public.TapRequest, opts ...grpc.CallOption) (Tap_TapClient, error)
}
type tapClient struct {
cc *grpc.ClientConn
}
func NewTapClient(cc *grpc.ClientConn) TapClient {
return &tapClient{cc}
}
func (c *tapClient) Tap(ctx context.Context, in *conduit_public.TapRequest, opts ...grpc.CallOption) (Tap_TapClient, error) {
stream, err := grpc.NewClientStream(ctx, &_Tap_serviceDesc.Streams[0], c.cc, "/conduit.controller.tap.Tap/Tap", opts...)
if err != nil {
return nil, err
}
x := &tapTapClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type Tap_TapClient interface {
Recv() (*conduit_common.TapEvent, error)
grpc.ClientStream
}
type tapTapClient struct {
grpc.ClientStream
}
func (x *tapTapClient) Recv() (*conduit_common.TapEvent, error) {
m := new(conduit_common.TapEvent)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Server API for Tap service
type TapServer interface {
Tap(*conduit_public.TapRequest, Tap_TapServer) error
}
func RegisterTapServer(s *grpc.Server, srv TapServer) {
s.RegisterService(&_Tap_serviceDesc, srv)
}
func _Tap_Tap_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(conduit_public.TapRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(TapServer).Tap(m, &tapTapServer{stream})
}
type Tap_TapServer interface {
Send(*conduit_common.TapEvent) error
grpc.ServerStream
}
type tapTapServer struct {
grpc.ServerStream
}
func (x *tapTapServer) Send(m *conduit_common.TapEvent) error {
return x.ServerStream.SendMsg(m)
}
var _Tap_serviceDesc = grpc.ServiceDesc{
ServiceName: "conduit.controller.tap.Tap",
HandlerType: (*TapServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Tap",
Handler: _Tap_Tap_Handler,
ServerStreams: true,
},
},
Metadata: "controller/tap/tap.proto",
}
func init() { proto.RegisterFile("controller/tap/tap.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 142 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x48, 0xce, 0xcf, 0x2b,
0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2f, 0x49, 0x2c, 0x00, 0x61, 0xbd, 0x82, 0xa2, 0xfc,
0x92, 0x7c, 0x21, 0xb1, 0xe4, 0xfc, 0xbc, 0x94, 0xd2, 0xcc, 0x12, 0x3d, 0x84, 0x0a, 0xbd, 0x92,
0xc4, 0x02, 0x29, 0xe1, 0xe4, 0xfc, 0xdc, 0xdc, 0xfc, 0x3c, 0x7d, 0x08, 0x05, 0x51, 0x2c, 0x25,
0x50, 0x50, 0x9a, 0x94, 0x93, 0x99, 0xac, 0x9f, 0x58, 0x90, 0x09, 0x11, 0x31, 0x72, 0xe3, 0x62,
0x0e, 0x49, 0x2c, 0x10, 0xb2, 0x87, 0x50, 0x52, 0x7a, 0x30, 0xd3, 0x20, 0x0a, 0xf5, 0x42, 0x12,
0x0b, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0xa4, 0x24, 0xf4, 0x10, 0x36, 0x81, 0x8d, 0x0c,
0x49, 0x2c, 0x70, 0x2d, 0x4b, 0xcd, 0x2b, 0x51, 0x62, 0x30, 0x60, 0x4c, 0x62, 0x03, 0x1b, 0x67,
0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x17, 0x0c, 0x64, 0xf7, 0xa9, 0x00, 0x00, 0x00,
}

View File

@ -0,0 +1,294 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: controller/telemetry/telemetry.proto
/*
Package conduit_controller_telemetry is a generated protocol buffer package.
It is generated from these files:
controller/telemetry/telemetry.proto
It has these top-level messages:
QueryRequest
QueryResponse
Sample
SampleValue
ListPodsRequest
*/
package conduit_controller_telemetry
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import conduit_public "github.com/runconduit/conduit/controller/gen/public"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type QueryRequest struct {
Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"`
StartMs int64 `protobuf:"varint,2,opt,name=start_ms,json=startMs" json:"start_ms,omitempty"`
EndMs int64 `protobuf:"varint,3,opt,name=end_ms,json=endMs" json:"end_ms,omitempty"`
Step string `protobuf:"bytes,4,opt,name=step" json:"step,omitempty"`
}
func (m *QueryRequest) Reset() { *m = QueryRequest{} }
func (m *QueryRequest) String() string { return proto.CompactTextString(m) }
func (*QueryRequest) ProtoMessage() {}
func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *QueryRequest) GetQuery() string {
if m != nil {
return m.Query
}
return ""
}
func (m *QueryRequest) GetStartMs() int64 {
if m != nil {
return m.StartMs
}
return 0
}
func (m *QueryRequest) GetEndMs() int64 {
if m != nil {
return m.EndMs
}
return 0
}
func (m *QueryRequest) GetStep() string {
if m != nil {
return m.Step
}
return ""
}
type QueryResponse struct {
Metrics []*Sample `protobuf:"bytes,1,rep,name=metrics" json:"metrics,omitempty"`
}
func (m *QueryResponse) Reset() { *m = QueryResponse{} }
func (m *QueryResponse) String() string { return proto.CompactTextString(m) }
func (*QueryResponse) ProtoMessage() {}
func (*QueryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *QueryResponse) GetMetrics() []*Sample {
if m != nil {
return m.Metrics
}
return nil
}
type Sample struct {
Values []*SampleValue `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Sample) Reset() { *m = Sample{} }
func (m *Sample) String() string { return proto.CompactTextString(m) }
func (*Sample) ProtoMessage() {}
func (*Sample) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Sample) GetValues() []*SampleValue {
if m != nil {
return m.Values
}
return nil
}
func (m *Sample) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
type SampleValue struct {
Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
}
func (m *SampleValue) Reset() { *m = SampleValue{} }
func (m *SampleValue) String() string { return proto.CompactTextString(m) }
func (*SampleValue) ProtoMessage() {}
func (*SampleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *SampleValue) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func (m *SampleValue) GetTimestampMs() int64 {
if m != nil {
return m.TimestampMs
}
return 0
}
type ListPodsRequest struct {
}
func (m *ListPodsRequest) Reset() { *m = ListPodsRequest{} }
func (m *ListPodsRequest) String() string { return proto.CompactTextString(m) }
func (*ListPodsRequest) ProtoMessage() {}
func (*ListPodsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func init() {
proto.RegisterType((*QueryRequest)(nil), "conduit.controller.telemetry.QueryRequest")
proto.RegisterType((*QueryResponse)(nil), "conduit.controller.telemetry.QueryResponse")
proto.RegisterType((*Sample)(nil), "conduit.controller.telemetry.Sample")
proto.RegisterType((*SampleValue)(nil), "conduit.controller.telemetry.SampleValue")
proto.RegisterType((*ListPodsRequest)(nil), "conduit.controller.telemetry.ListPodsRequest")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Telemetry service
type TelemetryClient interface {
Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error)
ListPods(ctx context.Context, in *ListPodsRequest, opts ...grpc.CallOption) (*conduit_public.ListPodsResponse, error)
}
type telemetryClient struct {
cc *grpc.ClientConn
}
func NewTelemetryClient(cc *grpc.ClientConn) TelemetryClient {
return &telemetryClient{cc}
}
func (c *telemetryClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) {
out := new(QueryResponse)
err := grpc.Invoke(ctx, "/conduit.controller.telemetry.Telemetry/Query", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *telemetryClient) ListPods(ctx context.Context, in *ListPodsRequest, opts ...grpc.CallOption) (*conduit_public.ListPodsResponse, error) {
out := new(conduit_public.ListPodsResponse)
err := grpc.Invoke(ctx, "/conduit.controller.telemetry.Telemetry/ListPods", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Telemetry service
type TelemetryServer interface {
Query(context.Context, *QueryRequest) (*QueryResponse, error)
ListPods(context.Context, *ListPodsRequest) (*conduit_public.ListPodsResponse, error)
}
func RegisterTelemetryServer(s *grpc.Server, srv TelemetryServer) {
s.RegisterService(&_Telemetry_serviceDesc, srv)
}
func _Telemetry_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueryRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TelemetryServer).Query(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/conduit.controller.telemetry.Telemetry/Query",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TelemetryServer).Query(ctx, req.(*QueryRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Telemetry_ListPods_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListPodsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TelemetryServer).ListPods(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/conduit.controller.telemetry.Telemetry/ListPods",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TelemetryServer).ListPods(ctx, req.(*ListPodsRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Telemetry_serviceDesc = grpc.ServiceDesc{
ServiceName: "conduit.controller.telemetry.Telemetry",
HandlerType: (*TelemetryServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Query",
Handler: _Telemetry_Query_Handler,
},
{
MethodName: "ListPods",
Handler: _Telemetry_ListPods_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "controller/telemetry/telemetry.proto",
}
func init() { proto.RegisterFile("controller/telemetry/telemetry.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 388 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x4d, 0x4f, 0xdb, 0x40,
0x10, 0xed, 0xc6, 0x89, 0x93, 0x8c, 0x53, 0x35, 0x5d, 0xb5, 0x92, 0x6b, 0xf5, 0xe0, 0x5a, 0x39,
0xa4, 0xad, 0xea, 0x54, 0xe9, 0xa5, 0x70, 0x40, 0xe2, 0x00, 0xe2, 0x90, 0x08, 0x30, 0x88, 0x1b,
0x42, 0xfe, 0x98, 0x83, 0x61, 0xfd, 0x11, 0xef, 0x1a, 0x29, 0x7f, 0x12, 0xf1, 0x93, 0xd0, 0xae,
0xed, 0xd8, 0xe2, 0x10, 0xe5, 0x36, 0x33, 0xfb, 0xde, 0xdb, 0x37, 0x1f, 0x30, 0x0b, 0xb3, 0x54,
0x14, 0x19, 0x63, 0x58, 0x2c, 0x04, 0x32, 0x4c, 0x50, 0x14, 0xdb, 0x36, 0x72, 0xf3, 0x22, 0x13,
0x19, 0xfd, 0x1e, 0x66, 0x69, 0x54, 0xc6, 0xc2, 0x6d, 0xd1, 0xee, 0x0e, 0x63, 0x4d, 0xf3, 0x32,
0x60, 0x71, 0xb8, 0xf0, 0xf3, 0xb8, 0xc2, 0x3b, 0x8f, 0x30, 0xb9, 0x2e, 0xb1, 0xd8, 0x7a, 0xb8,
0x29, 0x91, 0x0b, 0xfa, 0x05, 0x06, 0x1b, 0x99, 0x9b, 0xc4, 0x26, 0xf3, 0xb1, 0x57, 0x25, 0xf4,
0x1b, 0x8c, 0xb8, 0xf0, 0x0b, 0xf1, 0x90, 0x70, 0xb3, 0x67, 0x93, 0xb9, 0xe6, 0x0d, 0x55, 0xbe,
0xe6, 0xf4, 0x2b, 0xe8, 0x98, 0x46, 0xf2, 0x41, 0x53, 0x0f, 0x03, 0x4c, 0xa3, 0x35, 0xa7, 0x14,
0xfa, 0x5c, 0x60, 0x6e, 0xf6, 0x95, 0x8c, 0x8a, 0x9d, 0x4b, 0xf8, 0x58, 0xff, 0xc5, 0xf3, 0x2c,
0xe5, 0x48, 0x4f, 0x60, 0x28, 0x7d, 0xc5, 0x21, 0x37, 0x89, 0xad, 0xcd, 0x8d, 0xe5, 0xcc, 0xdd,
0x67, 0xdf, 0xbd, 0xf1, 0x93, 0x9c, 0xa1, 0xd7, 0x90, 0x9c, 0x57, 0x02, 0x7a, 0x55, 0xa3, 0xa7,
0xa0, 0x3f, 0xfb, 0xac, 0xc4, 0x46, 0xe9, 0xe7, 0x21, 0x4a, 0x77, 0x92, 0xe1, 0xd5, 0x44, 0x7a,
0x01, 0x3a, 0xf3, 0x03, 0x64, 0xb2, 0x45, 0x29, 0xf1, 0xf7, 0x10, 0x09, 0x77, 0xa5, 0x28, 0x67,
0xa9, 0x28, 0xb6, 0x5e, 0xcd, 0xb7, 0x8e, 0xc0, 0xe8, 0x94, 0xe9, 0x14, 0xb4, 0x27, 0x6c, 0x26,
0x2a, 0x43, 0x39, 0x65, 0xf5, 0xa9, 0x1a, 0xe6, 0xd8, 0xab, 0x92, 0xe3, 0xde, 0x7f, 0xe2, 0x9c,
0x83, 0xd1, 0xf1, 0xd6, 0x02, 0x25, 0x99, 0xd4, 0x40, 0xfa, 0x03, 0x26, 0x22, 0x4e, 0x90, 0x0b,
0x3f, 0xc9, 0xdb, 0x95, 0x18, 0xbb, 0xda, 0x9a, 0x3b, 0x9f, 0xe1, 0xd3, 0x2a, 0xe6, 0xe2, 0x2a,
0x8b, 0x78, 0xbd, 0xda, 0xe5, 0x0b, 0x81, 0xf1, 0x6d, 0x63, 0x9f, 0x06, 0x30, 0x50, 0xcb, 0xa0,
0xbf, 0xf6, 0xb7, 0xd9, 0xbd, 0x0e, 0xeb, 0xf7, 0x41, 0xd8, 0x6a, 0xbb, 0xce, 0x07, 0x7a, 0x0f,
0xa3, 0xc6, 0x04, 0xfd, 0xb3, 0x9f, 0xfa, 0xce, 0xac, 0x65, 0xef, 0xe0, 0xd5, 0xc9, 0x76, 0x00,
0x8d, 0x7c, 0xa0, 0xab, 0x13, 0xfe, 0xf7, 0x16, 0x00, 0x00, 0xff, 0xff, 0x50, 0x14, 0x20, 0x8d,
0x1a, 0x03, 0x00, 0x00,
}
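// Example (not generated): wiring a unary interceptor for the Telemetry
// service. The generated handlers above pass each decoded request through the
// server's grpc.UnaryServerInterceptor, if one is installed, along with a
// grpc.UnaryServerInfo whose FullMethod names the RPC. This is an illustrative
// sketch only: it assumes "log" and "net" are imported, that a TelemetryServer
// implementation exists, and that the generated RegisterTelemetryServer helper
// is defined earlier in this file.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	// info.FullMethod is e.g. "/conduit.controller.telemetry.Telemetry/Query".
	log.Printf("handling %s", info.FullMethod)
	// handler dispatches to _Telemetry_Query_Handler or _Telemetry_ListPods_Handler.
	resp, err := handler(ctx, req)
	if err != nil {
		log.Printf("%s failed: %v", info.FullMethod, err)
	}
	return resp, err
}
func serveTelemetry(srv TelemetryServer, lis net.Listener) error {
	s := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
	RegisterTelemetryServer(s, srv) // assumed generated registration helper
	return s.Serve(lis)
}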

View File

@ -0,0 +1,348 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: proxy/destination/destination.proto
/*
Package conduit_proxy_destination is a generated protocol buffer package.
It is generated from these files:
proxy/destination/destination.proto
It has these top-level messages:
Update
AddrSet
WeightedAddrSet
WeightedAddr
*/
package conduit_proxy_destination
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import conduit_common "github.com/runconduit/conduit/controller/gen/common"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Update struct {
// Types that are valid to be assigned to Update:
// *Update_Add
// *Update_Remove
Update isUpdate_Update `protobuf_oneof:"update"`
}
func (m *Update) Reset() { *m = Update{} }
func (m *Update) String() string { return proto.CompactTextString(m) }
func (*Update) ProtoMessage() {}
func (*Update) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type isUpdate_Update interface {
isUpdate_Update()
}
type Update_Add struct {
Add *WeightedAddrSet `protobuf:"bytes,1,opt,name=add,oneof"`
}
type Update_Remove struct {
Remove *AddrSet `protobuf:"bytes,2,opt,name=remove,oneof"`
}
func (*Update_Add) isUpdate_Update() {}
func (*Update_Remove) isUpdate_Update() {}
func (m *Update) GetUpdate() isUpdate_Update {
if m != nil {
return m.Update
}
return nil
}
func (m *Update) GetAdd() *WeightedAddrSet {
if x, ok := m.GetUpdate().(*Update_Add); ok {
return x.Add
}
return nil
}
func (m *Update) GetRemove() *AddrSet {
if x, ok := m.GetUpdate().(*Update_Remove); ok {
return x.Remove
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Update) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Update_OneofMarshaler, _Update_OneofUnmarshaler, _Update_OneofSizer, []interface{}{
(*Update_Add)(nil),
(*Update_Remove)(nil),
}
}
func _Update_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Update)
// update
switch x := m.Update.(type) {
case *Update_Add:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Add); err != nil {
return err
}
case *Update_Remove:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Remove); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Update.Update has unexpected type %T", x)
}
return nil
}
func _Update_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Update)
switch tag {
case 1: // update.add
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(WeightedAddrSet)
err := b.DecodeMessage(msg)
m.Update = &Update_Add{msg}
return true, err
case 2: // update.remove
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(AddrSet)
err := b.DecodeMessage(msg)
m.Update = &Update_Remove{msg}
return true, err
default:
return false, nil
}
}
func _Update_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Update)
// update
switch x := m.Update.(type) {
case *Update_Add:
s := proto.Size(x.Add)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Update_Remove:
s := proto.Size(x.Remove)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type AddrSet struct {
Addrs []*conduit_common.TcpAddress `protobuf:"bytes,1,rep,name=addrs" json:"addrs,omitempty"`
}
func (m *AddrSet) Reset() { *m = AddrSet{} }
func (m *AddrSet) String() string { return proto.CompactTextString(m) }
func (*AddrSet) ProtoMessage() {}
func (*AddrSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *AddrSet) GetAddrs() []*conduit_common.TcpAddress {
if m != nil {
return m.Addrs
}
return nil
}
type WeightedAddrSet struct {
Addrs []*WeightedAddr `protobuf:"bytes,1,rep,name=addrs" json:"addrs,omitempty"`
}
func (m *WeightedAddrSet) Reset() { *m = WeightedAddrSet{} }
func (m *WeightedAddrSet) String() string { return proto.CompactTextString(m) }
func (*WeightedAddrSet) ProtoMessage() {}
func (*WeightedAddrSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *WeightedAddrSet) GetAddrs() []*WeightedAddr {
if m != nil {
return m.Addrs
}
return nil
}
type WeightedAddr struct {
Addr *conduit_common.TcpAddress `protobuf:"bytes,1,opt,name=addr" json:"addr,omitempty"`
Weight uint32 `protobuf:"varint,3,opt,name=weight" json:"weight,omitempty"`
}
func (m *WeightedAddr) Reset() { *m = WeightedAddr{} }
func (m *WeightedAddr) String() string { return proto.CompactTextString(m) }
func (*WeightedAddr) ProtoMessage() {}
func (*WeightedAddr) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *WeightedAddr) GetAddr() *conduit_common.TcpAddress {
if m != nil {
return m.Addr
}
return nil
}
func (m *WeightedAddr) GetWeight() uint32 {
if m != nil {
return m.Weight
}
return 0
}
func init() {
proto.RegisterType((*Update)(nil), "conduit.proxy.destination.Update")
proto.RegisterType((*AddrSet)(nil), "conduit.proxy.destination.AddrSet")
proto.RegisterType((*WeightedAddrSet)(nil), "conduit.proxy.destination.WeightedAddrSet")
proto.RegisterType((*WeightedAddr)(nil), "conduit.proxy.destination.WeightedAddr")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Destination service
type DestinationClient interface {
// Given a destination, return all addresses in that destination as a long-
// running stream of updates.
Get(ctx context.Context, in *conduit_common.Destination, opts ...grpc.CallOption) (Destination_GetClient, error)
}
type destinationClient struct {
cc *grpc.ClientConn
}
func NewDestinationClient(cc *grpc.ClientConn) DestinationClient {
return &destinationClient{cc}
}
func (c *destinationClient) Get(ctx context.Context, in *conduit_common.Destination, opts ...grpc.CallOption) (Destination_GetClient, error) {
stream, err := grpc.NewClientStream(ctx, &_Destination_serviceDesc.Streams[0], c.cc, "/conduit.proxy.destination.Destination/Get", opts...)
if err != nil {
return nil, err
}
x := &destinationGetClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type Destination_GetClient interface {
Recv() (*Update, error)
grpc.ClientStream
}
type destinationGetClient struct {
grpc.ClientStream
}
func (x *destinationGetClient) Recv() (*Update, error) {
m := new(Update)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
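// Example (not generated): consuming the Destination.Get stream from a client.
// Illustrative sketch only; it assumes "io" and "log" are imported and that
// the caller fills in the conduit_common.Destination fields (defined in
// controller/gen/common, not in this file).
func watchDestination(ctx context.Context, cc *grpc.ClientConn, dest *conduit_common.Destination) error {
	client := NewDestinationClient(cc)
	stream, err := client.Get(ctx, dest)
	if err != nil {
		return err
	}
	for {
		update, err := stream.Recv()
		if err == io.EOF {
			return nil // server ended the long-running stream
		}
		if err != nil {
			return err
		}
		switch u := update.Update.(type) {
		case *Update_Add:
			log.Printf("add %d weighted addrs", len(u.Add.GetAddrs()))
		case *Update_Remove:
			log.Printf("remove %d addrs", len(u.Remove.GetAddrs()))
		}
	}
}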
// Server API for Destination service
type DestinationServer interface {
// Given a destination, return all addresses in that destination as a long-
// running stream of updates.
Get(*conduit_common.Destination, Destination_GetServer) error
}
func RegisterDestinationServer(s *grpc.Server, srv DestinationServer) {
s.RegisterService(&_Destination_serviceDesc, srv)
}
func _Destination_Get_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(conduit_common.Destination)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(DestinationServer).Get(m, &destinationGetServer{stream})
}
type Destination_GetServer interface {
Send(*Update) error
grpc.ServerStream
}
type destinationGetServer struct {
grpc.ServerStream
}
func (x *destinationGetServer) Send(m *Update) error {
return x.ServerStream.SendMsg(m)
}
var _Destination_serviceDesc = grpc.ServiceDesc{
ServiceName: "conduit.proxy.destination.Destination",
HandlerType: (*DestinationServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Get",
Handler: _Destination_Get_Handler,
ServerStreams: true,
},
},
Metadata: "proxy/destination/destination.proto",
}
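// Example (not generated): a minimal DestinationServer. RegisterDestinationServer
// above attaches it to a grpc.Server, and _Destination_Get_Handler decodes the
// requested Destination before invoking Get with a send-only stream.
// Illustrative sketch only; the TcpAddress fields are defined in
// controller/gen/common and are left nil here.
type exampleDestinationServer struct{}

func (s *exampleDestinationServer) Get(dest *conduit_common.Destination, stream Destination_GetServer) error {
	// Push one initial snapshot of endpoints for the requested destination.
	add := &Update{Update: &Update_Add{Add: &WeightedAddrSet{
		Addrs: []*WeightedAddr{{Addr: nil, Weight: 1}},
	}}}
	if err := stream.Send(add); err != nil {
		return err
	}
	// Keep the long-running stream open until the client goes away.
	<-stream.Context().Done()
	return nil
}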
func init() { proto.RegisterFile("proxy/destination/destination.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 281 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x4b, 0x4b, 0xc4, 0x30,
0x14, 0x85, 0x27, 0x56, 0xa3, 0xdc, 0x2a, 0x42, 0x04, 0xa9, 0x75, 0x53, 0xe3, 0xc2, 0xe2, 0x22,
0x33, 0xd4, 0xa5, 0x0f, 0x50, 0x04, 0x75, 0x27, 0xf5, 0xb9, 0xad, 0xbd, 0x41, 0xbb, 0x68, 0x53,
0xd2, 0x8c, 0x8f, 0x7f, 0xe1, 0x4f, 0x96, 0xa6, 0xd1, 0xa9, 0x03, 0x16, 0x57, 0x79, 0xdc, 0x73,
0xbe, 0x9c, 0x9b, 0x0b, 0xbb, 0xb5, 0x56, 0xef, 0x1f, 0x63, 0x94, 0x8d, 0x29, 0xaa, 0xcc, 0x14,
0xaa, 0xea, 0xef, 0x45, 0xad, 0x95, 0x51, 0x6c, 0x2b, 0x57, 0x15, 0x4e, 0x0b, 0x23, 0xac, 0x58,
0xf4, 0x04, 0xe1, 0x46, 0xae, 0xca, 0x52, 0x55, 0xe3, 0x6e, 0xe9, 0xf4, 0xfc, 0x93, 0x00, 0xbd,
0xab, 0x31, 0x33, 0x92, 0x9d, 0x80, 0x97, 0x21, 0x06, 0x24, 0x22, 0xb1, 0x9f, 0xec, 0x8b, 0x3f,
0x41, 0xe2, 0x41, 0x16, 0xcf, 0x2f, 0x46, 0xe2, 0x29, 0xa2, 0xbe, 0x91, 0xe6, 0x72, 0x94, 0xb6,
0x46, 0x76, 0x04, 0x54, 0xcb, 0x52, 0xbd, 0xca, 0x60, 0xc1, 0x22, 0xf8, 0x00, 0x62, 0x66, 0x75,
0x9e, 0xb3, 0x15, 0xa0, 0x53, 0x9b, 0x83, 0x1f, 0xc2, 0xb2, 0x2b, 0xb3, 0x09, 0x2c, 0x65, 0x88,
0xba, 0x09, 0x48, 0xe4, 0xc5, 0x7e, 0x12, 0xfe, 0x10, 0x5d, 0x0f, 0xb7, 0x79, 0xdd, 0x4a, 0x65,
0xd3, 0xa4, 0x9d, 0x90, 0x5f, 0xc3, 0xfa, 0x5c, 0x3c, 0x76, 0xfc, 0x1b, 0xb2, 0xf7, 0xcf, 0xce,
0xbe, 0x89, 0xf7, 0xb0, 0xda, 0xbf, 0x66, 0x02, 0x16, 0xdb, 0x82, 0xfb, 0xa7, 0xa1, 0x48, 0x56,
0xc7, 0x36, 0x81, 0xbe, 0x59, 0x7f, 0xe0, 0x45, 0x24, 0x5e, 0x4b, 0xdd, 0x29, 0x79, 0x04, 0xff,
0x7c, 0xf6, 0x34, 0xbb, 0x02, 0xef, 0x42, 0x1a, 0xb6, 0x3d, 0xcf, 0xeb, 0x69, 0xc2, 0x9d, 0x81,
0xe8, 0xdd, 0x10, 0xf9, 0x68, 0x42, 0x9e, 0xa8, 0x1d, 0xed, 0xc1, 0x57, 0x00, 0x00, 0x00, 0xff,
0xff, 0xf8, 0xcd, 0x94, 0x88, 0x31, 0x02, 0x00, 0x00,
}

947
controller/gen/proxy/tap/tap.pb.go generated Normal file
View File

@ -0,0 +1,947 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: proxy/tap/tap.proto
/*
Package conduit_proxy_tap is a generated protocol buffer package.
It is generated from these files:
proxy/tap/tap.proto
It has these top-level messages:
ObserveRequest
*/
package conduit_proxy_tap
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import conduit_common "github.com/runconduit/conduit/controller/gen/common"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ObserveRequest struct {
// Limits the number of event keys that will be returned by this tap.
Limit uint32 `protobuf:"varint,1,opt,name=limit" json:"limit,omitempty"`
// Encodes request-matching logic.
Match *ObserveRequest_Match `protobuf:"bytes,2,opt,name=match" json:"match,omitempty"`
}
func (m *ObserveRequest) Reset() { *m = ObserveRequest{} }
func (m *ObserveRequest) String() string { return proto.CompactTextString(m) }
func (*ObserveRequest) ProtoMessage() {}
func (*ObserveRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *ObserveRequest) GetLimit() uint32 {
if m != nil {
return m.Limit
}
return 0
}
func (m *ObserveRequest) GetMatch() *ObserveRequest_Match {
if m != nil {
return m.Match
}
return nil
}
type ObserveRequest_Match struct {
// Types that are valid to be assigned to Match:
// *ObserveRequest_Match_All
// *ObserveRequest_Match_Any
// *ObserveRequest_Match_Not
// *ObserveRequest_Match_Source
// *ObserveRequest_Match_Destination
// *ObserveRequest_Match_Http_
Match isObserveRequest_Match_Match `protobuf_oneof:"match"`
}
func (m *ObserveRequest_Match) Reset() { *m = ObserveRequest_Match{} }
func (m *ObserveRequest_Match) String() string { return proto.CompactTextString(m) }
func (*ObserveRequest_Match) ProtoMessage() {}
func (*ObserveRequest_Match) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
type isObserveRequest_Match_Match interface {
isObserveRequest_Match_Match()
}
type ObserveRequest_Match_All struct {
All *ObserveRequest_Match_Seq `protobuf:"bytes,1,opt,name=all,oneof"`
}
type ObserveRequest_Match_Any struct {
Any *ObserveRequest_Match_Seq `protobuf:"bytes,2,opt,name=any,oneof"`
}
type ObserveRequest_Match_Not struct {
Not *ObserveRequest_Match `protobuf:"bytes,3,opt,name=not,oneof"`
}
type ObserveRequest_Match_Source struct {
Source *ObserveRequest_Match_Tcp `protobuf:"bytes,4,opt,name=source,oneof"`
}
type ObserveRequest_Match_Destination struct {
Destination *ObserveRequest_Match_Tcp `protobuf:"bytes,5,opt,name=destination,oneof"`
}
type ObserveRequest_Match_Http_ struct {
Http *ObserveRequest_Match_Http `protobuf:"bytes,6,opt,name=http,oneof"`
}
func (*ObserveRequest_Match_All) isObserveRequest_Match_Match() {}
func (*ObserveRequest_Match_Any) isObserveRequest_Match_Match() {}
func (*ObserveRequest_Match_Not) isObserveRequest_Match_Match() {}
func (*ObserveRequest_Match_Source) isObserveRequest_Match_Match() {}
func (*ObserveRequest_Match_Destination) isObserveRequest_Match_Match() {}
func (*ObserveRequest_Match_Http_) isObserveRequest_Match_Match() {}
func (m *ObserveRequest_Match) GetMatch() isObserveRequest_Match_Match {
if m != nil {
return m.Match
}
return nil
}
func (m *ObserveRequest_Match) GetAll() *ObserveRequest_Match_Seq {
if x, ok := m.GetMatch().(*ObserveRequest_Match_All); ok {
return x.All
}
return nil
}
func (m *ObserveRequest_Match) GetAny() *ObserveRequest_Match_Seq {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Any); ok {
return x.Any
}
return nil
}
func (m *ObserveRequest_Match) GetNot() *ObserveRequest_Match {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Not); ok {
return x.Not
}
return nil
}
func (m *ObserveRequest_Match) GetSource() *ObserveRequest_Match_Tcp {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Source); ok {
return x.Source
}
return nil
}
func (m *ObserveRequest_Match) GetDestination() *ObserveRequest_Match_Tcp {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Destination); ok {
return x.Destination
}
return nil
}
func (m *ObserveRequest_Match) GetHttp() *ObserveRequest_Match_Http {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Http_); ok {
return x.Http
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*ObserveRequest_Match) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _ObserveRequest_Match_OneofMarshaler, _ObserveRequest_Match_OneofUnmarshaler, _ObserveRequest_Match_OneofSizer, []interface{}{
(*ObserveRequest_Match_All)(nil),
(*ObserveRequest_Match_Any)(nil),
(*ObserveRequest_Match_Not)(nil),
(*ObserveRequest_Match_Source)(nil),
(*ObserveRequest_Match_Destination)(nil),
(*ObserveRequest_Match_Http_)(nil),
}
}
func _ObserveRequest_Match_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*ObserveRequest_Match)
// match
switch x := m.Match.(type) {
case *ObserveRequest_Match_All:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.All); err != nil {
return err
}
case *ObserveRequest_Match_Any:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Any); err != nil {
return err
}
case *ObserveRequest_Match_Not:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Not); err != nil {
return err
}
case *ObserveRequest_Match_Source:
b.EncodeVarint(4<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Source); err != nil {
return err
}
case *ObserveRequest_Match_Destination:
b.EncodeVarint(5<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Destination); err != nil {
return err
}
case *ObserveRequest_Match_Http_:
b.EncodeVarint(6<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Http); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("ObserveRequest_Match.Match has unexpected type %T", x)
}
return nil
}
func _ObserveRequest_Match_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*ObserveRequest_Match)
switch tag {
case 1: // match.all
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ObserveRequest_Match_Seq)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_All{msg}
return true, err
case 2: // match.any
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ObserveRequest_Match_Seq)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Any{msg}
return true, err
case 3: // match.not
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ObserveRequest_Match)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Not{msg}
return true, err
case 4: // match.source
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ObserveRequest_Match_Tcp)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Source{msg}
return true, err
case 5: // match.destination
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ObserveRequest_Match_Tcp)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Destination{msg}
return true, err
case 6: // match.http
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ObserveRequest_Match_Http)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Http_{msg}
return true, err
default:
return false, nil
}
}
func _ObserveRequest_Match_OneofSizer(msg proto.Message) (n int) {
m := msg.(*ObserveRequest_Match)
// match
switch x := m.Match.(type) {
case *ObserveRequest_Match_All:
s := proto.Size(x.All)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ObserveRequest_Match_Any:
s := proto.Size(x.Any)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ObserveRequest_Match_Not:
s := proto.Size(x.Not)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ObserveRequest_Match_Source:
s := proto.Size(x.Source)
n += proto.SizeVarint(4<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ObserveRequest_Match_Destination:
s := proto.Size(x.Destination)
n += proto.SizeVarint(5<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ObserveRequest_Match_Http_:
s := proto.Size(x.Http)
n += proto.SizeVarint(6<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type ObserveRequest_Match_Seq struct {
Matches []*ObserveRequest_Match `protobuf:"bytes,1,rep,name=matches" json:"matches,omitempty"`
}
func (m *ObserveRequest_Match_Seq) Reset() { *m = ObserveRequest_Match_Seq{} }
func (m *ObserveRequest_Match_Seq) String() string { return proto.CompactTextString(m) }
func (*ObserveRequest_Match_Seq) ProtoMessage() {}
func (*ObserveRequest_Match_Seq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0, 0} }
func (m *ObserveRequest_Match_Seq) GetMatches() []*ObserveRequest_Match {
if m != nil {
return m.Matches
}
return nil
}
type ObserveRequest_Match_Tcp struct {
// Types that are valid to be assigned to Match:
// *ObserveRequest_Match_Tcp_Netmask_
// *ObserveRequest_Match_Tcp_Ports
Match isObserveRequest_Match_Tcp_Match `protobuf_oneof:"match"`
}
func (m *ObserveRequest_Match_Tcp) Reset() { *m = ObserveRequest_Match_Tcp{} }
func (m *ObserveRequest_Match_Tcp) String() string { return proto.CompactTextString(m) }
func (*ObserveRequest_Match_Tcp) ProtoMessage() {}
func (*ObserveRequest_Match_Tcp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0, 1} }
type isObserveRequest_Match_Tcp_Match interface {
isObserveRequest_Match_Tcp_Match()
}
type ObserveRequest_Match_Tcp_Netmask_ struct {
Netmask *ObserveRequest_Match_Tcp_Netmask `protobuf:"bytes,1,opt,name=netmask,oneof"`
}
type ObserveRequest_Match_Tcp_Ports struct {
Ports *ObserveRequest_Match_Tcp_PortRange `protobuf:"bytes,3,opt,name=ports,oneof"`
}
func (*ObserveRequest_Match_Tcp_Netmask_) isObserveRequest_Match_Tcp_Match() {}
func (*ObserveRequest_Match_Tcp_Ports) isObserveRequest_Match_Tcp_Match() {}
func (m *ObserveRequest_Match_Tcp) GetMatch() isObserveRequest_Match_Tcp_Match {
if m != nil {
return m.Match
}
return nil
}
func (m *ObserveRequest_Match_Tcp) GetNetmask() *ObserveRequest_Match_Tcp_Netmask {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Tcp_Netmask_); ok {
return x.Netmask
}
return nil
}
func (m *ObserveRequest_Match_Tcp) GetPorts() *ObserveRequest_Match_Tcp_PortRange {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Tcp_Ports); ok {
return x.Ports
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*ObserveRequest_Match_Tcp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _ObserveRequest_Match_Tcp_OneofMarshaler, _ObserveRequest_Match_Tcp_OneofUnmarshaler, _ObserveRequest_Match_Tcp_OneofSizer, []interface{}{
(*ObserveRequest_Match_Tcp_Netmask_)(nil),
(*ObserveRequest_Match_Tcp_Ports)(nil),
}
}
func _ObserveRequest_Match_Tcp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*ObserveRequest_Match_Tcp)
// match
switch x := m.Match.(type) {
case *ObserveRequest_Match_Tcp_Netmask_:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Netmask); err != nil {
return err
}
case *ObserveRequest_Match_Tcp_Ports:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Ports); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("ObserveRequest_Match_Tcp.Match has unexpected type %T", x)
}
return nil
}
func _ObserveRequest_Match_Tcp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*ObserveRequest_Match_Tcp)
switch tag {
case 1: // match.netmask
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ObserveRequest_Match_Tcp_Netmask)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Tcp_Netmask_{msg}
return true, err
case 3: // match.ports
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ObserveRequest_Match_Tcp_PortRange)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Tcp_Ports{msg}
return true, err
default:
return false, nil
}
}
func _ObserveRequest_Match_Tcp_OneofSizer(msg proto.Message) (n int) {
m := msg.(*ObserveRequest_Match_Tcp)
// match
switch x := m.Match.(type) {
case *ObserveRequest_Match_Tcp_Netmask_:
s := proto.Size(x.Netmask)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ObserveRequest_Match_Tcp_Ports:
s := proto.Size(x.Ports)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type ObserveRequest_Match_Tcp_Netmask struct {
Ip *conduit_common.IPAddress `protobuf:"bytes,1,opt,name=ip" json:"ip,omitempty"`
Mask uint32 `protobuf:"varint,2,opt,name=mask" json:"mask,omitempty"`
}
func (m *ObserveRequest_Match_Tcp_Netmask) Reset() { *m = ObserveRequest_Match_Tcp_Netmask{} }
func (m *ObserveRequest_Match_Tcp_Netmask) String() string { return proto.CompactTextString(m) }
func (*ObserveRequest_Match_Tcp_Netmask) ProtoMessage() {}
func (*ObserveRequest_Match_Tcp_Netmask) Descriptor() ([]byte, []int) {
return fileDescriptor0, []int{0, 0, 1, 0}
}
func (m *ObserveRequest_Match_Tcp_Netmask) GetIp() *conduit_common.IPAddress {
if m != nil {
return m.Ip
}
return nil
}
func (m *ObserveRequest_Match_Tcp_Netmask) GetMask() uint32 {
if m != nil {
return m.Mask
}
return 0
}
// If either the minimum or the maximum is not specified, the range is treated
// as a single discrete value.
type ObserveRequest_Match_Tcp_PortRange struct {
// Minimum matching port value (inclusive), if specified.
Min uint32 `protobuf:"varint,1,opt,name=min" json:"min,omitempty"`
// Maximum matching port value (inclusive), if specified.
Max uint32 `protobuf:"varint,2,opt,name=max" json:"max,omitempty"`
}
func (m *ObserveRequest_Match_Tcp_PortRange) Reset() { *m = ObserveRequest_Match_Tcp_PortRange{} }
func (m *ObserveRequest_Match_Tcp_PortRange) String() string { return proto.CompactTextString(m) }
func (*ObserveRequest_Match_Tcp_PortRange) ProtoMessage() {}
func (*ObserveRequest_Match_Tcp_PortRange) Descriptor() ([]byte, []int) {
return fileDescriptor0, []int{0, 0, 1, 1}
}
func (m *ObserveRequest_Match_Tcp_PortRange) GetMin() uint32 {
if m != nil {
return m.Min
}
return 0
}
func (m *ObserveRequest_Match_Tcp_PortRange) GetMax() uint32 {
if m != nil {
return m.Max
}
return 0
}
type ObserveRequest_Match_Http struct {
// Types that are valid to be assigned to Match:
// *ObserveRequest_Match_Http_Scheme
// *ObserveRequest_Match_Http_Method
// *ObserveRequest_Match_Http_Authority
// *ObserveRequest_Match_Http_Path
Match isObserveRequest_Match_Http_Match `protobuf_oneof:"match"`
}
func (m *ObserveRequest_Match_Http) Reset() { *m = ObserveRequest_Match_Http{} }
func (m *ObserveRequest_Match_Http) String() string { return proto.CompactTextString(m) }
func (*ObserveRequest_Match_Http) ProtoMessage() {}
func (*ObserveRequest_Match_Http) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0, 2} }
type isObserveRequest_Match_Http_Match interface {
isObserveRequest_Match_Http_Match()
}
type ObserveRequest_Match_Http_Scheme struct {
Scheme *conduit_common.Scheme `protobuf:"bytes,1,opt,name=scheme,oneof"`
}
type ObserveRequest_Match_Http_Method struct {
Method *conduit_common.HttpMethod `protobuf:"bytes,3,opt,name=method,oneof"`
}
type ObserveRequest_Match_Http_Authority struct {
Authority *ObserveRequest_Match_Http_StringMatch `protobuf:"bytes,2,opt,name=authority,oneof"`
}
type ObserveRequest_Match_Http_Path struct {
Path *ObserveRequest_Match_Http_StringMatch `protobuf:"bytes,4,opt,name=path,oneof"`
}
func (*ObserveRequest_Match_Http_Scheme) isObserveRequest_Match_Http_Match() {}
func (*ObserveRequest_Match_Http_Method) isObserveRequest_Match_Http_Match() {}
func (*ObserveRequest_Match_Http_Authority) isObserveRequest_Match_Http_Match() {}
func (*ObserveRequest_Match_Http_Path) isObserveRequest_Match_Http_Match() {}
func (m *ObserveRequest_Match_Http) GetMatch() isObserveRequest_Match_Http_Match {
if m != nil {
return m.Match
}
return nil
}
func (m *ObserveRequest_Match_Http) GetScheme() *conduit_common.Scheme {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Http_Scheme); ok {
return x.Scheme
}
return nil
}
func (m *ObserveRequest_Match_Http) GetMethod() *conduit_common.HttpMethod {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Http_Method); ok {
return x.Method
}
return nil
}
func (m *ObserveRequest_Match_Http) GetAuthority() *ObserveRequest_Match_Http_StringMatch {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Http_Authority); ok {
return x.Authority
}
return nil
}
func (m *ObserveRequest_Match_Http) GetPath() *ObserveRequest_Match_Http_StringMatch {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Http_Path); ok {
return x.Path
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*ObserveRequest_Match_Http) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _ObserveRequest_Match_Http_OneofMarshaler, _ObserveRequest_Match_Http_OneofUnmarshaler, _ObserveRequest_Match_Http_OneofSizer, []interface{}{
(*ObserveRequest_Match_Http_Scheme)(nil),
(*ObserveRequest_Match_Http_Method)(nil),
(*ObserveRequest_Match_Http_Authority)(nil),
(*ObserveRequest_Match_Http_Path)(nil),
}
}
func _ObserveRequest_Match_Http_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*ObserveRequest_Match_Http)
// match
switch x := m.Match.(type) {
case *ObserveRequest_Match_Http_Scheme:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Scheme); err != nil {
return err
}
case *ObserveRequest_Match_Http_Method:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Method); err != nil {
return err
}
case *ObserveRequest_Match_Http_Authority:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Authority); err != nil {
return err
}
case *ObserveRequest_Match_Http_Path:
b.EncodeVarint(4<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Path); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("ObserveRequest_Match_Http.Match has unexpected type %T", x)
}
return nil
}
func _ObserveRequest_Match_Http_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*ObserveRequest_Match_Http)
switch tag {
case 1: // match.scheme
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(conduit_common.Scheme)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Http_Scheme{msg}
return true, err
case 3: // match.method
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(conduit_common.HttpMethod)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Http_Method{msg}
return true, err
case 2: // match.authority
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ObserveRequest_Match_Http_StringMatch)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Http_Authority{msg}
return true, err
case 4: // match.path
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ObserveRequest_Match_Http_StringMatch)
err := b.DecodeMessage(msg)
m.Match = &ObserveRequest_Match_Http_Path{msg}
return true, err
default:
return false, nil
}
}
func _ObserveRequest_Match_Http_OneofSizer(msg proto.Message) (n int) {
m := msg.(*ObserveRequest_Match_Http)
// match
switch x := m.Match.(type) {
case *ObserveRequest_Match_Http_Scheme:
s := proto.Size(x.Scheme)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ObserveRequest_Match_Http_Method:
s := proto.Size(x.Method)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ObserveRequest_Match_Http_Authority:
s := proto.Size(x.Authority)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ObserveRequest_Match_Http_Path:
s := proto.Size(x.Path)
n += proto.SizeVarint(4<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type ObserveRequest_Match_Http_StringMatch struct {
// Types that are valid to be assigned to Match:
// *ObserveRequest_Match_Http_StringMatch_Exact
// *ObserveRequest_Match_Http_StringMatch_Prefix
Match isObserveRequest_Match_Http_StringMatch_Match `protobuf_oneof:"match"`
}
func (m *ObserveRequest_Match_Http_StringMatch) Reset() { *m = ObserveRequest_Match_Http_StringMatch{} }
func (m *ObserveRequest_Match_Http_StringMatch) String() string { return proto.CompactTextString(m) }
func (*ObserveRequest_Match_Http_StringMatch) ProtoMessage() {}
func (*ObserveRequest_Match_Http_StringMatch) Descriptor() ([]byte, []int) {
return fileDescriptor0, []int{0, 0, 2, 0}
}
type isObserveRequest_Match_Http_StringMatch_Match interface {
isObserveRequest_Match_Http_StringMatch_Match()
}
type ObserveRequest_Match_Http_StringMatch_Exact struct {
Exact string `protobuf:"bytes,1,opt,name=exact,oneof"`
}
type ObserveRequest_Match_Http_StringMatch_Prefix struct {
Prefix string `protobuf:"bytes,2,opt,name=prefix,oneof"`
}
func (*ObserveRequest_Match_Http_StringMatch_Exact) isObserveRequest_Match_Http_StringMatch_Match() {}
func (*ObserveRequest_Match_Http_StringMatch_Prefix) isObserveRequest_Match_Http_StringMatch_Match() {}
func (m *ObserveRequest_Match_Http_StringMatch) GetMatch() isObserveRequest_Match_Http_StringMatch_Match {
if m != nil {
return m.Match
}
return nil
}
func (m *ObserveRequest_Match_Http_StringMatch) GetExact() string {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Http_StringMatch_Exact); ok {
return x.Exact
}
return ""
}
func (m *ObserveRequest_Match_Http_StringMatch) GetPrefix() string {
if x, ok := m.GetMatch().(*ObserveRequest_Match_Http_StringMatch_Prefix); ok {
return x.Prefix
}
return ""
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*ObserveRequest_Match_Http_StringMatch) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _ObserveRequest_Match_Http_StringMatch_OneofMarshaler, _ObserveRequest_Match_Http_StringMatch_OneofUnmarshaler, _ObserveRequest_Match_Http_StringMatch_OneofSizer, []interface{}{
(*ObserveRequest_Match_Http_StringMatch_Exact)(nil),
(*ObserveRequest_Match_Http_StringMatch_Prefix)(nil),
}
}
func _ObserveRequest_Match_Http_StringMatch_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*ObserveRequest_Match_Http_StringMatch)
// match
switch x := m.Match.(type) {
case *ObserveRequest_Match_Http_StringMatch_Exact:
b.EncodeVarint(1<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Exact)
case *ObserveRequest_Match_Http_StringMatch_Prefix:
b.EncodeVarint(2<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Prefix)
case nil:
default:
return fmt.Errorf("ObserveRequest_Match_Http_StringMatch.Match has unexpected type %T", x)
}
return nil
}
func _ObserveRequest_Match_Http_StringMatch_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*ObserveRequest_Match_Http_StringMatch)
switch tag {
case 1: // match.exact
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Match = &ObserveRequest_Match_Http_StringMatch_Exact{x}
return true, err
case 2: // match.prefix
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Match = &ObserveRequest_Match_Http_StringMatch_Prefix{x}
return true, err
default:
return false, nil
}
}
func _ObserveRequest_Match_Http_StringMatch_OneofSizer(msg proto.Message) (n int) {
m := msg.(*ObserveRequest_Match_Http_StringMatch)
// match
switch x := m.Match.(type) {
case *ObserveRequest_Match_Http_StringMatch_Exact:
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Exact)))
n += len(x.Exact)
case *ObserveRequest_Match_Http_StringMatch_Prefix:
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Prefix)))
n += len(x.Prefix)
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
func init() {
proto.RegisterType((*ObserveRequest)(nil), "conduit.proxy.tap.ObserveRequest")
proto.RegisterType((*ObserveRequest_Match)(nil), "conduit.proxy.tap.ObserveRequest.Match")
proto.RegisterType((*ObserveRequest_Match_Seq)(nil), "conduit.proxy.tap.ObserveRequest.Match.Seq")
proto.RegisterType((*ObserveRequest_Match_Tcp)(nil), "conduit.proxy.tap.ObserveRequest.Match.Tcp")
proto.RegisterType((*ObserveRequest_Match_Tcp_Netmask)(nil), "conduit.proxy.tap.ObserveRequest.Match.Tcp.Netmask")
proto.RegisterType((*ObserveRequest_Match_Tcp_PortRange)(nil), "conduit.proxy.tap.ObserveRequest.Match.Tcp.PortRange")
proto.RegisterType((*ObserveRequest_Match_Http)(nil), "conduit.proxy.tap.ObserveRequest.Match.Http")
proto.RegisterType((*ObserveRequest_Match_Http_StringMatch)(nil), "conduit.proxy.tap.ObserveRequest.Match.Http.StringMatch")
}
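// Example (not generated): building an ObserveRequest that taps at most 10
// events matching both an HTTP path prefix of "/api" and a destination port in
// 8000-8999. Each branch of the nested Match oneof is one of the wrapper types
// defined above. Illustrative sketch only; the limit, prefix, and port values
// are arbitrary.
func exampleObserveRequest() *ObserveRequest {
	return &ObserveRequest{
		Limit: 10,
		Match: &ObserveRequest_Match{
			Match: &ObserveRequest_Match_All{
				All: &ObserveRequest_Match_Seq{
					Matches: []*ObserveRequest_Match{
						{
							Match: &ObserveRequest_Match_Destination{
								Destination: &ObserveRequest_Match_Tcp{
									Match: &ObserveRequest_Match_Tcp_Ports{
										Ports: &ObserveRequest_Match_Tcp_PortRange{Min: 8000, Max: 8999},
									},
								},
							},
						},
						{
							Match: &ObserveRequest_Match_Http_{
								Http: &ObserveRequest_Match_Http{
									Match: &ObserveRequest_Match_Http_Path{
										Path: &ObserveRequest_Match_Http_StringMatch{
											Match: &ObserveRequest_Match_Http_StringMatch_Prefix{Prefix: "/api"},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
}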
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Tap service
type TapClient interface {
Observe(ctx context.Context, in *ObserveRequest, opts ...grpc.CallOption) (Tap_ObserveClient, error)
}
type tapClient struct {
cc *grpc.ClientConn
}
func NewTapClient(cc *grpc.ClientConn) TapClient {
return &tapClient{cc}
}
func (c *tapClient) Observe(ctx context.Context, in *ObserveRequest, opts ...grpc.CallOption) (Tap_ObserveClient, error) {
stream, err := grpc.NewClientStream(ctx, &_Tap_serviceDesc.Streams[0], c.cc, "/conduit.proxy.tap.Tap/Observe", opts...)
if err != nil {
return nil, err
}
x := &tapObserveClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type Tap_ObserveClient interface {
Recv() (*conduit_common.TapEvent, error)
grpc.ClientStream
}
type tapObserveClient struct {
grpc.ClientStream
}
func (x *tapObserveClient) Recv() (*conduit_common.TapEvent, error) {
m := new(conduit_common.TapEvent)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Server API for Tap service
type TapServer interface {
Observe(*ObserveRequest, Tap_ObserveServer) error
}
func RegisterTapServer(s *grpc.Server, srv TapServer) {
s.RegisterService(&_Tap_serviceDesc, srv)
}
func _Tap_Observe_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(ObserveRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(TapServer).Observe(m, &tapObserveServer{stream})
}
type Tap_ObserveServer interface {
Send(*conduit_common.TapEvent) error
grpc.ServerStream
}
type tapObserveServer struct {
grpc.ServerStream
}
func (x *tapObserveServer) Send(m *conduit_common.TapEvent) error {
return x.ServerStream.SendMsg(m)
}
var _Tap_serviceDesc = grpc.ServiceDesc{
ServiceName: "conduit.proxy.tap.Tap",
HandlerType: (*TapServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Observe",
Handler: _Tap_Observe_Handler,
ServerStreams: true,
},
},
Metadata: "proxy/tap/tap.proto",
}
func init() { proto.RegisterFile("proxy/tap/tap.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 553 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xc1, 0x6e, 0xd3, 0x4c,
0x10, 0xc7, 0x13, 0x3b, 0x76, 0x94, 0x89, 0xbe, 0x4f, 0x30, 0x45, 0x95, 0xf1, 0xa9, 0xf4, 0x42,
0x11, 0xc8, 0xa9, 0x52, 0x90, 0x90, 0x10, 0x42, 0xad, 0x54, 0xc9, 0x20, 0xa5, 0x29, 0x4e, 0x0e,
0x5c, 0xb7, 0xce, 0x52, 0x5b, 0xc4, 0xbb, 0x1b, 0x7b, 0x52, 0x25, 0x4f, 0xc1, 0x73, 0xf0, 0x6c,
0x3c, 0x00, 0x57, 0xb4, 0xeb, 0x4d, 0x28, 0xe9, 0x81, 0x1a, 0x0e, 0x96, 0x67, 0x77, 0xe7, 0xff,
0xdb, 0xd9, 0x99, 0x9d, 0x85, 0x3d, 0x55, 0xca, 0xd5, 0x7a, 0x40, 0x4c, 0xe9, 0x2f, 0x52, 0xa5,
0x24, 0x89, 0x0f, 0x53, 0x29, 0x66, 0xcb, 0x9c, 0x22, 0xb3, 0x18, 0x11, 0x53, 0xe1, 0x5e, 0x2a,
0x8b, 0x42, 0x8a, 0x41, 0xfd, 0xab, 0xfd, 0x0e, 0x7f, 0xf4, 0xe0, 0xff, 0xf1, 0x55, 0xc5, 0xcb,
0x1b, 0x9e, 0xf0, 0xc5, 0x92, 0x57, 0x84, 0x8f, 0xc0, 0x9b, 0xe7, 0x45, 0x4e, 0x41, 0xfb, 0xa0,
0x7d, 0xf4, 0x5f, 0x52, 0x0f, 0xf0, 0x2d, 0x78, 0x05, 0xa3, 0x34, 0x0b, 0x9c, 0x83, 0xf6, 0x51,
0x7f, 0xf8, 0x34, 0xba, 0xb3, 0x41, 0xf4, 0x3b, 0x27, 0x1a, 0x69, 0xf7, 0xa4, 0x56, 0x85, 0x5f,
0x7b, 0xe0, 0x99, 0x09, 0x7c, 0x07, 0x2e, 0x9b, 0xcf, 0x0d, 0xbc, 0x3f, 0x7c, 0x7e, 0x4f, 0x4c,
0x34, 0xe1, 0x8b, 0xb8, 0x95, 0x68, 0xa5, 0x01, 0x88, 0xb5, 0x8d, 0xa3, 0x31, 0x40, 0xac, 0xf1,
0x0d, 0xb8, 0x42, 0x52, 0xe0, 0x36, 0x3a, 0x88, 0x16, 0x0b, 0x49, 0x78, 0x0e, 0x7e, 0x25, 0x97,
0x65, 0xca, 0x83, 0x4e, 0xb3, 0x00, 0xa6, 0xa9, 0x8a, 0x5b, 0x89, 0x15, 0xe3, 0x18, 0xfa, 0x33,
0x5e, 0x51, 0x2e, 0x18, 0xe5, 0x52, 0x04, 0xde, 0xdf, 0xb0, 0x6e, 0x13, 0xf0, 0x0c, 0x3a, 0x19,
0x91, 0x0a, 0x7c, 0x43, 0x7a, 0x71, 0x5f, 0x52, 0x4c, 0xa4, 0x51, 0x46, 0x1b, 0xc6, 0xe0, 0x4e,
0xf8, 0x02, 0x4f, 0xa1, 0x6b, 0x8a, 0xc6, 0xab, 0xa0, 0x7d, 0xe0, 0x36, 0x29, 0xf6, 0x46, 0x17,
0x7e, 0x73, 0xc0, 0x9d, 0xa6, 0x0a, 0xc7, 0xd0, 0x15, 0x9c, 0x0a, 0x56, 0x7d, 0xb1, 0x05, 0x3f,
0x69, 0x70, 0xc4, 0xe8, 0xa2, 0x96, 0xc6, 0xad, 0x64, 0x43, 0xc1, 0x11, 0x78, 0x4a, 0x96, 0x54,
0xd9, 0xea, 0xbd, 0x6a, 0x82, 0xbb, 0x94, 0x25, 0x25, 0x4c, 0x5c, 0xf3, 0xb8, 0x95, 0xd4, 0x94,
0x30, 0x86, 0xae, 0xdd, 0x04, 0x9f, 0x81, 0x93, 0x2b, 0x1b, 0xe5, 0xe3, 0x2d, 0xd6, 0x36, 0xcb,
0xfb, 0xcb, 0xd3, 0xd9, 0xac, 0xe4, 0x55, 0x95, 0x38, 0xb9, 0x42, 0x84, 0x8e, 0x39, 0x92, 0x63,
0x1a, 0xc4, 0xd8, 0xe1, 0x00, 0x7a, 0x5b, 0x3e, 0x3e, 0x00, 0xb7, 0xc8, 0x85, 0x6d, 0x20, 0x6d,
0x9a, 0x19, 0xb6, 0xb2, 0x0a, 0x6d, 0x9e, 0x75, 0x6d, 0x43, 0x85, 0xdf, 0x1d, 0xe8, 0xe8, 0x32,
0xe0, 0x31, 0xf8, 0x55, 0x9a, 0xf1, 0x82, 0xdb, 0x28, 0xf6, 0x77, 0xa3, 0x98, 0x98, 0x55, 0x73,
0x8b, 0x8c, 0x85, 0x2f, 0xc1, 0x2f, 0x38, 0x65, 0x72, 0x66, 0xd3, 0x11, 0xee, 0x2a, 0x34, 0x77,
0x64, 0x3c, 0xb4, 0xaa, 0xf6, 0xc5, 0x4f, 0xd0, 0x63, 0x4b, 0xca, 0x64, 0x99, 0xd3, 0xa6, 0x8d,
0x5e, 0x37, 0xb9, 0x2f, 0xd1, 0x84, 0xca, 0x5c, 0x5c, 0x6f, 0xda, 0xe2, 0x17, 0x0c, 0x2f, 0xa0,
0xa3, 0x18, 0x65, 0xb6, 0x35, 0xfe, 0x05, 0x6a, 0x38, 0x61, 0x0c, 0xfd, 0x5b, 0xd3, 0xb8, 0x0f,
0x1e, 0x5f, 0xb1, 0xb4, 0x7e, 0x99, 0x7a, 0xba, 0x8a, 0x66, 0x88, 0x01, 0xf8, 0xaa, 0xe4, 0x9f,
0xf3, 0x3a, 0xbf, 0x7a, 0xc1, 0x8e, 0xb7, 0x49, 0xbe, 0x6b, 0x0c, 0x3f, 0x82, 0x3b, 0x65, 0x0a,
0x3f, 0x40, 0xd7, 0xc6, 0x84, 0x4f, 0xfe, 0x18, 0x6f, 0x18, 0xec, 0x26, 0x78, 0xca, 0xd4, 0xf9,
0x0d, 0x17, 0x74, 0xd8, 0x3a, 0x6e, 0x5f, 0xf9, 0xe6, 0x4d, 0x3d, 0xf9, 0x19, 0x00, 0x00, 0xff,
0xff, 0x72, 0x21, 0x20, 0xc9, 0x92, 0x05, 0x00, 0x00,
}
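// Example (not generated): streaming tap events from a client. Illustrative
// sketch only; it assumes "io" and "log" are imported and that req is an
// *ObserveRequest such as the one sketched above.
func observeTap(ctx context.Context, cc *grpc.ClientConn, req *ObserveRequest) error {
	client := NewTapClient(cc)
	stream, err := client.Observe(ctx, req)
	if err != nil {
		return err
	}
	for {
		event, err := stream.Recv() // *conduit_common.TapEvent
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		log.Printf("tap event: %s", event.String())
	}
}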

View File

@ -0,0 +1,751 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: proxy/telemetry/telemetry.proto
/*
Package conduit_proxy_telemetry is a generated protocol buffer package.
It is generated from these files:
proxy/telemetry/telemetry.proto
It has these top-level messages:
ReportRequest
Process
ServerTransport
ClientTransport
TransportSummary
RequestScope
RequestCtx
Latency
ResponseScope
ResponseCtx
EosScope
EosCtx
StreamSummary
ReportResponse
*/
package conduit_proxy_telemetry
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import conduit_common "github.com/runconduit/conduit/controller/gen/common"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ReportRequest_Proxy int32
const (
ReportRequest_INBOUND ReportRequest_Proxy = 0
ReportRequest_OUTBOUND ReportRequest_Proxy = 1
)
var ReportRequest_Proxy_name = map[int32]string{
0: "INBOUND",
1: "OUTBOUND",
}
var ReportRequest_Proxy_value = map[string]int32{
"INBOUND": 0,
"OUTBOUND": 1,
}
func (x ReportRequest_Proxy) String() string {
return proto.EnumName(ReportRequest_Proxy_name, int32(x))
}
func (ReportRequest_Proxy) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
type ReportRequest struct {
Process *Process `protobuf:"bytes,1,opt,name=process" json:"process,omitempty"`
Proxy ReportRequest_Proxy `protobuf:"varint,2,opt,name=proxy,enum=conduit.proxy.telemetry.ReportRequest_Proxy" json:"proxy,omitempty"`
ServerTransports []*ServerTransport `protobuf:"bytes,3,rep,name=server_transports,json=serverTransports" json:"server_transports,omitempty"`
ClientTransports []*ClientTransport `protobuf:"bytes,4,rep,name=client_transports,json=clientTransports" json:"client_transports,omitempty"`
Requests []*RequestScope `protobuf:"bytes,5,rep,name=requests" json:"requests,omitempty"`
}
func (m *ReportRequest) Reset() { *m = ReportRequest{} }
func (m *ReportRequest) String() string { return proto.CompactTextString(m) }
func (*ReportRequest) ProtoMessage() {}
func (*ReportRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *ReportRequest) GetProcess() *Process {
if m != nil {
return m.Process
}
return nil
}
func (m *ReportRequest) GetProxy() ReportRequest_Proxy {
if m != nil {
return m.Proxy
}
return ReportRequest_INBOUND
}
func (m *ReportRequest) GetServerTransports() []*ServerTransport {
if m != nil {
return m.ServerTransports
}
return nil
}
func (m *ReportRequest) GetClientTransports() []*ClientTransport {
if m != nil {
return m.ClientTransports
}
return nil
}
func (m *ReportRequest) GetRequests() []*RequestScope {
if m != nil {
return m.Requests
}
return nil
}
type Process struct {
Node string `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"`
ScheduledInstance string `protobuf:"bytes,2,opt,name=scheduled_instance,json=scheduledInstance" json:"scheduled_instance,omitempty"`
ScheduledNamespace string `protobuf:"bytes,3,opt,name=scheduled_namespace,json=scheduledNamespace" json:"scheduled_namespace,omitempty"`
}
func (m *Process) Reset() { *m = Process{} }
func (m *Process) String() string { return proto.CompactTextString(m) }
func (*Process) ProtoMessage() {}
func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Process) GetNode() string {
if m != nil {
return m.Node
}
return ""
}
func (m *Process) GetScheduledInstance() string {
if m != nil {
return m.ScheduledInstance
}
return ""
}
func (m *Process) GetScheduledNamespace() string {
if m != nil {
return m.ScheduledNamespace
}
return ""
}
type ServerTransport struct {
SourceIp *conduit_common.IPAddress `protobuf:"bytes,1,opt,name=source_ip,json=sourceIp" json:"source_ip,omitempty"`
Connects uint32 `protobuf:"varint,2,opt,name=connects" json:"connects,omitempty"`
Disconnects []*TransportSummary `protobuf:"bytes,3,rep,name=disconnects" json:"disconnects,omitempty"`
}
func (m *ServerTransport) Reset() { *m = ServerTransport{} }
func (m *ServerTransport) String() string { return proto.CompactTextString(m) }
func (*ServerTransport) ProtoMessage() {}
func (*ServerTransport) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *ServerTransport) GetSourceIp() *conduit_common.IPAddress {
if m != nil {
return m.SourceIp
}
return nil
}
func (m *ServerTransport) GetConnects() uint32 {
if m != nil {
return m.Connects
}
return 0
}
func (m *ServerTransport) GetDisconnects() []*TransportSummary {
if m != nil {
return m.Disconnects
}
return nil
}
type ClientTransport struct {
TargetAddr *conduit_common.TcpAddress `protobuf:"bytes,1,opt,name=target_addr,json=targetAddr" json:"target_addr,omitempty"`
Connects uint32 `protobuf:"varint,2,opt,name=connects" json:"connects,omitempty"`
Disconnects []*TransportSummary `protobuf:"bytes,3,rep,name=disconnects" json:"disconnects,omitempty"`
}
func (m *ClientTransport) Reset() { *m = ClientTransport{} }
func (m *ClientTransport) String() string { return proto.CompactTextString(m) }
func (*ClientTransport) ProtoMessage() {}
func (*ClientTransport) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *ClientTransport) GetTargetAddr() *conduit_common.TcpAddress {
if m != nil {
return m.TargetAddr
}
return nil
}
func (m *ClientTransport) GetConnects() uint32 {
if m != nil {
return m.Connects
}
return 0
}
func (m *ClientTransport) GetDisconnects() []*TransportSummary {
if m != nil {
return m.Disconnects
}
return nil
}
type TransportSummary struct {
DurationMs uint64 `protobuf:"varint,1,opt,name=duration_ms,json=durationMs" json:"duration_ms,omitempty"`
BytesSent uint64 `protobuf:"varint,2,opt,name=bytes_sent,json=bytesSent" json:"bytes_sent,omitempty"`
}
func (m *TransportSummary) Reset() { *m = TransportSummary{} }
func (m *TransportSummary) String() string { return proto.CompactTextString(m) }
func (*TransportSummary) ProtoMessage() {}
func (*TransportSummary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *TransportSummary) GetDurationMs() uint64 {
if m != nil {
return m.DurationMs
}
return 0
}
func (m *TransportSummary) GetBytesSent() uint64 {
if m != nil {
return m.BytesSent
}
return 0
}
type RequestScope struct {
Ctx *RequestCtx `protobuf:"bytes,1,opt,name=ctx" json:"ctx,omitempty"`
Count uint32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
Responses []*ResponseScope `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"`
}
func (m *RequestScope) Reset() { *m = RequestScope{} }
func (m *RequestScope) String() string { return proto.CompactTextString(m) }
func (*RequestScope) ProtoMessage() {}
func (*RequestScope) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *RequestScope) GetCtx() *RequestCtx {
if m != nil {
return m.Ctx
}
return nil
}
func (m *RequestScope) GetCount() uint32 {
if m != nil {
return m.Count
}
return 0
}
func (m *RequestScope) GetResponses() []*ResponseScope {
if m != nil {
return m.Responses
}
return nil
}
type RequestCtx struct {
SourceIp *conduit_common.IPAddress `protobuf:"bytes,1,opt,name=source_ip,json=sourceIp" json:"source_ip,omitempty"`
TargetAddr *conduit_common.TcpAddress `protobuf:"bytes,2,opt,name=target_addr,json=targetAddr" json:"target_addr,omitempty"`
Authority string `protobuf:"bytes,3,opt,name=authority" json:"authority,omitempty"`
Method *conduit_common.HttpMethod `protobuf:"bytes,4,opt,name=method" json:"method,omitempty"`
Path string `protobuf:"bytes,5,opt,name=path" json:"path,omitempty"`
}
func (m *RequestCtx) Reset() { *m = RequestCtx{} }
func (m *RequestCtx) String() string { return proto.CompactTextString(m) }
func (*RequestCtx) ProtoMessage() {}
func (*RequestCtx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *RequestCtx) GetSourceIp() *conduit_common.IPAddress {
if m != nil {
return m.SourceIp
}
return nil
}
func (m *RequestCtx) GetTargetAddr() *conduit_common.TcpAddress {
if m != nil {
return m.TargetAddr
}
return nil
}
func (m *RequestCtx) GetAuthority() string {
if m != nil {
return m.Authority
}
return ""
}
func (m *RequestCtx) GetMethod() *conduit_common.HttpMethod {
if m != nil {
return m.Method
}
return nil
}
func (m *RequestCtx) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
// A latency value in tenths of a millisecond and a count of the times
// that latency was observed.
type Latency struct {
// Latency value in tenths of a millisecond.
Latency uint32 `protobuf:"varint,1,opt,name=latency" json:"latency,omitempty"`
// Count of occurrences of this latency value.
Count uint32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
}
func (m *Latency) Reset() { *m = Latency{} }
func (m *Latency) String() string { return proto.CompactTextString(m) }
func (*Latency) ProtoMessage() {}
func (*Latency) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *Latency) GetLatency() uint32 {
if m != nil {
return m.Latency
}
return 0
}
func (m *Latency) GetCount() uint32 {
if m != nil {
return m.Count
}
return 0
}
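// Example (not generated): recording an observed latency. Per the comment
// above, Latency.Latency is expressed in tenths of a millisecond (100µs
// units). Illustrative sketch only; assumes "time" is imported.
func latencyFor(d time.Duration, count uint32) *Latency {
	return &Latency{
		Latency: uint32(d / (100 * time.Microsecond)), // e.g. 2.5ms -> 25
		Count:   count,
	}
}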
type ResponseScope struct {
Ctx *ResponseCtx `protobuf:"bytes,1,opt,name=ctx" json:"ctx,omitempty"`
// Response latencies (time from request headers sent to response headers received),
// represented as an array of observed latency values with precision to 100µs and
// the number of times those values were observed, ordered by the latency value.
ResponseLatencies []*Latency `protobuf:"bytes,2,rep,name=response_latencies,json=responseLatencies" json:"response_latencies,omitempty"`
Ends []*EosScope `protobuf:"bytes,3,rep,name=ends" json:"ends,omitempty"`
}
func (m *ResponseScope) Reset() { *m = ResponseScope{} }
func (m *ResponseScope) String() string { return proto.CompactTextString(m) }
func (*ResponseScope) ProtoMessage() {}
func (*ResponseScope) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *ResponseScope) GetCtx() *ResponseCtx {
if m != nil {
return m.Ctx
}
return nil
}
func (m *ResponseScope) GetResponseLatencies() []*Latency {
if m != nil {
return m.ResponseLatencies
}
return nil
}
func (m *ResponseScope) GetEnds() []*EosScope {
if m != nil {
return m.Ends
}
return nil
}
type ResponseCtx struct {
HttpStatusCode uint32 `protobuf:"varint,1,opt,name=http_status_code,json=httpStatusCode" json:"http_status_code,omitempty"`
}
func (m *ResponseCtx) Reset() { *m = ResponseCtx{} }
func (m *ResponseCtx) String() string { return proto.CompactTextString(m) }
func (*ResponseCtx) ProtoMessage() {}
func (*ResponseCtx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (m *ResponseCtx) GetHttpStatusCode() uint32 {
if m != nil {
return m.HttpStatusCode
}
return 0
}
type EosScope struct {
Ctx *EosCtx `protobuf:"bytes,1,opt,name=ctx" json:"ctx,omitempty"`
Streams []*StreamSummary `protobuf:"bytes,2,rep,name=streams" json:"streams,omitempty"`
}
func (m *EosScope) Reset() { *m = EosScope{} }
func (m *EosScope) String() string { return proto.CompactTextString(m) }
func (*EosScope) ProtoMessage() {}
func (*EosScope) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (m *EosScope) GetCtx() *EosCtx {
if m != nil {
return m.Ctx
}
return nil
}
func (m *EosScope) GetStreams() []*StreamSummary {
if m != nil {
return m.Streams
}
return nil
}
type EosCtx struct {
// Types that are valid to be assigned to End:
// *EosCtx_GrpcStatusCode
// *EosCtx_ResetErrorCode
// *EosCtx_Other
End isEosCtx_End `protobuf_oneof:"end"`
}
func (m *EosCtx) Reset() { *m = EosCtx{} }
func (m *EosCtx) String() string { return proto.CompactTextString(m) }
func (*EosCtx) ProtoMessage() {}
func (*EosCtx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
type isEosCtx_End interface {
isEosCtx_End()
}
type EosCtx_GrpcStatusCode struct {
GrpcStatusCode uint32 `protobuf:"varint,1,opt,name=grpc_status_code,json=grpcStatusCode,oneof"`
}
type EosCtx_ResetErrorCode struct {
ResetErrorCode uint32 `protobuf:"varint,2,opt,name=reset_error_code,json=resetErrorCode,oneof"`
}
type EosCtx_Other struct {
Other bool `protobuf:"varint,3,opt,name=other,oneof"`
}
func (*EosCtx_GrpcStatusCode) isEosCtx_End() {}
func (*EosCtx_ResetErrorCode) isEosCtx_End() {}
func (*EosCtx_Other) isEosCtx_End() {}
func (m *EosCtx) GetEnd() isEosCtx_End {
if m != nil {
return m.End
}
return nil
}
func (m *EosCtx) GetGrpcStatusCode() uint32 {
if x, ok := m.GetEnd().(*EosCtx_GrpcStatusCode); ok {
return x.GrpcStatusCode
}
return 0
}
func (m *EosCtx) GetResetErrorCode() uint32 {
if x, ok := m.GetEnd().(*EosCtx_ResetErrorCode); ok {
return x.ResetErrorCode
}
return 0
}
func (m *EosCtx) GetOther() bool {
if x, ok := m.GetEnd().(*EosCtx_Other); ok {
return x.Other
}
return false
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*EosCtx) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _EosCtx_OneofMarshaler, _EosCtx_OneofUnmarshaler, _EosCtx_OneofSizer, []interface{}{
(*EosCtx_GrpcStatusCode)(nil),
(*EosCtx_ResetErrorCode)(nil),
(*EosCtx_Other)(nil),
}
}
func _EosCtx_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*EosCtx)
// end
switch x := m.End.(type) {
case *EosCtx_GrpcStatusCode:
b.EncodeVarint(1<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.GrpcStatusCode))
case *EosCtx_ResetErrorCode:
b.EncodeVarint(2<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.ResetErrorCode))
case *EosCtx_Other:
t := uint64(0)
if x.Other {
t = 1
}
b.EncodeVarint(3<<3 | proto.WireVarint)
b.EncodeVarint(t)
case nil:
default:
return fmt.Errorf("EosCtx.End has unexpected type %T", x)
}
return nil
}
func _EosCtx_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*EosCtx)
switch tag {
case 1: // end.grpc_status_code
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.End = &EosCtx_GrpcStatusCode{uint32(x)}
return true, err
case 2: // end.reset_error_code
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.End = &EosCtx_ResetErrorCode{uint32(x)}
return true, err
case 3: // end.other
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.End = &EosCtx_Other{x != 0}
return true, err
default:
return false, nil
}
}
func _EosCtx_OneofSizer(msg proto.Message) (n int) {
m := msg.(*EosCtx)
// end
switch x := m.End.(type) {
case *EosCtx_GrpcStatusCode:
n += proto.SizeVarint(1<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.GrpcStatusCode))
case *EosCtx_ResetErrorCode:
n += proto.SizeVarint(2<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.ResetErrorCode))
case *EosCtx_Other:
n += proto.SizeVarint(3<<3 | proto.WireVarint)
n += 1
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type StreamSummary struct {
DurationMs uint64 `protobuf:"varint,1,opt,name=duration_ms,json=durationMs" json:"duration_ms,omitempty"`
BytesSent uint64 `protobuf:"varint,2,opt,name=bytes_sent,json=bytesSent" json:"bytes_sent,omitempty"`
FramesSent uint32 `protobuf:"varint,3,opt,name=frames_sent,json=framesSent" json:"frames_sent,omitempty"`
}
func (m *StreamSummary) Reset() { *m = StreamSummary{} }
func (m *StreamSummary) String() string { return proto.CompactTextString(m) }
func (*StreamSummary) ProtoMessage() {}
func (*StreamSummary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
func (m *StreamSummary) GetDurationMs() uint64 {
if m != nil {
return m.DurationMs
}
return 0
}
func (m *StreamSummary) GetBytesSent() uint64 {
if m != nil {
return m.BytesSent
}
return 0
}
func (m *StreamSummary) GetFramesSent() uint32 {
if m != nil {
return m.FramesSent
}
return 0
}
type ReportResponse struct {
}
func (m *ReportResponse) Reset() { *m = ReportResponse{} }
func (m *ReportResponse) String() string { return proto.CompactTextString(m) }
func (*ReportResponse) ProtoMessage() {}
func (*ReportResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
func init() {
proto.RegisterType((*ReportRequest)(nil), "conduit.proxy.telemetry.ReportRequest")
proto.RegisterType((*Process)(nil), "conduit.proxy.telemetry.Process")
proto.RegisterType((*ServerTransport)(nil), "conduit.proxy.telemetry.ServerTransport")
proto.RegisterType((*ClientTransport)(nil), "conduit.proxy.telemetry.ClientTransport")
proto.RegisterType((*TransportSummary)(nil), "conduit.proxy.telemetry.TransportSummary")
proto.RegisterType((*RequestScope)(nil), "conduit.proxy.telemetry.RequestScope")
proto.RegisterType((*RequestCtx)(nil), "conduit.proxy.telemetry.RequestCtx")
proto.RegisterType((*Latency)(nil), "conduit.proxy.telemetry.Latency")
proto.RegisterType((*ResponseScope)(nil), "conduit.proxy.telemetry.ResponseScope")
proto.RegisterType((*ResponseCtx)(nil), "conduit.proxy.telemetry.ResponseCtx")
proto.RegisterType((*EosScope)(nil), "conduit.proxy.telemetry.EosScope")
proto.RegisterType((*EosCtx)(nil), "conduit.proxy.telemetry.EosCtx")
proto.RegisterType((*StreamSummary)(nil), "conduit.proxy.telemetry.StreamSummary")
proto.RegisterType((*ReportResponse)(nil), "conduit.proxy.telemetry.ReportResponse")
proto.RegisterEnum("conduit.proxy.telemetry.ReportRequest_Proxy", ReportRequest_Proxy_name, ReportRequest_Proxy_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Telemetry service
type TelemetryClient interface {
Report(ctx context.Context, in *ReportRequest, opts ...grpc.CallOption) (*ReportResponse, error)
}
type telemetryClient struct {
cc *grpc.ClientConn
}
func NewTelemetryClient(cc *grpc.ClientConn) TelemetryClient {
return &telemetryClient{cc}
}
func (c *telemetryClient) Report(ctx context.Context, in *ReportRequest, opts ...grpc.CallOption) (*ReportResponse, error) {
out := new(ReportResponse)
err := grpc.Invoke(ctx, "/conduit.proxy.telemetry.Telemetry/Report", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Telemetry service
type TelemetryServer interface {
Report(context.Context, *ReportRequest) (*ReportResponse, error)
}
func RegisterTelemetryServer(s *grpc.Server, srv TelemetryServer) {
s.RegisterService(&_Telemetry_serviceDesc, srv)
}
func _Telemetry_Report_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TelemetryServer).Report(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/conduit.proxy.telemetry.Telemetry/Report",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TelemetryServer).Report(ctx, req.(*ReportRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Telemetry_serviceDesc = grpc.ServiceDesc{
ServiceName: "conduit.proxy.telemetry.Telemetry",
HandlerType: (*TelemetryServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Report",
Handler: _Telemetry_Report_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "proxy/telemetry/telemetry.proto",
}
func init() { proto.RegisterFile("proxy/telemetry/telemetry.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 867 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x23, 0x35,
0x14, 0xee, 0x34, 0x49, 0x93, 0x9c, 0x6c, 0xba, 0xa9, 0x17, 0x41, 0xa8, 0x40, 0x2d, 0x03, 0x2c,
0x01, 0x41, 0x2a, 0x82, 0x76, 0x11, 0x70, 0xc3, 0xb6, 0xbb, 0x52, 0x2b, 0x76, 0xdb, 0x95, 0xd3,
0x5e, 0x71, 0x31, 0x9a, 0xf5, 0x1c, 0x36, 0x23, 0x75, 0xec, 0xc1, 0xf6, 0xa0, 0xe6, 0x02, 0xed,
0xb3, 0x20, 0xf1, 0x04, 0xdc, 0xf0, 0x18, 0x3c, 0x06, 0xaf, 0x81, 0x6c, 0x8f, 0x27, 0xd3, 0xd0,
0x34, 0xfc, 0x48, 0x7b, 0x15, 0xfb, 0x9c, 0xef, 0xfb, 0x7c, 0xfe, 0xec, 0x09, 0xec, 0xe5, 0x52,
0x5c, 0xcd, 0x0f, 0x34, 0x5e, 0x62, 0x86, 0x5a, 0xd6, 0x56, 0xe3, 0x5c, 0x0a, 0x2d, 0xc8, 0x5b,
0x4c, 0xf0, 0xa4, 0x48, 0xf5, 0xd8, 0x02, 0xc7, 0x95, 0x7b, 0xf7, 0x1e, 0x13, 0x59, 0x26, 0xf8,
0x81, 0xfb, 0x71, 0xe8, 0xf0, 0xd7, 0x06, 0xf4, 0x29, 0xe6, 0x42, 0x6a, 0x8a, 0x3f, 0x16, 0xa8,
0x34, 0xf9, 0x1a, 0xda, 0xb9, 0x14, 0x0c, 0x95, 0x1a, 0x06, 0xfb, 0xc1, 0xa8, 0x37, 0xd9, 0x1f,
0xaf, 0x50, 0x1c, 0x3f, 0x77, 0x38, 0xea, 0x09, 0xe4, 0x10, 0x5a, 0x16, 0x33, 0xdc, 0xdc, 0x0f,
0x46, 0xdb, 0x93, 0x4f, 0x57, 0x32, 0xaf, 0x1d, 0x69, 0x74, 0xae, 0xe6, 0xd4, 0x51, 0xc9, 0x05,
0xec, 0x28, 0x94, 0x3f, 0xa1, 0x8c, 0xb4, 0x8c, 0xb9, 0x32, 0x38, 0x35, 0x6c, 0xec, 0x37, 0x46,
0xbd, 0xc9, 0x68, 0xa5, 0xde, 0xd4, 0x32, 0xce, 0x3d, 0x81, 0x0e, 0xd4, 0x75, 0x83, 0x32, 0xb2,
0xec, 0x32, 0x45, 0xae, 0xeb, 0xb2, 0xcd, 0x35, 0xb2, 0x47, 0x96, 0x51, 0x93, 0x65, 0xd7, 0x0d,
0x8a, 0x3c, 0x82, 0x8e, 0x74, 0x59, 0xa8, 0x61, 0xcb, 0xaa, 0x7d, 0x78, 0x4b, 0xd2, 0x16, 0x38,
0x65, 0x22, 0x47, 0x5a, 0xd1, 0xc2, 0x10, 0x5a, 0xb6, 0x00, 0xa4, 0x07, 0xed, 0x93, 0xd3, 0xc3,
0xb3, 0x8b, 0xd3, 0xc7, 0x83, 0x0d, 0x72, 0x07, 0x3a, 0x67, 0x17, 0xe7, 0x6e, 0x17, 0x84, 0x3f,
0x43, 0xbb, 0x2c, 0x36, 0x21, 0xd0, 0xe4, 0x22, 0x41, 0xdb, 0x9c, 0x2e, 0xb5, 0x6b, 0xf2, 0x19,
0x10, 0xc5, 0x66, 0x98, 0x14, 0x97, 0x98, 0x44, 0x29, 0x57, 0x3a, 0xe6, 0x0c, 0x6d, 0x13, 0xba,
0x74, 0xa7, 0xf2, 0x9c, 0x94, 0x0e, 0x72, 0x00, 0xf7, 0x16, 0x70, 0x1e, 0x67, 0xa8, 0xf2, 0x98,
0xe1, 0xb0, 0x61, 0xf1, 0x0b, 0xa5, 0x53, 0xef, 0x09, 0x7f, 0x0b, 0xe0, 0xee, 0x52, 0x89, 0xc9,
0x43, 0xe8, 0x2a, 0x51, 0x48, 0x86, 0x51, 0x9a, 0x97, 0x93, 0xf2, 0x76, 0x95, 0x7a, 0x39, 0x63,
0x27, 0xcf, 0x1f, 0x25, 0x89, 0x34, 0x23, 0xd2, 0x71, 0xd8, 0x93, 0x9c, 0xec, 0x42, 0x87, 0x09,
0xce, 0x91, 0x69, 0x65, 0x23, 0xec, 0xd3, 0x6a, 0x4f, 0xbe, 0x83, 0x5e, 0x92, 0xaa, 0xca, 0xed,
0xba, 0xfe, 0xf1, 0xca, 0x82, 0x56, 0xc1, 0x4c, 0x8b, 0x2c, 0x8b, 0xe5, 0x9c, 0xd6, 0xd9, 0xe1,
0xef, 0x01, 0xdc, 0x5d, 0x6a, 0x20, 0xf9, 0x06, 0x7a, 0x3a, 0x96, 0x2f, 0x51, 0x47, 0x71, 0x92,
0xc8, 0x32, 0xec, 0xdd, 0xe5, 0xb0, 0xcf, 0x59, 0xee, 0xe3, 0x06, 0x07, 0x37, 0xdb, 0xd7, 0x17,
0x39, 0x85, 0xc1, 0x32, 0x80, 0xec, 0x41, 0x2f, 0x29, 0x64, 0xac, 0x53, 0xc1, 0xa3, 0xcc, 0x5d,
0xcd, 0x26, 0x05, 0x6f, 0x7a, 0xa6, 0xc8, 0xbb, 0x00, 0x2f, 0xe6, 0x1a, 0x55, 0xa4, 0x90, 0x6b,
0x1b, 0x5f, 0x93, 0x76, 0xad, 0x65, 0x8a, 0x5c, 0x87, 0xbf, 0x04, 0x70, 0xa7, 0x3e, 0x80, 0xe4,
0x01, 0x34, 0x98, 0xbe, 0x2a, 0x4b, 0xf0, 0xfe, 0xba, 0xa1, 0x3d, 0xd2, 0x57, 0xd4, 0xe0, 0xc9,
0x1b, 0xd0, 0x62, 0xa2, 0x28, 0x4f, 0xe8, 0x53, 0xb7, 0x21, 0x8f, 0xa1, 0x2b, 0x51, 0xe5, 0x82,
0x2b, 0xf4, 0xc9, 0xdf, 0xbf, 0x45, 0xd2, 0x21, 0xdd, 0x45, 0x58, 0x10, 0xc3, 0x3f, 0x03, 0x80,
0xc5, 0x79, 0xff, 0x79, 0xc2, 0x96, 0x9a, 0xbc, 0xf9, 0xaf, 0x9a, 0xfc, 0x0e, 0x74, 0xe3, 0x42,
0xcf, 0x84, 0x4c, 0xf5, 0xbc, 0xbc, 0x11, 0x0b, 0x03, 0x99, 0xc0, 0x56, 0x86, 0x7a, 0x26, 0x92,
0x61, 0xf3, 0x66, 0xd5, 0x63, 0xad, 0xf3, 0x67, 0x16, 0x41, 0x4b, 0xa4, 0xb9, 0xb0, 0x79, 0xac,
0x67, 0xc3, 0x96, 0xbb, 0xb0, 0x66, 0x1d, 0x7e, 0x05, 0xed, 0xa7, 0xb1, 0x46, 0xce, 0xe6, 0x64,
0x08, 0xed, 0x4b, 0xb7, 0xb4, 0x39, 0xf6, 0xa9, 0xdf, 0xde, 0x5c, 0xea, 0xf0, 0x8f, 0xc0, 0xbc,
0xd8, 0xb5, 0x0a, 0x92, 0x87, 0xf5, 0x4e, 0x7e, 0xb0, 0xb6, 0xec, 0x55, 0x2b, 0xcf, 0x80, 0xf8,
0xda, 0x47, 0xee, 0xcc, 0x14, 0xcd, 0x64, 0x37, 0x6e, 0x7d, 0xf4, 0xcb, 0xb8, 0xe9, 0x8e, 0xe7,
0x3e, 0xf5, 0x54, 0xf2, 0x00, 0x9a, 0xc8, 0x13, 0x3f, 0x00, 0xef, 0xad, 0x94, 0x78, 0x22, 0x94,
0xeb, 0xbd, 0x85, 0x87, 0x5f, 0x42, 0xaf, 0x16, 0x1b, 0x19, 0xc1, 0x60, 0xa6, 0x75, 0x1e, 0x29,
0x1d, 0xeb, 0x42, 0x45, 0xcc, 0x3f, 0x76, 0x7d, 0xba, 0x6d, 0xec, 0x53, 0x6b, 0x3e, 0x12, 0x09,
0x86, 0xaf, 0xa0, 0xe3, 0xa5, 0xc8, 0xe7, 0xf5, 0x22, 0xec, 0xdd, 0x76, 0x74, 0x95, 0xff, 0xb7,
0xd0, 0x56, 0x5a, 0x62, 0x9c, 0xf9, 0xa4, 0x57, 0x8f, 0xec, 0xd4, 0xe2, 0xfc, 0x65, 0xf5, 0xb4,
0xf0, 0x15, 0x6c, 0x39, 0x41, 0xf2, 0x09, 0x0c, 0x5e, 0xca, 0x9c, 0xfd, 0x3d, 0xe8, 0xe3, 0x0d,
0xba, 0x6d, 0x3c, 0x8b, 0xb0, 0x0d, 0x56, 0xa2, 0x42, 0x1d, 0xa1, 0x94, 0x42, 0x3a, 0xec, 0xa6,
0xc7, 0x5a, 0xcf, 0x13, 0xe3, 0xb0, 0xd8, 0x37, 0xa1, 0x25, 0xf4, 0x0c, 0xa5, 0x1d, 0xc5, 0xce,
0xf1, 0x06, 0x75, 0xdb, 0xc3, 0x16, 0x34, 0x90, 0x27, 0x61, 0x0e, 0xfd, 0x6b, 0xa1, 0xfd, 0xdf,
0x67, 0xc2, 0xf0, 0x7f, 0x90, 0xe6, 0xdd, 0x77, 0xfe, 0x86, 0xad, 0x3b, 0x38, 0x93, 0x7d, 0x47,
0x06, 0xb0, 0xed, 0x3f, 0xde, 0xae, 0x65, 0x93, 0x19, 0x74, 0xcf, 0x7d, 0xa1, 0xc8, 0xf7, 0xb0,
0xe5, 0xdc, 0xe4, 0xfe, 0x3f, 0xfb, 0xf8, 0xef, 0x7e, 0xb4, 0x16, 0xe7, 0xce, 0x09, 0x37, 0x5e,
0x6c, 0xd9, 0xff, 0x2c, 0x5f, 0xfc, 0x15, 0x00, 0x00, 0xff, 0xff, 0xff, 0xc4, 0xbb, 0x61, 0x04,
0x09, 0x00, 0x00,
}

1120
controller/gen/public/api.pb.go generated Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,26 @@
package k8s
import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
func NewClientSet(kubeConfig string) (*kubernetes.Clientset, error) {
var config *rest.Config
var err error
if kubeConfig == "" {
// configure client while running inside the k8s cluster
// uses Service Acct token mounted in the Pod
config, err = rest.InClusterConfig()
} else {
// configure access to the cluster from outside
config, err = clientcmd.BuildConfigFromFlags("", kubeConfig)
}
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(config)
}

500
controller/k8s/endpoints.go Normal file
View File

@ -0,0 +1,500 @@
package k8s
import (
"fmt"
"sync"
"time"
common "github.com/runconduit/conduit/controller/gen/common"
"github.com/runconduit/conduit/controller/util"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/fields"
"k8s.io/client-go/pkg/util/intstr"
"k8s.io/client-go/tools/cache"
log "github.com/sirupsen/logrus"
)
const kubeSystem = "kube-system"
type EndpointsListener interface {
Update(add []common.TcpAddress, remove []common.TcpAddress)
}
/// EndpointsWatcher ///
type EndpointsWatcher struct {
endpointInformer informer
serviceInformer informer
// a map of service -> service port -> servicePort
servicePorts *map[string]*map[uint32]*servicePort
// This mutex protects the servicePorts data structure (nested map) itself
// and does not protect the servicePort objects themselves. They are locked
// separately.
mutex *sync.RWMutex
}
// An EndpointsWatcher watches all endpoints and services in the Kubernetes
// cluster. Listeners can subscribe to a particular service and port and
// EndpointsWatcher will publish the address set and all future changes for
// that service:port.
func NewEndpointsWatcher(clientset *kubernetes.Clientset) *EndpointsWatcher {
servicePorts := make(map[string]*map[uint32]*servicePort)
mutex := sync.RWMutex{}
return &EndpointsWatcher{
endpointInformer: newEndpointInformer(clientset, &servicePorts, &mutex),
serviceInformer: newServiceInformer(clientset, &servicePorts, &mutex),
servicePorts: &servicePorts,
mutex: &mutex,
}
}
func (e *EndpointsWatcher) Run() {
e.endpointInformer.run()
e.serviceInformer.run()
}
func (e *EndpointsWatcher) Stop() {
e.endpointInformer.stop()
e.serviceInformer.stop()
}
// Subscribe to a service and service port.
// The provided listener will be updated each time the address set for the
// given service port is changed.
func (e *EndpointsWatcher) Subscribe(service string, port uint32, listener EndpointsListener) error {
log.Printf("Establishing watch on endpoint %s:%d", service, port)
e.mutex.Lock() // Acquire write-lock on servicePorts data structure.
defer e.mutex.Unlock()
svc, ok := (*e.servicePorts)[service]
if !ok {
ports := make(map[uint32]*servicePort)
(*e.servicePorts)[service] = &ports
svc = &ports
}
svcPort, ok := (*svc)[port]
if !ok {
var err error
svcPort, err = newServicePort(service, port, e)
if err != nil {
return err
}
(*svc)[port] = svcPort
}
svcPort.subscribe(listener)
return nil
}
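// Illustrative only (not part of the original file): a minimal
// EndpointsListener that logs address-set diffs, plus a hypothetical helper
// showing how a caller might subscribe once an EndpointsWatcher has been
// built with NewEndpointsWatcher and started with Run(). The service name and
// port below are made up.
type loggingListener struct{}

func (loggingListener) Update(add []common.TcpAddress, remove []common.TcpAddress) {
log.Printf("endpoints added: %s, removed: %s", util.AddressesToString(add), util.AddressesToString(remove))
}

func exampleSubscribe(watcher *EndpointsWatcher) error {
// The listener immediately receives the current address set, then a diff
// for every subsequent endpoints or service change.
return watcher.Subscribe("default/books", 8888, loggingListener{})
}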
func (e *EndpointsWatcher) Unsubscribe(service string, port uint32, listener EndpointsListener) error {
log.Printf("Stopping watch on endpoint %s:%d", service, port)
e.mutex.Lock() // Acquire write-lock on servicePorts data structure.
defer e.mutex.Unlock()
svc, ok := (*e.servicePorts)[service]
if !ok {
return fmt.Errorf("Cannot unsubscribe from %s: not subscribed", service)
}
svcPort, ok := (*svc)[port]
if !ok {
return fmt.Errorf("Cannot unsubscribe from %s: not subscribed", service)
}
if !svcPort.unsubscribe(listener) {
return fmt.Errorf("Cannot unsubscribe from %s: not subscribed", service)
}
return nil
}
/// informer ///
// Watches a Kubernetes resource type
type informer struct {
informer *cache.Controller
store *cache.Store
stopCh chan struct{}
}
func (i *informer) run() {
go i.informer.Run(i.stopCh)
}
func (i *informer) stop() {
i.stopCh <- struct{}{}
}
func newEndpointInformer(clientset *kubernetes.Clientset, servicePorts *map[string]*map[uint32]*servicePort, mutex *sync.RWMutex) informer {
endpointsListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "endpoints", v1.NamespaceAll, fields.Everything())
store, inf := cache.NewInformer(
endpointsListWatcher,
&v1.Endpoints{},
time.Duration(0),
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
endpoints := obj.(*v1.Endpoints)
if endpoints.Namespace == kubeSystem {
return
}
id := endpoints.Namespace + "/" + endpoints.Name
mutex.RLock()
service, ok := (*servicePorts)[id]
if ok {
for _, sp := range *service {
sp.updateEndpoints(endpoints)
}
}
mutex.RUnlock()
},
DeleteFunc: func(obj interface{}) {
endpoints := obj.(*v1.Endpoints)
if endpoints.Namespace == kubeSystem {
return
}
id := endpoints.Namespace + "/" + endpoints.Name
mutex.RLock()
service, ok := (*servicePorts)[id]
if ok {
for _, sp := range *service {
sp.deleteEndpoints()
}
}
mutex.RUnlock()
},
UpdateFunc: func(oldObj, newObj interface{}) {
endpoints := newObj.(*v1.Endpoints)
if endpoints.Namespace == kubeSystem {
return
}
id := endpoints.Namespace + "/" + endpoints.Name
mutex.RLock()
service, ok := (*servicePorts)[id]
if ok {
for _, sp := range *service {
sp.updateEndpoints(endpoints)
}
}
mutex.RUnlock()
},
},
)
stopCh := make(chan struct{})
return informer{
informer: inf,
store: &store,
stopCh: stopCh,
}
}
func newServiceInformer(clientset *kubernetes.Clientset, servicePorts *map[string]*map[uint32]*servicePort, mutex *sync.RWMutex) informer {
serviceListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "services", v1.NamespaceAll, fields.Everything())
store, inf := cache.NewInformer(
serviceListWatcher,
&v1.Service{},
time.Duration(0),
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
service := obj.(*v1.Service)
if service.Namespace == kubeSystem {
return
}
id := service.Namespace + "/" + service.Name
mutex.RLock()
svc, ok := (*servicePorts)[id]
if ok {
for _, sp := range *svc {
sp.updateService(service)
}
}
mutex.RUnlock()
},
DeleteFunc: func(obj interface{}) {
service := obj.(*v1.Service)
if service.Namespace == kubeSystem {
return
}
id := service.Namespace + "/" + service.Name
mutex.RLock()
svc, ok := (*servicePorts)[id]
if ok {
for _, sp := range *svc {
sp.deleteService()
}
}
mutex.RUnlock()
},
UpdateFunc: func(oldObj, newObj interface{}) {
service := newObj.(*v1.Service)
if service.Namespace == kubeSystem {
return
}
id := service.Namespace + "/" + service.Name
mutex.RLock()
svc, ok := (*servicePorts)[id]
if ok {
for _, sp := range *svc {
sp.updateService(service)
}
}
mutex.RUnlock()
},
},
)
stopCh := make(chan struct{})
return informer{
informer: inf,
store: &store,
stopCh: stopCh,
}
}
/// servicePort ///
// servicePort represents a service along with a port number. Multiple
// listeners may be subscribed to a servicePort. servicePort maintains the
// current state of the address set and publishes diffs to all listeners when
// updates come from either the endpoints API or the service API.
type servicePort struct {
// these values are immutable properties of the servicePort
service string
port uint32 // service port
// these values hold the current state of the servicePort and are mutable
listeners []EndpointsListener
endpoints *v1.Endpoints
targetPort intstr.IntOrString
addresses []common.TcpAddress
// This mutex protects against concurrent modification of the listeners slice
// as well as preventing updates from occurring while the listeners slice is being
// modified.
mutex sync.Mutex
}
func newServicePort(service string, port uint32, e *EndpointsWatcher) (*servicePort, error) {
endpoints := &v1.Endpoints{}
obj, exists, err := (*e.endpointInformer.store).GetByKey(service)
if err != nil {
return nil, err
}
if exists {
endpoints = obj.(*v1.Endpoints)
}
// Use the service port as the target port by default.
targetPort := intstr.FromInt(int(port))
obj, exists, err = (*e.serviceInformer.store).GetByKey(service)
if err != nil {
return nil, err
}
if exists {
// If a port spec exists with a matching service port, use that port spec's
// target port.
for _, portSpec := range obj.(*v1.Service).Spec.Ports {
if portSpec.Port == int32(port) && portSpec.TargetPort != intstr.FromInt(0) {
targetPort = portSpec.TargetPort
break
}
}
}
addrs := addresses(endpoints, targetPort)
return &servicePort{
service: service,
listeners: make([]EndpointsListener, 0),
port: port,
endpoints: endpoints,
targetPort: targetPort,
addresses: addrs,
mutex: sync.Mutex{},
}, nil
}
func (sp *servicePort) updateEndpoints(newEndpoints *v1.Endpoints) {
sp.mutex.Lock()
defer sp.mutex.Unlock()
newAddresses := addresses(newEndpoints, sp.targetPort)
log.Debugf("Updating %s:%d to %s", sp.service, sp.port, util.AddressesToString(newAddresses))
add, remove := diff(sp.addresses, newAddresses)
for _, listener := range sp.listeners {
listener.Update(add, remove)
}
sp.endpoints = newEndpoints
sp.addresses = newAddresses
}
func (sp *servicePort) deleteEndpoints() {
sp.mutex.Lock()
defer sp.mutex.Unlock()
log.Debugf("Deleting %s:%d", sp.service, sp.port)
for _, listener := range sp.listeners {
listener.Update(nil, sp.addresses)
}
sp.endpoints = &v1.Endpoints{}
sp.addresses = []common.TcpAddress{}
}
func (sp *servicePort) updateService(newService *v1.Service) {
sp.mutex.Lock()
defer sp.mutex.Unlock()
// Use the service port as the target port by default.
newTargetPort := intstr.FromInt(int(sp.port))
// If a port spec exists with a matching service port, use that port spec's
// target port.
for _, portSpec := range newService.Spec.Ports {
if portSpec.Port == int32(sp.port) && portSpec.TargetPort != intstr.FromInt(0) {
newTargetPort = portSpec.TargetPort
break
}
}
if newTargetPort != sp.targetPort {
newAddresses := addresses(sp.endpoints, newTargetPort)
log.Debugf("Updating %s:%d to %s", sp.service, sp.port, util.AddressesToString(newAddresses))
add, remove := diff(sp.addresses, newAddresses)
for _, listener := range sp.listeners {
listener.Update(add, remove)
}
sp.targetPort = newTargetPort
sp.addresses = newAddresses
}
}
func (sp *servicePort) deleteService() {
sp.mutex.Lock()
defer sp.mutex.Unlock()
newTargetPort := intstr.FromInt(int(sp.port))
if newTargetPort != sp.targetPort {
newAddresses := addresses(sp.endpoints, newTargetPort)
log.Debugf("Updating %s:%d to %s", sp.service, sp.port, util.AddressesToString(newAddresses))
add, remove := diff(sp.addresses, newAddresses)
for _, listener := range sp.listeners {
listener.Update(add, remove)
}
sp.targetPort = newTargetPort
sp.addresses = newAddresses
}
}
func (sp *servicePort) subscribe(listener EndpointsListener) {
sp.mutex.Lock()
defer sp.mutex.Unlock()
sp.listeners = append(sp.listeners, listener)
listener.Update(sp.addresses, nil)
}
// true iff the listener was found and removed
func (sp *servicePort) unsubscribe(listener EndpointsListener) bool {
sp.mutex.Lock()
defer sp.mutex.Unlock()
for i, item := range sp.listeners {
if item == listener {
// delete the item from the slice
sp.listeners[i] = sp.listeners[len(sp.listeners)-1]
sp.listeners[len(sp.listeners)-1] = nil
sp.listeners = sp.listeners[:len(sp.listeners)-1]
return true
}
}
return false
}
/// helpers ///
func addresses(endpoints *v1.Endpoints, port intstr.IntOrString) []common.TcpAddress {
ips := make([]common.IPAddress, 0)
for _, subset := range endpoints.Subsets {
for _, address := range subset.Addresses {
ip, err := util.ParseIPV4(address.IP)
if err != nil {
log.Printf("%s is not a valid IP address", address.IP)
continue
}
ips = append(ips, *ip)
}
}
var portNum uint32
if port.Type == intstr.String {
outer:
for _, subset := range endpoints.Subsets {
for _, p := range subset.Ports {
if p.Name == port.StrVal {
portNum = uint32(p.Port)
break outer
}
}
}
if portNum == 0 {
log.Printf("Port %s not found", port.StrVal)
return []common.TcpAddress{}
}
} else if port.Type == intstr.Int {
portNum = uint32(port.IntVal)
}
addrs := make([]common.TcpAddress, len(ips))
for i := range ips {
addrs[i] = common.TcpAddress{
Ip: &ips[i],
Port: portNum,
}
}
return addrs
}
func diff(old []common.TcpAddress, new []common.TcpAddress) ([]common.TcpAddress, []common.TcpAddress) {
addSet := make(map[string]common.TcpAddress)
removeSet := make(map[string]common.TcpAddress)
for _, addr := range new {
addSet[util.AddressToString(&addr)] = addr
}
for _, addr := range old {
delete(addSet, util.AddressToString(&addr))
removeSet[util.AddressToString(&addr)] = addr
}
for _, addr := range new {
delete(removeSet, util.AddressToString(&addr))
}
add := make([]common.TcpAddress, 0)
for _, addr := range addSet {
add = append(add, addr)
}
remove := make([]common.TcpAddress, 0)
for _, addr := range removeSet {
remove = append(remove, addr)
}
return add, remove
}
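// Worked example (hypothetical addresses): old = {10.0.0.1:80, 10.0.0.2:80}
// and new = {10.0.0.2:80, 10.0.0.3:80} yield add = {10.0.0.3:80} and
// remove = {10.0.0.1:80}; addresses present in both sets appear in neither
// returned slice.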

93
controller/k8s/pods.go Normal file
View File

@ -0,0 +1,93 @@
package k8s
import (
"fmt"
"time"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/fields"
"k8s.io/client-go/tools/cache"
)
type PodIndex struct {
indexer *cache.Indexer
reflector *cache.Reflector
stopCh chan struct{}
}
func NewPodIndex(clientset *kubernetes.Clientset, index cache.IndexFunc) (*PodIndex, error) {
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"index": index})
podListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", v1.NamespaceAll, fields.Everything())
reflector := cache.NewReflector(
podListWatcher,
&v1.Pod{},
indexer,
time.Duration(0),
)
stopCh := make(chan struct{})
return &PodIndex{
indexer: &indexer,
reflector: reflector,
stopCh: stopCh,
}, nil
}
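// Illustrative only (not part of the original file): a hypothetical
// cache.IndexFunc that keys pods by their IP, so that
// GetPodsByIndex("10.1.2.3") returns the pod(s) currently holding that
// address. Any function of this shape can be passed to NewPodIndex.
func examplePodIPIndex(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return nil, fmt.Errorf("%v is not a Pod", obj)
}
return []string{pod.Status.PodIP}, nil
}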
func (p *PodIndex) Run() {
p.reflector.RunUntil(p.stopCh)
}
func (p *PodIndex) Stop() {
p.stopCh <- struct{}{}
}
func (p *PodIndex) GetPod(key string) (*v1.Pod, error) {
item, exists, err := (*p.indexer).GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("No pod exists for key %s", key)
}
pod, ok := item.(*v1.Pod)
if !ok {
return nil, fmt.Errorf("%v is not a Pod", item)
}
return pod, nil
}
func (p *PodIndex) GetPodsByIndex(key string) ([]*v1.Pod, error) {
items, err := (*p.indexer).ByIndex("index", key)
if err != nil {
return nil, err
}
pods := make([]*v1.Pod, len(items))
for i, item := range items {
pod, ok := item.(*v1.Pod)
if !ok {
return nil, fmt.Errorf("%v is not a Pod", item)
}
pods[i] = pod
}
return pods, nil
}
func (p *PodIndex) List() ([]*v1.Pod, error) {
pods := make([]*v1.Pod, 0)
items := (*p.indexer).List()
for _, pod := range items {
pod, ok := pod.(*v1.Pod)
if !ok {
return nil, fmt.Errorf("%v is not a Pod", pod)
}
pods = append(pods, pod)
}
return pods, nil
}

View File

@ -0,0 +1,80 @@
package k8s
import (
"fmt"
"time"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/apis/extensions/v1beta1"
"k8s.io/client-go/pkg/fields"
"k8s.io/client-go/tools/cache"
)
type ReplicaSetStore struct {
store *cache.Store
reflector *cache.Reflector
stopCh chan struct{}
}
func NewReplicaSetStore(clientset *kubernetes.Clientset) (*ReplicaSetStore, error) {
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
replicaSetListWatcher := cache.NewListWatchFromClient(clientset.ExtensionsV1beta1().RESTClient(), "replicasets", v1.NamespaceAll, fields.Everything())
reflector := cache.NewReflector(
replicaSetListWatcher,
&v1beta1.ReplicaSet{},
store,
time.Duration(0),
)
stopCh := make(chan struct{})
return &ReplicaSetStore{
store: &store,
reflector: reflector,
stopCh: stopCh,
}, nil
}
func (p *ReplicaSetStore) Run() {
p.reflector.RunUntil(p.stopCh)
}
func (p *ReplicaSetStore) Stop() {
p.stopCh <- struct{}{}
}
func (p *ReplicaSetStore) GetReplicaSet(key string) (*v1beta1.ReplicaSet, error) {
item, exists, err := (*p.store).GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("No ReplicaSet exists for name %s", key)
}
rs, ok := item.(*v1beta1.ReplicaSet)
if !ok {
return nil, fmt.Errorf("%v is not a ReplicaSet", item)
}
return rs, nil
}
func (p *ReplicaSetStore) GetDeploymentForPod(pod *v1.Pod) (string, error) {
namespace := pod.Namespace
if len(pod.GetOwnerReferences()) == 0 {
return "", fmt.Errorf("Pod %s has no owner", pod.Name)
}
parent := pod.GetOwnerReferences()[0]
if parent.Kind == "ReplicaSet" {
rsName := namespace + "/" + parent.Name
rs, err := p.GetReplicaSet(rsName)
if err != nil {
return "", err
}
return namespace + "/" + rs.GetOwnerReferences()[0].Name, nil
}
return namespace + "/" + parent.Name, nil
}
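// Example (hypothetical names): a pod in namespace "default" owned by
// ReplicaSet "books-7d9f6bd9c", which is in turn owned by Deployment "books",
// resolves to "default/books". A pod owned directly by any other controller
// resolves to "default/<owner name>".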

View File

@ -0,0 +1,63 @@
package main
import (
"context"
"flag"
"io"
log "github.com/sirupsen/logrus"
"math/rand"
"time"
"github.com/runconduit/conduit/controller/destination"
common "github.com/runconduit/conduit/controller/gen/common"
"github.com/runconduit/conduit/controller/util"
)
// This is a throwaway script for testing the destination service
func main() {
rand.Seed(time.Now().UnixNano())
addr := flag.String("addr", ":8089", "address of proxy api")
flag.Parse()
client, conn, err := destination.NewClient(*addr)
if err != nil {
log.Fatal(err.Error())
}
defer conn.Close()
req := &common.Destination{
Scheme: "k8s",
Path: "strest-server.default.svc.cluster.local:8888",
}
rsp, err := client.Get(context.Background(), req)
if err != nil {
log.Fatal(err.Error())
}
for {
update, err := rsp.Recv()
if err == io.EOF {
break
}
if err != nil {
log.Fatal(err.Error())
}
if add := update.GetAdd(); add != nil {
log.Println("Add:")
for _, addr := range add.Addrs {
log.Printf("- %s:%d", util.IPToString(addr.Addr.GetIp()), addr.Addr.Port)
}
log.Println()
}
if remove := update.GetRemove(); remove != nil {
log.Println("Remove:")
for _, addr := range remove.Addrs {
log.Printf("- %s:%d", util.IPToString(addr.GetIp()), addr.Port)
}
log.Println()
}
}
}

View File

@ -0,0 +1,205 @@
package main
import (
"context"
"flag"
"math/rand"
"net/http"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/runconduit/conduit/controller/api/proxy"
common "github.com/runconduit/conduit/controller/gen/common"
pb "github.com/runconduit/conduit/controller/gen/proxy/telemetry"
"github.com/runconduit/conduit/controller/k8s"
"github.com/runconduit/conduit/controller/util"
"google.golang.org/grpc/codes"
k8sV1 "k8s.io/client-go/pkg/api/v1"
)
/* A simple script for posting simulated telemetry data to the proxy api */
var (
responseCodes = []codes.Code{
codes.OK,
codes.PermissionDenied,
codes.Unavailable,
}
streamSummary = &pb.StreamSummary{
BytesSent: 12345,
DurationMs: 10,
FramesSent: 4,
}
ports = []uint32{3333, 6262}
)
func randomPort() uint32 {
return ports[rand.Intn(len(ports))]
}
func randomCount() uint32 {
return uint32(rand.Int31n(100) + 1)
}
func randomLatencies(count uint32) (latencies []*pb.Latency) {
for i := uint32(0); i < count; i++ {
// The latency value with precision to 100µs.
latencyValue := uint32(rand.Int31n(int32(time.Second/(time.Millisecond * 10))))
latency := pb.Latency{
Latency: latencyValue,
Count: rand.Uint32(),
}
latencies = append(latencies, &latency)
}
return
}
func randomEos(count uint32) (eos []*pb.EosScope) {
responseCodes := make(map[uint32]uint32)
for i := uint32(0); i < count; i++ {
responseCodes[randomResponseCode()] += 1
}
for code, streamCount := range responseCodes {
eos = append(eos, &pb.EosScope{
Ctx: &pb.EosCtx{End: &pb.EosCtx_GrpcStatusCode{GrpcStatusCode: code}},
Streams: streamSummaries(streamCount),
})
}
return
}
func randomResponseCode() uint32 {
return uint32(responseCodes[rand.Intn(len(responseCodes))])
}
func streamSummaries(count uint32) (summaries []*pb.StreamSummary) {
for i := uint32(0); i < count; i++ {
summaries = append(summaries, streamSummary)
}
return
}
func stringToIp(str string) *common.IPAddress {
octets := make([]uint8, 0)
for _, num := range strings.Split(str, ".") {
oct, _ := strconv.Atoi(num)
octets = append(octets, uint8(oct))
}
return util.IPV4(octets[0], octets[1], octets[2], octets[3])
}
func podIndexFunc(obj interface{}) ([]string, error) {
return nil, nil
}
func randomPod(pods []*k8sV1.Pod, prvPodIp *common.IPAddress) *common.IPAddress {
var podIp *common.IPAddress
for {
if podIp != nil {
break
}
randomPod := pods[rand.Intn(len(pods))]
podIp = stringToIp(randomPod.Status.PodIP)
if prvPodIp != nil && podIp.GetIpv4() == prvPodIp.GetIpv4() {
podIp = nil
}
}
return podIp
}
func main() {
rand.Seed(time.Now().UnixNano())
addr := flag.String("addr", ":8086", "address of proxy api")
requestCount := flag.Int("requests", 0, "number of api requests to make (default: infinite)")
sleep := flag.Duration("sleep", time.Second, "time to sleep between requests")
maxPods := flag.Int("max-pods", 0, "total number of pods to simulate (default unlimited)")
kubeConfigPath := flag.String("kubeconfig", "", "path to kube config - required")
flag.Parse()
if len(flag.Args()) > 0 {
log.Fatal("Unable to parse command line arguments")
return
}
client, conn, err := proxy.NewTelemetryClient(*addr)
if err != nil {
log.Fatal(err.Error())
}
defer conn.Close()
clientSet, err := k8s.NewClientSet(*kubeConfigPath)
if err != nil {
log.Fatal(err.Error())
}
pods, err := k8s.NewPodIndex(clientSet, podIndexFunc)
if err != nil {
log.Fatal(err.Error())
}
pods.Run()
// Wait for the pod index to sync; otherwise pods.List() returns an empty list.
time.Sleep(2 * time.Second)
podList, err := pods.List()
if err != nil {
log.Fatal(err.Error())
}
allPods := make([]*k8sV1.Pod, 0)
for _, pod := range podList {
if pod.Status.PodIP != "" && (*maxPods == 0 || len(allPods) < *maxPods) {
allPods = append(allPods, pod)
}
}
for i := 0; (*requestCount == 0) || (i < *requestCount); i++ {
count := randomCount()
sourceIp := randomPod(allPods, nil)
targetIp := randomPod(allPods, sourceIp)
req := &pb.ReportRequest{
Process: &pb.Process{
ScheduledInstance: "hello-1mfa0",
ScheduledNamespace: "people",
},
ClientTransports: []*pb.ClientTransport{},
ServerTransports: []*pb.ServerTransport{},
Proxy: pb.ReportRequest_INBOUND,
Requests: []*pb.RequestScope{
&pb.RequestScope{
Ctx: &pb.RequestCtx{
SourceIp: sourceIp,
TargetAddr: &common.TcpAddress{
Ip: targetIp,
Port: randomPort(),
},
Authority: "world.greeting:7778",
Method: &common.HttpMethod{Type: &common.HttpMethod_Registered_{Registered: common.HttpMethod_GET}},
Path: "/World/Greeting",
},
Count: count,
Responses: []*pb.ResponseScope{
&pb.ResponseScope{
Ctx: &pb.ResponseCtx{
HttpStatusCode: http.StatusOK,
},
ResponseLatencies: randomLatencies(count),
Ends: randomEos(count),
},
},
},
},
}
_, err = client.Report(context.Background(), req)
if err != nil {
log.Fatal(err.Error())
}
time.Sleep(*sleep)
}
}

15
controller/tap/client.go Normal file
View File

@ -0,0 +1,15 @@
package tap
import (
pb "github.com/runconduit/conduit/controller/gen/controller/tap"
"google.golang.org/grpc"
)
func NewClient(addr string) (pb.TapClient, *grpc.ClientConn, error) {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, nil, err
}
return pb.NewTapClient(conn), conn, nil
}

362
controller/tap/server.go Normal file
View File

@ -0,0 +1,362 @@
package tap
import (
"context"
"fmt"
"io"
"net"
"strings"
"time"
common "github.com/runconduit/conduit/controller/gen/common"
pb "github.com/runconduit/conduit/controller/gen/controller/tap"
proxy "github.com/runconduit/conduit/controller/gen/proxy/tap"
public "github.com/runconduit/conduit/controller/gen/public"
"github.com/runconduit/conduit/controller/k8s"
"github.com/runconduit/conduit/controller/util"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"k8s.io/client-go/pkg/api/v1"
)
var tapInterval = 10 * time.Second
type (
server struct {
tapPort uint
// We use the Kubernetes API to find the IP addresses of pods to tap
replicaSets *k8s.ReplicaSetStore
pods *k8s.PodIndex
}
)
func (s *server) Tap(req *public.TapRequest, stream pb.Tap_TapServer) error {
// TODO: Allow a configurable aperture A.
// If the target contains more than A pods, select A of them at random.
var pods []*v1.Pod
var targetName string
switch target := req.Target.(type) {
case *public.TapRequest_Pod:
targetName = target.Pod
pod, err := s.pods.GetPod(target.Pod)
if err != nil {
return err
}
pods = []*v1.Pod{pod}
case *public.TapRequest_Deployment:
targetName = target.Deployment
var err error
pods, err = (*s.pods).GetPodsByIndex(target.Deployment)
if err != nil {
return err
}
}
log.Printf("Tapping %d pods for target %s", len(pods), targetName)
events := make(chan *common.TapEvent)
go func() { // Stop sending back events if the request is cancelled
<-stream.Context().Done()
close(events)
}()
// divide the rps evenly between all pods to tap
rpsPerPod := req.MaxRps / float32(len(pods))
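// For example (illustrative numbers), MaxRps = 100 spread across 4 tapped
// pods gives each per-pod tap a budget of 25 rps.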
for _, pod := range pods {
// initiate a tap on the pod
match, err := makeMatch(req)
if err != nil {
return nil
}
go s.tapProxy(stream.Context(), rpsPerPod, match, pod.Status.PodIP, events)
}
// read events from the taps and send them back
for event := range events {
err := stream.Send(event)
if err != nil {
return err
}
}
return nil
}
func validatePort(port uint32) error {
if port > 65535 {
return fmt.Errorf("Port number of range: %d", port)
}
return nil
}
func makeMatch(req *public.TapRequest) (*proxy.ObserveRequest_Match, error) {
matches := make([]*proxy.ObserveRequest_Match, 0)
if req.FromIP != "" {
ip, err := util.ParseIPV4(req.FromIP)
if err != nil {
return nil, err
}
matches = append(matches, &proxy.ObserveRequest_Match{
Match: &proxy.ObserveRequest_Match_Source{
Source: &proxy.ObserveRequest_Match_Tcp{
Match: &proxy.ObserveRequest_Match_Tcp_Netmask_{
Netmask: &proxy.ObserveRequest_Match_Tcp_Netmask{
Ip: ip,
Mask: 32,
},
},
},
},
})
}
if req.FromPort != 0 {
if err := validatePort(req.FromPort); err != nil {
return nil, err
}
matches = append(matches, &proxy.ObserveRequest_Match{
Match: &proxy.ObserveRequest_Match_Source{
Source: &proxy.ObserveRequest_Match_Tcp{
Match: &proxy.ObserveRequest_Match_Tcp_Ports{
Ports: &proxy.ObserveRequest_Match_Tcp_PortRange{
Min: req.FromPort,
},
},
},
},
})
}
if req.ToIP != "" {
ip, err := util.ParseIPV4(req.ToIP)
if err != nil {
return nil, err
}
matches = append(matches, &proxy.ObserveRequest_Match{
Match: &proxy.ObserveRequest_Match_Destination{
Destination: &proxy.ObserveRequest_Match_Tcp{
Match: &proxy.ObserveRequest_Match_Tcp_Netmask_{
Netmask: &proxy.ObserveRequest_Match_Tcp_Netmask{
Ip: ip,
Mask: 32,
},
},
},
},
})
}
if req.ToPort != 0 {
if err := validatePort(req.ToPort); err != nil {
return nil, err
}
matches = append(matches, &proxy.ObserveRequest_Match{
Match: &proxy.ObserveRequest_Match_Destination{
Destination: &proxy.ObserveRequest_Match_Tcp{
Match: &proxy.ObserveRequest_Match_Tcp_Ports{
Ports: &proxy.ObserveRequest_Match_Tcp_PortRange{
Min: req.ToPort,
},
},
},
},
})
}
if req.Scheme != "" {
matches = append(matches, &proxy.ObserveRequest_Match{
Match: &proxy.ObserveRequest_Match_Http_{
Http: &proxy.ObserveRequest_Match_Http{
Match: &proxy.ObserveRequest_Match_Http_Scheme{
Scheme: parseScheme(req.Scheme),
},
},
},
})
}
if req.Method != "" {
matches = append(matches, &proxy.ObserveRequest_Match{
Match: &proxy.ObserveRequest_Match_Http_{
Http: &proxy.ObserveRequest_Match_Http{
Match: &proxy.ObserveRequest_Match_Http_Method{
Method: parseMethod(req.Method),
},
},
},
})
}
// exact match
if req.Authority != "" {
matches = append(matches, &proxy.ObserveRequest_Match{
Match: &proxy.ObserveRequest_Match_Http_{
Http: &proxy.ObserveRequest_Match_Http{
Match: &proxy.ObserveRequest_Match_Http_Authority{
Authority: &proxy.ObserveRequest_Match_Http_StringMatch{
Match: &proxy.ObserveRequest_Match_Http_StringMatch_Exact{
Exact: req.Authority,
},
},
},
},
},
})
}
// prefix match
if req.Path != "" {
matches = append(matches, &proxy.ObserveRequest_Match{
Match: &proxy.ObserveRequest_Match_Http_{
Http: &proxy.ObserveRequest_Match_Http{
Match: &proxy.ObserveRequest_Match_Http_Path{
Path: &proxy.ObserveRequest_Match_Http_StringMatch{
Match: &proxy.ObserveRequest_Match_Http_StringMatch_Prefix{
Prefix: req.Path,
},
},
},
},
},
})
}
return &proxy.ObserveRequest_Match{
Match: &proxy.ObserveRequest_Match_All{
All: &proxy.ObserveRequest_Match_Seq{
Matches: matches,
},
},
}, nil
}
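// All of the individual matches built above are combined under a Match_All
// sequence, so an event is tapped only if it satisfies every filter the
// request supplied (source/destination address and port, scheme, method,
// authority, and path).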
// TODO: validate scheme
func parseScheme(scheme string) *common.Scheme {
value, ok := common.Scheme_Registered_value[strings.ToUpper(scheme)]
if ok {
return &common.Scheme{
Type: &common.Scheme_Registered_{
Registered: common.Scheme_Registered(value),
},
}
}
return &common.Scheme{
Type: &common.Scheme_Unregistered{
Unregistered: strings.ToUpper(scheme),
},
}
}
// TODO: validate method
func parseMethod(method string) *common.HttpMethod {
value, ok := common.HttpMethod_Registered_value[strings.ToUpper(method)]
if ok {
return &common.HttpMethod{
Type: &common.HttpMethod_Registered_{
Registered: common.HttpMethod_Registered(value),
},
}
}
return &common.HttpMethod{
Type: &common.HttpMethod_Unregistered{
Unregistered: strings.ToUpper(method),
},
}
}
// Tap a pod.
// This method will run continuously until an error is encountered or the
// request is cancelled via the context. Thus it should be called as a
// go-routine.
// To limit the rps to maxRps, this method calls Observe on the pod with a limit
// of maxRps * 10s at most once per 10s window. If this limit is reached in
// less than 10s, we sleep until the end of the window before calling Observe
// again.
func (s *server) tapProxy(ctx context.Context, maxRps float32, match *proxy.ObserveRequest_Match, addr string, events chan *common.TapEvent) {
tapAddr := fmt.Sprintf("%s:%d", addr, s.tapPort)
log.Printf("Establishing tap on %s", tapAddr)
conn, err := grpc.DialContext(ctx, tapAddr, grpc.WithInsecure())
if err != nil {
log.Println(err)
return
}
client := proxy.NewTapClient(conn)
req := &proxy.ObserveRequest{
Limit: uint32(maxRps * float32(tapInterval.Seconds())),
Match: match,
}
for { // Request loop
windowStart := time.Now()
windowEnd := windowStart.Add(tapInterval)
rsp, err := client.Observe(ctx, req)
if err != nil {
log.Println(err)
return
}
for { // Stream loop
event, err := rsp.Recv()
if err == io.EOF {
break
}
if err != nil {
log.Println(err)
return
}
events <- event
}
if time.Now().Before(windowEnd) {
time.Sleep(time.Until(windowEnd))
}
}
}
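// Worked example (illustrative numbers): with maxRps = 5 and tapInterval = 10s,
// each Observe call requests at most Limit = uint32(5 * 10) = 50 events. If the
// proxy delivers all 50 events after only 3s, the loop sleeps the remaining 7s
// before issuing the next Observe, keeping the long-run rate near 5 events/s
// per tapped pod.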
func NewServer(addr string, tapPort uint, kubeconfig string) (*grpc.Server, net.Listener, error) {
clientSet, err := k8s.NewClientSet(kubeconfig)
if err != nil {
return nil, nil, err
}
replicaSets, err := k8s.NewReplicaSetStore(clientSet)
if err != nil {
return nil, nil, err
}
replicaSets.Run()
// index pods by deployment
deploymentIndex := func(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return nil, fmt.Errorf("Object is not a Pod")
}
deployment, err := replicaSets.GetDeploymentForPod(pod)
return []string{deployment}, err
}
pods, err := k8s.NewPodIndex(clientSet, deploymentIndex)
if err != nil {
return nil, nil, err
}
pods.Run()
lis, err := net.Listen("tcp", addr)
if err != nil {
return nil, nil, err
}
s := util.NewGrpcServer()
srv := server{
tapPort: tapPort,
replicaSets: replicaSets,
pods: pods,
}
pb.RegisterTapServer(s, &srv)
// TODO: register shutdown hook to call pods.Stop() and replicaSets.Stop()
return s, lis, nil
}

View File

@ -0,0 +1,15 @@
package telemetry
import (
pb "github.com/runconduit/conduit/controller/gen/controller/telemetry"
"google.golang.org/grpc"
)
func NewClient(addr string) (pb.TelemetryClient, *grpc.ClientConn, error) {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, nil, err
}
return pb.NewTelemetryClient(conn), conn, nil
}

View File

@ -0,0 +1,402 @@
package telemetry
import (
"errors"
"fmt"
"net"
"strconv"
"sync"
"time"
common "github.com/runconduit/conduit/controller/gen/common"
read "github.com/runconduit/conduit/controller/gen/controller/telemetry"
write "github.com/runconduit/conduit/controller/gen/proxy/telemetry"
public "github.com/runconduit/conduit/controller/gen/public"
"github.com/runconduit/conduit/controller/k8s"
"github.com/runconduit/conduit/controller/util"
"github.com/golang/protobuf/ptypes/duration"
"github.com/prometheus/client_golang/api"
"github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
k8sV1 "k8s.io/client-go/pkg/api/v1"
)
var (
requestLabels = []string{"source", "target", "source_deployment", "target_deployment", "method", "path"}
requestsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "requests_total",
Help: "Total number of requests",
},
requestLabels,
)
responseLabels = append(requestLabels, []string{"http_status_code", "classification"}...)
responsesTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "responses_total",
Help: "Total number of responses",
},
responseLabels,
)
responseLatencyBuckets = append(append(append(append(append(
prometheus.LinearBuckets(1, 1, 5),
prometheus.LinearBuckets(10, 10, 5)...),
prometheus.LinearBuckets(100, 100, 5)...),
prometheus.LinearBuckets(1000, 1000, 5)...),
prometheus.LinearBuckets(10000, 10000, 5)...),
)
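// The nested appends above produce 25 histogram buckets spanning 1ms to 50s:
// 1..5, 10..50, 100..500, 1000..5000, and 10000..50000 milliseconds.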
responseLatency = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "response_latency_ms",
Help: "Response latency in milliseconds",
Buckets: responseLatencyBuckets,
},
requestLabels,
)
)
func init() {
prometheus.MustRegister(requestsTotal)
prometheus.MustRegister(responsesTotal)
prometheus.MustRegister(responseLatency)
}
type (
server struct {
prometheusApi v1.API
pods *k8s.PodIndex
replicaSets *k8s.ReplicaSetStore
instances instanceCache
ignoredNamespaces []string
}
instanceCache struct {
sync.RWMutex
cache map[string]time.Time
}
)
func (c *instanceCache) update(id string) {
c.Lock()
defer c.Unlock()
c.cache[id] = time.Now()
}
func (c *instanceCache) list() []string {
c.RLock()
defer c.RUnlock()
instances := make([]string, 0)
for name := range c.cache {
instances = append(instances, name)
}
return instances
}
func (c *instanceCache) purgeOldInstances() {
c.Lock()
defer c.Unlock()
expiry := time.Now().Add(-10 * time.Minute)
for name, time := range c.cache {
if time.Before(expiry) {
delete(c.cache, name)
}
}
}
func cleanupOldInstances(srv *server) {
for range time.Tick(10 * time.Second) {
srv.instances.purgeOldInstances()
}
}
func podIPKeyFunc(obj interface{}) ([]string, error) {
if pod, ok := obj.(*k8sV1.Pod); ok {
return []string{pod.Status.PodIP}, nil
}
return nil, fmt.Errorf("Object is not a Pod")
}
func NewServer(addr, prometheusUrl string, ignoredNamespaces []string, kubeconfig string) (*grpc.Server, net.Listener, error) {
prometheusClient, err := api.NewClient(api.Config{Address: prometheusUrl})
if err != nil {
return nil, nil, err
}
clientSet, err := k8s.NewClientSet(kubeconfig)
if err != nil {
return nil, nil, err
}
pods, err := k8s.NewPodIndex(clientSet, podIPKeyFunc)
if err != nil {
return nil, nil, err
}
pods.Run()
replicaSets, err := k8s.NewReplicaSetStore(clientSet)
if err != nil {
return nil, nil, err
}
replicaSets.Run()
srv := &server{
prometheusApi: v1.NewAPI(prometheusClient),
pods: pods,
replicaSets: replicaSets,
instances: instanceCache{cache: make(map[string]time.Time, 0)},
ignoredNamespaces: ignoredNamespaces,
}
go cleanupOldInstances(srv)
lis, err := net.Listen("tcp", addr)
if err != nil {
return nil, nil, err
}
s := util.NewGrpcServer()
read.RegisterTelemetryServer(s, srv)
write.RegisterTelemetryServer(s, srv)
// TODO: register shutdown hook to call pods.Stop() and replicaSets.Stop()
return s, lis, nil
}
func (s *server) Query(ctx context.Context, req *read.QueryRequest) (*read.QueryResponse, error) {
start := time.Unix(0, req.StartMs*int64(time.Millisecond))
end := time.Unix(0, req.EndMs*int64(time.Millisecond))
step, err := time.ParseDuration(req.Step)
if err != nil {
return nil, err
}
queryRange := v1.Range{Start: start, End: end, Step: step}
res, err := s.prometheusApi.QueryRange(ctx, req.Query, queryRange)
if err != nil {
return nil, err
}
if res.Type() != model.ValMatrix {
return nil, fmt.Errorf("Unexpected query result type: %s", res.Type())
}
samples := make([]*read.Sample, 0)
for _, s := range res.(model.Matrix) {
samples = append(samples, convertSampleStream(s))
}
return &read.QueryResponse{Metrics: samples}, nil
}
func (s *server) ListPods(ctx context.Context, req *read.ListPodsRequest) (*public.ListPodsResponse, error) {
pods, err := s.pods.List()
if err != nil {
return nil, err
}
podList := make([]*public.Pod, 0)
for _, pod := range pods {
if s.shouldIgnore(pod) {
continue
}
deployment, err := s.replicaSets.GetDeploymentForPod(pod)
if err != nil {
log.Println(err.Error())
deployment = ""
}
name := pod.Namespace + "/" + pod.Name
updated, added := s.instances.cache[name]
status := string(pod.Status.Phase)
if pod.DeletionTimestamp != nil {
status = "Terminating"
}
plane, _ := pod.Labels["conduit.io/plane"]
controller, _ := pod.Labels["conduit.io/controller"]
item := &public.Pod{
Name: pod.Namespace + "/" + pod.Name,
Deployment: deployment,
Status: status,
PodIP: pod.Status.PodIP,
Added: added,
ControllerNamespace: controller,
ControlPlane: plane == "control",
}
if added {
since := time.Since(updated)
item.SinceLastReport = &duration.Duration{
Seconds: int64(since / time.Second),
Nanos: int32(since % time.Second),
}
}
podList = append(podList, item)
}
return &public.ListPodsResponse{Pods: podList}, nil
}
func (s *server) Report(ctx context.Context, req *write.ReportRequest) (*write.ReportResponse, error) {
id := "unknown"
if req.Process != nil {
id = req.Process.ScheduledNamespace + "/" + req.Process.ScheduledInstance
}
log := log.WithFields(log.Fields{"id": id})
log.Debugf("received report with %d requests", len(req.Requests))
s.instances.update(id)
for _, requestScope := range req.Requests {
if requestScope.Ctx == nil {
return nil, errors.New("RequestCtx is required")
}
requestLabels := s.requestLabelsFor(requestScope)
requestsTotal.With(requestLabels).Add(float64(requestScope.Count))
latencyStat := responseLatency.With(requestLabels)
for _, responseScope := range requestScope.Responses {
if responseScope.Ctx == nil {
return nil, errors.New("ResponseCtx is required")
}
for _, latency := range responseScope.ResponseLatencies {
// The latencies as received from the proxy are represented as an array of
// latency values in tenths of a millisecond, and a count of the number of
// times a request of that latency was observed.
// First, convert the latency value from tenths of a ms to ms and
// convert from u32 to f64.
latencyMs := float64(latency.Latency * 10)
for i := uint32(0); i < latency.Count; i++ {
// Then, report that latency value to Prometheus a number of times
// equal to the count reported by the proxy.
latencyStat.Observe(latencyMs)
}
}
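// For example, a single entry {Latency: 30, Count: 2} results in two
// Observe(300) calls against the histogram for this request's label set.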
for _, eosScope := range responseScope.Ends {
if eosScope.Ctx == nil {
return nil, errors.New("EosCtx is required")
}
responseLabels := s.requestLabelsFor(requestScope)
for k, v := range responseLabelsFor(responseScope, eosScope) {
responseLabels[k] = v
}
responsesTotal.With(responseLabels).Add(float64(len(eosScope.Streams)))
}
}
}
return &write.ReportResponse{}, nil
}
func (s *server) shouldIgnore(pod *k8sV1.Pod) bool {
for _, namespace := range s.ignoredNamespaces {
if pod.Namespace == namespace {
return true
}
}
return false
}
func (s *server) getNameAndDeployment(ip *common.IPAddress) (string, string) {
ipStr := util.IPToString(ip)
pods, err := s.pods.GetPodsByIndex(ipStr)
if err != nil {
log.Printf("Cannot get pod for IP %s: %s", ipStr, err)
return "", ""
}
if len(pods) == 0 {
log.Printf("No pod exists for IP %s", ipStr)
return "", ""
}
if len(pods) > 1 {
log.Printf("Multiple pods found for IP %s", ipStr)
return "", ""
}
pod := pods[0]
name := pod.Namespace + "/" + pod.Name
deployment, err := (*s.replicaSets).GetDeploymentForPod(pod)
if err != nil {
log.Printf("Cannot get deployment for pod %s: %s", pod.Name, err)
return name, ""
}
return name, deployment
}
func methodString(method *common.HttpMethod) string {
switch method.Type.(type) {
case *common.HttpMethod_Registered_:
return method.GetRegistered().String()
case *common.HttpMethod_Unregistered:
return method.GetUnregistered()
}
return ""
}
func convertSampleStream(sample *model.SampleStream) *read.Sample {
labels := make(map[string]string)
for k, v := range sample.Metric {
labels[string(k)] = string(v)
}
values := make([]*read.SampleValue, 0)
for _, s := range sample.Values {
v := read.SampleValue{
Value: float64(s.Value),
TimestampMs: int64(s.Timestamp),
}
values = append(values, &v)
}
return &read.Sample{Values: values, Labels: labels}
}
func (s *server) requestLabelsFor(requestScope *write.RequestScope) prometheus.Labels {
sourceName, sourceDeployment := s.getNameAndDeployment(requestScope.Ctx.SourceIp)
targetName, targetDeployment := s.getNameAndDeployment(requestScope.Ctx.TargetAddr.Ip)
return prometheus.Labels{
"source": sourceName,
"source_deployment": sourceDeployment,
"target": targetName,
"target_deployment": targetDeployment,
"method": methodString(requestScope.Ctx.Method),
"path": requestScope.Ctx.Path,
}
}
func responseLabelsFor(responseScope *write.ResponseScope, eosScope *write.EosScope) prometheus.Labels {
httpStatusCode := strconv.Itoa(int(responseScope.Ctx.HttpStatusCode))
classification := "failure"
switch x := eosScope.Ctx.End.(type) {
case *write.EosCtx_GrpcStatusCode:
if x.GrpcStatusCode == uint32(codes.OK) {
classification = "success"
}
}
return prometheus.Labels{
"http_status_code": httpStatusCode,
"classification": classification,
}
}
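// For example, an end-of-stream carrying GrpcStatusCode 0 (codes.OK) is
// labeled classification="success"; any other gRPC code, a reset error code,
// or an Other end is labeled "failure".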

16
controller/util/grpc.go Normal file
View File

@ -0,0 +1,16 @@
package util
import (
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
)
// NewGrpcServer returns a gRPC server pre-configured with Prometheus metrics interceptors.
func NewGrpcServer() *grpc.Server {
server := grpc.NewServer(
grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
)
grpc_prometheus.Register(server)
return server
}

61
controller/util/util.go Normal file
View File

@ -0,0 +1,61 @@
package util
import (
"fmt"
"strconv"
"strings"
pb "github.com/runconduit/conduit/controller/gen/common"
)
func AddressToString(addr *pb.TcpAddress) string {
octets := decodeIPToOctets(addr.GetIp().GetIpv4())
return fmt.Sprintf("%d.%d.%d.%d:%d", octets[0], octets[1], octets[2], octets[3], addr.GetPort())
}
func AddressesToString(addrs []pb.TcpAddress) string {
addrStrs := make([]string, len(addrs))
for i := range addrs {
addrStrs[i] = AddressToString(&addrs[i])
}
return "[" + strings.Join(addrStrs, ",") + "]"
}
func IPToString(ip *pb.IPAddress) string {
octets := decodeIPToOctets(ip.GetIpv4())
return fmt.Sprintf("%d.%d.%d.%d", octets[0], octets[1], octets[2], octets[3])
}
func IPV4(a1, a2, a3, a4 uint8) *pb.IPAddress {
ip := (uint32(a1) << 24) | (uint32(a2) << 16) | (uint32(a3) << 8) | uint32(a4)
return &pb.IPAddress{
Ip: &pb.IPAddress_Ipv4{
Ipv4: ip,
},
}
}
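// For example, IPV4(10, 1, 2, 3) packs the octets big-endian into 0x0A010203
// (167838211 decimal); decodeIPToOctets below reverses that encoding.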
func ParseIPV4(ip string) (*pb.IPAddress, error) {
segments := strings.Split(ip, ".")
if len(segments) != 4 {
return nil, fmt.Errorf("Invalid IP address: %s", ip)
}
octets := [4]uint8{0, 0, 0, 0}
for i, segment := range segments {
octet, err := strconv.ParseUint(segment, 10, 8)
if err != nil {
return nil, fmt.Errorf("Invalid IP segment: %s", segment)
}
octets[i] = uint8(octet)
}
return IPV4(octets[0], octets[1], octets[2], octets[3]), nil
}
func decodeIPToOctets(ip uint32) [4]uint8 {
return [4]uint8{
uint8(ip >> 24 & 255),
uint8(ip >> 16 & 255),
uint8(ip >> 8 & 255),
uint8(ip & 255),
}
}

3
controller/version.go Normal file
View File

@ -0,0 +1,3 @@
package controller
var Version = "v0.0.1"

101
docker-compose.yml Normal file
View File

@ -0,0 +1,101 @@
version: '3'
services:
destination:
build:
context: .
dockerfile: controller/Dockerfile
ports:
- "8089:8089"
- "9999:9999"
volumes:
- ~/.kube/config:/kubeconfig:ro
command:
- destination
- -addr=:8089
- -metrics-addr=:9999
- -kubeconfig=/kubeconfig
tap:
build:
context: .
dockerfile: controller/Dockerfile
ports:
- "8088:8088"
- "9998:9998"
volumes:
- ~/.kube/config:/kubeconfig:ro
command:
- tap
- -addr=:8088
- -metrics-addr=:9998
- -kubeconfig=/kubeconfig
telemetry:
build:
context: .
dockerfile: controller/Dockerfile
ports:
- "8087:8087"
- "9997:9997"
volumes:
- ~/.kube/config:/kubeconfig:ro
command:
- telemetry
- -addr=:8087
- -metrics-addr=:9997
- -prometheus-url=http://prometheus:9090
- -kubeconfig=/kubeconfig
proxy-api:
build:
context: .
dockerfile: controller/Dockerfile
ports:
- "8086:8086"
- "9996:9996"
command:
- proxy-api
- -addr=:8086
- -metrics-addr=:9996
- -telemetry-addr=telemetry:8087
- -destination-addr=destination:8089
public-api:
build:
context: .
dockerfile: controller/Dockerfile
ports:
- "8085:8085"
- "9995:9995"
command:
- public-api
- -addr=:8085
- -metrics-addr=:9995
- -telemetry-addr=telemetry:8087
- -tap-addr=tap:8088
web:
build:
context: .
dockerfile: web/Dockerfile
ports:
- "8084:8084"
- "9994:9994"
command:
- -addr=:8084
- -metrics-addr=:9994
- -api-addr=public-api:8085
- -static-dir=/dist
- -template-dir=/templates
prometheus:
image: prom/prometheus:v1.8.1
ports:
- 9090:9090
volumes:
- .prometheus.dev.yml:/etc/prometheus/prometheus.yml:ro
command:
- -config.file=/etc/prometheus/prometheus.yml
- -storage.local.memory-chunks=500000
- -storage.local.retention=6h

View File

@ -0,0 +1,8 @@
[package]
name = "futures-mpsc-lossy"
version = "0.0.1"
authors = ["Oliver Gould <ver@buoyant.io>"]
publish = false
[dependencies]
futures = "0.1"

View File

@ -0,0 +1,143 @@
extern crate futures;
use std::fmt;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use futures::{Async, AsyncSink, Poll, Sink, StartSend, Stream};
use futures::sync::mpsc;
/// Creates a lossy multi-producer single-consumer channel.
///
/// This channel is bounded but provides no mechanism for backpressure. Though it returns
/// items that it cannot accept, it does not notify producers when capacity becomes available.
///
/// This allows producers to send events on this channel without obtaining a mutable
/// reference to a sender.
pub fn channel<T>(capacity: usize) -> (Sender<T>, Receiver<T>) {
let (tx, rx) = mpsc::unbounded();
let capacity = Arc::new(AtomicUsize::new(capacity));
let s = Sender {
tx,
capacity: capacity.clone(),
};
let r = Receiver {
rx,
capacity,
};
(s, r)
}
pub struct Receiver<T> {
rx: mpsc::UnboundedReceiver<T>,
capacity: Arc<AtomicUsize>,
}
pub struct Sender<T> {
tx: mpsc::UnboundedSender<T>,
capacity: Arc<AtomicUsize>,
}
/// Indicates that the channel was not able to send an item. Subsequent items may still
/// be sent iff the error is `Rejected` (the channel was at capacity, not disconnected).
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum SendError<T> {
NoReceiver(T),
Rejected(T),
}
// ===== impl Receiver =====
impl<T> Stream for Receiver<T> {
type Item = T;
type Error = ();
fn poll(&mut self) -> Poll<Option<T>, Self::Error> {
match self.rx.poll() {
Ok(Async::Ready(Some(v))) => {
self.capacity.fetch_add(1, Ordering::SeqCst);
Ok(Async::Ready(Some(v)))
}
res => res,
}
}
}
// NB: `rx` does not have a `Debug` impl.
impl<T> fmt::Debug for Receiver<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Receiver")
.field("capacity", &self.capacity)
.finish()
}
}
// ===== impl Sender =====
impl<T> Sender<T> {
pub fn lossy_send(&self, v: T) -> Result<(), SendError<T>> {
loop {
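            // Try to claim one unit of capacity; if another sender raced us and
            // changed the count, re-read it and retry (failing once it reaches zero).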
let cap = self.capacity.load(Ordering::SeqCst);
if cap == 0 {
return Err(SendError::Rejected(v));
}
let ret = self.capacity
.compare_and_swap(cap, cap - 1, Ordering::SeqCst);
if ret == cap {
break;
}
}
self.tx.unbounded_send(v)
.map_err(|se| SendError::NoReceiver(se.into_inner()))
}
}
/// Drops events instead of exerting backpressure
impl<T> Sink for Sender<T> {
type SinkItem = T;
type SinkError = SendError<T>;
fn start_send(&mut self, item: T) -> StartSend<Self::SinkItem, Self::SinkError> {
self.lossy_send(item)
.map(|_| AsyncSink::Ready)
}
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
Ok(().into())
}
}
// NB: `Clone` cannot be derived because `T` doesn't have to implement `Clone`.
impl<T> Clone for Sender<T> {
fn clone(&self) -> Self {
Sender {
tx: self.tx.clone(),
capacity: self.capacity.clone(),
}
}
}
// NB: `tx` does not have a `Debug` impl.
impl<T> fmt::Debug for Sender<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Sender")
.field("capacity", &self.capacity)
.finish()
}
}
// ===== impl SendError =====
impl<T> SendError<T> {
pub fn into_inner(self) -> T {
match self {
SendError::NoReceiver(v) |
SendError::Rejected(v) => v
}
}
}

110
proto/common/common.proto Normal file
View File

@ -0,0 +1,110 @@
syntax = "proto3";
package conduit.common;
import "google/protobuf/duration.proto";
message HttpMethod {
enum Registered {
GET = 0;
POST = 1;
PUT = 2;
DELETE = 3;
PATCH = 4;
OPTIONS = 5;
CONNECT = 6;
HEAD = 7;
TRACE = 8;
}
oneof type {
Registered registered = 1;
string unregistered = 2;
}
}
message Scheme {
enum Registered {
HTTP = 0;
HTTPS = 1;
}
oneof type {
Registered registered = 1;
string unregistered = 2;
}
}
message IPAddress {
oneof ip {
fixed32 ipv4 = 1;
IPv6 ipv6 = 2;
}
}
message IPv6 {
fixed64 first = 1; // hextets 1-4
fixed64 last = 2; // hextets 5-8
}
message TcpAddress {
IPAddress ip = 1;
uint32 port = 2;
}
message Destination {
string scheme = 1; // such as "DNS" or "K8S"
string path = 2;
}
message TapEvent {
TcpAddress source = 1;
TcpAddress target = 2;
oneof event {
Http http = 3;
}
message Http {
oneof event {
RequestInit request_init = 1;
ResponseInit response_init = 2;
ResponseEnd response_end = 3;
}
message StreamId {
// A randomized base (stable across a process's runtime)
uint32 base = 1;
// A stream id unique within the lifetime of `base`.
uint64 stream = 2;
}
message RequestInit {
StreamId id = 1;
HttpMethod method = 2;
Scheme scheme = 3;
string authority = 4;
string path = 5;
// TODO headers
}
message ResponseInit {
StreamId id = 1;
google.protobuf.Duration since_request_init = 2;
uint32 http_status = 3;
}
message ResponseEnd {
StreamId id = 1;
google.protobuf.Duration since_request_init = 2;
google.protobuf.Duration since_response_init = 3;
uint64 response_bytes = 4;
uint32 grpc_status = 5;
}
}
}

View File

@ -0,0 +1,10 @@
syntax = "proto3";
package conduit.controller.tap;
import "common/common.proto";
import "public/api.proto";
service Tap {
rpc Tap(public.TapRequest) returns (stream common.TapEvent) {}
}

View File

@ -0,0 +1,37 @@
syntax = "proto3";
package conduit.controller.telemetry;
import "public/api.proto";
/// Telemetry Service ///
//
// Internal Telemetry Service read API
service Telemetry {
rpc Query(QueryRequest) returns (QueryResponse) {}
rpc ListPods(ListPodsRequest) returns (public.ListPodsResponse) {}
}
message QueryRequest {
string query = 1;
int64 start_ms = 2;
int64 end_ms = 3;
string step = 4;
}
message QueryResponse {
repeated Sample metrics = 1;
}
message Sample {
repeated SampleValue values = 1;
map<string, string> labels = 2;
}
message SampleValue {
double value = 1;
int64 timestamp_ms = 2;
}
message ListPodsRequest {}
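
A QueryRequest corresponds to a Prometheus-style range query: query carries the expression, start_ms and end_ms bound the window in Unix milliseconds, and step sets the resolution. A self-contained Go sketch of filling in such a request follows; the QueryRequest struct here is a hypothetical stand-in for the type protoc would generate from this file, and the metric name is invented for illustration:

package main

import (
    "fmt"
    "time"
)

// QueryRequest mirrors the fields declared in the proto above (stand-in only).
type QueryRequest struct {
    Query   string
    StartMs int64
    EndMs   int64
    Step    string
}

func main() {
    end := time.Now()
    start := end.Add(-10 * time.Minute)

    req := QueryRequest{
        Query:   `sum(rate(requests_total[1m])) by (pod)`, // hypothetical metric
        StartMs: start.UnixNano() / int64(time.Millisecond),
        EndMs:   end.UnixNano() / int64(time.Millisecond),
        Step:    "10s",
    }
    fmt.Printf("%+v\n", req)
}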

View File

@ -0,0 +1,117 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
int32 nanos = 2;
}

View File

@ -0,0 +1,44 @@
syntax = "proto3";
package conduit.proxy.destination;
import "common/common.proto";
/// Destination Service ///
//
// This is the service discovery API. Given a destination, this returns a
// weighted set of addresses and address metadata. Can be implemented with DNS
// or lookups against other service discovery backends.
//
// The controller is expected to send an Update every time there is a
// change in service discovery. The controller is also expected to send an
// update at least once every ADDRESS_UPDATE_INTERVAL to indicate that the
// controller is still healthy. If no service discovery updates have taken
// place, the controller can simply send an empty `add`. The controller may
// determine the value of ADDRESS_UPDATE_INTERVAL.
service Destination {
// Given a destination, return all addresses in that destination as a long-
// running stream of updates.
rpc Get(common.Destination) returns (stream Update) {}
}
message Update {
oneof update {
WeightedAddrSet add = 1;
AddrSet remove = 2;
}
}
message AddrSet {
repeated common.TcpAddress addrs = 1;
}
message WeightedAddrSet {
repeated WeightedAddr addrs = 1;
}
message WeightedAddr {
common.TcpAddress addr = 1;
uint32 weight = 3;
}
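
The comments above describe a long-lived stream of add/remove updates, with periodic empty adds serving as liveness heartbeats. A self-contained Go sketch of how a consumer might fold such a stream into a local address set follows; the addr and update types are hypothetical stand-ins for the generated WeightedAddr/Update messages, used only to illustrate the semantics:

package main

import "fmt"

// Hypothetical stand-ins for the generated types.
type addr struct {
    host   string
    port   uint32
    weight uint32
}

type update struct {
    add    []addr // WeightedAddrSet: addresses to add or re-weight
    remove []addr // AddrSet: addresses to remove
}

func main() {
    // Local view of the destination, keyed by "host:port".
    addrs := map[string]addr{}

    updates := []update{
        {add: []addr{{"10.0.1.20", 8080, 1}, {"10.0.1.21", 8080, 1}}},
        {add: []addr{}}, // empty add: a heartbeat, no change to the set
        {remove: []addr{{host: "10.0.1.20", port: 8080}}},
    }

    for _, u := range updates {
        for _, a := range u.add {
            addrs[fmt.Sprintf("%s:%d", a.host, a.port)] = a
        }
        for _, a := range u.remove {
            delete(addrs, fmt.Sprintf("%s:%d", a.host, a.port))
        }
    }

    fmt.Println(len(addrs), "address(es) remaining") // 1
}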

75
proto/proxy/tap/tap.proto Normal file
View File

@ -0,0 +1,75 @@
syntax = "proto3";
package conduit.proxy.tap;
import "common/common.proto";
// A service exposed by proxy instances to set up taps on their traffic.
service Tap {
rpc Observe(ObserveRequest) returns (stream common.TapEvent) {}
}
message ObserveRequest {
// Limits the number of event keys that will be returned by this tap.
uint32 limit = 1;
// Encodes request-matching logic.
Match match = 2;
message Match {
message Seq {
repeated Match matches = 1;
}
oneof match {
Seq all = 1;
Seq any = 2;
Match not = 3;
Tcp source = 4;
Tcp destination = 5;
Http http = 6;
}
message Tcp {
oneof match {
Netmask netmask = 1;
PortRange ports = 3;
}
message Netmask {
common.IPAddress ip = 1;
      uint32 mask = 2;
}
    // If either the minimum or the maximum is not specified, the range collapses to a
    // single discrete port value.
message PortRange {
// Minimum matching port value (inclusive), if specified.
uint32 min = 1;
// Maximum matching port value (inclusive), if specified.
uint32 max = 2;
}
}
message Http {
oneof match {
common.Scheme scheme = 1;
common.HttpMethod method = 3;
StringMatch authority = 2;
StringMatch path = 4;
// TODO Header header = 4;
}
message StringMatch {
oneof match {
string exact = 1;
string prefix = 2;
}
}
}
}
}
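
ObserveRequest.Match is a small boolean combinator tree: all and any take sequences of sub-matches, not negates one, and the leaves match TCP or HTTP properties. A self-contained Go sketch of that evaluation order follows; the match type and leaf closures are hypothetical stand-ins for the generated messages and the proxy's real matchers:

package main

import "fmt"

// match mirrors the proto oneof: exactly one field is expected to be set.
type match struct {
    all  []match     // every sub-match must hold
    any  []match     // at least one sub-match must hold
    not  *match      // the sub-match must not hold
    leaf func() bool // stands in for the Tcp/Http leaf matchers
}

func eval(m match) bool {
    switch {
    case m.all != nil:
        for _, sub := range m.all {
            if !eval(sub) {
                return false
            }
        }
        return true
    case m.any != nil:
        for _, sub := range m.any {
            if eval(sub) {
                return true
            }
        }
        return false
    case m.not != nil:
        return !eval(*m.not)
    default:
        return m.leaf()
    }
}

func main() {
    // (scheme matches) AND NOT (path prefix matches), with hard-coded leaves.
    m := match{all: []match{
        {leaf: func() bool { return true }},
        {not: &match{leaf: func() bool { return false }}},
    }}
    fmt.Println(eval(m)) // true
}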

Some files were not shown because too many files have changed in this diff.