Adds kapply and kstatus packages

This commit is contained in:
Sean Sullivan 2020-01-29 13:42:40 -08:00
parent cca93bf6d9
commit c3a47147a7
32 changed files with 7131 additions and 0 deletions

18
go.mod Normal file
View File

@ -0,0 +1,18 @@
module sigs.k8s.io/cli-utils
go 1.13
require (
github.com/ghodss/yaml v1.0.0
github.com/go-errors/errors v1.0.1
github.com/pkg/errors v0.9.1
github.com/spf13/cobra v0.0.5
github.com/stretchr/testify v1.4.0
k8s.io/api v0.17.2
k8s.io/apimachinery v0.17.2
k8s.io/cli-runtime v0.17.2
k8s.io/client-go v0.17.2
k8s.io/kubectl v0.0.0-20191219154910-1528d4eea6dd
sigs.k8s.io/controller-runtime v0.4.0
sigs.k8s.io/yaml v1.1.0
)

468
go.sum Normal file
View File

@ -0,0 +1,468 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 h1:w3NnFcKR5241cfmQU5ZZAsf0xcpId6mWOupTvJlUX2U=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8=
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48=
k8s.io/api v0.0.0-20191214185829-ca1d04f8b0d3/go.mod h1:itOjKREfmUTvcjantxOsyYU5mbFsU7qUnyUuRfF5+5M=
k8s.io/api v0.17.2 h1:NF1UFXcKN7/OOv1uxdRz3qfra8AHsPav5M93hlV9+Dc=
k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4=
k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4=
k8s.io/apimachinery v0.0.0-20191214185652-442f8fb2f03a/go.mod h1:Ng1IY8TS7sC44KJxT/WUR6qFRfWwahYYYpNXyYRKOCY=
k8s.io/apimachinery v0.0.0-20191216025728-0ee8b4573e3a/go.mod h1:Ng1IY8TS7sC44KJxT/WUR6qFRfWwahYYYpNXyYRKOCY=
k8s.io/apimachinery v0.17.2 h1:hwDQQFbdRlpnnsR64Asdi55GyCaIP/3WQpMmbNBeWr4=
k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
k8s.io/cli-runtime v0.0.0-20191214191754-e6dc6d5c8724/go.mod h1:wzlq80lvjgHW9if6MlE4OIGC86MDKsy5jtl9nxz/IYY=
k8s.io/cli-runtime v0.17.2 h1:YH4txSplyGudvxjhAJeHEtXc7Tr/16clKGfN076ydGk=
k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI=
k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk=
k8s.io/client-go v0.0.0-20191214190045-a32a6f7a3052/go.mod h1:tAaoc/sYuIL0+njJefSAmE28CIcxyaFV4kbIujBlY2s=
k8s.io/client-go v0.0.0-20191219150334-0b8da7416048/go.mod h1:ZEe8ZASDUAuqVGJ+UN0ka0PfaR+b6a6E1PGsSNZRui8=
k8s.io/client-go v0.17.2 h1:ndIfkfXEGrNhLIgkr0+qhRguSD3u6DCmonepn1O6NYc=
k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI=
k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE=
k8s.io/code-generator v0.0.0-20191214185510-0b9b3c99f9f2/go.mod h1:BjGKcoq1MRUmcssvHiSxodCco1T6nVIt4YeCT5CMSao=
k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA=
k8s.io/component-base v0.0.0-20191214190519-d868452632e2 h1:u+imIMbplT23aBROe/dox39S4BJv75bI/irS0Vr0fqY=
k8s.io/component-base v0.0.0-20191214190519-d868452632e2/go.mod h1:wupxkh1T/oUDqyTtcIjiEfpbmIHGm8By/vqpSKC6z8c=
k8s.io/component-base v0.17.2 h1:0XHf+cerTvL9I5Xwn9v+0jmqzGAZI7zNydv4tL6Cw6A=
k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kubectl v0.0.0-20191219154910-1528d4eea6dd h1:nZX5+wEqTu/EBIYjrZlFOA63z4+Zcy96lDkCZPU9a9c=
k8s.io/kubectl v0.0.0-20191219154910-1528d4eea6dd/go.mod h1:9ehGcuUGjXVZh0qbYSB0vvofQw2JQe6c6cO0k4wu/Oo=
k8s.io/kubectl v0.17.2 h1:QZR8Q6lWiVRjwKslekdbN5WPMp53dS/17j5e+oi5XVU=
k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk=
k8s.io/metrics v0.0.0-20191214191643-6b1944c9f765/go.mod h1:5V7rewilItwK0cz4nomU0b3XCcees2Ka5EBYWS1HBeM=
k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw=
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9NPsg=
sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns=
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA=
sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=

205
pkg/apply/applier.go Normal file
View File

@ -0,0 +1,205 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"context"
"time"
"github.com/go-errors/errors"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/kubectl/pkg/cmd/apply"
"k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/scheme"
"sigs.k8s.io/cli-utils/pkg/kstatus/wait"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// newApplier constructs an Applier wired up with the applyOptions and
// statusOptions that are responsible for capturing any command line flags.
// The IOStreams parameter is a holdover from when ApplyOptions printed
// progress directly; output is now produced by a separate printer, with
// the KubectlPrinterAdapter bridging between the two.
func newApplier(factory util.Factory, ioStreams genericclioptions.IOStreams) *Applier {
	a := &Applier{
		factory:       factory,
		ioStreams:     ioStreams,
		statusOptions: NewStatusOptions(),
		applyOptions:  apply.NewApplyOptions(ioStreams),
	}
	return a
}
// resolver defines the interface the applier needs to observe status for
// resources. It is satisfied by *wait.Resolver (see newResolver) and is
// abstracted here so an alternative implementation can be substituted.
type resolver interface {
	WaitForStatusOfObjects(ctx context.Context, objects []wait.KubernetesObject) <-chan wait.Event
}

// Applier performs the step of applying a set of resources into a cluster,
// conditionally waits for all of them to be fully reconciled and finally
// performs prune to clean up any resources that have been deleted.
type Applier struct {
	// factory provides clients and configuration for talking to the cluster.
	factory util.Factory
	// ioStreams carries the in/out/err streams used for output.
	ioStreams genericclioptions.IOStreams
	// applyOptions holds the kubectl apply configuration and flag values.
	applyOptions *apply.ApplyOptions
	// statusOptions holds the flags controlling status waiting
	// (wait, period, timeout).
	statusOptions *StatusOptions
	// resolver observes status of applied objects; assigned in Initialize.
	resolver resolver
}
// Initialize sets up the Applier for actually doing an apply against
// a cluster. This involves validating command line inputs and configuring
// clients for communicating with the cluster.
func (a *Applier) Initialize(cmd *cobra.Command) error {
	// Ensure the grouping (inventory) object is applied first.
	a.applyOptions.PreProcessorFn = PrependGroupingObject(a.applyOptions)
	err := a.applyOptions.Complete(a.factory, cmd)
	if err != nil {
		return errors.WrapPrefix(err, "error setting up ApplyOptions", 1)
	}
	// Default PostProcessor is configured in "Complete" function,
	// so the prune must happen after "Complete".
	a.applyOptions.PostProcessorFn = prune(a.factory, a.applyOptions)
	resolver, err := a.newResolver(a.statusOptions.period)
	if err != nil {
		return errors.WrapPrefix(err, "error creating resolver", 1)
	}
	a.resolver = resolver
	return nil
}
// SetFlags configures the command line flags needed for apply and
// status. This is a temporary solution as we should separate the
// configuration of cobra flags from the Applier.
func (a *Applier) SetFlags(cmd *cobra.Command) {
	a.applyOptions.DeleteFlags.AddFlags(cmd)
	a.applyOptions.RecordFlags.AddFlags(cmd)
	a.applyOptions.PrintFlags.AddFlags(cmd)
	a.statusOptions.AddFlags(cmd)
	// Always overwrite on apply; this is not exposed as a flag.
	a.applyOptions.Overwrite = true
}
// newResolver builds a wait.Resolver for computing resource status. The
// REST configuration and mapper it needs are obtained from the Factory.
func (a *Applier) newResolver(pollInterval time.Duration) (*wait.Resolver, error) {
	restConfig, err := a.factory.ToRESTConfig()
	if err != nil {
		return nil, errors.WrapPrefix(err, "error getting RESTConfig", 1)
	}
	restMapper, err := a.factory.ToRESTMapper()
	if err != nil {
		return nil, errors.WrapPrefix(err, "error getting RESTMapper", 1)
	}
	clusterClient, err := client.New(restConfig, client.Options{
		Scheme: scheme.Scheme,
		Mapper: restMapper,
	})
	if err != nil {
		return nil, errors.WrapPrefix(err, "error creating client", 1)
	}
	return wait.NewResolver(clusterClient, restMapper, pollInterval), nil
}
// Run performs the Apply step. This happens asynchronously with updates
// on progress and any errors are reported back on the event channel.
// Cancelling the operation or setting timeout on how long to wait
// for it complete can be done with the passed in context.
// Note: There isn't currently any way to interrupt the operation
// before all the given resources have been applied to the cluster. Any
// cancellation or timeout will only affect how long we wait for the
// resources to become current.
func (a *Applier) Run(ctx context.Context) <-chan Event {
	ch := make(chan Event)
	go func() {
		defer close(ch)
		// The adapter is used to intercept what is meant to be printing
		// in the ApplyOptions, and instead turn those into events.
		adapter := &KubectlPrinterAdapter{
			ch: ch,
		}
		a.applyOptions.ToPrinter = adapter.toPrinterFunc()
		// This provides us with a slice of all the objects that will be
		// applied to the cluster. The original silently discarded this
		// error; report it on the channel instead of proceeding with a
		// possibly-nil slice.
		infos, err := a.applyOptions.GetObjects()
		if err != nil {
			ch <- Event{
				EventType: ErrorEventType,
				ErrorEvent: ErrorEvent{
					Err: errors.WrapPrefix(err, "error reading resources", 1),
				},
			}
			return
		}
		err = a.applyOptions.Run()
		if err != nil {
			// If we see an error here we just report it on the channel and then
			// give up. Eventually we might be able to determine which errors
			// are fatal and which might allow us to continue.
			ch <- Event{
				EventType: ErrorEventType,
				ErrorEvent: ErrorEvent{
					Err: errors.WrapPrefix(err, "error applying resources", 1),
				},
			}
			return
		}
		if a.statusOptions.wait {
			statusChannel := a.resolver.WaitForStatusOfObjects(ctx, infosToObjects(infos))
			// As long as the statusChannel remains open, we take every statusEvent,
			// wrap it in an Event and send it on the channel.
			for statusEvent := range statusChannel {
				ch <- Event{
					EventType:   StatusEventType,
					StatusEvent: statusEvent,
				}
			}
		}
	}()
	return ch
}
// infosToObjects extracts the underlying unstructured objects from the
// resource.Info slice so they can be handed to the status resolver.
// Note: every Info is assumed to wrap an *unstructured.Unstructured; as
// in the original, the type assertion panics otherwise.
func infosToObjects(infos []*resource.Info) []wait.KubernetesObject {
	if len(infos) == 0 {
		// Preserve the original nil result for empty input.
		return nil
	}
	// Preallocate: the output has exactly one entry per info.
	objects := make([]wait.KubernetesObject, 0, len(infos))
	for _, info := range infos {
		u := info.Object.(*unstructured.Unstructured)
		objects = append(objects, u)
	}
	return objects
}
// EventType determines the type of events that are available.
type EventType string

const (
	// ErrorEventType signals an error encountered during the operation.
	ErrorEventType EventType = "error"
	// ApplyEventType signals progress applying a resource to the cluster.
	ApplyEventType EventType = "apply"
	// StatusEventType signals a status update for an applied resource.
	StatusEventType EventType = "status"
)

// Event is the type of the objects that will be returned through
// the channel that is returned from a call to Run. It contains
// information about progress and errors encountered during
// the process of doing apply, waiting for status and doing a prune.
// Only the payload field selected by EventType is populated.
type Event struct {
	// EventType is the type of event.
	EventType EventType
	// ErrorEvent contains information about any errors encountered.
	ErrorEvent ErrorEvent
	// ApplyEvent contains information about progress pertaining to
	// applying a resource to the cluster.
	ApplyEvent ApplyEvent
	// StatusEvent contains information about the status of one of
	// the applied resources.
	StatusEvent wait.Event
}

// ErrorEvent wraps an error encountered during apply, status wait or prune.
type ErrorEvent struct {
	Err error
}

// ApplyEvent describes the outcome of applying a single object.
type ApplyEvent struct {
	// Operation is the operation reported for the object — presumably the
	// string kubectl apply would have printed (via KubectlPrinterAdapter);
	// confirm against the adapter implementation.
	Operation string
	// Object is the object that was applied.
	Object runtime.Object
}

View File

@ -0,0 +1,63 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"fmt"
"strings"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/cli-runtime/pkg/genericclioptions"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"sigs.k8s.io/cli-utils/pkg/kstatus/wait"
)
// BasicPrinter is a simple implementation that just prints the events
// from the channel in the default format for kubectl.
// We need to support different printers for different output formats.
type BasicPrinter struct {
	// ioStreams supplies the output stream that events are printed to.
	ioStreams genericclioptions.IOStreams
}
// Print outputs the events from the provided channel in a simple
// format on StdOut. As we support other printer implementations
// this should probably be an interface.
// This function will block until the channel is closed.
func (b *BasicPrinter) Print(ch <-chan Event) {
	for event := range ch {
		switch event.EventType {
		case ErrorEventType:
			// Fatal error: print it and exit via kubectl's standard handler.
			cmdutil.CheckErr(event.ErrorEvent.Err)
		case ApplyEventType:
			obj := event.ApplyEvent.Object
			gvk := obj.GetObjectKind().GroupVersionKind()
			// Fall back to a placeholder if the object carries no name.
			name := "<unknown>"
			if acc, err := meta.Accessor(obj); err == nil {
				if n := acc.GetName(); len(n) > 0 {
					name = n
				}
			}
			fmt.Fprintf(b.ioStreams.Out, "%s %s\n", resourceIdToString(gvk.GroupKind(), name), event.ApplyEvent.Operation)
		case StatusEventType:
			statusEvent := event.StatusEvent
			switch statusEvent.Type {
			case wait.ResourceUpdate:
				id := statusEvent.EventResource.ResourceIdentifier
				gk := id.GroupKind
				fmt.Fprintf(b.ioStreams.Out, "%s is %s: %s\n", resourceIdToString(gk, id.Name), statusEvent.EventResource.Status.String(), statusEvent.EventResource.Message)
			case wait.Completed:
				// Fixed grammar in the user-facing message ("has" -> "have").
				fmt.Fprint(b.ioStreams.Out, "all resources have reached the Current status\n")
			case wait.Aborted:
				// Fixed garbled word order in the user-facing message
				// ("failed to the reached" -> "failed to reach the"); also
				// Fprintf with no args replaced by Fprint.
				fmt.Fprint(b.ioStreams.Out, "resources failed to reach the Current status\n")
			}
		}
	}
}
// resourceIdToString renders a GroupKind plus resource name as
// "<group-kind>/<name>", with the group-kind portion lower-cased.
func resourceIdToString(gk schema.GroupKind, name string) string {
	kind := strings.ToLower(gk.String())
	return kind + "/" + name
}

159
pkg/apply/commands.go Normal file
View File

@ -0,0 +1,159 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"context"
"flag"
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/kubectl/pkg/cmd/apply"
"k8s.io/kubectl/pkg/cmd/diff"
"k8s.io/kubectl/pkg/cmd/util"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
)
// GetCommand returns a command from kubectl to install. It builds the
// "resources" parent command, wires up the kubectl config flags and
// factory, and attaches the embedded apply and diff subcommands.
func GetCommand(parent *cobra.Command) *cobra.Command {
	// Alpha gating is currently disabled; kept here for reference.
	// if !commandutil.GetAlphaEnabled() {
	// 	return &cobra.Command{
	// 		Use:   "resources",
	// 		Short: "[Alpha] To enable set KUSTOMIZE_ENABLE_ALPHA_COMMANDS=true",
	// 		Long:  "[Alpha] To enable set KUSTOMIZE_ENABLE_ALPHA_COMMANDS=true",
	// 	}
	// }
	r := &cobra.Command{
		Use:   "resources",
		Short: "[Alpha] Perform cluster operations using declarative configuration",
		Long:  "[Alpha] Perform cluster operations using declarative configuration",
	}
	// configure kubectl dependencies and flags
	flags := r.Flags()
	kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
	kubeConfigFlags.AddFlags(flags)
	matchVersionKubeConfigFlags := util.NewMatchVersionFlags(kubeConfigFlags)
	matchVersionKubeConfigFlags.AddFlags(r.PersistentFlags())
	r.PersistentFlags().AddGoFlagSet(flag.CommandLine)
	f := util.NewFactory(matchVersionKubeConfigFlags)
	// Inherit IO streams from the parent command when present; fall back
	// to the process standard streams otherwise.
	var ioStreams genericclioptions.IOStreams
	if parent != nil {
		ioStreams.In = parent.InOrStdin()
		ioStreams.Out = parent.OutOrStdout()
		ioStreams.ErrOut = parent.ErrOrStderr()
	} else {
		ioStreams.In = os.Stdin
		ioStreams.Out = os.Stdout
		ioStreams.ErrOut = os.Stderr
	}
	// Rebrand the kubectl help text of the embedded subcommands.
	names := []string{"apply", "diff"}
	applyCmd := NewCmdApply("kustomize", f, ioStreams)
	updateHelp(names, applyCmd)
	diffCmd := diff.NewCmdDiff(f, ioStreams)
	updateHelp(names, diffCmd)
	r.AddCommand(applyCmd, diffCmd)
	return r
}
// updateHelp replaces `kubectl` help messaging with `kustomize` help
// messaging in the Short, Long and Example text of the given command,
// once per command name in names.
func updateHelp(names []string, c *cobra.Command) {
	for _, name := range names {
		from := "kubectl " + name
		to := "kustomize " + name
		c.Short = strings.ReplaceAll(c.Short, from, to)
		c.Long = strings.ReplaceAll(c.Long, from, to)
		c.Example = strings.ReplaceAll(c.Example, from, to)
	}
}
// NewCmdApply creates the `apply` command.
// Note: baseName is currently unused in the body; kept for interface
// compatibility with callers.
func NewCmdApply(baseName string, f util.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command {
	applier := newApplier(f, ioStreams)
	printer := &BasicPrinter{
		ioStreams: ioStreams,
	}
	cmd := &cobra.Command{
		Use:                   "apply (-f FILENAME | -k DIRECTORY)",
		DisableFlagsInUseLine: true,
		Short:                 i18n.T("Apply a configuration to a resource by filename or stdin"),
		//Long: applyLong,
		//Example: applyExample,
		Args: cobra.MaximumNArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			if len(args) > 0 {
				// A positional argument is treated as a kustomize directory.
				applier.applyOptions.DeleteFlags.FileNameFlags.Kustomize = &args[0]
			}
			cmdutil.CheckErr(applier.Initialize(cmd))
			// Create a context with the provided timeout from the cobra parameter.
			ctx, cancel := context.WithTimeout(context.Background(), applier.statusOptions.timeout)
			defer cancel()
			// Run the applier. It will return a channel where we can receive updates
			// to keep track of progress and any issues.
			ch := applier.Run(ctx)
			// The printer will print updates from the channel. It will block
			// until the channel is closed.
			printer.Print(ch)
		},
	}
	applier.SetFlags(cmd)
	cmdutil.AddValidateFlags(cmd)
	cmd.Flags().BoolVar(&applier.applyOptions.ServerDryRun, "server-dry-run", applier.applyOptions.ServerDryRun, "If true, request will be sent to server with dry-run flag, which means the modifications won't be persisted. This is an alpha feature and flag.")
	cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it. Warning: --dry-run cannot accurately output the result of merging the local manifest and the server-side data. Use --server-dry-run to get the merged result instead.")
	cmdutil.AddServerSideApplyFlags(cmd)
	return cmd
}
// PrependGroupingObject returns a PreProcessor function that orders the
// objects to apply so the "grouping" object stores the inventory, and it
// is first to be applied.
func PrependGroupingObject(o *apply.ApplyOptions) func() error {
	return func() error {
		if o == nil {
			return fmt.Errorf("ApplyOptions are nil")
		}
		infos, err := o.GetObjects()
		if err != nil {
			return err
		}
		_, exists := findGroupingObject(infos)
		if exists {
			if err := addInventoryToGroupingObj(infos); err != nil {
				return err
			}
			// Bug fix: the original returned the outer err here, which is
			// guaranteed nil at this point, silently swallowing the failure.
			if !sortGroupingObject(infos) {
				return fmt.Errorf("unable to move grouping object to front of objects to apply")
			}
		}
		return nil
	}
}
// prune returns a PostProcessor function that deletes previously applied
// objects which have been omitted in the current apply. The previously
// applied objects are reached through ConfigMap grouping objects.
func prune(f util.Factory, o *apply.ApplyOptions) func() error {
	return func() error {
		pruneOpts, err := NewPruneOptions(f, o)
		if err != nil {
			return err
		}
		return pruneOpts.Prune()
	}
}

View File

@ -0,0 +1,57 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"testing"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/kubectl/pkg/cmd/apply"
)
// TestPrependGroupingObject verifies that the PrependGroupingObject
// pre-processor moves the grouping object to the front of the object
// slice and stores the inventory of the remaining objects in it.
// The pod*Info and copyGroupingInfo fixtures are defined elsewhere in
// this package.
func TestPrependGroupingObject(t *testing.T) {
	tests := []struct {
		infos []*resource.Info
	}{
		{
			// Only the grouping object.
			infos: []*resource.Info{copyGroupingInfo()},
		},
		{
			// Grouping object last.
			infos: []*resource.Info{pod1Info, pod3Info, copyGroupingInfo()},
		},
		{
			// Grouping object in the middle.
			infos: []*resource.Info{pod1Info, pod2Info, copyGroupingInfo(), pod3Info},
		},
	}
	for _, test := range tests {
		applyOptions := createApplyOptions(test.infos)
		f := PrependGroupingObject(applyOptions)
		err := f()
		if err != nil {
			t.Errorf("Error running pre-processor callback: %s", err)
		}
		infos, _ := applyOptions.GetObjects()
		// The pre-processor reorders in place, so the count must not change.
		if len(test.infos) != len(infos) {
			t.Fatalf("Wrong number of objects after prepending grouping object")
		}
		groupingInfo := infos[0]
		if !isGroupingObject(groupingInfo.Object) {
			t.Fatalf("First object is not the grouping object")
		}
		// Every non-grouping object must have been recorded in the inventory.
		inventory, _ := retrieveInventoryFromGroupingObj(infos)
		if len(inventory) != (len(infos) - 1) {
			t.Errorf("Wrong number of inventory items stored in grouping object")
		}
	}
}
// createApplyOptions is a helper that assembles an ApplyOptions instance
// pre-loaded with the passed objects (infos).
func createApplyOptions(infos []*resource.Info) *apply.ApplyOptions {
	o := &apply.ApplyOptions{}
	o.SetObjects(infos)
	return o
}

267
pkg/apply/grouping.go Normal file
View File

@ -0,0 +1,267 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"fmt"
"hash/fnv"
"sort"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/resource"
)
const (
	// GroupingLabel is the label key that identifies a ConfigMap as the
	// grouping (inventory) object.
	GroupingLabel = "cli-utils.sigs.k8s.io/inventory-id"
	// GroupingHash — presumably the key under which a hash of the
	// inventory is stored; its usage is not visible here (TODO confirm).
	GroupingHash = "cli-utils.sigs.k8s.io/inventory-hash"
)
// retrieveGroupingLabel returns the string value of the GroupingLabel
// for the passed object, with surrounding whitespace trimmed. Returns an
// error if the passed object is nil or does not carry the grouping label.
func retrieveGroupingLabel(obj runtime.Object) (string, error) {
	if obj == nil {
		// Error strings follow Go convention now: lower-case, no trailing
		// punctuation or newline (staticcheck ST1005).
		return "", fmt.Errorf("grouping object is nil")
	}
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return "", err
	}
	labels := accessor.GetLabels()
	groupingLabel, exists := labels[GroupingLabel]
	if !exists {
		return "", fmt.Errorf("grouping label does not exist for grouping object: %s", GroupingLabel)
	}
	return strings.TrimSpace(groupingLabel), nil
}
// isGroupingObject reports whether the passed object carries a non-empty
// grouping label.
// TODO(seans3): Check type is ConfigMap.
func isGroupingObject(obj runtime.Object) bool {
	if obj == nil {
		return false
	}
	label, err := retrieveGroupingLabel(obj)
	return err == nil && len(label) > 0
}
// findGroupingObject scans infos for the "Grouping" object (the ConfigMap
// carrying the grouping label). It returns the matching Info and true if
// one is found, or nil and false otherwise.
func findGroupingObject(infos []*resource.Info) (*resource.Info, bool) {
	for i := range infos {
		candidate := infos[i]
		if candidate == nil {
			continue
		}
		if isGroupingObject(candidate.Object) {
			return candidate, true
		}
	}
	return nil, false
}
// sortGroupingObject reorders the infos slice (in place) so the
// grouping object occupies the first position. Returns true if a
// grouping object was found, false otherwise.
func sortGroupingObject(infos []*resource.Info) bool {
	for i, candidate := range infos {
		if candidate == nil || !isGroupingObject(candidate.Object) {
			continue
		}
		// Swap the grouping object into the first slot unless it is
		// already there.
		if i != 0 {
			infos[0], infos[i] = infos[i], infos[0]
		}
		return true
	}
	return false
}
// addInventoryToGroupingObj adds the inventory of all passed objects
// (infos) to the grouping object. Returns an error if a grouping object
// does not exist, if more than one grouping object exists, or if we are
// unable to successfully store the inventory in the grouping object;
// nil otherwise. The grouping object must be in
// unstructured.Unstructured format.
func addInventoryToGroupingObj(infos []*resource.Info) error {
	// Iterate through the objects (infos), creating an Inventory struct
	// as metadata for each object, or if it's the grouping object, store it.
	var groupingInfo *resource.Info
	var groupingObj *unstructured.Unstructured
	inventoryMap := map[string]string{}
	for _, info := range infos {
		obj := info.Object
		if isGroupingObject(obj) {
			// More than one grouping object is an error.
			if groupingObj != nil {
				return fmt.Errorf("cannot apply more than one grouping object")
			}
			var ok bool
			groupingObj, ok = obj.(*unstructured.Unstructured)
			if !ok {
				// Format the offending object, not groupingObj (which is
				// nil here after the failed type assertion).
				return fmt.Errorf("grouping object is not an Unstructured: %#v", obj)
			}
			groupingInfo = info
		} else {
			if obj == nil {
				return fmt.Errorf("creating inventory; object is nil")
			}
			gk := obj.GetObjectKind().GroupVersionKind().GroupKind()
			inventory, err := createInventory(info.Namespace, info.Name, gk)
			if err != nil {
				return err
			}
			// Keys are the inventory strings; values are unused. The map
			// de-duplicates inventory entries.
			inventoryMap[inventory.String()] = ""
		}
	}
	// If we've found the grouping object, store the object metadata inventory
	// in the grouping config map.
	if groupingObj == nil {
		return fmt.Errorf("grouping object not found")
	}
	if len(inventoryMap) > 0 {
		// Adds the inventory map to the ConfigMap "data" section.
		err := unstructured.SetNestedStringMap(groupingObj.UnstructuredContent(),
			inventoryMap, "data")
		if err != nil {
			return err
		}
		// Adds the hash of the inventory strings as an annotation to the
		// grouping object. Inventory strings must be sorted to make hash
		// deterministic.
		inventoryList := mapKeysToSlice(inventoryMap)
		sort.Strings(inventoryList)
		invHash, err := calcInventoryHash(inventoryList)
		if err != nil {
			return err
		}
		// Add the hash as a suffix to the grouping object's name.
		invHashStr := strconv.FormatUint(uint64(invHash), 16)
		if err := addSuffixToName(groupingInfo, invHashStr); err != nil {
			return err
		}
		annotations := groupingObj.GetAnnotations()
		if annotations == nil {
			annotations = map[string]string{}
		}
		annotations[GroupingHash] = invHashStr
		groupingObj.SetAnnotations(annotations)
	}
	return nil
}
// retrieveInventoryFromGroupingObj returns a slice of pointers to the
// inventory metadata stored in the grouping object. This function finds
// the grouping object, then parses the stored resource metadata into
// Inventory structs. Returns an error if there is a problem parsing the
// data into Inventory structs, or if the grouping object is not in
// Unstructured format; nil otherwise. If a grouping object does not
// exist, or it does not have a "data" map, then returns an empty slice
// and no error.
func retrieveInventoryFromGroupingObj(infos []*resource.Info) ([]*Inventory, error) {
	inventory := []*Inventory{}
	groupingInfo, exists := findGroupingObject(infos)
	if !exists {
		return inventory, nil
	}
	groupingObj, ok := groupingInfo.Object.(*unstructured.Unstructured)
	if !ok {
		// Format the actual stored object; groupingObj is nil here after
		// the failed type assertion.
		return inventory, fmt.Errorf("grouping object is not an Unstructured: %#v", groupingInfo.Object)
	}
	invMap, exists, err := unstructured.NestedStringMap(groupingObj.Object, "data")
	if err != nil {
		// Wrap the underlying error instead of discarding it.
		return inventory, fmt.Errorf("error retrieving inventory from grouping object: %v", err)
	}
	if exists {
		for invStr := range invMap {
			inv, err := parseInventory(invStr)
			if err != nil {
				return inventory, err
			}
			inventory = append(inventory, inv)
		}
	}
	return inventory, nil
}
// calcInventoryHash returns an unsigned int32 representing the hash
// of the inventory strings. If there is an error writing bytes to
// the hash, then the error is returned; nil is returned otherwise.
// Used to quickly identify the set of resources in the grouping object.
func calcInventoryHash(inv []string) (uint32, error) {
	hasher := fnv.New32a()
	for _, entry := range inv {
		if _, err := hasher.Write([]byte(entry)); err != nil {
			return 0, err
		}
	}
	return hasher.Sum32(), nil
}
// retrieveInventoryHash takes a grouping object (encapsulated by
// a resource.Info), and returns the string representing the hash
// of the grouping inventory; returns empty string if the grouping
// object is not in Unstructured format, or if the hash annotation
// does not exist.
func retrieveInventoryHash(groupingInfo *resource.Info) string {
	groupingObj, ok := groupingInfo.Object.(*unstructured.Unstructured)
	if !ok {
		return ""
	}
	// Indexing a nil annotations map yields the empty string.
	return groupingObj.GetAnnotations()[GroupingHash]
}
// mapKeysToSlice returns the map keys as a slice of strings
// (in no particular order).
func mapKeysToSlice(m map[string]string) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	return keys
}
// addSuffixToName adds the passed suffix (usually a hash), preceded by
// "-", to the name of the passed object stored in the Info struct.
// Returns an error if the Info is nil, the suffix is empty, the object
// is not accessible as an API object, the name stored in the object
// differs from the name in the Info struct, or the name already carries
// the suffix.
func addSuffixToName(info *resource.Info, suffix string) error {
	if info == nil {
		return fmt.Errorf("nil resource.Info")
	}
	suffix = strings.TrimSpace(suffix)
	if len(suffix) == 0 {
		return fmt.Errorf("passed empty suffix")
	}
	// Propagate the accessor error instead of ignoring it; a nil or
	// non-object info.Object would otherwise cause a nil dereference.
	accessor, err := meta.Accessor(info.Object)
	if err != nil {
		return err
	}
	name := accessor.GetName()
	if name != info.Name {
		return fmt.Errorf("grouping object (%s) and resource.Info (%s) have different names", name, info.Name)
	}
	// Error if name already has the suffix.
	suffix = "-" + suffix
	if strings.HasSuffix(name, suffix) {
		return fmt.Errorf("name already has suffix: %s", name)
	}
	name += suffix
	accessor.SetName(name)
	info.Name = name
	return nil
}

599
pkg/apply/grouping_test.go Normal file
View File

@ -0,0 +1,599 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"fmt"
"testing"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/cli-runtime/pkg/resource"
)
// Names and namespace shared by the test fixtures below.
var testNamespace = "test-grouping-namespace"
var groupingObjName = "test-grouping-obj"
var pod1Name = "pod-1"
var pod2Name = "pod-2"
var pod3Name = "pod-3"
var testGroupingLabel = "test-app-label"

// groupingObj is a ConfigMap carrying GroupingLabel, which marks it as
// a grouping (inventory) object.
var groupingObj = unstructured.Unstructured{
	Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata": map[string]interface{}{
			"name":      groupingObjName,
			"namespace": testNamespace,
			"labels": map[string]interface{}{
				GroupingLabel: testGroupingLabel,
			},
		},
	},
}

// pod1, pod2, pod3 are plain (non-grouping) objects.
var pod1 = unstructured.Unstructured{
	Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata": map[string]interface{}{
			"name":      pod1Name,
			"namespace": testNamespace,
		},
	},
}

var pod1Info = &resource.Info{
	Namespace: testNamespace,
	Name:      pod1Name,
	Object:    &pod1,
}

var pod2 = unstructured.Unstructured{
	Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata": map[string]interface{}{
			"name":      pod2Name,
			"namespace": testNamespace,
		},
	},
}

var pod2Info = &resource.Info{
	Namespace: testNamespace,
	Name:      pod2Name,
	Object:    &pod2,
}

var pod3 = unstructured.Unstructured{
	Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata": map[string]interface{}{
			"name":      pod3Name,
			"namespace": testNamespace,
		},
	},
}

var pod3Info = &resource.Info{
	Namespace: testNamespace,
	Name:      pod3Name,
	Object:    &pod3,
}

// nonUnstructuredGroupingObj carries the grouping label but is a typed
// object, not an *unstructured.Unstructured.
var nonUnstructuredGroupingObj = &corev1.ConfigMap{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: testNamespace,
		Name:      groupingObjName,
		Labels: map[string]string{
			GroupingLabel: "true",
		},
	},
}

var nonUnstructuredGroupingInfo = &resource.Info{
	Namespace: testNamespace,
	Name:      groupingObjName,
	Object:    nonUnstructuredGroupingObj,
}

// nilInfo has a nil Object.
var nilInfo = &resource.Info{
	Namespace: testNamespace,
	Name:      groupingObjName,
	Object:    nil,
}

// groupingObjLabelWithSpace has whitespace around its grouping label
// value, which retrieveGroupingLabel is expected to trim.
var groupingObjLabelWithSpace = unstructured.Unstructured{
	Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata": map[string]interface{}{
			"name":      groupingObjName,
			"namespace": testNamespace,
			"labels": map[string]interface{}{
				GroupingLabel: "\tgrouping-label ",
			},
		},
	},
}
// TestRetrieveGroupingLabel verifies label retrieval, whitespace
// trimming, and the error cases (nil object, non-grouping object).
func TestRetrieveGroupingLabel(t *testing.T) {
	tests := []struct {
		obj           runtime.Object
		groupingLabel string
		isError       bool
	}{
		// Nil grouping object throws error.
		{
			obj:           nil,
			groupingLabel: "",
			isError:       true,
		},
		// Pod is not a grouping object.
		{
			obj:           &pod2,
			groupingLabel: "",
			isError:       true,
		},
		// Retrieves label without preceding/trailing whitespace.
		{
			obj:           &groupingObjLabelWithSpace,
			groupingLabel: "grouping-label",
			isError:       false,
		},
		{
			obj:           &groupingObj,
			groupingLabel: testGroupingLabel,
			isError:       false,
		},
	}
	for _, test := range tests {
		actual, err := retrieveGroupingLabel(test.obj)
		if test.isError && err == nil {
			t.Errorf("Did not receive expected error.\n")
		}
		if !test.isError {
			if err != nil {
				t.Fatalf("Received unexpected error: %s\n", err)
			}
			if test.groupingLabel != actual {
				t.Errorf("Expected grouping label (%s), got (%s)\n", test.groupingLabel, actual)
			}
		}
	}
}
// TestIsGroupingObject verifies grouping-label detection for nil,
// grouping, and non-grouping objects.
func TestIsGroupingObject(t *testing.T) {
	tests := []struct {
		obj        runtime.Object
		isGrouping bool
	}{
		{
			obj:        nil,
			isGrouping: false,
		},
		{
			obj:        &groupingObj,
			isGrouping: true,
		},
		{
			obj:        &pod2,
			isGrouping: false,
		},
	}
	for _, test := range tests {
		grouping := isGroupingObject(test.obj)
		if test.isGrouping && !grouping {
			t.Errorf("Grouping object not identified: %#v", test.obj)
		}
		if !test.isGrouping && grouping {
			// Fixed typo in the failure message ("identifed").
			t.Errorf("Non-grouping object identified as grouping obj: %#v", test.obj)
		}
	}
}
// TestFindGroupingObject verifies that the grouping object is located
// regardless of its position in the infos slice, and that empty, nil,
// and grouping-free slices report "not found".
func TestFindGroupingObject(t *testing.T) {
	tests := []struct {
		infos  []*resource.Info
		exists bool
		name   string
	}{
		{
			infos:  []*resource.Info{},
			exists: false,
			name:   "",
		},
		{
			infos:  []*resource.Info{nil},
			exists: false,
			name:   "",
		},
		{
			infos:  []*resource.Info{copyGroupingInfo()},
			exists: true,
			name:   groupingObjName,
		},
		{
			infos:  []*resource.Info{pod1Info},
			exists: false,
			name:   "",
		},
		{
			infos:  []*resource.Info{pod1Info, pod2Info, pod3Info},
			exists: false,
			name:   "",
		},
		{
			infos:  []*resource.Info{pod1Info, pod2Info, copyGroupingInfo(), pod3Info},
			exists: true,
			name:   groupingObjName,
		},
	}
	for _, test := range tests {
		groupingObj, found := findGroupingObject(test.infos)
		if test.exists && !found {
			t.Errorf("Should have found grouping object")
		}
		if !test.exists && found {
			t.Errorf("Grouping object found, but it does not exist: %#v", groupingObj)
		}
		if test.exists && found && test.name != groupingObj.Name {
			t.Errorf("Grouping object name does not match: %s/%s", test.name, groupingObj.Name)
		}
	}
}
// TestSortGroupingObject verifies the grouping object is moved into
// the first slice position when present, and that slices without a
// grouping object are left unsorted.
func TestSortGroupingObject(t *testing.T) {
	tests := []struct {
		infos  []*resource.Info
		sorted bool
	}{
		{
			infos:  []*resource.Info{},
			sorted: false,
		},
		{
			infos:  []*resource.Info{copyGroupingInfo()},
			sorted: true,
		},
		{
			infos:  []*resource.Info{pod1Info},
			sorted: false,
		},
		{
			infos:  []*resource.Info{pod1Info, pod2Info},
			sorted: false,
		},
		{
			infos:  []*resource.Info{copyGroupingInfo(), pod1Info},
			sorted: true,
		},
		{
			infos:  []*resource.Info{pod1Info, copyGroupingInfo()},
			sorted: true,
		},
		{
			infos:  []*resource.Info{pod1Info, pod2Info, copyGroupingInfo(), pod3Info},
			sorted: true,
		},
		{
			infos:  []*resource.Info{pod1Info, pod2Info, pod3Info, copyGroupingInfo()},
			sorted: true,
		},
		{
			infos:  []*resource.Info{copyGroupingInfo(), pod1Info, pod2Info, pod3Info},
			sorted: true,
		},
	}
	for _, test := range tests {
		wasSorted := sortGroupingObject(test.infos)
		if wasSorted && !test.sorted {
			t.Errorf("Grouping object was sorted, but it shouldn't have been")
		}
		if !wasSorted && test.sorted {
			t.Errorf("Grouping object was NOT sorted, but it should have been")
		}
		if wasSorted {
			// After a successful sort the grouping object must be first.
			first := test.infos[0]
			if !isGroupingObject(first.Object) {
				t.Errorf("Grouping object was not sorted into first position")
			}
		}
	}
}
// TestAddRetrieveInventoryToFromGroupingObject round-trips inventory
// through the grouping object: addInventoryToGroupingObj stores it,
// retrieveInventoryFromGroupingObj reads it back, and the stored
// inventory hash is checked. Error cases cover missing, duplicate,
// non-Unstructured grouping objects and nil objects.
func TestAddRetrieveInventoryToFromGroupingObject(t *testing.T) {
	tests := []struct {
		infos    []*resource.Info
		expected []*Inventory
		isError  bool
	}{
		// No grouping object is an error.
		{
			infos:   []*resource.Info{},
			isError: true,
		},
		// No grouping object is an error.
		{
			infos:   []*resource.Info{pod1Info, pod2Info},
			isError: true,
		},
		// A nil object in the list is an error.
		{
			infos:   []*resource.Info{copyGroupingInfo(), nilInfo},
			isError: true,
		},
		// A non-Unstructured grouping object is an error.
		{
			infos:   []*resource.Info{nonUnstructuredGroupingInfo},
			isError: true,
		},
		// Grouping object without other objects is OK.
		{
			infos:    []*resource.Info{copyGroupingInfo()},
			expected: []*Inventory{},
			isError:  false,
		},
		// More than one grouping object is an error.
		{
			infos:    []*resource.Info{copyGroupingInfo(), copyGroupingInfo()},
			expected: []*Inventory{},
			isError:  true,
		},
		// More than one grouping object is an error.
		{
			infos:    []*resource.Info{copyGroupingInfo(), pod1Info, copyGroupingInfo()},
			expected: []*Inventory{},
			isError:  true,
		},
		// Basic test case: one grouping object, one pod.
		{
			infos: []*resource.Info{copyGroupingInfo(), pod1Info},
			expected: []*Inventory{
				{
					Namespace: testNamespace,
					Name:      pod1Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
			},
			isError: false,
		},
		{
			infos: []*resource.Info{pod1Info, copyGroupingInfo()},
			expected: []*Inventory{
				{
					Namespace: testNamespace,
					Name:      pod1Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
			},
			isError: false,
		},
		{
			infos: []*resource.Info{pod1Info, pod2Info, copyGroupingInfo(), pod3Info},
			expected: []*Inventory{
				{
					Namespace: testNamespace,
					Name:      pod1Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
				{
					Namespace: testNamespace,
					Name:      pod2Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
				{
					Namespace: testNamespace,
					Name:      pod3Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
			},
			isError: false,
		},
		{
			infos: []*resource.Info{pod1Info, pod2Info, pod3Info, copyGroupingInfo()},
			expected: []*Inventory{
				{
					Namespace: testNamespace,
					Name:      pod1Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
				{
					Namespace: testNamespace,
					Name:      pod2Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
				{
					Namespace: testNamespace,
					Name:      pod3Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
			},
			isError: false,
		},
		{
			infos: []*resource.Info{copyGroupingInfo(), pod1Info, pod2Info, pod3Info},
			expected: []*Inventory{
				{
					Namespace: testNamespace,
					Name:      pod1Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
				{
					Namespace: testNamespace,
					Name:      pod2Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
				{
					Namespace: testNamespace,
					Name:      pod3Name,
					GroupKind: schema.GroupKind{
						Group: "",
						Kind:  "Pod",
					},
				},
			},
			isError: false,
		},
	}
	for _, test := range tests {
		err := addInventoryToGroupingObj(test.infos)
		if test.isError && err == nil {
			t.Errorf("Should have produced an error, but returned none.")
		}
		if !test.isError {
			if err != nil {
				t.Fatalf("Received error when expecting none (%s)\n", err)
			}
			retrieved, err := retrieveInventoryFromGroupingObj(test.infos)
			if err != nil {
				t.Fatalf("Error retrieving inventory: %s\n", err)
			}
			if len(test.expected) != len(retrieved) {
				t.Errorf("Expected inventory for %d resources, actual %d",
					len(test.expected), len(retrieved))
			}
			for _, expected := range test.expected {
				found := false
				for _, actual := range retrieved {
					if expected.Equals(actual) {
						found = true
						// Stop scanning once matched (was "continue").
						break
					}
				}
				if !found {
					t.Errorf("Expected inventory (%s) not found", expected)
				}
			}
			// If the grouping object has an inventory, check the
			// grouping object has an inventory hash.
			groupingInfo, exists := findGroupingObject(test.infos)
			if exists && len(test.expected) > 0 {
				invHash := retrieveInventoryHash(groupingInfo)
				if len(invHash) == 0 {
					t.Errorf("Grouping object missing inventory hash")
				}
			}
		}
	}
}
// TestAddSuffixToName verifies the suffix is appended to both the
// stored object's name and the Info name, and that nil Info or
// empty/whitespace suffixes are rejected.
func TestAddSuffixToName(t *testing.T) {
	tests := []struct {
		info     *resource.Info
		suffix   string
		expected string
		isError  bool
	}{
		// Nil info should return error.
		{
			info:     nil,
			suffix:   "",
			expected: "",
			isError:  true,
		},
		// Empty suffix should return error.
		{
			info:     copyGroupingInfo(),
			suffix:   "",
			expected: "",
			isError:  true,
		},
		// Whitespace-only suffix should return error.
		{
			info:     copyGroupingInfo(),
			suffix:   " \t",
			expected: "",
			isError:  true,
		},
		{
			info:     copyGroupingInfo(),
			suffix:   "hashsuffix",
			expected: groupingObjName + "-hashsuffix",
			isError:  false,
		},
	}
	for _, test := range tests {
		// Removed leftover commented-out debug t.Errorf call.
		err := addSuffixToName(test.info, test.suffix)
		if test.isError {
			if err == nil {
				t.Errorf("Should have produced an error, but returned none.")
			}
		}
		if !test.isError {
			if err != nil {
				t.Fatalf("Received error when expecting none (%s)\n", err)
			}
			actualName, err := getObjectName(test.info.Object)
			if err != nil {
				t.Fatalf("Error getting object name: %s", err)
			}
			if actualName != test.info.Name {
				t.Errorf("Object name (%s) does not match info name (%s)\n", actualName, test.info.Name)
			}
			if test.expected != actualName {
				t.Errorf("Expected name (%s), got (%s)\n", test.expected, actualName)
			}
		}
	}
}
// getObjectName extracts the metadata name from an Unstructured
// object; returns an error for any other object representation.
func getObjectName(obj runtime.Object) (string, error) {
	if u, ok := obj.(*unstructured.Unstructured); ok {
		return u.GetName(), nil
	}
	return "", fmt.Errorf("Grouping object is not Unstructured format")
}
// copyGroupingInfo returns a fresh resource.Info wrapping a deep copy
// of the shared grouping object fixture, so tests can mutate it safely.
func copyGroupingInfo() *resource.Info {
	return &resource.Info{
		Namespace: testNamespace,
		Name:      groupingObjName,
		Object:    groupingObj.DeepCopy(),
	}
}

192
pkg/apply/inventory.go Normal file
View File

@ -0,0 +1,192 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"fmt"
"sort"
"strings"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// fieldSeparator separates inventory fields. This string is allowable
// as a ConfigMap key, but it is not allowed as a character in
// a resource name.
const fieldSeparator = "_"

// Inventory organizes and stores the identifying information
// for an object. This struct (as a string) is stored in a
// grouping object to keep track of sets of applied objects.
type Inventory struct {
	// Namespace may be empty for cluster-scoped resources.
	Namespace string
	Name      string
	GroupKind schema.GroupKind
}
// createInventory returns a pointer to an Inventory struct filled
// with the passed values, trimming whitespace from namespace and name.
// Namespace may be empty, but an empty name or empty GroupKind is an
// error.
func createInventory(namespace string,
	name string, gk schema.GroupKind) (*Inventory, error) {
	// Namespace can be empty, but name cannot.
	name = strings.TrimSpace(name)
	if name == "" {
		return nil, fmt.Errorf("empty name for inventory object")
	}
	if gk.Empty() {
		return nil, fmt.Errorf("empty GroupKind for inventory object")
	}
	return &Inventory{
		Namespace: strings.TrimSpace(namespace),
		Name:      name,
		GroupKind: gk,
	}, nil
}
// parseInventory takes a string, splits it into its four
// "_"-separated fields, and returns a pointer to an Inventory struct
// storing those fields. Example inventory string:
//
//   test-namespace_test-name_apps_ReplicaSet
//
// Returns an error if unable to parse and create the Inventory
// struct.
func parseInventory(inv string) (*Inventory, error) {
	parts := strings.Split(inv, fieldSeparator)
	if len(parts) != 4 {
		return nil, fmt.Errorf("unable to decode inventory: %s", inv)
	}
	gk := schema.GroupKind{
		Group: strings.TrimSpace(parts[2]),
		Kind:  strings.TrimSpace(parts[3]),
	}
	return createInventory(parts[0], parts[1], gk)
}
// Equals returns true if the Inventory structs are identical;
// false otherwise (including when other is nil).
func (i *Inventory) Equals(other *Inventory) bool {
	return other != nil && i.String() == other.String()
}
// String creates the string version of the Inventory struct:
// the four fields joined by fieldSeparator.
func (i *Inventory) String() string {
	return strings.Join([]string{
		i.Namespace,
		i.Name,
		i.GroupKind.Group,
		i.GroupKind.Kind,
	}, fieldSeparator)
}
// InventorySet encapsulates a grouping of unique Inventory
// structs. Organizes the Inventory structs with a map,
// which ensures there are no duplicates. Allows set
// operations such as merging sets and subtracting sets.
type InventorySet struct {
	// set maps an Inventory's String() form to the Inventory itself.
	set map[string]*Inventory
}
// NewInventorySet returns a pointer to an InventorySet grouping the
// passed Inventory items (duplicates and nils are dropped).
func NewInventorySet(items []*Inventory) *InventorySet {
	result := &InventorySet{set: map[string]*Inventory{}}
	result.AddItems(items)
	return result
}
// GetItems returns the set of pointers to Inventory structs
// (in no particular order).
func (is *InventorySet) GetItems() []*Inventory {
	items := make([]*Inventory, 0, len(is.set))
	for _, item := range is.set {
		items = append(items, item)
	}
	return items
}
// AddItems adds Inventory structs to the set which
// are not already in the set; nil items are ignored.
func (is *InventorySet) AddItems(items []*Inventory) {
	for _, item := range items {
		if item == nil {
			continue
		}
		is.set[item.String()] = item
	}
}
// DeleteItem removes an Inventory struct from the set if it exists in
// the set. Returns true if the Inventory item was deleted, false if it
// did not exist in the set (or the passed item is nil).
func (is *InventorySet) DeleteItem(item *Inventory) bool {
	if item == nil {
		return false
	}
	// Compute the key once instead of calling String() twice.
	key := item.String()
	if _, found := is.set[key]; !found {
		return false
	}
	delete(is.set, key)
	return true
}
// Merge combines the unique set of Inventory items from the
// current set with the passed "other" set, returning a new
// set without modifying either input. Returns an error if the
// passed set to merge is nil.
func (is *InventorySet) Merge(other *InventorySet) (*InventorySet, error) {
	if other == nil {
		return nil, fmt.Errorf("InventorySet to merge is nil")
	}
	// Copy the current InventorySet into result, then add the others.
	result := NewInventorySet(is.GetItems())
	result.AddItems(other.GetItems())
	return result, nil
}
// Subtract removes the Inventory items in the "other" set from the
// current set, returning a new set. This does not modify the current
// set. Returns an error if the passed set to subtract is nil.
func (is *InventorySet) Subtract(other *InventorySet) (*InventorySet, error) {
	if other == nil {
		return nil, fmt.Errorf("InventorySet to subtract is nil")
	}
	// Copy the current InventorySet into result.
	result := NewInventorySet(is.GetItems())
	// Remove each item in "other" which exists in "result".
	for _, item := range other.GetItems() {
		result.DeleteItem(item)
	}
	return result, nil
}
// Equals returns true if the "other" inventory set is the same
// as this current inventory set. Relies on the fact that the
// inventory items are sorted for the String() function.
func (is *InventorySet) Equals(other *InventorySet) bool {
	return other != nil && is.String() == other.String()
}
// String returns a deterministic, sorted, comma-separated string
// describing the set of Inventory structs.
func (is *InventorySet) String() string {
	strs := make([]string, 0, len(is.set))
	for _, item := range is.GetItems() {
		strs = append(strs, item.String())
	}
	sort.Strings(strs)
	return strings.Join(strs, ", ")
}
// Size returns the number of unique Inventory structs in the set.
func (is *InventorySet) Size() int {
	return len(is.set)
}

557
pkg/apply/inventory_test.go Normal file
View File

@ -0,0 +1,557 @@
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"testing"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// TestCreateInventory verifies whitespace trimming of namespace/name
// and the error cases (empty name, empty GroupKind), checking the
// created Inventory via its String() form.
func TestCreateInventory(t *testing.T) {
	tests := []struct {
		namespace string
		name      string
		gk        schema.GroupKind
		expected  string
		isError   bool
	}{
		{
			namespace: " \n",
			name:      " test-name\t",
			gk: schema.GroupKind{
				Group: "apps",
				Kind:  "ReplicaSet",
			},
			expected: "_test-name_apps_ReplicaSet",
			isError:  false,
		},
		{
			namespace: "test-namespace ",
			name:      " test-name\t",
			gk: schema.GroupKind{
				Group: "apps",
				Kind:  "ReplicaSet",
			},
			expected: "test-namespace_test-name_apps_ReplicaSet",
			isError:  false,
		},
		// Error with empty name.
		{
			namespace: "test-namespace ",
			name:      " \t",
			gk: schema.GroupKind{
				Group: "apps",
				Kind:  "ReplicaSet",
			},
			expected: "",
			isError:  true,
		},
		// Error with empty GroupKind.
		{
			namespace: "test-namespace",
			name:      "test-name",
			gk:        schema.GroupKind{},
			expected:  "",
			isError:   true,
		},
	}
	for _, test := range tests {
		inv, err := createInventory(test.namespace, test.name, test.gk)
		if !test.isError {
			if err != nil {
				t.Errorf("Error creating inventory when it should have worked.")
			} else if test.expected != inv.String() {
				t.Errorf("Expected inventory (%s) != created inventory(%s)\n", test.expected, inv.String())
			}
		}
		if test.isError && err == nil {
			t.Errorf("Should have returned an error in createInventory()")
		}
	}
}
// TestInventoryEqual verifies Inventory.Equals for nil, identical,
// and differing (namespace, kind) inventories. Now also asserts the
// negative direction: previously isEqual=false cases could never fail.
func TestInventoryEqual(t *testing.T) {
	tests := []struct {
		inventory1 *Inventory
		inventory2 *Inventory
		isEqual    bool
	}{
		// "Other" inventory is nil, then not equal.
		{
			inventory1: &Inventory{
				Name: "test-inv",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "Deployment",
				},
			},
			inventory2: nil,
			isEqual:    false,
		},
		// Two equal inventories without a namespace
		{
			inventory1: &Inventory{
				Name: "test-inv",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "Deployment",
				},
			},
			inventory2: &Inventory{
				Name: "test-inv",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "Deployment",
				},
			},
			isEqual: true,
		},
		// Two equal inventories with a namespace
		{
			inventory1: &Inventory{
				Namespace: "test-namespace",
				Name:      "test-inv",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "Deployment",
				},
			},
			inventory2: &Inventory{
				Namespace: "test-namespace",
				Name:      "test-inv",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "Deployment",
				},
			},
			isEqual: true,
		},
		// One inventory with a namespace, one without -- not equal.
		{
			inventory1: &Inventory{
				Name: "test-inv",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "Deployment",
				},
			},
			inventory2: &Inventory{
				Namespace: "test-namespace",
				Name:      "test-inv",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "Deployment",
				},
			},
			isEqual: false,
		},
		// One inventory with a Deployment, one with a ReplicaSet -- not equal.
		{
			inventory1: &Inventory{
				Name: "test-inv",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "Deployment",
				},
			},
			inventory2: &Inventory{
				Name: "test-inv",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "ReplicaSet",
				},
			},
			isEqual: false,
		},
	}
	for _, test := range tests {
		actual := test.inventory1.Equals(test.inventory2)
		if test.isEqual && !actual {
			t.Errorf("Expected inventories equal, but actual is not: (%s)/(%s)\n", test.inventory1, test.inventory2)
		}
		// Added inverse assertion; the original only checked one direction.
		if !test.isEqual && actual {
			t.Errorf("Expected inventories not equal, but actual is: (%s)/(%s)\n", test.inventory1, test.inventory2)
		}
	}
}
// TestParseInventory verifies round-tripping inventory strings into
// Inventory structs, including whitespace trimming and the error case
// of too few "_"-separated fields.
func TestParseInventory(t *testing.T) {
	tests := []struct {
		invStr    string
		inventory *Inventory
		isError   bool
	}{
		{
			invStr: "_test-name_apps_ReplicaSet\t",
			inventory: &Inventory{
				Name: "test-name",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "ReplicaSet",
				},
			},
			isError: false,
		},
		{
			invStr: "test-namespace_test-name_apps_Deployment",
			inventory: &Inventory{
				Namespace: "test-namespace",
				Name:      "test-name",
				GroupKind: schema.GroupKind{
					Group: "apps",
					Kind:  "Deployment",
				},
			},
			isError: false,
		},
		// Not enough fields -- error
		{
			invStr:    "_test-name_apps",
			inventory: &Inventory{},
			isError:   true,
		},
	}
	for _, test := range tests {
		actual, err := parseInventory(test.invStr)
		if !test.isError {
			if err != nil {
				t.Errorf("Error parsing inventory when it should have worked.")
			} else if !test.inventory.Equals(actual) {
				t.Errorf("Expected inventory (%s) != parsed inventory (%s)\n", test.inventory, actual)
			}
		}
		if test.isError && err == nil {
			t.Errorf("Should have returned an error in parseInventory()")
		}
	}
}
// Shared Inventory fixtures for the InventorySet tests below.
var inventory1 = Inventory{
	Namespace: "test-namespace",
	Name:      "test-inv-1",
	GroupKind: schema.GroupKind{
		Group: "apps",
		Kind:  "Deployment",
	},
}

var inventory2 = Inventory{
	Namespace: "test-namespace",
	Name:      "test-inv-2",
	GroupKind: schema.GroupKind{
		Group: "",
		Kind:  "Pod",
	},
}

var inventory3 = Inventory{
	Namespace: "test-namespace",
	Name:      "test-inv-3",
	GroupKind: schema.GroupKind{
		Group: "",
		Kind:  "Service",
	},
}

var inventory4 = Inventory{
	Namespace: "test-namespace",
	Name:      "test-inv-4",
	GroupKind: schema.GroupKind{
		Group: "apps",
		Kind:  "DaemonSet",
	},
}
// TestNewInventorySet verifies construction of an InventorySet and its
// String(), Size(), and GetItems() accessors.
func TestNewInventorySet(t *testing.T) {
	tests := []struct {
		items        []*Inventory
		expectedStr  string
		expectedSize int
	}{
		{
			items:        []*Inventory{},
			expectedStr:  "",
			expectedSize: 0,
		},
		{
			items:        []*Inventory{&inventory1},
			expectedStr:  "test-namespace_test-inv-1_apps_Deployment",
			expectedSize: 1,
		},
		{
			items:        []*Inventory{&inventory1, &inventory2},
			expectedStr:  "test-namespace_test-inv-1_apps_Deployment, test-namespace_test-inv-2__Pod",
			expectedSize: 2,
		},
	}
	for _, test := range tests {
		invSet := NewInventorySet(test.items)
		actualStr := invSet.String()
		actualSize := invSet.Size()
		if test.expectedStr != actualStr {
			t.Errorf("Expected InventorySet (%s), got (%s)\n", test.expectedStr, actualStr)
		}
		if test.expectedSize != actualSize {
			t.Errorf("Expected InventorySet size (%d), got (%d)\n", test.expectedSize, actualSize)
		}
		actualItems := invSet.GetItems()
		if len(test.items) != len(actualItems) {
			t.Errorf("Expected num inventory items (%d), got (%d)\n", len(test.items), len(actualItems))
		}
	}
}
// TestInventorySetAddItems verifies AddItems de-duplicates and grows
// the set as expected.
func TestInventorySetAddItems(t *testing.T) {
	tests := []struct {
		initialItems  []*Inventory
		addItems      []*Inventory
		expectedItems []*Inventory
	}{
		// Adding no items to empty inventory set.
		{
			initialItems:  []*Inventory{},
			addItems:      []*Inventory{},
			expectedItems: []*Inventory{},
		},
		// Adding item to empty inventory set.
		{
			initialItems:  []*Inventory{},
			addItems:      []*Inventory{&inventory1},
			expectedItems: []*Inventory{&inventory1},
		},
		// Adding no items does not change the inventory set
		{
			initialItems:  []*Inventory{&inventory1},
			addItems:      []*Inventory{},
			expectedItems: []*Inventory{&inventory1},
		},
		// Adding an item which already exists does not increase size.
		{
			initialItems:  []*Inventory{&inventory1, &inventory2},
			addItems:      []*Inventory{&inventory1},
			expectedItems: []*Inventory{&inventory1, &inventory2},
		},
		{
			initialItems:  []*Inventory{&inventory1, &inventory2},
			addItems:      []*Inventory{&inventory3, &inventory4},
			expectedItems: []*Inventory{&inventory1, &inventory2, &inventory3, &inventory4},
		},
	}
	for _, test := range tests {
		invSet := NewInventorySet(test.initialItems)
		invSet.AddItems(test.addItems)
		if len(test.expectedItems) != invSet.Size() {
			t.Errorf("Expected num inventory items (%d), got (%d)\n", len(test.expectedItems), invSet.Size())
		}
	}
}
// TestInventorySetDeleteItem verifies DeleteItem's return value and
// the resulting set size for nil, missing, and present items.
func TestInventorySetDeleteItem(t *testing.T) {
	tests := []struct {
		initialItems  []*Inventory
		deleteItem    *Inventory
		expected      bool
		expectedItems []*Inventory
	}{
		{
			initialItems:  []*Inventory{},
			deleteItem:    nil,
			expected:      false,
			expectedItems: []*Inventory{},
		},
		{
			initialItems:  []*Inventory{},
			deleteItem:    &inventory1,
			expected:      false,
			expectedItems: []*Inventory{},
		},
		{
			initialItems:  []*Inventory{&inventory2},
			deleteItem:    &inventory1,
			expected:      false,
			expectedItems: []*Inventory{&inventory2},
		},
		{
			initialItems:  []*Inventory{&inventory1},
			deleteItem:    &inventory1,
			expected:      true,
			expectedItems: []*Inventory{},
		},
		{
			initialItems:  []*Inventory{&inventory1, &inventory2},
			deleteItem:    &inventory1,
			expected:      true,
			expectedItems: []*Inventory{&inventory2},
		},
	}
	for _, test := range tests {
		invSet := NewInventorySet(test.initialItems)
		actual := invSet.DeleteItem(test.deleteItem)
		if test.expected != actual {
			t.Errorf("Expected return value (%t), got (%t)\n", test.expected, actual)
		}
		if len(test.expectedItems) != invSet.Size() {
			t.Errorf("Expected num inventory items (%d), got (%d)\n", len(test.expectedItems), invSet.Size())
		}
	}
}
// TestInventorySetMerge verifies that Merge returns the union of two
// inventory sets without error and without duplicates.
func TestInventorySetMerge(t *testing.T) {
	tests := []struct {
		set1   []*Inventory
		set2   []*Inventory
		merged []*Inventory
	}{
		{
			set1:   []*Inventory{},
			set2:   []*Inventory{},
			merged: []*Inventory{},
		},
		{
			set1:   []*Inventory{},
			set2:   []*Inventory{&inventory1},
			merged: []*Inventory{&inventory1},
		},
		{
			set1:   []*Inventory{&inventory1},
			set2:   []*Inventory{},
			merged: []*Inventory{&inventory1},
		},
		{
			set1:   []*Inventory{&inventory1, &inventory2},
			set2:   []*Inventory{&inventory1},
			merged: []*Inventory{&inventory1, &inventory2},
		},
		{
			set1:   []*Inventory{&inventory1, &inventory2},
			set2:   []*Inventory{&inventory1, &inventory2},
			merged: []*Inventory{&inventory1, &inventory2},
		},
		{
			set1:   []*Inventory{&inventory1, &inventory2},
			set2:   []*Inventory{&inventory3, &inventory4},
			merged: []*Inventory{&inventory1, &inventory2, &inventory3, &inventory4},
		},
	}
	for _, test := range tests {
		invSet1 := NewInventorySet(test.set1)
		invSet2 := NewInventorySet(test.set2)
		expected := NewInventorySet(test.merged)
		// Check the error instead of discarding it, and compare full set
		// contents instead of only the sizes.
		merged, err := invSet1.Merge(invSet2)
		if err != nil {
			t.Fatalf("Unexpected error merging inventory sets: %s\n", err)
		}
		if !expected.Equals(merged) {
			t.Errorf("Expected merged inventory set (%s), got (%s)\n", expected, merged)
		}
	}
}
// TestInventorySetSubtract verifies that Subtract returns the set
// difference (initial - subtracted) without error.
func TestInventorySetSubtract(t *testing.T) {
	tests := []struct {
		initialItems  []*Inventory
		subtractItems []*Inventory
		expected      []*Inventory
	}{
		{
			initialItems:  []*Inventory{},
			subtractItems: []*Inventory{},
			expected:      []*Inventory{},
		},
		{
			initialItems:  []*Inventory{},
			subtractItems: []*Inventory{&inventory1},
			expected:      []*Inventory{},
		},
		{
			initialItems:  []*Inventory{&inventory1},
			subtractItems: []*Inventory{},
			expected:      []*Inventory{&inventory1},
		},
		{
			initialItems:  []*Inventory{&inventory1, &inventory2},
			subtractItems: []*Inventory{&inventory1},
			expected:      []*Inventory{&inventory2},
		},
		{
			initialItems:  []*Inventory{&inventory1, &inventory2},
			subtractItems: []*Inventory{&inventory1, &inventory2},
			expected:      []*Inventory{},
		},
		{
			initialItems:  []*Inventory{&inventory1, &inventory2},
			subtractItems: []*Inventory{&inventory3, &inventory4},
			expected:      []*Inventory{&inventory1, &inventory2},
		},
	}
	for _, test := range tests {
		invInitialItems := NewInventorySet(test.initialItems)
		invSubtractItems := NewInventorySet(test.subtractItems)
		expected := NewInventorySet(test.expected)
		// Check the error instead of discarding it, and compare full set
		// contents instead of only the sizes.
		actual, err := invInitialItems.Subtract(invSubtractItems)
		if err != nil {
			t.Fatalf("Unexpected error subtracting inventory sets: %s\n", err)
		}
		if !expected.Equals(actual) {
			t.Errorf("Expected subtracted inventory set (%s), got (%s)\n", expected, actual)
		}
	}
}
// TestInventorySetEquals exercises set equality in both directions:
// unequal sets must not report equal, and equal sets must not report
// unequal. Item ordering must not affect the result.
func TestInventorySetEquals(t *testing.T) {
	tests := []struct {
		set1    []*Inventory
		set2    []*Inventory
		isEqual bool
	}{
		{
			set1:    []*Inventory{},
			set2:    []*Inventory{&inventory1},
			isEqual: false,
		},
		{
			set1:    []*Inventory{&inventory1},
			set2:    []*Inventory{},
			isEqual: false,
		},
		{
			set1:    []*Inventory{&inventory1, &inventory2},
			set2:    []*Inventory{&inventory1},
			isEqual: false,
		},
		{
			set1:    []*Inventory{&inventory1, &inventory2},
			set2:    []*Inventory{&inventory3, &inventory4},
			isEqual: false,
		},
		// Empty sets are equal.
		{
			set1:    []*Inventory{},
			set2:    []*Inventory{},
			isEqual: true,
		},
		{
			set1:    []*Inventory{&inventory1},
			set2:    []*Inventory{&inventory1},
			isEqual: true,
		},
		// Ordering of the inventory items does not matter for equality.
		{
			set1:    []*Inventory{&inventory1, &inventory2},
			set2:    []*Inventory{&inventory2, &inventory1},
			isEqual: true,
		},
	}
	for _, tc := range tests {
		first := NewInventorySet(tc.set1)
		second := NewInventorySet(tc.set2)
		equal := first.Equals(second)
		switch {
		case tc.isEqual && !equal:
			t.Errorf("Expected equal inventory sets; got unequal (%s)/(%s)\n", first, second)
		case !tc.isEqual && equal:
			t.Errorf("Expected inequal inventory sets; got equal (%s)/(%s)\n", first, second)
		}
	}
}

View File

@ -0,0 +1,53 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"io"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/printers"
)
// KubectlPrinterAdapter is a workaround for capturing progress from
// ApplyOptions. ApplyOptions were originally meant to print progress
// directly using a configurable printer. The KubectlPrinterAdapter
// plugs into ApplyOptions as a ToPrinter function, but instead of
// printing the info, it emits it as an event on the provided channel.
type KubectlPrinterAdapter struct {
	// ch receives one ApplyEvent per object the printer is asked to
	// "print"; sends block until a receiver is ready (unbuffered use).
	ch chan<- Event
}
// resourcePrinterImpl implements the ResourcePrinter interface. But
// instead of printing, it emits information on the provided channel.
type resourcePrinterImpl struct {
	// operation is the apply operation string this printer was created
	// for; it is copied into every emitted event.
	operation string
	// ch is the destination channel for the emitted apply events.
	ch chan<- Event
}
// PrintObj wraps the provided object and the printer's operation in an
// ApplyEvent and sends it on the channel. The io.Writer is ignored;
// nothing is ever printed. Always returns nil.
func (r *resourcePrinterImpl) PrintObj(obj runtime.Object, _ io.Writer) error {
	event := Event{
		EventType: ApplyEventType,
		ApplyEvent: ApplyEvent{
			Operation: r.operation,
			Object:    obj,
		},
	}
	r.ch <- event
	return nil
}
// toPrinterFunc is the callback type ApplyOptions expects for building
// a printer from an operation string.
type toPrinterFunc func(string) (printers.ResourcePrinter, error)

// toPrinterFunc returns a toPrinterFunc that, instead of constructing a
// real printer, builds a resourcePrinterImpl which forwards every
// "printed" object to the adapter's event channel. It never errors.
func (p *KubectlPrinterAdapter) toPrinterFunc() toPrinterFunc {
	return func(op string) (printers.ResourcePrinter, error) {
		printer := &resourcePrinterImpl{
			operation: op,
			ch:        p.ch,
		}
		return printer, nil
	}
}

View File

@ -0,0 +1,49 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"bytes"
"testing"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestKubectlPrinterAdapter verifies that a printer built by the adapter
// emits the printed object and operation as an event on the channel.
func TestKubectlPrinterAdapter(t *testing.T) {
	ch := make(chan Event)
	buffer := bytes.Buffer{}
	operation := "operation"
	adapter := KubectlPrinterAdapter{
		ch: ch,
	}
	toPrinterFunc := adapter.toPrinterFunc()
	resourcePrinter, err := toPrinterFunc(operation)
	assert.NoError(t, err)
	deployment := appsv1.Deployment{
		TypeMeta: v1.TypeMeta{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
		},
		ObjectMeta: v1.ObjectMeta{
			Name:      "name",
			Namespace: "namespace",
		},
	}
	// Need to run this in a separate goroutine since the event channel
	// is unbuffered, so the send in PrintObj blocks until we receive.
	// Deliver the PrintObj error over its own channel: writing to the
	// shared `err` variable from the goroutine while the main goroutine
	// reads it is a data race (the event-channel receive does not order
	// the assignment that happens after the send).
	errCh := make(chan error, 1)
	go func() {
		errCh <- resourcePrinter.PrintObj(&deployment, &buffer)
	}()
	msg := <-ch
	assert.NoError(t, <-errCh)
	assert.Equal(t, operation, msg.ApplyEvent.Operation)
	assert.Equal(t, &deployment, msg.ApplyEvent.Object)
}

257
pkg/apply/prune.go Normal file
View File

@ -0,0 +1,257 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"fmt"
"io"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/printers"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/dynamic"
"k8s.io/kubectl/pkg/cmd/apply"
"k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/validation"
)
// PruneOptions encapsulates the necessary information to
// implement the prune functionality.
type PruneOptions struct {
	// client performs the delete calls against the cluster.
	client dynamic.Interface
	// builder queries the cluster for past grouping objects by label.
	builder *resource.Builder
	// mapper maps an Inventory's GroupKind to its REST resource so it
	// can be deleted through the dynamic client.
	mapper meta.RESTMapper
	// namespace scopes the query for past grouping objects.
	namespace string
	// The grouping object (as an Info) from the current apply. It is
	// used to calculate the prune set after retrieving the previous
	// grouping objects.
	currentGroupingObject *resource.Info
	// The set of retrieved grouping objects (as Infos) selected
	// by the grouping label. This set should also include the
	// current grouping object. Stored here to make testing
	// easier by manually setting the retrieved grouping infos.
	pastGroupingObjects []*resource.Info
	// retrievedGroupingObjects records whether pastGroupingObjects has
	// been fetched from the cluster yet (the fetch is lazy).
	retrievedGroupingObjects bool
	// toPrinter builds the printer used to report deleted grouping objects.
	toPrinter func(string) (printers.ResourcePrinter, error)
	// out receives the "<kind>/<name> deleted" progress messages.
	out io.Writer
	// validator is passed to the builder when querying grouping objects.
	validator validation.Schema
	// TODO: DeleteOptions--cascade?
}
// NewPruneOptions returns a PruneOptions encapsulating the necessary
// information to run the prune. Fields are copied from the passed
// ApplyOptions, and clients are built from the Factory. Returns an error
// if gathering this information fails, or if no grouping object can be
// found among the currently applied objects.
// TODO: Add dry-run options.
func NewPruneOptions(f util.Factory, ao *apply.ApplyOptions) (*PruneOptions, error) {
	po := &PruneOptions{}
	var err error
	// Fields copied from ApplyOptions.
	po.namespace = ao.Namespace
	po.toPrinter = ao.ToPrinter
	po.out = ao.Out
	// Client/Builder fields from the Factory.
	po.client, err = f.DynamicClient()
	if err != nil {
		return nil, err
	}
	po.builder = f.NewBuilder()
	po.mapper, err = f.ToRESTMapper()
	if err != nil {
		return nil, err
	}
	po.validator, err = f.Validator(false)
	if err != nil {
		return nil, err
	}
	// Retrieve/store the grouping object for current apply.
	currentObjects, err := ao.GetObjects()
	if err != nil {
		return nil, err
	}
	currentGroupingObject, found := findGroupingObject(currentObjects)
	if !found {
		// Error strings follow Go convention: lowercase, no trailing period.
		return nil, fmt.Errorf("current grouping object not found during prune")
	}
	po.currentGroupingObject = currentGroupingObject
	// Initialize past grouping objects as empty; they are fetched lazily
	// by getPreviousGroupingObjects.
	po.pastGroupingObjects = []*resource.Info{}
	po.retrievedGroupingObjects = false
	return po, nil
}
// getPreviousGroupingObjects returns the grouping objects that carry the
// same grouping label as the current grouping object, with the current
// grouping object itself filtered out. Returns an error if the grouping
// objects cannot be retrieved or converted to inventories.
func (po *PruneOptions) getPreviousGroupingObjects() ([]*resource.Info, error) {
	// Lazily populate pastGroupingObjects on first use.
	if !po.retrievedGroupingObjects {
		if err := po.retrievePreviousGroupingObjects(); err != nil {
			return nil, err
		}
	}
	// The retrieved set includes the current grouping object; drop it by
	// comparing inventories.
	currentInventory, err := infoToInventory(po.currentGroupingObject)
	if err != nil {
		return nil, err
	}
	previous := []*resource.Info{}
	for _, info := range po.pastGroupingObjects {
		inv, err := infoToInventory(info)
		if err != nil {
			return nil, err
		}
		if currentInventory.Equals(inv) {
			continue
		}
		previous = append(previous, info)
	}
	return previous, nil
}
// retrievePreviousGroupingObjects requests the previous grouping objects
// (ConfigMaps) using the grouping label from the current grouping object,
// and stores them in "pastGroupingObjects" (marking
// "retrievedGroupingObjects"). The retrieved set includes the current
// grouping object, since it carries the same label. Returns an error if
// the current grouping object is missing, if its grouping label cannot be
// read, or if the query for past grouping objects fails.
func (po *PruneOptions) retrievePreviousGroupingObjects() error {
	if po.currentGroupingObject == nil || po.currentGroupingObject.Object == nil {
		// Error strings follow Go convention: lowercase, no punctuation,
		// no embedded newline.
		return fmt.Errorf("missing current grouping object")
	}
	// Get the grouping label for this grouping object, and create
	// a label selector from it.
	groupingLabel, err := retrieveGroupingLabel(po.currentGroupingObject.Object)
	if err != nil {
		return err
	}
	labelSelector := fmt.Sprintf("%s=%s", GroupingLabel, groupingLabel)
	retrievedGroupingInfos, err := po.builder.
		Unstructured().
		// TODO: Check if this validator is necessary.
		Schema(po.validator).
		ContinueOnError().
		NamespaceParam(po.namespace).DefaultNamespace().
		ResourceTypes("configmap").
		LabelSelectorParam(labelSelector).
		Flatten().
		Do().
		Infos()
	if err != nil {
		return err
	}
	po.pastGroupingObjects = retrievedGroupingInfos
	po.retrievedGroupingObjects = true
	return nil
}
// infoToInventory transforms the object represented by the passed "info"
// into its Inventory representation (namespace, name, GroupKind). Returns
// an error if the passed Info is nil, or the Object in the Info is nil.
func infoToInventory(info *resource.Info) (*Inventory, error) {
	if info == nil || info.Object == nil {
		// Error strings follow Go convention: lowercase, no punctuation,
		// no embedded newline.
		return nil, fmt.Errorf("empty resource.Info can not calculate as inventory")
	}
	obj := info.Object
	gk := obj.GetObjectKind().GroupVersionKind().GroupKind()
	return createInventory(info.Namespace, info.Name, gk)
}
// unionPastInventory merges the inventories referenced by the passed
// grouping objects into a single InventorySet (duplicates collapse into
// one entry). Returns an error if any of the passed objects is not a
// grouping object, or if an inventory cannot be retrieved from one.
func unionPastInventory(infos []*resource.Info) (*InventorySet, error) {
	union := NewInventorySet([]*Inventory{})
	for _, groupingInfo := range infos {
		items, err := retrieveInventoryFromGroupingObj([]*resource.Info{groupingInfo})
		if err != nil {
			return nil, err
		}
		union.AddItems(items)
	}
	return union, nil
}
// calcPruneSet returns the InventorySet of objects to delete (prune):
//
//   prune set = (prev1 U prev2 U ... U prevN) - (curr1, curr2, ..., currN)
//
// pastGroupingInfos are the previously applied grouping objects, each
// storing the inventory of the objects applied at that time. Returns an
// error if the union of previous inventories cannot be built, or if the
// current grouping object's inventory cannot be retrieved.
func (po *PruneOptions) calcPruneSet(pastGroupingInfos []*resource.Info) (*InventorySet, error) {
	pastInventory, err := unionPastInventory(pastGroupingInfos)
	if err != nil {
		return nil, err
	}
	// The inventory stored in the current grouping object.
	currentInv, err := retrieveInventoryFromGroupingObj([]*resource.Info{po.currentGroupingObject})
	if err != nil {
		return nil, err
	}
	return pastInventory.Subtract(NewInventorySet(currentInv))
}
// Prune deletes the set of resources which were previously applied
// (retrieved from previous grouping objects) but omitted in
// the current apply. Prune also deletes all previous grouping
// objects. Returns an error if retrieving, mapping, deleting, or
// printing fails; on error, prior deletions are NOT rolled back.
func (po *PruneOptions) Prune() error {
	// Retrieve previous grouping objects, and calculate the
	// union of the previous applies as an inventory set.
	pastGroupingInfos, err := po.getPreviousGroupingObjects()
	if err != nil {
		return err
	}
	// prune set = union(past inventories) - current inventory.
	pruneSet, err := po.calcPruneSet(pastGroupingInfos)
	if err != nil {
		return err
	}
	// Delete the prune objects.
	for _, inv := range pruneSet.GetItems() {
		// Map the GroupKind to its REST resource for the dynamic client.
		mapping, err := po.mapper.RESTMapping(inv.GroupKind)
		if err != nil {
			return err
		}
		err = po.client.Resource(mapping.Resource).Namespace(inv.Namespace).Delete(inv.Name, &metav1.DeleteOptions{})
		if err != nil {
			return err
		}
		// Progress message, e.g. "pod/name deleted".
		fmt.Fprintf(po.out, "%s/%s deleted\n", strings.ToLower(inv.GroupKind.Kind), inv.Name)
	}
	// Delete previous grouping objects. These already carry a REST
	// mapping on the Info, so no mapper lookup is needed here.
	for _, pastGroupInfo := range pastGroupingInfos {
		err = po.client.Resource(pastGroupInfo.Mapping.Resource).
			Namespace(pastGroupInfo.Namespace).
			Delete(pastGroupInfo.Name, &metav1.DeleteOptions{})
		if err != nil {
			return err
		}
		// Report the grouping-object deletion through the configured
		// printer rather than a raw Fprintf.
		printer, err := po.toPrinter("deleted")
		if err != nil {
			return err
		}
		if err = printer.PrintObj(pastGroupInfo.Object, po.out); err != nil {
			return err
		}
	}
	return nil
}

243
pkg/apply/prune_test.go Normal file
View File

@ -0,0 +1,243 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"testing"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/cli-runtime/pkg/resource"
)
// pod1Inv is the Inventory representation of the test pod1 object
// (core group, Kind Pod).
var pod1Inv = &Inventory{
	Namespace: testNamespace,
	Name:      pod1Name,
	GroupKind: schema.GroupKind{
		Group: "",
		Kind:  "Pod",
	},
}

// pod2Inv is the Inventory representation of the test pod2 object.
var pod2Inv = &Inventory{
	Namespace: testNamespace,
	Name:      pod2Name,
	GroupKind: schema.GroupKind{
		Group: "",
		Kind:  "Pod",
	},
}

// pod3Inv is the Inventory representation of the test pod3 object.
var pod3Inv = &Inventory{
	Namespace: testNamespace,
	Name:      pod3Name,
	GroupKind: schema.GroupKind{
		Group: "",
		Kind:  "Pod",
	},
}

// groupingInv is the Inventory representation of the grouping object
// itself (a ConfigMap in the core group).
var groupingInv = &Inventory{
	Namespace: testNamespace,
	Name:      groupingObjName,
	GroupKind: schema.GroupKind{
		Group: "",
		Kind:  "ConfigMap",
	},
}
// TestInfoToInventory verifies infoToInventory: nil Infos (or Infos with
// a nil Object) are errors, and valid Infos map to the expected Inventory.
func TestInfoToInventory(t *testing.T) {
	tests := map[string]struct {
		info     *resource.Info
		expected *Inventory
		isError  bool
	}{
		"Nil info is an error": {
			info:     nil,
			expected: nil,
			isError:  true,
		},
		"Nil info object is an error": {
			info:     nilInfo,
			expected: nil,
			isError:  true,
		},
		"Pod 1 object becomes Pod 1 inventory": {
			info:     pod1Info,
			expected: pod1Inv,
			isError:  false,
		},
		"Grouping object becomes grouping inventory": {
			info:     copyGroupingInfo(),
			expected: groupingInv,
			isError:  false,
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			actual, err := infoToInventory(test.info)
			if test.isError && err == nil {
				t.Errorf("Did not receive expected error.\n")
			}
			if !test.isError {
				if err != nil {
					// Fatalf (not Errorf): comparing against a nil
					// inventory after an unexpected error is meaningless.
					t.Fatalf("Received unexpected error: %s\n", err)
				}
				if !test.expected.Equals(actual) {
					t.Errorf("Expected inventory (%s), got (%s)\n", test.expected, actual)
				}
			}
		})
	}
}
// createGroupingInfo returns a grouping object (as a resource.Info) with
// its inventory set from the passed "children" infos.
// NOTE(review): the "name" parameter is currently unused -- the returned
// info always uses groupingObjName. Confirm whether distinct grouping
// object names were intended for the multi-grouping test cases.
func createGroupingInfo(name string, children ...(*resource.Info)) *resource.Info {
	groupingObjCopy := groupingObj.DeepCopy()
	var groupingInfo = &resource.Info{
		Namespace: testNamespace,
		Name:      groupingObjName,
		Object:    groupingObjCopy,
	}
	infos := []*resource.Info{groupingInfo}
	infos = append(infos, children...)
	// Error deliberately ignored: this is a test helper, and a failure to
	// add the inventory surfaces in the assertions of the calling test.
	_ = addInventoryToGroupingObj(infos)
	return groupingInfo
}
// TestUnionPastInventory verifies that unionPastInventory returns the
// de-duplicated union of the inventories stored in the passed grouping
// objects.
func TestUnionPastInventory(t *testing.T) {
	tests := map[string]struct {
		groupingInfos []*resource.Info
		expected      []*Inventory
	}{
		"Empty grouping objects = empty inventory set": {
			groupingInfos: []*resource.Info{},
			expected:      []*Inventory{},
		},
		"No children in grouping object, equals no inventory": {
			groupingInfos: []*resource.Info{createGroupingInfo("test-1")},
			expected:      []*Inventory{},
		},
		"Grouping object with Pod1 returns inventory with Pod1": {
			groupingInfos: []*resource.Info{createGroupingInfo("test-1", pod1Info)},
			expected:      []*Inventory{pod1Inv},
		},
		"Grouping object with three pods returns inventory with three pods": {
			groupingInfos: []*resource.Info{
				createGroupingInfo("test-1", pod1Info, pod2Info, pod3Info),
			},
			expected: []*Inventory{pod1Inv, pod2Inv, pod3Inv},
		},
		"Two grouping objects with different pods returns inventory with both pods": {
			groupingInfos: []*resource.Info{
				createGroupingInfo("test-1", pod1Info),
				createGroupingInfo("test-2", pod2Info),
			},
			expected: []*Inventory{pod1Inv, pod2Inv},
		},
		"Two grouping objects with overlapping pods returns set of pods": {
			groupingInfos: []*resource.Info{
				createGroupingInfo("test-1", pod1Info, pod2Info),
				createGroupingInfo("test-2", pod2Info, pod3Info),
			},
			expected: []*Inventory{pod1Inv, pod2Inv, pod3Inv},
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			actual, err := unionPastInventory(test.groupingInfos)
			if err != nil {
				// Fatalf (not Errorf): comparing against the result after
				// an unexpected error would produce a misleading failure.
				t.Fatalf("Unexpected error received: %s\n", err)
			}
			expected := NewInventorySet(test.expected)
			if !expected.Equals(actual) {
				t.Errorf("Expected inventory (%s), got (%s)\n", expected, actual)
			}
		})
	}
}
// TestCalcPruneSet verifies the prune-set computation:
// union(past inventories) - current inventory.
func TestCalcPruneSet(t *testing.T) {
	tests := map[string]struct {
		past     []*resource.Info
		current  *resource.Info
		expected []*Inventory
		isError  bool
	}{
		"Object not unstructured--error": {
			past:     []*resource.Info{nonUnstructuredGroupingInfo},
			current:  &resource.Info{},
			expected: []*Inventory{},
			isError:  true,
		},
		"No past group objects--no prune set": {
			past:     []*resource.Info{},
			current:  createGroupingInfo("test-1"),
			expected: []*Inventory{},
			isError:  false,
		},
		"Empty past grouping object--no prune set": {
			past:     []*resource.Info{createGroupingInfo("test-1")},
			current:  createGroupingInfo("test-1"),
			expected: []*Inventory{},
			isError:  false,
		},
		"Pod1 - Pod1 = empty set": {
			past: []*resource.Info{
				createGroupingInfo("test-1", pod1Info),
			},
			current:  createGroupingInfo("test-1", pod1Info),
			expected: []*Inventory{},
			isError:  false,
		},
		"(Pod1, Pod2) - Pod1 = Pod2": {
			past: []*resource.Info{
				createGroupingInfo("test-1", pod1Info, pod2Info),
			},
			current:  createGroupingInfo("test-1", pod1Info),
			expected: []*Inventory{pod2Inv},
			isError:  false,
		},
		"(Pod1, Pod2) - Pod2 = Pod1": {
			past: []*resource.Info{
				createGroupingInfo("test-1", pod1Info, pod2Info),
			},
			current:  createGroupingInfo("test-1", pod2Info),
			expected: []*Inventory{pod1Inv},
			isError:  false,
		},
		"(Pod1, Pod2, Pod3) - Pod2 = Pod1, Pod3": {
			past: []*resource.Info{
				createGroupingInfo("test-1", pod1Info, pod2Info),
				createGroupingInfo("test-1", pod2Info, pod3Info),
			},
			current:  createGroupingInfo("test-1", pod2Info),
			expected: []*Inventory{pod1Inv, pod3Inv},
			isError:  false,
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			po := &PruneOptions{}
			po.currentGroupingObject = test.current
			actual, err := po.calcPruneSet(test.past)
			expected := NewInventorySet(test.expected)
			if test.isError && err == nil {
				t.Errorf("Did not receive expected error.\n")
			}
			if !test.isError {
				if err != nil {
					// Fatalf (not Errorf): comparing against the result
					// after an unexpected error would mislead.
					t.Fatalf("Unexpected error received: %s\n", err)
				}
				if !expected.Equals(actual) {
					t.Errorf("Expected prune set (%s), got (%s)\n", expected, actual)
				}
			}
		})
	}
}

30
pkg/apply/status.go Normal file
View File

@ -0,0 +1,30 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package apply
import (
"time"
"github.com/spf13/cobra"
)
// NewStatusOptions returns a StatusOptions with the defaults: waiting
// disabled, a two second polling period, and a one minute timeout.
func NewStatusOptions() *StatusOptions {
	opts := &StatusOptions{
		wait:    false,
		period:  2 * time.Second,
		timeout: time.Minute,
	}
	return opts
}
// StatusOptions holds the flag values controlling whether (and how)
// the command waits for applied resources to reach the Current status.
type StatusOptions struct {
	// wait enables waiting for resources to reach the Current status.
	wait bool
	// period is the interval between status polls.
	period time.Duration
	// timeout bounds the total time spent waiting.
	timeout time.Duration
}
// AddFlags registers the status-related flags on the given command,
// binding them to this StatusOptions' fields (current field values
// become the flag defaults).
func (s *StatusOptions) AddFlags(c *cobra.Command) {
	flags := c.Flags()
	flags.BoolVar(&s.wait, "status", s.wait, "Wait for all applied resources to reach the Current status.")
	flags.DurationVar(&s.period, "status-period", s.period, "Polling period for resource statuses.")
	flags.DurationVar(&s.timeout, "status-timeout", s.timeout, "Timeout threshold for waiting for all resources to reach the Current status.")
}

54
pkg/kstatus/.golangci.yml Normal file
View File

@ -0,0 +1,54 @@
# Copyright 2019 The Kubernetes Authors.
# SPDX-License-Identifier: Apache-2.0
run:
deadline: 5m
linters:
# please, do not use `enable-all`: it's deprecated and will be removed soon.
# inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
disable-all: true
enable:
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
- errcheck
# - funlen
- gochecknoinits
- goconst
- gocritic
- gocyclo
- gofmt
- goimports
- golint
- gosec
- gosimple
- govet
- ineffassign
- interfacer
- lll
- misspell
- nakedret
- scopelint
- staticcheck
- structcheck
- stylecheck
- typecheck
- unconvert
- unparam
- unused
- varcheck
- whitespace
linters-settings:
dupl:
threshold: 400
lll:
line-length: 170
gocyclo:
min-complexity: 30
golint:
min-confidence: 0.85

115
pkg/kstatus/README.md Normal file
View File

@ -0,0 +1,115 @@
# kstatus
kstatus provides tools for checking the status of Kubernetes resources. The primary use case is knowing when
(or if) a given set of resources in cluster has successfully reconciled an apply operation.
## Concepts
This effort has several goals, some with a shorter timeline than others. Initially, we want to provide
a library that makes it easier to decide when changes to a set of resources have been reconciled in a cluster.
To support types that do not yet publish status information, we will initially fallback on type specific rules.
The library already contains rules for many of the most common built-in types such as Deployment and StatefulSet.
For custom resource definitions (CRDs), there currently isn't much guidance on which properties should be exposed in the status
object and which conditions should be used. As part of this effort we want to define a set of standard conditions
that the library will understand and that we encourage developers to adopt in their CRDs. These standard conditions will
be focused on providing the necessary information for understanding status of the reconcile after `apply` and it is not
expected that these will necessarily be the only conditions exposed in a custom resource. Developers will be free to add as many conditions
as they wish, but if the CRDs adopt the standard conditions defined here, this library will handle them correctly.
The `status` objects for built-in types don't all conform to a common behavior. Not all built-in types expose conditions,
and even among the types that do, the types of conditions vary widely. Long-term, we hope to add support for the
standard conditions to the built-in types as well. This would remove the need for type-specific rules for determining
status.
### Statuses
The library currently defines the following statuses for resource:
* __InProgress__: The actual state of the resource has not yet reached the desired state as specified in the
resource manifest, i.e. the resource reconcile has not yet completed. Newly created resources will usually
start with this status, although some resources like ConfigMaps are Current immediately.
* __Failed__: The process of reconciling the actual state with the desired state has encountered an error
or it has made insufficient progress.
* __Current__: The actual state of the resource matches the desired state. The reconcile process is considered
complete until there are changes to either the desired or the actual state.
* __Terminating__: The resource is in the process of being deleted.
* __Unknown__: This is for situations when the library is unable to determine the status of a resource.
### Conditions
The conditions defined in the library are designed to adhere to the "abnormal-true" pattern, i.e. that
conditions are present and with a value of true whenever something unusual happens. So the absence of
any conditions means everything is normal. Normal in this situation simply means that the latest observed
generation of the resource manifest by the controller has been fully reconciled with the actual state.
* __InProgress__: The controller is currently working on reconciling the latest changes.
* __Failed__: The controller has encountered an error during the reconcile process or it has made
insufficient progress (timeout).
The use of the "abnormal-true" pattern has some challenges. If the controller is not running, or for some
reason not able to update the resource, it will look like it is in a good state when that is not true. The
solution to this issue is to adopt the pattern used by several of the built-in types where there is an
`observedGeneration` property on the status object which is set by the controller during the reconcile loop.
If the `generation` and the `observedGeneration` of a resource do not match, it means there are changes
that the controller has not yet seen, and therefore not acted upon.
## Features
The library is currently separated into two packages, one that provides the basic functionality, and another that
builds upon the basics to provide a higher level API.
**sigs.k8s.io/kustomize/kstatus/status**: Provides two basic functions. First, it provides the `Compute` function
that takes a single resource and computes the status for this resource based on the fields in the status object for
the resource. Second, it provides the `Augment` function that computes the appropriate standard conditions based on
the status object and then amends them to the conditions in the resource. Both of these functions currently operate
on Unstructured types, but this should eventually be changed to rely on the kyaml library. Both of these functions
compute the status and conditions solely based on the data in the resource passed in. It does not communicate with
a cluster to get the latest state of the resources.
**sigs.k8s.io/kustomize/kstatus/wait**: This package builds upon the status package and provides functionality that
will fetch the latest state from a cluster. It provides the `FetchAndResolve` function that takes a list of resource
identifiers, fetches the latest state for all the resources from the cluster, computes the status for all of them and
returns the results. `WaitForStatus` accepts a list of resource identifiers and will poll the cluster for the status of
the resources until all resources have reached the `Current` status.
## Challenges
### Status is not obvious for all resource types
For some types of resources, it is pretty clear what the different statuses mean. For others, it
is far less obvious. For example, what does it mean that a PodDisruptionBudget is Current? Based on
the assumptions above it probably should be whenever the controller has observed the resource
and updated the status object of the PDB with information on allowed disruptions. But currently, a PDB is
considered Current when the number of healthy replicas meets the threshold given in the PDB. Also, should
the presence of a PDB influence when a Deployment is considered Current? This would mean that a Deployment
should be considered Current whenever the number of replicas reach the threshold set by the corresponding
PDB. This is not currently supported as described below.
### Status is decided based on single resource
Currently the status of a resource is decided solely based on information from
the state of that resource. This is an issue for resources that create other resources
and that doesn't provide sufficient information within their own status object. An example
is the Service resource that doesn't provide much status information but does generate Endpoint
resources that could be used to determine status. Similarly, the status of a Deployment could be
based on its generated ReplicaSets and Pods.
Not having the generated resources also limits the amount of details that can be provided
when something isn't working as expected.
## Future
### Depend on kyaml instead of k8s libraries
The sigs.k8s.io/kustomize/kstatus/status package currently depends on k8s libraries. This can be
challenging if someone wants to vendor the library within their own project. We want to replace
the dependencies on k8s libraries with kyaml for the status package. The wait package needs to
talk to a k8s cluster, so this package will continue to rely on the k8s libraries.
### Use watches instead of polling
We currently poll for updates to resources, but it would be possible to set up
watches instead. This could also be combined with deciding status based on not only a single
resource, but also all its generated resources. This would lead to a design that seems similar
to a controller, so maybe a solution like this could be built on top of controller-runtime.
A challenge here is that the rules for each built-in type would need to be expressed in a different
way than what we currently do.

19
pkg/kstatus/doc.go Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Package kstatus contains libraries for computing status of kubernetes
// resources.
//
// Status
//
// Get status and/or conditions for resources based on resources already
// read from a cluster, i.e. it will not fetch resources from
// a cluster.
//
// Wait
//
// Get status and/or conditions for resources by fetching them
// from a cluster. This supports specifying a set of resources as
// an Inventory or as a list of manifests/unstructureds. This also
// supports polling the state of resources until they all reach a
// specific status. A common use case for this can be to wait for
// a set of resources to all finish reconciling after an apply.
package kstatus

497
pkg/kstatus/status/core.go Normal file
View File

@ -0,0 +1,497 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package status
import (
"fmt"
"math"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// GetConditionsFn defines the signature for functions to compute the
// status of a built-in resource.
type GetConditionsFn func(*unstructured.Unstructured) (*Result, error)

// legacyTypes defines the mapping from GroupKind to a function that can
// compute the status for the given resource. Keys are "group/Kind", or
// just "Kind" for the core ("") group (see GetLegacyConditionsFn).
var legacyTypes = map[string]GetConditionsFn{
	"Service":                    serviceConditions,
	"Pod":                        podConditions,
	"Secret":                     alwaysReady,
	"PersistentVolumeClaim":      pvcConditions,
	"apps/StatefulSet":           stsConditions,
	"apps/DaemonSet":             daemonsetConditions,
	"extensions/DaemonSet":       daemonsetConditions,
	"apps/Deployment":            deploymentConditions,
	"extensions/Deployment":      deploymentConditions,
	"apps/ReplicaSet":            replicasetConditions,
	"extensions/ReplicaSet":      replicasetConditions,
	"policy/PodDisruptionBudget": pdbConditions,
	"batch/CronJob":              alwaysReady,
	"ConfigMap":                  alwaysReady,
	"batch/Job":                  jobConditions,
}
const (
	// Reasons attached to InProgress results by the per-type
	// status functions below.
	tooFewReady     = "LessReady"
	tooFewAvailable = "LessAvailable"
	tooFewUpdated   = "LessUpdated"
	tooFewReplicas  = "LessReplicas"
	// onDeleteUpdateStrategy is the StatefulSet update strategy that
	// marks the rollout as user managed (see stsConditions).
	onDeleteUpdateStrategy = "OnDelete"
)
// GetLegacyConditionsFn returns the hard-coded status function registered
// for the resource's GroupKind, or nil if the resource type is not known.
// Core-group resources are looked up by bare Kind; everything else by
// "group/Kind".
func GetLegacyConditionsFn(u *unstructured.Unstructured) GetConditionsFn {
	gk := u.GroupVersionKind().GroupKind()
	if gk.Group == "" {
		return legacyTypes[gk.Kind]
	}
	return legacyTypes[gk.Group+"/"+gk.Kind]
}
// alwaysReady is used for resource kinds that are considered Current as soon
// as they exist; there are no conditions worth waiting for.
func alwaysReady(u *unstructured.Unstructured) (*Result, error) {
	res := Result{
		Status:     CurrentStatus,
		Message:    "Resource is always ready",
		Conditions: []Condition{},
	}
	return &res, nil
}
// stsConditions return standardized Conditions for Statefulset
//
// StatefulSet does define the .status.conditions property, but the controller never
// actually sets any Conditions. Thus, status must be computed only based on the other
// properties under .status. We don't have any way to find out if a reconcile for a
// StatefulSet has failed.
func stsConditions(u *unstructured.Unstructured) (*Result, error) {
	obj := u.UnstructuredContent()

	// updateStrategy==ondelete is a user managed statefulset.
	updateStrategy := GetStringField(obj, ".spec.updateStrategy.type", "")
	if updateStrategy == onDeleteUpdateStrategy {
		return &Result{
			Status:     CurrentStatus,
			Message:    "StatefulSet is using the ondelete update strategy",
			Conditions: []Condition{},
		}, nil
	}

	// Replicas. Defaults mirror the ones used elsewhere in this file:
	// spec.replicas defaults to 1, status counters to 0; partition uses -1
	// as a "not set" sentinel.
	specReplicas := GetIntField(obj, ".spec.replicas", 1)
	readyReplicas := GetIntField(obj, ".status.readyReplicas", 0)
	currentReplicas := GetIntField(obj, ".status.currentReplicas", 0)
	updatedReplicas := GetIntField(obj, ".status.updatedReplicas", 0)
	statusReplicas := GetIntField(obj, ".status.replicas", 0)
	partition := GetIntField(obj, ".spec.updateStrategy.rollingUpdate.partition", -1)

	if specReplicas > statusReplicas {
		message := fmt.Sprintf("Replicas: %d/%d", statusReplicas, specReplicas)
		return newInProgressStatus(tooFewReplicas, message), nil
	}

	if specReplicas > readyReplicas {
		message := fmt.Sprintf("Ready: %d/%d", readyReplicas, specReplicas)
		return newInProgressStatus(tooFewReady, message), nil
	}

	// https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
	// With a partitioned rolling update only the ordinals >= partition are
	// updated, so completion is judged against specReplicas-partition.
	if partition != -1 {
		if updatedReplicas < (specReplicas - partition) {
			message := fmt.Sprintf("updated: %d/%d", updatedReplicas, specReplicas-partition)
			return newInProgressStatus("PartitionRollout", message), nil
		}
		// Partition case All ok
		return &Result{
			Status:     CurrentStatus,
			Message:    fmt.Sprintf("Partition rollout complete. updated: %d", updatedReplicas),
			Conditions: []Condition{},
		}, nil
	}

	if specReplicas > currentReplicas {
		message := fmt.Sprintf("current: %d/%d", currentReplicas, specReplicas)
		return newInProgressStatus("LessCurrent", message), nil
	}

	// Revision: all pods must be on the updated revision before the
	// StatefulSet is considered Current.
	currentRevision := GetStringField(obj, ".status.currentRevision", "")
	updatedRevision := GetStringField(obj, ".status.updateRevision", "")
	if currentRevision != updatedRevision {
		message := "Waiting for updated revision to match current"
		return newInProgressStatus("RevisionMismatch", message), nil
	}

	// All ok
	return &Result{
		Status:     CurrentStatus,
		Message:    fmt.Sprintf("All replicas scheduled as expected. Replicas: %d", statusReplicas),
		Conditions: []Condition{},
	}, nil
}
// deploymentConditions return standardized Conditions for Deployment.
//
// For Deployments, we look at .status.conditions as well as the other properties
// under .status. Status will be Failed if the progress deadline has been exceeded.
func deploymentConditions(u *unstructured.Unstructured) (*Result, error) {
	obj := u.UnstructuredContent()

	progressing := false

	// Check if progressDeadlineSeconds is set. If not, the controller will not set
	// the `Progressing` condition, so it will always consider a deployment to be
	// progressing. The use of math.MaxInt32 is due to special handling in the
	// controller:
	// https://github.com/kubernetes/kubernetes/blob/a3ccea9d8743f2ff82e41b6c2af6dc2c41dc7b10/pkg/controller/deployment/util/deployment_util.go#L886
	progressDeadline := GetIntField(obj, ".spec.progressDeadlineSeconds", math.MaxInt32)
	if progressDeadline == math.MaxInt32 {
		progressing = true
	}

	available := false

	objc, err := GetObjectWithConditions(obj)
	if err != nil {
		return nil, err
	}

	// Scan the deployment's own conditions first; ProgressDeadlineExceeded
	// is the only way this function reports Failed.
	for _, c := range objc.Status.Conditions {
		switch c.Type {
		case "Progressing": //appsv1.DeploymentProgressing:
			// https://github.com/kubernetes/kubernetes/blob/a3ccea9d8743f2ff82e41b6c2af6dc2c41dc7b10/pkg/controller/deployment/progress.go#L52
			if c.Reason == "ProgressDeadlineExceeded" {
				return &Result{
					Status:     FailedStatus,
					Message:    "Progress deadline exceeded",
					Conditions: []Condition{{ConditionFailed, corev1.ConditionTrue, c.Reason, c.Message}},
				}, nil
			}
			if c.Status == corev1.ConditionTrue && c.Reason == "NewReplicaSetAvailable" {
				progressing = true
			}
		case "Available": //appsv1.DeploymentAvailable:
			if c.Status == corev1.ConditionTrue {
				available = true
			}
		}
	}

	// replicas
	specReplicas := GetIntField(obj, ".spec.replicas", 1) // Controller uses 1 as default if not specified.
	statusReplicas := GetIntField(obj, ".status.replicas", 0)
	updatedReplicas := GetIntField(obj, ".status.updatedReplicas", 0)
	readyReplicas := GetIntField(obj, ".status.readyReplicas", 0)
	availableReplicas := GetIntField(obj, ".status.availableReplicas", 0)

	// TODO spec.replicas zero case ??
	if specReplicas > statusReplicas {
		message := fmt.Sprintf("replicas: %d/%d", statusReplicas, specReplicas)
		return newInProgressStatus(tooFewReplicas, message), nil
	}

	if specReplicas > updatedReplicas {
		message := fmt.Sprintf("Updated: %d/%d", updatedReplicas, specReplicas)
		return newInProgressStatus(tooFewUpdated, message), nil
	}

	// More status replicas than updated ones means old pods are still
	// being terminated.
	if statusReplicas > updatedReplicas {
		message := fmt.Sprintf("Pending termination: %d", statusReplicas-updatedReplicas)
		return newInProgressStatus("ExtraPods", message), nil
	}

	if updatedReplicas > availableReplicas {
		message := fmt.Sprintf("Available: %d/%d", availableReplicas, updatedReplicas)
		return newInProgressStatus(tooFewAvailable, message), nil
	}

	if specReplicas > readyReplicas {
		message := fmt.Sprintf("Ready: %d/%d", readyReplicas, specReplicas)
		return newInProgressStatus(tooFewReady, message), nil
	}

	// check conditions
	if !progressing {
		message := "ReplicaSet not Available"
		return newInProgressStatus("ReplicaSetNotAvailable", message), nil
	}
	if !available {
		message := "Deployment not Available"
		return newInProgressStatus("DeploymentNotAvailable", message), nil
	}

	// All ok
	return &Result{
		Status:     CurrentStatus,
		Message:    fmt.Sprintf("Deployment is available. Replicas: %d", statusReplicas),
		Conditions: []Condition{},
	}, nil
}
// replicasetConditions return standardized Conditions for Replicaset
func replicasetConditions(u *unstructured.Unstructured) (*Result, error) {
	obj := u.UnstructuredContent()

	// Conditions. Note that a ReplicaFailure condition is reported as
	// InProgress here, not Failed.
	objc, err := GetObjectWithConditions(obj)
	if err != nil {
		return nil, err
	}

	for _, c := range objc.Status.Conditions {
		// https://github.com/kubernetes/kubernetes/blob/a3ccea9d8743f2ff82e41b6c2af6dc2c41dc7b10/pkg/controller/replicaset/replica_set_utils.go
		if c.Type == "ReplicaFailure" && c.Status == corev1.ConditionTrue {
			message := "Replica Failure condition. Check Pods"
			return newInProgressStatus("ReplicaFailure", message), nil
		}
	}

	// Replicas
	specReplicas := GetIntField(obj, ".spec.replicas", 1) // Controller uses 1 as default if not specified.
	statusReplicas := GetIntField(obj, ".status.replicas", 0)
	readyReplicas := GetIntField(obj, ".status.readyReplicas", 0)
	availableReplicas := GetIntField(obj, ".status.availableReplicas", 0)
	fullyLabelledReplicas := GetIntField(obj, ".status.fullyLabeledReplicas", 0)

	if specReplicas > fullyLabelledReplicas {
		message := fmt.Sprintf("Labelled: %d/%d", fullyLabelledReplicas, specReplicas)
		return newInProgressStatus("LessLabelled", message), nil
	}

	if specReplicas > availableReplicas {
		message := fmt.Sprintf("Available: %d/%d", availableReplicas, specReplicas)
		return newInProgressStatus(tooFewAvailable, message), nil
	}

	if specReplicas > readyReplicas {
		message := fmt.Sprintf("Ready: %d/%d", readyReplicas, specReplicas)
		return newInProgressStatus(tooFewReady, message), nil
	}

	// More pods reported than requested: scale-down still in progress.
	if specReplicas < statusReplicas {
		message := fmt.Sprintf("replicas: %d/%d", statusReplicas, specReplicas)
		return newInProgressStatus("ExtraPods", message), nil
	}

	// All ok
	return &Result{
		Status:     CurrentStatus,
		Message:    fmt.Sprintf("ReplicaSet is available. Replicas: %d", statusReplicas),
		Conditions: []Condition{},
	}, nil
}
// daemonsetConditions return standardized Conditions for DaemonSet
func daemonsetConditions(u *unstructured.Unstructured) (*Result, error) {
	obj := u.UnstructuredContent()

	// replicas. desiredNumberScheduled uses -1 as a sentinel: the field is
	// set by the controller, so its absence means status is not populated yet.
	desiredNumberScheduled := GetIntField(obj, ".status.desiredNumberScheduled", -1)
	currentNumberScheduled := GetIntField(obj, ".status.currentNumberScheduled", 0)
	updatedNumberScheduled := GetIntField(obj, ".status.updatedNumberScheduled", 0)
	numberAvailable := GetIntField(obj, ".status.numberAvailable", 0)
	numberReady := GetIntField(obj, ".status.numberReady", 0)

	if desiredNumberScheduled == -1 {
		message := "Missing .status.desiredNumberScheduled"
		return newInProgressStatus("NoDesiredNumber", message), nil
	}

	if desiredNumberScheduled > currentNumberScheduled {
		message := fmt.Sprintf("Current: %d/%d", currentNumberScheduled, desiredNumberScheduled)
		return newInProgressStatus("LessCurrent", message), nil
	}

	if desiredNumberScheduled > updatedNumberScheduled {
		message := fmt.Sprintf("Updated: %d/%d", updatedNumberScheduled, desiredNumberScheduled)
		return newInProgressStatus(tooFewUpdated, message), nil
	}

	if desiredNumberScheduled > numberAvailable {
		message := fmt.Sprintf("Available: %d/%d", numberAvailable, desiredNumberScheduled)
		return newInProgressStatus(tooFewAvailable, message), nil
	}

	if desiredNumberScheduled > numberReady {
		message := fmt.Sprintf("Ready: %d/%d", numberReady, desiredNumberScheduled)
		return newInProgressStatus(tooFewReady, message), nil
	}

	// All ok
	return &Result{
		Status:     CurrentStatus,
		Message:    fmt.Sprintf("All replicas scheduled as expected. Replicas: %d", desiredNumberScheduled),
		Conditions: []Condition{},
	}, nil
}
// pvcConditions return standardized Conditions for PVC
func pvcConditions(u *unstructured.Unstructured) (*Result, error) {
	// A claim is Current once its phase reaches Bound (corev1.ClaimBound);
	// any other phase is treated as still in progress.
	phase := GetStringField(u.UnstructuredContent(), ".status.phase", "unknown")
	if phase == "Bound" {
		return &Result{
			Status:     CurrentStatus,
			Message:    "PVC is Bound",
			Conditions: []Condition{},
		}, nil
	}
	message := fmt.Sprintf("PVC is not Bound. phase: %s", phase)
	return newInProgressStatus("NotBound", message), nil
}
// podConditions return standardized Conditions for Pod
//
// A pod is Current when it has either completed successfully
// (phase Succeeded) or reached the Ready condition; Failed when it
// completed without succeeding; otherwise InProgress.
func podConditions(u *unstructured.Unstructured) (*Result, error) {
	obj := u.UnstructuredContent()
	objc, err := GetObjectWithConditions(obj)
	if err != nil {
		return nil, err
	}
	phase := GetStringField(obj, ".status.phase", "unknown")

	if phase == "Succeeded" {
		return &Result{
			Status:     CurrentStatus,
			Message:    "Pod has completed successfully",
			Conditions: []Condition{},
		}, nil
	}

	for _, c := range objc.Status.Conditions {
		if c.Type != "Ready" {
			continue
		}
		if c.Status == corev1.ConditionTrue {
			return &Result{
				Status:     CurrentStatus,
				Message:    "Pod has reached the ready state",
				Conditions: []Condition{},
			}, nil
		}
		// phase is guaranteed not to be "Succeeded" here: that case
		// returned above, so the extra phase check the original carried
		// was redundant.
		if c.Status == corev1.ConditionFalse && c.Reason == "PodCompleted" {
			// Use one message for both Result and Condition; the original
			// duplicated it via a no-arg fmt.Sprintf (vet S1039) with a
			// "succeesfully" typo.
			message := "Pod has completed, but not successfully."
			return &Result{
				Status:  FailedStatus,
				Message: message,
				Conditions: []Condition{{
					Type:    ConditionFailed,
					Status:  corev1.ConditionTrue,
					Reason:  "PodFailed",
					Message: message,
				}},
			}, nil
		}
	}

	message := "Pod has not become ready"
	return newInProgressStatus("PodNotReady", message), nil
}
// pdbConditions computes the status for PodDisruptionBudgets. A PDB
// is currently considered Current if the disruption controller has
// observed the latest version of the PDB resource and has computed
// the AllowedDisruptions. PDBs do have ObservedGeneration in the
// Status object, so if this function gets called we know that
// the controller has observed the latest changes.
// The disruption controller does not set any conditions if
// computing the AllowedDisruptions fails (and there are many ways
// it can fail), but there is a PR against OSS Kubernetes to address
// this: https://github.com/kubernetes/kubernetes/pull/86929
func pdbConditions(_ *unstructured.Unstructured) (*Result, error) {
	// Nothing resource-specific to inspect, so the PDB is always Current
	// by the time this function runs.
	result := Result{
		Status:     CurrentStatus,
		Message:    "AllowedDisruptions has been computed.",
		Conditions: []Condition{},
	}
	return &result, nil
}
// jobConditions return standardized Conditions for Job
//
// A job will have the InProgress status until it starts running. Then it will have the Current
// status while the job is running and after it has been completed successfully. It
// will have the Failed status if the job has failed.
func jobConditions(u *unstructured.Unstructured) (*Result, error) {
	obj := u.UnstructuredContent()

	parallelism := GetIntField(obj, ".spec.parallelism", 1)
	// completions falls back to the parallelism value when unset.
	completions := GetIntField(obj, ".spec.completions", parallelism)
	succeeded := GetIntField(obj, ".status.succeeded", 0)
	active := GetIntField(obj, ".status.active", 0)
	failed := GetIntField(obj, ".status.failed", 0)
	starttime := GetStringField(obj, ".status.startTime", "")

	// Conditions
	// https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/job/utils.go#L24
	objc, err := GetObjectWithConditions(obj)
	if err != nil {
		return nil, err
	}
	for _, c := range objc.Status.Conditions {
		switch c.Type {
		case "Complete":
			if c.Status == corev1.ConditionTrue {
				message := fmt.Sprintf("Job Completed. succeeded: %d/%d", succeeded, completions)
				return &Result{
					Status:     CurrentStatus,
					Message:    message,
					Conditions: []Condition{},
				}, nil
			}
		case "Failed":
			if c.Status == corev1.ConditionTrue {
				message := fmt.Sprintf("Job Failed. failed: %d/%d", failed, completions)
				return &Result{
					Status:  FailedStatus,
					Message: message,
					Conditions: []Condition{{
						ConditionFailed,
						corev1.ConditionTrue,
						"JobFailed",
						fmt.Sprintf("Job Failed. failed: %d/%d", failed, completions),
					}},
				}, nil
			}
		}
	}

	// An empty start time means the job has not started running yet.
	if starttime == "" {
		message := "Job not started"
		return newInProgressStatus("JobNotStarted", message), nil
	}
	return &Result{
		Status:     CurrentStatus,
		Message:    fmt.Sprintf("Job in progress. success:%d, active: %d, failed: %d", succeeded, active, failed),
		Conditions: []Condition{},
	}, nil
}
// serviceConditions return standardized Conditions for Service
func serviceConditions(u *unstructured.Unstructured) (*Result, error) {
	content := u.UnstructuredContent()
	svcType := GetStringField(content, ".spec.type", "ClusterIP")
	clusterIP := GetStringField(content, ".spec.clusterIP", "")

	// Only LoadBalancer services are gated: they stay InProgress until a
	// cluster IP has been assigned.
	if svcType == "LoadBalancer" && clusterIP == "" {
		message := "ClusterIP not set. Service type: LoadBalancer"
		return newInProgressStatus("NoIPAssigned", message), nil
	}

	return &Result{
		Status:     CurrentStatus,
		Message:    "Service is ready",
		Conditions: []Condition{},
	}, nil
}

41
pkg/kstatus/status/doc.go Normal file
View File

@ -0,0 +1,41 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Package kstatus contains functionality for computing the status
// of Kubernetes resources.
//
// The statuses defined in this package are:
// * InProgress
// * Current
// * Failed
// * Terminating
// * Unknown
//
// Computing the status of a resource can be done by calling the
// Compute function in the status package.
//
// import (
// "sigs.k8s.io/cli-utils/pkg/kstatus/status"
// )
//
// res, err := status.Compute(resource)
//
// The package also defines a set of new conditions:
// * InProgress
// * Failed
// These conditions have been chosen to follow the
// "abnormal-true" pattern where conditions should be set to true
// for error/abnormal conditions and the absence of a condition means
// things are normal.
//
// The Augment function augments any unstructured resource with
// the standard conditions described above. The values of
// these conditions are decided based on other status information
// available in the resources.
//
// import (
//   "sigs.k8s.io/cli-utils/pkg/kstatus/status"
// )
//
// err := status.Augment(resource)
package status

View File

@ -0,0 +1,110 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package status_test
import (
"fmt"
"log"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
. "sigs.k8s.io/cli-utils/pkg/kstatus/status"
"sigs.k8s.io/yaml"
)
// ExampleCompute shows how to compute the aggregate status of a resource
// that has already been fetched from a cluster.
func ExampleCompute() {
	deploymentManifest := `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test
  generation: 1
  namespace: qual
status:
  observedGeneration: 1
  updatedReplicas: 1
  readyReplicas: 1
  availableReplicas: 1
  replicas: 1
  conditions:
  - type: Progressing
    status: "True"
    reason: NewReplicaSetAvailable
  - type: Available
    status: "True"
`
	deployment := yamlManifestToUnstructured(deploymentManifest)

	res, err := Compute(deployment)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Status)

	// Output:
	// Current
}
// ExampleAugment shows how Augment amends a resource's .status.conditions
// in place. Here the deployment is already Current, so no standard
// conditions are added and the output equals the input.
func ExampleAugment() {
	deploymentManifest := `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test
  generation: 1
  namespace: qual
status:
  observedGeneration: 1
  updatedReplicas: 1
  readyReplicas: 1
  availableReplicas: 1
  replicas: 1
  conditions:
  - type: Progressing
    status: "True"
    reason: NewReplicaSetAvailable
  - type: Available
    status: "True"
`
	deployment := yamlManifestToUnstructured(deploymentManifest)

	err := Augment(deployment)
	if err != nil {
		log.Fatal(err)
	}
	b, err := yaml.Marshal(deployment.Object)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))

	// Output:
	// apiVersion: apps/v1
	// kind: Deployment
	// metadata:
	//   generation: 1
	//   name: test
	//   namespace: qual
	// status:
	//   availableReplicas: 1
	//   conditions:
	//   - reason: NewReplicaSetAvailable
	//     status: "True"
	//     type: Progressing
	//   - status: "True"
	//     type: Available
	//   observedGeneration: 1
	//   readyReplicas: 1
	//   replicas: 1
	//   updatedReplicas: 1
}
// yamlManifestToUnstructured parses a YAML manifest into an Unstructured,
// terminating the example process on any parse or decode error.
func yamlManifestToUnstructured(manifest string) *unstructured.Unstructured {
	jsonData, err := yaml.YAMLToJSON([]byte(manifest))
	if err != nil {
		log.Fatal(err)
	}
	obj, _, err := unstructured.UnstructuredJSONScheme.Decode(jsonData, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	return obj.(*unstructured.Unstructured)
}

View File

@ -0,0 +1,100 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package status
import (
"fmt"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// checkGenericProperties looks at the properties that are available on
// all or most of the Kubernetes resources. If a decision can be made based
// on this information, there is no need to look at the resource-specific
// rules.
// This also checks for the presence of the conditions defined in this package.
// If any of these are set on the resource, a decision is made solely based
// on this and none of the resource specific rules will be used. The goal here
// is that if controllers, built-in or custom, use these conditions, we can easily
// find status of resources.
//
// Returns (nil, nil) when no decision could be made from the generic
// properties alone.
func checkGenericProperties(u *unstructured.Unstructured) (*Result, error) {
	obj := u.UnstructuredContent()

	// Check if the resource is scheduled for deletion
	deletionTimestamp, found, err := unstructured.NestedString(obj, "metadata", "deletionTimestamp")
	if err != nil {
		return nil, errors.Wrap(err, "looking up metadata.deletionTimestamp from resource")
	}
	if found && deletionTimestamp != "" {
		return &Result{
			Status:     TerminatingStatus,
			Message:    "Resource scheduled for deletion",
			Conditions: []Condition{},
		}, nil
	}

	// An unobserved generation means the controller has not yet acted on
	// the latest spec, so the resource is reported InProgress.
	res, err := checkGeneration(u)
	if res != nil || err != nil {
		return res, err
	}

	// Check if the resource has any of the standard conditions. If so, we just use them
	// and no need to look at anything else.
	objWithConditions, err := GetObjectWithConditions(obj)
	if err != nil {
		return nil, err
	}

	for _, cond := range objWithConditions.Status.Conditions {
		if cond.Type == string(ConditionInProgress) && cond.Status == corev1.ConditionTrue {
			return newInProgressStatus(cond.Reason, cond.Message), nil
		}
		if cond.Type == string(ConditionFailed) && cond.Status == corev1.ConditionTrue {
			return &Result{
				Status:  FailedStatus,
				Message: cond.Message,
				Conditions: []Condition{
					{
						Type:    ConditionFailed,
						Status:  corev1.ConditionTrue,
						Reason:  cond.Reason,
						Message: cond.Message,
					},
				},
			}, nil
		}
	}

	return nil, nil
}
// checkGeneration returns an InProgress result when status.observedGeneration
// lags behind metadata.generation, and (nil, nil) otherwise — including when
// either field is absent and the comparison cannot be made.
func checkGeneration(u *unstructured.Unstructured) (*Result, error) {
	// ensure that the meta generation is observed
	generation, found, err := unstructured.NestedInt64(u.Object, "metadata", "generation")
	if err != nil {
		return nil, errors.Wrap(err, "looking up metadata.generation from resource")
	}
	if !found {
		return nil, nil
	}
	observedGeneration, found, err := unstructured.NestedInt64(u.Object, "status", "observedGeneration")
	if err != nil {
		return nil, errors.Wrap(err, "looking up status.observedGeneration from resource")
	}
	// If the resource does not expose status.observedGeneration we can't do
	// this check (the original comment sat inside the if-block, which read
	// backwards).
	// TODO(mortent): Verify behavior of not set vs does not exist.
	if found {
		if observedGeneration != generation {
			message := fmt.Sprintf("%s generation is %d, but latest observed generation is %d", u.GetKind(), generation, observedGeneration)
			return &Result{
				Status:     InProgressStatus,
				Message:    message,
				Conditions: []Condition{newInProgressCondition("LatestGenerationNotObserved", message)},
			}, nil
		}
	}
	return nil, nil
}

View File

@ -0,0 +1,187 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package status
import (
"fmt"
"time"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
	// The set of standard conditions defined in this package. These follow the "abnormality-true"
	// convention where conditions should have a true value for abnormal/error situations and the absence
	// of a condition should be interpreted as a false value, i.e. everything is normal.
	ConditionFailed     ConditionType = "Failed"
	ConditionInProgress ConditionType = "InProgress"

	// The set of status conditions which can be assigned to resources.
	InProgressStatus  Status = "InProgress"
	FailedStatus      Status = "Failed"
	CurrentStatus     Status = "Current"
	TerminatingStatus Status = "Terminating"
	UnknownStatus     Status = "Unknown"
)

var (
	// Statuses lists every Status value defined in this package.
	Statuses = []Status{InProgressStatus, FailedStatus, CurrentStatus, TerminatingStatus, UnknownStatus}
	// ConditionTypes lists every ConditionType value defined in this package.
	ConditionTypes = []ConditionType{ConditionFailed, ConditionInProgress}
)

// ConditionType defines the set of condition types allowed inside a Condition struct.
type ConditionType string

// String returns the ConditionType as a string.
func (c ConditionType) String() string {
	return string(c)
}

// Status defines the set of statuses a resource can have.
type Status string

// String returns the status as a string.
func (s Status) String() string {
	return string(s)
}
// FromStringOrDie turns a string into a Status. Will panic if the provided string is
// not a valid status. (The original comment referred to a nonexistent
// "StatusFromString".)
func FromStringOrDie(text string) Status {
	s := Status(text)
	for _, r := range Statuses {
		if s == r {
			return s
		}
	}
	panic(fmt.Errorf("string has invalid status: %s", s))
}
// Result contains the results of a call to compute the status of
// a resource.
type Result struct {
	// Status is the aggregate status computed for the resource.
	Status Status
	// Message is a human-readable explanation of the status.
	Message string
	// Conditions list of extracted conditions from Resource
	Conditions []Condition
}

// Condition defines the general format for conditions on Kubernetes resources.
// In practice, each kubernetes resource defines their own format for conditions, but
// most (maybe all) follows this structure.
type Condition struct {
	// Type condition type
	Type ConditionType
	// Status String that describes the condition status
	Status corev1.ConditionStatus
	// Reason one word CamelCase reason
	Reason string
	// Message Human readable reason string
	Message string
}
// Compute finds the status of a given unstructured resource. It does not
// fetch the state of the resource from a cluster, so the provided unstructured
// must have the complete state, including status.
//
// The returned result contains the status of the resource, which will be
// one of
// * InProgress
// * Current
// * Failed
// * Terminating
// It also contains a message that provides more information on why
// the resource has the given status. Finally, the result also contains
// a list of standard resources that would belong on the given resource.
func Compute(u *unstructured.Unstructured) (*Result, error) {
	res, err := checkGenericProperties(u)
	if err != nil {
		return nil, err
	}
	// If res is not nil, it means the generic checks was able to determine
	// the status of the resource. We don't need to check the type-specific
	// rules.
	if res != nil {
		return res, nil
	}

	if fn := GetLegacyConditionsFn(u); fn != nil {
		return fn(u)
	}

	// The resource is not one of the built-in types with specific
	// rules and we were unable to make a decision based on the
	// generic rules. In this case we assume that the absence of any known
	// conditions means the resource is current.
	// Return a literal nil error rather than the stale (necessarily nil)
	// err variable the original returned, which read as a possible bug.
	return &Result{
		Status:     CurrentStatus,
		Message:    "Resource is current",
		Conditions: []Condition{},
	}, nil
}
// Augment takes a resource and augments the resource with the
// standard status conditions.
//
// Conditions computed by Compute are merged into the resource's existing
// .status.conditions slice: an entry of the same type is updated in place
// (bumping lastTransitionTime only when the status value actually changes);
// otherwise a new entry is appended.
func Augment(u *unstructured.Unstructured) error {
	res, err := Compute(u)
	if err != nil {
		return err
	}

	conditions, found, err := unstructured.NestedSlice(u.Object, "status", "conditions")
	if err != nil {
		return err
	}
	if !found {
		conditions = make([]interface{}, 0)
	}
	currentTime := time.Now().UTC().Format(time.RFC3339)

	for _, resCondition := range res.Conditions {
		present := false
		for _, c := range conditions {
			condition, ok := c.(map[string]interface{})
			if !ok {
				return errors.New("condition does not have the expected structure")
			}
			conditionType, ok := condition["type"].(string)
			if !ok {
				return errors.New("condition type does not have the expected type")
			}
			if conditionType == string(resCondition.Type) {
				conditionStatus, ok := condition["status"].(string)
				if !ok {
					return errors.New("condition status does not have the expected type")
				}
				// Only bump lastTransitionTime when the status value
				// actually transitions.
				if conditionStatus != string(resCondition.Status) {
					condition["lastTransitionTime"] = currentTime
				}
				condition["status"] = string(resCondition.Status)
				condition["lastUpdateTime"] = currentTime
				condition["reason"] = resCondition.Reason
				condition["message"] = resCondition.Message
				present = true
			}
		}
		if !present {
			conditions = append(conditions, map[string]interface{}{
				"lastTransitionTime": currentTime,
				"lastUpdateTime":     currentTime,
				"message":            resCondition.Message,
				"reason":             resCondition.Reason,
				"status":             string(resCondition.Status),
				"type":               string(resCondition.Type),
			})
		}
	}
	return unstructured.SetNestedSlice(u.Object, conditions, "status", "conditions")
}

View File

@ -0,0 +1,166 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package status
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// pod is a minimal running Pod manifest used as the base for most of the
// Augment test cases below.
var pod = `
apiVersion: v1
kind: Pod
metadata:
  generation: 1
  name: test
  namespace: qual
status:
  phase: Running
`

// custom is a custom-resource manifest with no status, used to verify that
// Augment adds no conditions for unknown-but-current resources.
var custom = `
apiVersion: v1beta1
kind: SomeCustomKind
metadata:
  generation: 1
  name: test
  namespace: default
`

// timestamp is a fixed time in the recent past used as the pre-existing
// lastTransitionTime/lastUpdateTime on injected conditions.
var timestamp = time.Now().Add(-1 * time.Minute).UTC().Format(time.RFC3339)

// addConditions sets the given conditions on the resource's
// .status.conditions, failing the test on error.
func addConditions(t *testing.T, u *unstructured.Unstructured, conditions []map[string]interface{}) {
	conds := make([]interface{}, 0)
	for _, c := range conditions {
		conds = append(conds, c)
	}
	err := unstructured.SetNestedSlice(u.Object, conds, "status", "conditions")
	if err != nil {
		t.Fatal(err)
	}
}
// TestAugmentConditions verifies that Augment merges the standard computed
// conditions with whatever conditions already exist on the resource:
// unrelated conditions are preserved, and pre-existing conditions of a
// standard type are updated rather than duplicated.
func TestAugmentConditions(t *testing.T) {
	testCases := map[string]struct {
		manifest           string
		withConditions     []map[string]interface{}
		expectedConditions []Condition
	}{
		"no existing conditions": {
			manifest:       pod,
			withConditions: []map[string]interface{}{},
			expectedConditions: []Condition{
				{
					Type:   ConditionInProgress,
					Status: corev1.ConditionTrue,
					Reason: "PodNotReady",
				},
			},
		},
		"has other existing conditions": {
			manifest: pod,
			withConditions: []map[string]interface{}{
				{
					"lastTransitionTime": timestamp,
					"lastUpdateTime":     timestamp,
					"type":               "Ready",
					"status":             "False",
					"reason":             "Pod has not started",
				},
			},
			expectedConditions: []Condition{
				{
					Type:   ConditionInProgress,
					Status: corev1.ConditionTrue,
					Reason: "PodNotReady",
				},
				{
					Type:   "Ready",
					Status: corev1.ConditionFalse,
					Reason: "Pod has not started",
				},
			},
		},
		"already has condition of standard type InProgress": {
			manifest: pod,
			withConditions: []map[string]interface{}{
				{
					"lastTransitionTime": timestamp,
					"lastUpdateTime":     timestamp,
					"type":               ConditionInProgress.String(),
					"status":             "True",
					"reason":             "PodIsAbsolutelyNotReady",
				},
			},
			expectedConditions: []Condition{
				{
					Type:   ConditionInProgress,
					Status: corev1.ConditionTrue,
					Reason: "PodIsAbsolutelyNotReady",
				},
			},
		},
		"already has condition of standard type Failed": {
			manifest: pod,
			withConditions: []map[string]interface{}{
				{
					"lastTransitionTime": timestamp,
					"lastUpdateTime":     timestamp,
					"type":               ConditionFailed.String(),
					"status":             "True",
					"reason":             "PodHasFailed",
				},
			},
			expectedConditions: []Condition{
				{
					Type:   ConditionFailed,
					Status: corev1.ConditionTrue,
					Reason: "PodHasFailed",
				},
			},
		},
		"custom resource with no conditions": {
			manifest:           custom,
			withConditions:     []map[string]interface{}{},
			expectedConditions: []Condition{},
		},
	}

	for tn, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tn, func(t *testing.T) {
			u := y2u(t, tc.manifest)
			addConditions(t, u, tc.withConditions)

			err := Augment(u)
			if err != nil {
				t.Error(err)
			}

			obj, err := GetObjectWithConditions(u.Object)
			if err != nil {
				t.Error(err)
			}
			// Exact count guards against duplicated conditions.
			assert.Equal(t, len(tc.expectedConditions), len(obj.Status.Conditions))

			for _, expectedCondition := range tc.expectedConditions {
				found := false
				for _, condition := range obj.Status.Conditions {
					if expectedCondition.Type.String() != condition.Type {
						continue
					}
					found = true
					assert.Equal(t, expectedCondition.Type.String(), condition.Type)
					assert.Equal(t, expectedCondition.Reason, condition.Reason)
				}
				assert.True(t, found)
			}
		})
	}
}

File diff suppressed because it is too large Load Diff

112
pkg/kstatus/status/util.go Normal file
View File

@ -0,0 +1,112 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package status
import (
"strings"
corev1 "k8s.io/api/core/v1"
apiunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
)
// newInProgressCondition builds the standard InProgress condition carrying
// the supplied reason and message.
func newInProgressCondition(reason, message string) Condition {
	cond := Condition{
		Type:    ConditionInProgress,
		Status:  corev1.ConditionTrue,
		Reason:  reason,
		Message: message,
	}
	return cond
}

// newInProgressStatus wraps newInProgressCondition in a Result whose
// aggregate status is InProgress.
func newInProgressStatus(reason, message string) *Result {
	cond := newInProgressCondition(reason, message)
	return &Result{
		Status:     InProgressStatus,
		Message:    message,
		Conditions: []Condition{cond},
	}
}
// ObjWithConditions Represent meta object with status.condition array
type ObjWithConditions struct {
	// Status as expected to be present in most compliant kubernetes resources
	Status ConditionStatus `json:"status" yaml:"status"`
}

// ConditionStatus represent status with condition array
type ConditionStatus struct {
	// Array of Conditions as expected to be present in kubernetes resources
	Conditions []BasicCondition `json:"conditions" yaml:"conditions"`
}

// BasicCondition fields that are expected in a condition
type BasicCondition struct {
	// Type Condition type
	Type string `json:"type" yaml:"type"`
	// Status is one of True,False,Unknown
	Status corev1.ConditionStatus `json:"status" yaml:"status"`
	// Reason simple single word reason in CamelCase
	// +optional
	Reason string `json:"reason,omitempty" yaml:"reason"`
	// Message human readable reason
	// +optional
	Message string `json:"message,omitempty" yaml:"message"`
}

// GetObjectWithConditions converts an unstructured content map into the
// typed ObjWithConditions wrapper so that status.conditions can be
// inspected uniformly across resource kinds.
func GetObjectWithConditions(in map[string]interface{}) (*ObjWithConditions, error) {
	var out = new(ObjWithConditions)
	err := runtime.DefaultUnstructuredConverter.FromUnstructured(in, out)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// GetStringField return field as string defaulting to value if not found
// or if the value is not a string.
func GetStringField(obj map[string]interface{}, fieldPath string, defaultValue string) string {
	segments := strings.Split(fieldPath, ".")
	// Tolerate a leading ".", e.g. ".spec.type".
	if segments[0] == "" {
		segments = segments[1:]
	}
	val, found, err := apiunstructured.NestedFieldNoCopy(obj, segments...)
	if err != nil || !found {
		return defaultValue
	}
	s, ok := val.(string)
	if !ok {
		return defaultValue
	}
	return s
}
// GetIntField return field as int defaulting to value if not found
func GetIntField(obj map[string]interface{}, fieldPath string, defaultValue int) int {
	// A leading "." in the path produces an empty first segment; drop it.
	fields := strings.Split(fieldPath, ".")
	if fields[0] == "" {
		fields = fields[1:]
	}
	val, found, err := apiunstructured.NestedFieldNoCopy(obj, fields...)
	if !found || err != nil {
		return defaultValue
	}
	// Accept any integer width. Other numeric types (e.g. float64) fall
	// through to the default value.
	switch v := val.(type) {
	case int:
		return v
	case int32:
		return int(v)
	case int64:
		return int(v)
	}
	return defaultValue
}

View File

@ -0,0 +1,59 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package status
import (
"testing"
"github.com/stretchr/testify/assert"
)
// testObj is a shared fixture: a nested unstructured-style map exercising
// integer widths, floats, string values, and a status.conditions array.
var testObj = map[string]interface{}{
	"f1": map[string]interface{}{
		"f2": map[string]interface{}{
			"i32":   int32(32),
			"i64":   int64(64),
			"float": 64.02,
			"ms": []interface{}{
				map[string]interface{}{"f1f2ms0f1": 22},
				map[string]interface{}{"f1f2ms1f1": "index1"},
			},
			// msbad mixes maps and a plain int within one slice.
			"msbad": []interface{}{
				map[string]interface{}{"f1f2ms0f1": 22},
				32,
			},
		},
	},
	"ride": "dragon",
	"status": map[string]interface{}{
		"conditions": []interface{}{
			map[string]interface{}{"f1f2ms0f1": 22},
			map[string]interface{}{"f1f2ms1f1": "index1"},
		},
	},
}
// TestGetIntField verifies int32/int64 lookups succeed, and that missing
// fields and float values fall back to the default.
func TestGetIntField(t *testing.T) {
	v := GetIntField(testObj, ".f1.f2.i32", -1)
	assert.Equal(t, int(32), v)
	v = GetIntField(testObj, ".f1.f2.wrongname", -1)
	assert.Equal(t, int(-1), v)
	v = GetIntField(testObj, ".f1.f2.i64", -1)
	assert.Equal(t, int(64), v)
	// Floats are intentionally not coerced to int.
	v = GetIntField(testObj, ".f1.f2.float", -1)
	assert.Equal(t, int(-1), v)
}
// TestGetStringField verifies a present string field is returned and a
// missing field yields the provided default.
func TestGetStringField(t *testing.T) {
	v := GetStringField(testObj, ".ride", "horse")
	assert.Equal(t, v, "dragon")
	v = GetStringField(testObj, ".destination", "north")
	assert.Equal(t, v, "north")
}

58
pkg/kstatus/wait/doc.go Normal file
View File

@ -0,0 +1,58 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
// Package wait contains functionality for getting the statuses
// of a list of kubernetes resources. Unlike the status package,
// the functions exposed in the wait package will talk to a
// live kubernetes cluster to get the latest state of resources
// and provides functionality for polling the cluster until the
// resources reach the Current status.
//
// FetchAndResolve will fetch resources from a cluster, compute the
// status for each of them and then return the results. The list of
// resources is defined as a slice of ResourceIdentifier, which is
// an interface that is implemented by the Unstructured type. It
// only requires functions for getting the apiVersion, kind, name
// and namespace of a resource.
//
// import (
// "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
// "k8s.io/apimachinery/pkg/types"
// "sigs.k8s.io/cli-utils/pkg/kstatus/wait"
// )
//
// key := types.NamespacedName{Name: "name", Namespace: "namespace"}
// deployment := &unstructured.Unstructured{
// Object: map[string]interface{}{
// "apiVersion": "apps/v1",
// "kind": "Deployment",
// },
// }
// client.Get(context.Background(), key, deployment)
// resourceIdentifiers := []wait.ResourceIdentifier{deployment}
//
// resolver := wait.NewResolver(client)
// results := resolver.FetchAndResolve(context.Background(), resourceIdentifiers)
//
// WaitForStatus also looks up status for a list of resources, but it will
// block until all the provided resources have reached the Current status or
// the wait is cancelled through the passed-in context. The function returns
// a channel that will provide updates as the status of the different
// resources change.
//
// import (
// "sigs.k8s.io/cli-utils/pkg/kstatus/wait"
// )
//
// resolver := wait.NewResolver(client)
// eventsChan := resolver.WaitForStatus(context.Background(), resourceIdentifiers, 2 * time.Second)
// for {
// select {
// case event, ok := <-eventsChan:
// if !ok {
// return
// }
//       fmt.Println(event) // do something useful here.
// }
// }
package wait

35
pkg/kstatus/wait/util.go Normal file
View File

@ -0,0 +1,35 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package wait
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// resourceIdentifierFromObject extracts the name, namespace and GroupKind
// from the given object to build its ResourceIdentifier.
func resourceIdentifierFromObject(object KubernetesObject) ResourceIdentifier {
	gk := object.GroupVersionKind().GroupKind()
	return ResourceIdentifier{
		GroupKind: gk,
		Name:      object.GetName(),
		Namespace: object.GetNamespace(),
	}
}
// resourceIdentifiersFromObjects maps each object to its ResourceIdentifier.
// A nil slice is returned for empty input, preserving append semantics.
func resourceIdentifiersFromObjects(objects []KubernetesObject) []ResourceIdentifier {
	var ids []ResourceIdentifier
	for _, obj := range objects {
		ids = append(ids, resourceIdentifierFromObject(obj))
	}
	return ids
}
// resourceIdentifierFromRuntimeObject builds a ResourceIdentifier from a
// runtime.Object. The object must also implement metav1.Object; the type
// assertion panics otherwise, matching the original behavior.
func resourceIdentifierFromRuntimeObject(object runtime.Object) ResourceIdentifier {
	accessor := object.(metav1.Object)
	gk := object.GetObjectKind().GroupVersionKind().GroupKind()
	return ResourceIdentifier{
		Name:      accessor.GetName(),
		Namespace: accessor.GetNamespace(),
		GroupKind: gk,
	}
}

332
pkg/kstatus/wait/wait.go Normal file
View File

@ -0,0 +1,332 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package wait
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/cli-utils/pkg/kstatus/status"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
	// defaultNamespace is assumed for namespace-scoped resources that do
	// not specify a namespace; see fetchResource and ResourceIdentifier.Equals.
	defaultNamespace = "default"
)
// KubernetesObject defines the functions needed to identify
// a resource in a cluster. This interface is implemented by
// both unstructured.Unstructured and the standard Kubernetes types.
type KubernetesObject interface {
	GetName() string
	GetNamespace() string
	GroupVersionKind() schema.GroupVersionKind
}
// ResourceIdentifier contains the information needed to uniquely
// identify a resource in a cluster. It is comparable, so it can be used
// as a map key (see waitState.ResourceWaitStates).
type ResourceIdentifier struct {
	// Name of the resource.
	Name string
	// Namespace of the resource; may be empty for cluster-scoped resources
	// or when the default namespace is implied.
	Namespace string
	// GroupKind of the resource. The version is resolved separately
	// through the RESTMapper.
	GroupKind schema.GroupKind
}
// Equals reports whether r and other refer to the same resource. An empty
// namespace and the "default" namespace are treated as equivalent, since
// namespace-scoped resources without an explicit namespace are defaulted.
func (r ResourceIdentifier) Equals(other ResourceIdentifier) bool {
	if r.GroupKind != other.GroupKind || r.Name != other.Name {
		return false
	}
	if r.Namespace == other.Namespace {
		return true
	}
	return (r.Namespace == "" && other.Namespace == defaultNamespace) ||
		(r.Namespace == defaultNamespace && other.Namespace == "")
}
// Resolver provides the functions for resolving status of a list of resources.
// Construct it with NewResolver.
type Resolver struct {
	// client is the client used to talk
	// with the cluster. It uses the Reader interface
	// from controller-runtime.
	client client.Reader
	// mapper is the RESTMapper needed to look up mappings
	// for resource types.
	mapper meta.RESTMapper
	// statusComputeFunc defines which function should be used for computing
	// the status of a resource. This is available for testing purposes.
	statusComputeFunc func(u *unstructured.Unstructured) (*status.Result, error)
	// pollInterval defines the frequency with which the resolver should poll
	// the cluster for the state of resources. More frequent polling will
	// lead to more load on the cluster.
	pollInterval time.Duration
}
// NewResolver creates a new resolver with the provided client. Fetching
// and polling of resources will be done using the provided client.
// The status of each resource is computed with status.Compute.
func NewResolver(client client.Reader, mapper meta.RESTMapper, pollInterval time.Duration) *Resolver {
	return &Resolver{
		client:            client,
		mapper:            mapper,
		statusComputeFunc: status.Compute,
		pollInterval:      pollInterval,
	}
}
// ResourceResult is the status result for a given resource. It provides
// information about the resource if the request was successful and an
// error if something went wrong.
type ResourceResult struct {
	// Result holds the computed status for the resource.
	Result *status.Result
	// ResourceIdentifier identifies the resource this result pertains to.
	ResourceIdentifier ResourceIdentifier
	// Error is set if the resource could not be fetched or its status
	// could not be computed.
	Error error
}
// FetchAndResolveObjects returns the status for a list of kubernetes objects. These can be provided
// either as Unstructured resources or the specific resource types. It will return the status for each
// of them individually. The provided resources will only be used to get the information needed to
// fetch the updated state of the resources from the cluster.
func (r *Resolver) FetchAndResolveObjects(ctx context.Context, objects []KubernetesObject) []ResourceResult {
	ids := resourceIdentifiersFromObjects(objects)
	return r.FetchAndResolve(ctx, ids)
}
// FetchAndResolve returns the status for a list of ResourceIdentifiers. It will return
// the status for each of them individually. Results are appended in the same
// order as the input identifiers.
func (r *Resolver) FetchAndResolve(ctx context.Context, resourceIDs []ResourceIdentifier) []ResourceResult {
	var results []ResourceResult
	for _, resourceID := range resourceIDs {
		u, err := r.fetchResource(ctx, resourceID)
		if err != nil {
			// A resource that does not exist is deliberately reported as
			// Current rather than as an error: a deleted resource is
			// considered to have reached its final state.
			if k8serrors.IsNotFound(errors.Cause(err)) {
				results = append(results, ResourceResult{
					ResourceIdentifier: resourceID,
					Result: &status.Result{
						Status:  status.CurrentStatus,
						Message: "Resource does not exist",
					},
				})
			} else {
				// Any other fetch error yields Unknown status; the
				// underlying error is surfaced in the result.
				results = append(results, ResourceResult{
					Result: &status.Result{
						Status:  status.UnknownStatus,
						Message: fmt.Sprintf("Error fetching resource from cluster: %v", err),
					},
					ResourceIdentifier: resourceID,
					Error:              err,
				})
			}
			continue
		}
		// Compute status from the freshly fetched state; a compute error
		// is recorded alongside whatever result was returned.
		res, err := r.statusComputeFunc(u)
		results = append(results, ResourceResult{
			Result:             res,
			ResourceIdentifier: resourceID,
			Error:              err,
		})
	}
	return results
}
// Event is returned through the channel returned after a call
// to WaitForStatus. It contains an update to either an individual
// resource or to the aggregate status for the set of resources.
type Event struct {
	// Type defines which type of event this is.
	Type EventType
	// AggregateStatus is the aggregated status for all the provided resources.
	AggregateStatus status.Status
	// EventResource is information about the resource to which this event
	// pertains. This is only populated for ResourceUpdate events.
	EventResource *EventResource
}
// EventType describes the kind of update carried by an Event.
type EventType string

const (
	// ResourceUpdate means the status/message for a resource has changed.
	// This also means the aggregate status might have changed.
	ResourceUpdate EventType = "ResourceUpdate"
	// Completed means all resources have reached the Current status.
	Completed EventType = "Completed"
	// Aborted means the wait was stopped before all resources could reach
	// the Current status.
	Aborted EventType = "Aborted"
)
// EventResource contains information about the resource to which
// a specific Event pertains.
type EventResource struct {
	// Identifier contains information that identifies which resource
	// this information is about.
	ResourceIdentifier ResourceIdentifier
	// Status is the latest status for the given resource.
	Status status.Status
	// Message is more details about the status.
	Message string
	// Error is set if there was a problem identifying the status
	// of the resource. For example, if polling the cluster for information
	// about the resource failed.
	Error error
}
// WaitForStatusOfObjects polls all the provided resources until all of them have reached
// the Current status or the timeout specified through the context is reached. Updates on
// the status of individual resources and the aggregate status is provided through the
// Event channel. It converts the objects to ResourceIdentifiers and delegates
// to WaitForStatus.
func (r *Resolver) WaitForStatusOfObjects(ctx context.Context, objects []KubernetesObject) <-chan Event {
	resourceIds := resourceIdentifiersFromObjects(objects)
	return r.WaitForStatus(ctx, resourceIds)
}
// WaitForStatus polls all the resources references by the provided ResourceIdentifiers until
// all of them have reached the Current status or the timeout specified through the context is
// reached. Updates on the status of individual resources and the aggregate status is provided
// through the Event channel. The returned channel is closed once the wait
// completes or is aborted.
func (r *Resolver) WaitForStatus(ctx context.Context, resources []ResourceIdentifier) <-chan Event {
	eventChan := make(chan Event)
	go func() {
		ticker := time.NewTicker(r.pollInterval)
		defer func() {
			ticker.Stop()
			// Make sure the channel is closed so consumers can detect that
			// we have completed.
			close(eventChan)
		}()
		// No need to wait if we have no resources. We consider
		// this a situation where the status is Current.
		if len(resources) == 0 {
			eventChan <- Event{
				Type:            Completed,
				AggregateStatus: status.CurrentStatus,
				EventResource:   nil,
			}
			return
		}
		// Initiate a new waitStatus object to keep track of the
		// resources while polling the state.
		waitState := newWaitState(resources, r.statusComputeFunc)
		// Check all resources immediately. If the aggregate status is already
		// Current, we can exit immediately.
		if r.checkAllResources(ctx, waitState, eventChan) {
			return
		}
		// Loop until either all resources have reached the Current status
		// or until the wait is cancelled through the context. In both cases
		// we will break out of the loop by returning from the function.
		for {
			select {
			case <-ctx.Done():
				// The context has been cancelled, so report the most recent
				// aggregate status, report it through the channel and then
				// break out of the loop (which will close the channel).
				eventChan <- Event{
					Type:            Aborted,
					AggregateStatus: waitState.AggregateStatus(),
				}
				return
			case <-ticker.C:
				// Every time the ticker fires, we check the status of all
				// resources. If the aggregate status has reached Current, checkAllResources
				// will return true. If so, we just return.
				if r.checkAllResources(ctx, waitState, eventChan) {
					return
				}
			}
		}
	}()
	return eventChan
}
// checkAllResources fetches all resources from the cluster,
// checks if their status has changed and send an event for each resource
// with a new status. In each event, we also include the latest aggregate
// status. Finally, if the aggregate status becomes Current, send a final
// Completed type event. If the aggregate status has become Current, this function
// will return true to signal that it is done.
func (r *Resolver) checkAllResources(ctx context.Context, waitState *waitState, eventChan chan Event) bool {
	for resourceID := range waitState.ResourceWaitStates {
		// Fetch the latest state of the resource. A fetch error is not
		// fatal here; it is recorded through ResourceObserved below.
		u, err := r.fetchResource(ctx, resourceID)
		eventResource, updateObserved := waitState.ResourceObserved(resourceID, u, err)
		// Find the aggregate status based on the new state for this resource.
		aggStatus := waitState.AggregateStatus()
		// We want events for changes in status for each resource, so send
		// an event for this resource before checking if the aggregate status
		// has become Current.
		if updateObserved {
			eventChan <- Event{
				Type:            ResourceUpdate,
				AggregateStatus: aggStatus,
				EventResource:   &eventResource,
			}
		}
		// If aggregate status is Current, we are done!
		if aggStatus == status.CurrentStatus {
			eventChan <- Event{
				Type:            Completed,
				AggregateStatus: status.CurrentStatus,
			}
			return true
		}
	}
	return false
}
// fetchResource gets the resource given by the identifier from the cluster
// through the client available in the Resolver. It returns the resource
// as an Unstructured. Errors from the client are wrapped; the caller can
// recover the cause (e.g. NotFound) with errors.Cause.
func (r *Resolver) fetchResource(ctx context.Context, identifier ResourceIdentifier) (*unstructured.Unstructured, error) {
	// We need to look up the preferred version for the GroupKind and
	// whether the resource type is cluster scoped. We look this
	// up with the RESTMapper.
	mapping, err := r.mapper.RESTMapping(identifier.GroupKind)
	if err != nil {
		return nil, err
	}
	// Resources might not have the namespace set, which means we need to set
	// it to `default` if the resource is namespace scoped.
	namespace := identifier.Namespace
	if namespace == "" && mapping.Scope.Name() == meta.RESTScopeNameNamespace {
		namespace = defaultNamespace
	}
	key := types.NamespacedName{Name: identifier.Name, Namespace: namespace}
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(mapping.GroupVersionKind)
	err = r.client.Get(ctx, key, u)
	if err != nil {
		return nil, errors.Wrap(err, "error fetching resource from cluster")
	}
	return u, nil
}

View File

@ -0,0 +1,597 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package wait
import (
"context"
"fmt"
"reflect"
"testing"
"time"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/cli-utils/pkg/kstatus/status"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
const (
	// testTimeout bounds how long wait-based tests may run before being
	// treated as hung.
	testTimeout = 1 * time.Minute
	// testPollInterval is the poll frequency used by resolvers in tests.
	testPollInterval = 1 * time.Second
)
// TestFetchAndResolve runs table-driven cases against a fake client and
// verifies the per-resource status/error returned by FetchAndResolve.
func TestFetchAndResolve(t *testing.T) {
	type result struct {
		status status.Status
		error  bool
	}
	testCases := map[string]struct {
		resources       []runtime.Object
		mapperGVKs      []schema.GroupVersionKind
		expectedResults []result
	}{
		"no resources": {
			resources:       []runtime.Object{},
			expectedResults: []result{},
		},
		"single resource": {
			resources: []runtime.Object{
				&appsv1.Deployment{
					TypeMeta: metav1.TypeMeta{
						APIVersion: "apps/v1",
						Kind:       "Deployment",
					},
					ObjectMeta: metav1.ObjectMeta{
						Generation: 1,
						Name:       "myDeployment",
						Namespace:  "default",
					},
				},
			},
			mapperGVKs: []schema.GroupVersionKind{
				appsv1.SchemeGroupVersion.WithKind("Deployment"),
			},
			expectedResults: []result{
				{
					status: status.InProgressStatus,
					error:  false,
				},
			},
		},
		"multiple resources": {
			resources: []runtime.Object{
				&appsv1.StatefulSet{
					TypeMeta: metav1.TypeMeta{
						APIVersion: "apps/v1",
						Kind:       "StatefulSet",
					},
					ObjectMeta: metav1.ObjectMeta{
						Generation: 1,
						Name:       "myStatefulSet",
						Namespace:  "default",
					},
					Spec: appsv1.StatefulSetSpec{
						UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
							Type: appsv1.OnDeleteStatefulSetStrategyType,
						},
					},
					Status: appsv1.StatefulSetStatus{
						ObservedGeneration: 1,
					},
				},
				&corev1.Secret{
					TypeMeta: metav1.TypeMeta{
						APIVersion: "v1",
						Kind:       "Secret",
					},
					ObjectMeta: metav1.ObjectMeta{
						Generation: 1,
						Name:       "mySecret",
						Namespace:  "default",
					},
				},
			},
			mapperGVKs: []schema.GroupVersionKind{
				appsv1.SchemeGroupVersion.WithKind("StatefulSet"),
				corev1.SchemeGroupVersion.WithKind("Secret"),
			},
			expectedResults: []result{
				{
					status: status.CurrentStatus,
					error:  false,
				},
				{
					status: status.CurrentStatus,
					error:  false,
				},
			},
		},
	}
	for tn, tc := range testCases {
		tc := tc
		t.Run(tn, func(t *testing.T) {
			fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme, tc.resources...)
			resolver := NewResolver(fakeClient, newRESTMapper(tc.mapperGVKs...), testPollInterval)
			resolver.statusComputeFunc = status.Compute
			// Build identifiers from the same objects seeded into the
			// fake client so results line up index-by-index.
			var identifiers []ResourceIdentifier
			for _, resource := range tc.resources {
				gvk := resource.GetObjectKind().GroupVersionKind()
				r := resource.(metav1.Object)
				identifiers = append(identifiers, ResourceIdentifier{
					Name:      r.GetName(),
					Namespace: r.GetNamespace(),
					GroupKind: gvk.GroupKind(),
				})
			}
			results := resolver.FetchAndResolve(context.TODO(), identifiers)
			for i, res := range results {
				id := identifiers[i]
				expectedRes := tc.expectedResults[i]
				rid := fmt.Sprintf("%s/%s", id.Namespace, id.Name)
				if expectedRes.error {
					if res.Error == nil {
						t.Errorf("expected error for resource %s, but didn't get one", rid)
					}
					continue
				}
				if res.Error != nil {
					t.Errorf("didn't expected error for resource %s, but got %v", rid, res.Error)
				}
				if got, want := res.Result.Status, expectedRes.status; got != want {
					t.Errorf("expected status %s for resources %s, but got %s", want, rid, got)
				}
			}
		})
	}
}
// TestFetchAndResolveUnknownResource verifies that a resource missing from
// the cluster is reported as Current ("does not exist" is a final state)
// rather than as an error.
func TestFetchAndResolveUnknownResource(t *testing.T) {
	fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme)
	resolver := NewResolver(fakeClient, newRESTMapper(appsv1.SchemeGroupVersion.WithKind("Deployment")), testPollInterval)
	results := resolver.FetchAndResolve(context.TODO(), []ResourceIdentifier{
		{
			GroupKind: schema.GroupKind{
				Group: "apps",
				Kind:  "Deployment",
			},
			Name:      "myDeployment",
			Namespace: "default",
		},
	})
	if want, got := 1, len(results); want != got {
		t.Errorf("expected %d results, but got %d", want, got)
	}
	res := results[0]
	if want, got := status.CurrentStatus, res.Result.Status; got != want {
		t.Errorf("expected status %s, but got %s", want, got)
	}
	if res.Error != nil {
		t.Errorf("expected no error, but got %v", res.Error)
	}
}
// TestFetchAndResolveWithFetchError verifies that a non-NotFound fetch
// failure yields Unknown status with the underlying error preserved.
func TestFetchAndResolveWithFetchError(t *testing.T) {
	expectedError := errors.New("failed to fetch resource")
	resolver := NewResolver(
		&fakeReader{
			Err: expectedError,
		},
		newRESTMapper(appsv1.SchemeGroupVersion.WithKind("Deployment")),
		testPollInterval,
	)
	results := resolver.FetchAndResolve(context.TODO(), []ResourceIdentifier{
		{
			GroupKind: schema.GroupKind{
				Group: "apps",
				Kind:  "Deployment",
			},
			Name:      "myDeployment",
			Namespace: "default",
		},
	})
	if want, got := 1, len(results); want != got {
		t.Errorf("expected %d results, but got %d", want, got)
	}
	res := results[0]
	if want, got := status.UnknownStatus, res.Result.Status; got != want {
		t.Errorf("expected status %s, but got %s", want, got)
	}
	// The resolver wraps fetch errors, so compare the cause.
	if want, got := expectedError, errors.Cause(res.Error); got != want {
		t.Errorf("expected error %v, but got %v", want, got)
	}
}
// TestFetchAndResolveComputeStatusError verifies that an error from the
// statusComputeFunc is surfaced in the result alongside the returned status.
func TestFetchAndResolveComputeStatusError(t *testing.T) {
	expectedError := errors.New("this is a test")
	resource := &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
		},
		ObjectMeta: metav1.ObjectMeta{
			Generation: 1,
			Name:       "myDeployment",
			Namespace:  "default",
		},
	}
	fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme, resource)
	resolver := NewResolver(fakeClient, newRESTMapper(appsv1.SchemeGroupVersion.WithKind("Deployment")), testPollInterval)
	// Override the compute func to always fail.
	resolver.statusComputeFunc = func(u *unstructured.Unstructured) (*status.Result, error) {
		return &status.Result{
			Status:  status.UnknownStatus,
			Message: "Got an error",
		}, expectedError
	}
	results := resolver.FetchAndResolve(context.TODO(), []ResourceIdentifier{
		{
			GroupKind: schema.GroupKind{
				Group: resource.GroupVersionKind().Group,
				Kind:  resource.Kind,
			},
			Name:      resource.GetName(),
			Namespace: resource.GetNamespace(),
		},
	})
	if want, got := 1, len(results); want != got {
		t.Errorf("expected %d results, but got %d", want, got)
	}
	res := results[0]
	if want, got := expectedError, res.Error; got != want {
		t.Errorf("expected error %v, but got %v", want, got)
	}
	if want, got := status.UnknownStatus, res.Result.Status; got != want {
		t.Errorf("expected status %s, but got %s", want, got)
	}
}
// fakeReader is a client.Reader test double: every Get fails with the
// configured error, and invocations are counted.
type fakeReader struct {
	// Called is the number of times Get has been invoked.
	Called int
	// Err is returned from every Get call.
	Err error
}
// Get records the invocation and returns the configured error. It never
// populates obj; tests use it to simulate a client whose reads fail.
func (f *fakeReader) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error {
	f.Called++ // idiomatic increment instead of `+= 1`
	return f.Err
}
// List is not used by the code under test; it always errors so any
// unexpected use is surfaced.
func (f *fakeReader) List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error {
	return errors.New("list not used")
}
// TestWaitForStatus drives WaitForStatus with a scripted statusComputer and
// verifies both the per-resource status sequences and the aggregate status
// sequence observed on the event channel.
func TestWaitForStatus(t *testing.T) {
	testCases := map[string]struct {
		resources                 map[runtime.Object][]*status.Result
		expectedResourceStatuses  map[runtime.Object][]status.Status
		expectedAggregateStatuses []status.Status
	}{
		"no resources": {
			resources:                map[runtime.Object][]*status.Result{},
			expectedResourceStatuses: map[runtime.Object][]status.Status{},
			expectedAggregateStatuses: []status.Status{
				status.CurrentStatus,
			},
		},
		"single resource": {
			resources: map[runtime.Object][]*status.Result{
				deploymentResource: {
					{
						Status:  status.InProgressStatus,
						Message: "FirstInProgress",
					},
					{
						Status:  status.InProgressStatus,
						Message: "SecondInProgress",
					},
					{
						Status:  status.CurrentStatus,
						Message: "CurrentProgress",
					},
				},
			},
			expectedResourceStatuses: map[runtime.Object][]status.Status{
				deploymentResource: {
					status.InProgressStatus,
					status.InProgressStatus,
					status.CurrentStatus,
				},
			},
			expectedAggregateStatuses: []status.Status{
				status.InProgressStatus,
				status.InProgressStatus,
				status.CurrentStatus,
				status.CurrentStatus,
			},
		},
		"multiple resource": {
			resources: map[runtime.Object][]*status.Result{
				statefulSetResource: {
					{
						Status:  status.InProgressStatus,
						Message: "FirstUnknown",
					},
					{
						Status:  status.InProgressStatus,
						Message: "SecondInProgress",
					},
					{
						Status:  status.CurrentStatus,
						Message: "CurrentProgress",
					},
				},
				serviceResource: {
					{
						Status:  status.CurrentStatus,
						Message: "CurrentImmediately",
					},
				},
			},
			expectedResourceStatuses: map[runtime.Object][]status.Status{
				statefulSetResource: {
					status.InProgressStatus,
					status.InProgressStatus,
					status.CurrentStatus,
				},
				serviceResource: {
					status.CurrentStatus,
				},
			},
			expectedAggregateStatuses: []status.Status{
				status.UnknownStatus,
				status.InProgressStatus,
				status.InProgressStatus,
				status.CurrentStatus,
				status.CurrentStatus,
			},
		},
	}
	for tn, tc := range testCases {
		tc := tc
		t.Run(tn, func(t *testing.T) {
			var objs []runtime.Object
			statusResults := make(map[ResourceIdentifier][]*status.Result)
			var identifiers []ResourceIdentifier
			for obj, statuses := range tc.resources {
				objs = append(objs, obj)
				identifier := resourceIdentifierFromRuntimeObject(obj)
				identifiers = append(identifiers, identifier)
				statusResults[identifier] = statuses
			}
			statusComputer := statusComputer{
				results:           statusResults,
				resourceCallCount: make(map[ResourceIdentifier]int),
			}
			resolver := &Resolver{
				client: fake.NewFakeClientWithScheme(scheme.Scheme, objs...),
				mapper: newRESTMapper(
					appsv1.SchemeGroupVersion.WithKind("Deployment"),
					appsv1.SchemeGroupVersion.WithKind("StatefulSet"),
					corev1.SchemeGroupVersion.WithKind("Service"),
				),
				statusComputeFunc: statusComputer.Compute,
				pollInterval:      testPollInterval,
			}
			eventChan := resolver.WaitForStatus(context.TODO(), identifiers)
			// Drain the channel until it is closed or the timeout fires.
			var events []Event
			timer := time.NewTimer(testTimeout)
		loop:
			for {
				select {
				case event, ok := <-eventChan:
					if !ok {
						break loop
					}
					events = append(events, event)
				case <-timer.C:
					t.Fatalf("timeout waiting for resources to reach current status")
				}
			}
			var aggregateStatuses []status.Status
			resourceStatuses := make(map[ResourceIdentifier][]status.Status)
			for _, e := range events {
				aggregateStatuses = append(aggregateStatuses, e.AggregateStatus)
				if e.EventResource != nil {
					identifier := e.EventResource.ResourceIdentifier
					resourceStatuses[identifier] = append(resourceStatuses[identifier], e.EventResource.Status)
				}
			}
			for resource, expectedStatuses := range tc.expectedResourceStatuses {
				identifier := resourceIdentifierFromRuntimeObject(resource)
				actualStatuses := resourceStatuses[identifier]
				if !reflect.DeepEqual(expectedStatuses, actualStatuses) {
					t.Errorf("expected statuses %v for resource %s/%s, but got %v", expectedStatuses, identifier.Namespace, identifier.Name, actualStatuses)
				}
			}
			if !reflect.DeepEqual(tc.expectedAggregateStatuses, aggregateStatuses) {
				t.Errorf("expected aggregate statuses %v, but got %v", tc.expectedAggregateStatuses, aggregateStatuses)
			}
		})
	}
}
// TestWaitForStatusDeletedResources waits on resources that are absent from
// the (empty) fake cluster and verifies they are treated as Current, ending
// with a Completed event.
func TestWaitForStatusDeletedResources(t *testing.T) {
	statusComputer := statusComputer{
		results:           make(map[ResourceIdentifier][]*status.Result),
		resourceCallCount: make(map[ResourceIdentifier]int),
	}
	resolver := &Resolver{
		client: fake.NewFakeClientWithScheme(scheme.Scheme),
		mapper: newRESTMapper(
			appsv1.SchemeGroupVersion.WithKind("Deployment"),
			corev1.SchemeGroupVersion.WithKind("Service"),
		),
		statusComputeFunc: statusComputer.Compute,
		pollInterval:      testPollInterval,
	}
	depResourceIdentifier := resourceIdentifierFromRuntimeObject(deploymentResource)
	serviceResourceIdentifier := resourceIdentifierFromRuntimeObject(serviceResource)
	identifiers := []ResourceIdentifier{
		depResourceIdentifier,
		serviceResourceIdentifier,
	}
	eventChan := resolver.WaitForStatus(context.TODO(), identifiers)
	// Drain the channel until it is closed or the timeout fires.
	var events []Event
	timer := time.NewTimer(testTimeout)
loop:
	for {
		select {
		case event, ok := <-eventChan:
			if !ok {
				break loop
			}
			events = append(events, event)
		case <-timer.C:
			t.Fatalf("timeout waiting for resources to reach current status")
		}
	}
	expectedEvents := []struct {
		aggregateStatus status.Status
		hasResource     bool
		resourceStatus  status.Status
	}{
		{
			aggregateStatus: status.UnknownStatus,
			hasResource:     true,
			resourceStatus:  status.CurrentStatus,
		},
		{
			aggregateStatus: status.CurrentStatus,
			hasResource:     true,
			resourceStatus:  status.CurrentStatus,
		},
		{
			aggregateStatus: status.CurrentStatus,
			hasResource:     false,
		},
	}
	if want, got := len(expectedEvents), len(events); got != want {
		t.Errorf("expected %d events, but got %d", want, got)
	}
	for i, e := range events {
		ee := expectedEvents[i]
		if want, got := ee.aggregateStatus, e.AggregateStatus; got != want {
			t.Errorf("expected event %d to be %s, but got %s", i, want, got)
		}
		if ee.hasResource {
			if want, got := ee.resourceStatus, e.EventResource.Status; want != got {
				t.Errorf("expected resource event %d to be %s, but got %s", i, want, got)
			}
		}
	}
}
// statusComputer is a scripted stand-in for status.Compute: each call for a
// resource returns the next result in a predefined sequence.
type statusComputer struct {
	// t is used to fail the test if Compute is called for an unknown
	// resource. NOTE(review): the tests in this file construct
	// statusComputer without setting t, so that path would nil-panic —
	// confirm before relying on it.
	t *testing.T
	// results maps each resource to its scripted sequence of results.
	results map[ResourceIdentifier][]*status.Result
	// resourceCallCount tracks how many times Compute has been called
	// for each resource.
	resourceCallCount map[ResourceIdentifier]int
}
// Compute returns the next scripted result for the resource identified by u,
// repeating the final result once the sequence is exhausted.
func (s *statusComputer) Compute(u *unstructured.Unstructured) (*status.Result, error) {
	identifier := resourceIdentifierFromRuntimeObject(u)
	resourceResults, ok := s.results[identifier]
	if !ok {
		s.t.Fatalf("No results available for resource %s/%s", u.GetNamespace(), u.GetName())
	}
	callCount := s.resourceCallCount[identifier]
	var res *status.Result
	// Clamp to the last scripted result once the sequence runs out.
	if len(resourceResults) <= callCount {
		res = resourceResults[len(resourceResults)-1]
	} else {
		res = resourceResults[callCount]
	}
	s.resourceCallCount[identifier] = callCount + 1
	return res, nil
}
// deploymentResource is a shared Deployment fixture used across wait tests.
var deploymentResource = &appsv1.Deployment{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "apps/v1",
		Kind:       "Deployment",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name:      "myDeployment",
		Namespace: "default",
	},
}
// statefulSetResource is a shared StatefulSet fixture used across wait tests.
var statefulSetResource = &appsv1.StatefulSet{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "apps/v1",
		Kind:       "StatefulSet",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name:      "myStatefulSet",
		Namespace: "default",
	},
}
// serviceResource is a shared Service fixture used across wait tests.
var serviceResource = &corev1.Service{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "v1",
		Kind:       "Service",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name:      "myService",
		Namespace: "default",
	},
}
// newRESTMapper builds a DefaultRESTMapper that knows about the provided
// GroupVersionKinds, registering every one of them as namespace-scoped.
func newRESTMapper(gvks ...schema.GroupVersionKind) meta.RESTMapper {
	var gvs []schema.GroupVersion
	for _, gvk := range gvks {
		gvs = append(gvs, gvk.GroupVersion())
	}
	rm := meta.NewDefaultRESTMapper(gvs)
	for _, gvk := range gvks {
		rm.Add(gvk, meta.RESTScopeNamespace)
	}
	return rm
}

View File

@ -0,0 +1,172 @@
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package wait
import (
"fmt"
"reflect"
"github.com/pkg/errors"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/cli-utils/pkg/kstatus/status"
)
// waitState keeps the state about the resources and their last
// observed state. This is used to determine any changes in state
// so events can be sent when needed.
type waitState struct {
// ResourceWaitStates contains wait state for each of the resources.
ResourceWaitStates map[ResourceIdentifier]*resourceWaitState
// statusComputeFunc defines the function used to compute the state of
// a single resource. This is available for testing purposes.
statusComputeFunc func(u *unstructured.Unstructured) (*status.Result, error)
}
// resourceWaitState contains state information about an individual resource.
type resourceWaitState struct {
	// FirstSeenGeneration records the generation when the resource was
	// first observed; nil until set.
	FirstSeenGeneration *int64
	// HasBeenCurrent is true once the resource has reached Current at
	// least once.
	HasBeenCurrent bool
	// Observed is true once we have seen any state for the resource.
	Observed bool
	// LastEvent is the most recent EventResource emitted for the resource,
	// used to suppress duplicate events.
	LastEvent *EventResource
}
// newWaitState builds a waitState that tracks each of the given resource
// identifiers, using statusComputeFunc to compute per-resource status.
func newWaitState(resourceIDs []ResourceIdentifier, statusComputeFunc func(u *unstructured.Unstructured) (*status.Result, error)) *waitState {
	states := make(map[ResourceIdentifier]*resourceWaitState, len(resourceIDs))
	for _, id := range resourceIDs {
		states[id] = &resourceWaitState{}
	}
	return &waitState{
		ResourceWaitStates: states,
		statusComputeFunc:  statusComputeFunc,
	}
}
// AggregateStatus derives a single status for the full set of resources:
// Unknown if any resource has not yet been observed, Current if every
// resource has been Current at some point, and InProgress otherwise.
// TODO: Ideally we would like this to be pluggable for different strategies.
func (w *waitState) AggregateStatus() status.Status {
	seenAllCurrent := true
	for _, state := range w.ResourceWaitStates {
		switch {
		case !state.Observed:
			return status.UnknownStatus
		case !state.HasBeenCurrent:
			seenAllCurrent = false
		}
	}
	if seenAllCurrent {
		return status.CurrentStatus
	}
	return status.InProgressStatus
}
// ResourceObserved notifies the waitState that we have new state for
// a resource. This also accepts an error in case fetching the resource
// from a cluster failed. It returns an EventResource object that contains
// information about the observed resource, including the identifier and
// the latest status for the resource. The returned bool is true if the
// status of the observed resource has changed since the previous
// observation and false if not; callers use it to decide whether a new
// event should be sent based on this observation.
func (w *waitState) ResourceObserved(resourceID ResourceIdentifier, resource *unstructured.Unstructured, err error) (EventResource, bool) {
	// A nil check is unnecessary here: resourceID always comes from
	// iterating over the keys of the ResourceWaitStates map.
	state := w.ResourceWaitStates[resourceID]
	latest := w.getEventResource(resourceID, resource, err)
	// Identical to the previous observation? Then this is not a new event.
	if prev := state.LastEvent; prev != nil && reflect.DeepEqual(*prev, latest) {
		return latest, false
	}
	state.LastEvent = &latest
	return latest, true
}
// getEventResource creates a new EventResource for the resource identified by
// the provided resourceID. The EventResource contains information about the
// latest status for the given resource, so it computes status for the resource
// as well as checks for deletion.
func (w *waitState) getEventResource(resourceID ResourceIdentifier, resource *unstructured.Unstructured, err error) EventResource {
	// Get the resourceWaitState for this resource. It contains information
	// about the previously observed statuses. We don't need to check for nil
	// here as the identifier comes from iterating over the keys of the
	// ResourceWaitStates map.
	r := w.ResourceWaitStates[resourceID]
	// If fetching the resource from the cluster failed (for any reason other
	// than NotFound), we don't really know anything about the status of the
	// resource, so simply report the status as Unknown.
	if err != nil && !k8serrors.IsNotFound(errors.Cause(err)) {
		return EventResource{
			ResourceIdentifier: resourceID,
			Status:             status.UnknownStatus,
			Message:            fmt.Sprintf("Error: %s", err),
			Error:              err,
		}
	}
	// If we get here, we have successfully fetched the resource from
	// the cluster, or discovered that it doesn't exist.
	r.Observed = true
	// We treat a non-existent resource as Current. This is to properly
	// handle deletion scenarios.
	if k8serrors.IsNotFound(errors.Cause(err)) {
		r.HasBeenCurrent = true
		return EventResource{
			ResourceIdentifier: resourceID,
			Status:             status.CurrentStatus,
			Message:            "Resource has been deleted",
		}
	}
	// We want to capture the first seen generation of the resource. This
	// allows us to discover if a resource is updated while we are waiting
	// for it to become Current.
	// Bug fix: the original condition was `!= nil`, which meant the
	// generation was never recorded since the field starts out nil.
	if r.FirstSeenGeneration == nil {
		gen := resource.GetGeneration()
		r.FirstSeenGeneration = &gen
	}
	// A deletion timestamp means the resource is being torn down.
	if resource.GetDeletionTimestamp() != nil {
		return EventResource{
			ResourceIdentifier: resourceID,
			Status:             status.TerminatingStatus,
			Message:            "Resource is terminating",
		}
	}
	statusResult, err := w.statusComputeFunc(resource)
	// If we can't compute status for the resource, we report the status
	// as Unknown.
	if err != nil {
		return EventResource{
			ResourceIdentifier: resourceID,
			Status:             status.UnknownStatus,
			Message:            fmt.Sprintf("Error: %s", err),
			Error:              err,
		}
	}
	// We record whether a resource has ever been Current. This makes
	// sure we can report a set of resources as being Current if all
	// of them have reached the Current status at some point, but not
	// necessarily at the same time.
	if statusResult.Status == status.CurrentStatus {
		r.HasBeenCurrent = true
	}
	return EventResource{
		ResourceIdentifier: resourceID,
		Status:             statusResult.Status,
		Message:            statusResult.Message,
	}
}