Merge pull request #3989 from brett-elliott/useragent

Set cluster autoscaler-specific user agent.
This commit is contained in:
Kubernetes Prow Robot 2021-04-09 05:49:05 -07:00 committed by GitHub
commit f4c4a77940
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 109 additions and 79 deletions

View File

@ -85,11 +85,12 @@ type autoscalingGceClientV1 struct {
}
// NewAutoscalingGceClientV1 creates a new client for communicating with GCE v1 API.
func NewAutoscalingGceClientV1(client *http.Client, projectId string) (*autoscalingGceClientV1, error) {
func NewAutoscalingGceClientV1(client *http.Client, projectId string, userAgent string) (*autoscalingGceClientV1, error) {
gceService, err := gce.New(client)
if err != nil {
return nil, err
}
gceService.UserAgent = userAgent
return &autoscalingGceClientV1{
projectId: projectId,

View File

@ -30,9 +30,9 @@ import (
gce_api "google.golang.org/api/compute/v1"
)
func newTestAutoscalingGceClient(t *testing.T, projectId, url string) *autoscalingGceClientV1 {
func newTestAutoscalingGceClient(t *testing.T, projectId, url, userAgent string) *autoscalingGceClientV1 {
client := &http.Client{}
gceClient, err := NewAutoscalingGceClientV1(client, projectId)
gceClient, err := NewAutoscalingGceClientV1(client, projectId, userAgent)
if !assert.NoError(t, err) {
t.Fatalf("fatal error: %v", err)
}
@ -65,7 +65,7 @@ const operationDoneResponse = `{
func TestWaitForOp(t *testing.T) {
server := test_util.NewHttpServerMock()
defer server.Close()
g := newTestAutoscalingGceClient(t, "project1", server.URL)
g := newTestAutoscalingGceClient(t, "project1", server.URL, "")
g.operationPollInterval = 1 * time.Millisecond
g.operationWaitTimeout = 500 * time.Millisecond
@ -83,7 +83,7 @@ func TestWaitForOp(t *testing.T) {
func TestWaitForOpTimeout(t *testing.T) {
server := test_util.NewHttpServerMock()
defer server.Close()
g := newTestAutoscalingGceClient(t, "project1", server.URL)
g := newTestAutoscalingGceClient(t, "project1", server.URL, "")
// The values here are higher than in other tests since we're aiming for timeout.
// Lower values make this fragile and flaky.
@ -104,7 +104,7 @@ func TestErrors(t *testing.T) {
const instanceUrl = "https://content.googleapis.com/compute/v1/projects/myprojid/zones/myzone/instances/myinst"
server := test_util.NewHttpServerMock()
defer server.Close()
g := newTestAutoscalingGceClient(t, "project1", server.URL)
g := newTestAutoscalingGceClient(t, "project1", server.URL, "")
testCases := []struct {
errorCodes []string
@ -162,3 +162,18 @@ func TestErrors(t *testing.T) {
}
mock.AssertExpectationsForObjects(t, server)
}
// TestUserAgent verifies that the user agent passed to
// newTestAutoscalingGceClient is actually sent on outgoing GCE API calls.
// The mock server is created with MockFieldUserAgent, so it checks that each
// request's User-Agent header contains the expected value ("testuseragent")
// and panics on a mismatch, failing the test.
func TestUserAgent(t *testing.T) {
server := test_util.NewHttpServerMock(test_util.MockFieldUserAgent, test_util.MockFieldResponse)
defer server.Close()
g := newTestAutoscalingGceClient(t, "project1", server.URL, "testuseragent")
// Short poll interval and wait timeout keep the test fast; waitForOp only
// needs to issue at least one HTTP request for the user-agent check to run.
g.operationPollInterval = 10 * time.Millisecond
g.operationWaitTimeout = 49 * time.Millisecond
// Maybe(): the number of polls before the timeout fires is timing-dependent.
server.On("handle", "/project1/zones/us-central1-b/operations/operation-1505728466148-d16f5197").Return("testuseragent", operationRunningResponse).Maybe()
operation := &gce_api.Operation{Name: "operation-1505728466148-d16f5197"}
// The returned (timeout) error is deliberately ignored: the assertion of
// interest happens inside the mock server's user-agent check.
g.waitForOp(operation, projectId, zoneB, false)
}

View File

@ -356,7 +356,7 @@ func BuildGCE(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscover
defer config.Close()
}
manager, err := CreateGceManager(config, do, opts.Regional, opts.ConcurrentGceRefreshes)
manager, err := CreateGceManager(config, do, opts.Regional, opts.ConcurrentGceRefreshes, opts.UserAgent)
if err != nil {
klog.Fatalf("Failed to create GCE Manager: %v", err)
}

View File

@ -117,7 +117,7 @@ type gceManagerImpl struct {
}
// CreateGceManager constructs GceManager object.
func CreateGceManager(configReader io.Reader, discoveryOpts cloudprovider.NodeGroupDiscoveryOptions, regional bool, concurrentGceRefreshes int) (GceManager, error) {
func CreateGceManager(configReader io.Reader, discoveryOpts cloudprovider.NodeGroupDiscoveryOptions, regional bool, concurrentGceRefreshes int, userAgent string) (GceManager, error) {
// Create Google Compute Engine token.
var err error
tokenSource := google.ComputeTokenSource("")
@ -167,7 +167,7 @@ func CreateGceManager(configReader io.Reader, discoveryOpts cloudprovider.NodeGr
// Create Google Compute Engine service.
client := oauth2.NewClient(oauth2.NoContext, tokenSource)
client.Timeout = httpTimeout
gceService, err := NewAutoscalingGceClientV1(client, projectId)
gceService, err := NewAutoscalingGceClientV1(client, projectId, userAgent)
if err != nil {
return nil, err
}

View File

@ -325,7 +325,7 @@ func buildListInstanceGroupManagersResponse(listInstanceGroupManagerResponsePart
}
func newTestGceManager(t *testing.T, testServerURL string, regional bool) *gceManagerImpl {
gceService := newTestAutoscalingGceClient(t, projectId, testServerURL)
gceService := newTestAutoscalingGceClient(t, projectId, testServerURL, "")
// Override wait for op timeouts.
gceService.operationWaitTimeout = 50 * time.Millisecond

View File

@ -53,7 +53,7 @@ const listLKEClusterPoolsResponse4 = `
`
func TestApiClientRest_CreateLKEClusterPool(t *testing.T) {
server := NewHttpServerMockWithContentType()
server := NewHttpServerMock(MockFieldContentType, MockFieldResponse)
defer server.Close()
client := NewClient(&http.Client{})
@ -67,7 +67,7 @@ func TestApiClientRest_CreateLKEClusterPool(t *testing.T) {
Disks: []LKEClusterPoolDisk{},
}
requestPath := "/lke/clusters/" + strconv.Itoa(clusterID) + "/pools"
server.On("handleWithContentType", requestPath).Return("application/json", createLKEClusterPoolResponse1).Once()
server.On("handle", requestPath).Return("application/json", createLKEClusterPoolResponse1).Once()
pool, err := client.CreateLKEClusterPool(ctx, clusterID, createOpts)
assert.NoError(t, err)
@ -79,7 +79,7 @@ func TestApiClientRest_CreateLKEClusterPool(t *testing.T) {
}
func TestApiClientRest_DeleteLKEClusterPool(t *testing.T) {
server := NewHttpServerMockWithContentType()
server := NewHttpServerMock(MockFieldContentType, MockFieldResponse)
defer server.Close()
client := NewClient(&http.Client{})
@ -89,7 +89,7 @@ func TestApiClientRest_DeleteLKEClusterPool(t *testing.T) {
poolID := 222
ctx := context.Background()
requestPath := "/lke/clusters/" + strconv.Itoa(clusterID) + "/pools/" + strconv.Itoa(poolID)
server.On("handleWithContentType", requestPath).Return("application/json", deleteLKEClusterPoolResponse1).Once()
server.On("handle", requestPath).Return("application/json", deleteLKEClusterPoolResponse1).Once()
err := client.DeleteLKEClusterPool(ctx, clusterID, poolID)
assert.NoError(t, err)
@ -97,7 +97,7 @@ func TestApiClientRest_DeleteLKEClusterPool(t *testing.T) {
}
func TestApiClientRest_ListLKEClusterPools(t *testing.T) {
server := NewHttpServerMockWithContentType()
server := NewHttpServerMock(MockFieldContentType, MockFieldResponse)
defer server.Close()
client := NewClient(&http.Client{})
@ -106,7 +106,7 @@ func TestApiClientRest_ListLKEClusterPools(t *testing.T) {
clusterID := 16293
ctx := context.Background()
requestPath := "/lke/clusters/" + strconv.Itoa(clusterID) + "/pools"
server.On("handleWithContentType", requestPath).Return("application/json", listLKEClusterPoolsResponse1).Once().On("handleWithContentType", requestPath).Return("application/json", listLKEClusterPoolsResponse2).Once().On("handleWithContentType", requestPath).Return("application/json", listLKEClusterPoolsResponse3).Once().On("handleWithContentType", requestPath).Return("application/json", listLKEClusterPoolsResponse4).Once()
server.On("handle", requestPath).Return("application/json", listLKEClusterPoolsResponse1).Once().On("handle", requestPath).Return("application/json", listLKEClusterPoolsResponse2).Once().On("handle", requestPath).Return("application/json", listLKEClusterPoolsResponse3).Once().On("handle", requestPath).Return("application/json", listLKEClusterPoolsResponse4).Once()
pools, err := client.ListLKEClusterPools(ctx, clusterID, nil)
assert.NoError(t, err)

View File

@ -77,7 +77,7 @@ func newTestPacketManagerRest(t *testing.T, url string) *packetManagerRest {
}
func TestListPacketDevices(t *testing.T) {
var m *packetManagerRest
server := NewHttpServerMockWithContentType()
server := NewHttpServerMock(MockFieldContentType, MockFieldResponse)
defer server.Close()
if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 {
// If auth token set in env, hit the actual Packet API
@ -87,7 +87,7 @@ func TestListPacketDevices(t *testing.T) {
m = newTestPacketManagerRest(t, server.URL)
t.Logf("server URL: %v", server.URL)
t.Logf("default packetManagerNodePool baseURL: %v", m.packetManagerNodePools["default"].baseURL)
server.On("handleWithContentType", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponse).Times(2)
server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponse).Times(2)
}
_, err := m.listPacketDevices(context.TODO())

View File

@ -37,7 +37,7 @@ const deletePacketDeviceResponsePool3 = ``
func TestIncreaseDecreaseSize(t *testing.T) {
var m *packetManagerRest
server := NewHttpServerMockWithContentType()
server := NewHttpServerMock(MockFieldContentType, MockFieldResponse)
defer server.Close()
assert.Equal(t, true, true)
if len(os.Getenv("PACKET_AUTH_TOKEN")) > 0 {
@ -46,15 +46,15 @@ func TestIncreaseDecreaseSize(t *testing.T) {
} else {
// Set up a mock Packet API
m = newTestPacketManagerRest(t, server.URL)
server.On("handleWithContentType", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponse).Times(3)
server.On("handleWithContentType", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", createPacketDeviceResponsePool3).Times(1)
server.On("handleWithContentType", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponseAfterIncreasePool3).Times(2)
server.On("handleWithContentType", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", createPacketDeviceResponsePool2).Times(1)
server.On("handleWithContentType", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponseAfterIncreasePool2).Times(3)
server.On("handleWithContentType", "/devices/0f5609af-1c27-451b-8edd-a1283f2c9440").Return("application/json", deletePacketDeviceResponsePool2).Times(1)
server.On("handleWithContentType", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponseAfterIncreasePool3).Times(3)
server.On("handleWithContentType", "/devices/8fa90049-e715-4794-ba31-81c1c78cee84").Return("application/json", deletePacketDeviceResponsePool3).Times(1)
server.On("handleWithContentType", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponse).Times(3)
server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponse).Times(3)
server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", createPacketDeviceResponsePool3).Times(1)
server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponseAfterIncreasePool3).Times(2)
server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", createPacketDeviceResponsePool2).Times(1)
server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponseAfterIncreasePool2).Times(3)
server.On("handle", "/devices/0f5609af-1c27-451b-8edd-a1283f2c9440").Return("application/json", deletePacketDeviceResponsePool2).Times(1)
server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponseAfterIncreasePool3).Times(3)
server.On("handle", "/devices/8fa90049-e715-4794-ba31-81c1c78cee84").Return("application/json", deletePacketDeviceResponsePool3).Times(1)
server.On("handle", "/projects/"+m.packetManagerNodePools["default"].projectID+"/devices").Return("application/json", listPacketDevicesResponse).Times(3)
}
clusterUpdateLock := sync.Mutex{}
ngPool2 := &packetNodeGroup{

View File

@ -161,4 +161,6 @@ type AutoscalingOptions struct {
CordonNodeBeforeTerminate bool
// DaemonSetEvictionForEmptyNodes is whether CA will gracefully terminate DaemonSet pods from empty nodes.
DaemonSetEvictionForEmptyNodes bool
// User agent to use for HTTP calls.
UserAgent string
}

View File

@ -178,6 +178,7 @@ var (
clusterAPICloudConfigAuthoritative = flag.Bool("clusterapi-cloud-config-authoritative", false, "Treat the cloud-config flag authoritatively (do not fallback to using kubeconfig flag). ClusterAPI only")
cordonNodeBeforeTerminate = flag.Bool("cordon-node-before-terminating", false, "Should CA cordon nodes before terminating during downscale process")
daemonSetEvictionForEmptyNodes = flag.Bool("daemonset-eviction-for-empty-nodes", false, "DaemonSet pods will be gracefully terminated from empty nodes")
userAgent = flag.String("user-agent", "cluster-autoscaler", "User agent used for HTTP calls.")
)
func createAutoscalingOptions() config.AutoscalingOptions {
@ -253,6 +254,7 @@ func createAutoscalingOptions() config.AutoscalingOptions {
ClusterAPICloudConfigAuthoritative: *clusterAPICloudConfigAuthoritative,
CordonNodeBeforeTerminate: *cordonNodeBeforeTerminate,
DaemonSetEvictionForEmptyNodes: *daemonSetEvictionForEmptyNodes,
UserAgent: *userAgent,
}
}

View File

@ -18,17 +18,16 @@ package test
import (
"fmt"
"time"
"net/http"
"net/http/httptest"
"strings"
"time"
"github.com/stretchr/testify/mock"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/stretchr/testify/mock"
)
// BuildTestPod creates a pod with specified resources.
@ -230,18 +229,53 @@ func boolptr(val bool) *bool {
// instances, err := g.GetManagedInstances()
// // Check if expected calls were executed.
// mock.AssertExpectationsForObjects(t, server)
//
// Note: to provide a content type, you may pass in the desired
// fields:
// server := NewHttpServerMock(MockFieldContentType, MockFieldResponse)
// ...
// server.On("handle", "/project1/zones/us-central1-b/listManagedInstances").Return("<content type>", "<response>").Once()
// The order of the return objects must match that of the HttpServerMockField constants passed to NewHttpServerMock().
// HttpServerMock is a test HTTP server backed by testify's mock.Mock.
// Every incoming request is dispatched to a "handle" expectation, and the
// configured fields determine how that expectation's Return(...) values are
// interpreted (response body, status code, content type, expected user agent).
type HttpServerMock struct {
mock.Mock
*httptest.Server
// fields records, in order, which HttpServerMockField each positional
// Return(...) argument of the "handle" expectation corresponds to.
fields []HttpServerMockField
}
// HttpServerMockField identifies one kind of value produced (or checked) by
// a mocked handler. The order of fields passed to NewHttpServerMock must
// match the order of the Return(...) arguments on the "handle" expectation.
type HttpServerMockField int
const (
// MockFieldResponse represents a string response.
MockFieldResponse HttpServerMockField = iota
// MockFieldStatusCode represents an integer HTTP response code.
MockFieldStatusCode
// MockFieldContentType represents a string content type.
MockFieldContentType
// MockFieldUserAgent represents a string user agent.
MockFieldUserAgent
)
// NewHttpServerMock creates new HttpServerMock.
func NewHttpServerMock() *HttpServerMock {
httpServerMock := &HttpServerMock{}
func NewHttpServerMock(fields ...HttpServerMockField) *HttpServerMock {
if len(fields) == 0 {
fields = []HttpServerMockField{MockFieldResponse}
}
foundResponse := false
for _, field := range fields {
if field == MockFieldResponse {
foundResponse = true
break
}
}
if !foundResponse {
panic("Must use MockFieldResponse.")
}
httpServerMock := &HttpServerMock{fields: fields}
mux := http.NewServeMux()
mux.HandleFunc("/",
func(w http.ResponseWriter, req *http.Request) {
result := httpServerMock.handle(req.URL.Path)
result := httpServerMock.handle(req, w, httpServerMock)
_, _ = w.Write([]byte(result))
})
@ -250,49 +284,25 @@ func NewHttpServerMock() *HttpServerMock {
return httpServerMock
}
func (l *HttpServerMock) handle(url string) string {
func (l *HttpServerMock) handle(req *http.Request, w http.ResponseWriter, serverMock *HttpServerMock) string {
url := req.URL.Path
var response string
args := l.Called(url)
return args.String(0)
}
// NewHttpServerMockWithStatusCode creates new HttpServerMock.
func NewHttpServerMockWithStatusCode() *HttpServerMock {
httpServerMock := &HttpServerMock{}
mux := http.NewServeMux()
mux.HandleFunc("/",
func(w http.ResponseWriter, req *http.Request) {
code, result := httpServerMock.handleWithStatusCode(req.URL.Path)
w.WriteHeader(code)
_, _ = w.Write([]byte(result))
})
server := httptest.NewServer(mux)
httpServerMock.Server = server
return httpServerMock
}
func (l *HttpServerMock) handleWithStatusCode(url string) (int, string) {
args := l.Called(url)
return args.Int(0), args.String(1)
}
// NewHttpServerMockWithContentType creates new HttpServerMock.
func NewHttpServerMockWithContentType() *HttpServerMock {
httpServerMock := &HttpServerMock{}
mux := http.NewServeMux()
mux.HandleFunc("/",
func(w http.ResponseWriter, req *http.Request) {
contentType, result := httpServerMock.handleWithContentType(req.URL.Path)
w.Header().Set("Content-Type", contentType)
_, _ = w.Write([]byte(result))
})
server := httptest.NewServer(mux)
httpServerMock.Server = server
return httpServerMock
}
func (l *HttpServerMock) handleWithContentType(url string) (string, string) {
args := l.Called(url)
return args.String(0), args.String(1)
for i, field := range l.fields {
switch field {
case MockFieldResponse:
response = args.String(i)
case MockFieldContentType:
w.Header().Set("Content-Type", args.String(i))
case MockFieldStatusCode:
w.WriteHeader(args.Int(i))
case MockFieldUserAgent:
gotUserAgent := req.UserAgent()
expectedUserAgent := args.String(i)
if !strings.Contains(gotUserAgent, expectedUserAgent) {
panic(fmt.Sprintf("Error handling URL %s, expected user agent %s but got %s.", url, expectedUserAgent, gotUserAgent))
}
}
}
return response
}