mirror of https://github.com/knative/pkg.git
use more general kubernetes.Interface (#2135)
parent 47dfdcfaed
commit 803d2ba3ba
@@ -54,7 +54,7 @@ and tests can pass in `t.Logf` like this:
 ```go
 _, err = pkgTest.WaitForEndpointState(
-	clients.KubeClient,
+	kubeClient,
 	t.Logf,
 	...),
 ```
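With `WaitForEndpointState` now taking any `kubernetes.Interface`, the `kubeClient` in the example above can be a plain client-go clientset rather than the `*test.KubeClient` wrapper. A minimal sketch of constructing one (the kubeconfig handling is an assumption, not part of this commit):

```go
package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// newKubeClient builds a plain client-go clientset; *kubernetes.Clientset
// satisfies kubernetes.Interface, so the result can be handed directly to
// helpers that now accept the interface.
func newKubeClient(kubeconfig string) (kubernetes.Interface, error) {
	// Hypothetical: load the config from an explicit kubeconfig path.
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(cfg)
}
```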
@@ -26,8 +26,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
 
-	"knative.dev/pkg/test"
 	"knative.dev/pkg/test/logging"
 )
@@ -50,7 +50,7 @@ func extractDeployment(pod string) string {
 
 // GetLeaders collects all of the leader pods from the specified deployment.
 // GetLeaders will return duplicate pods by design.
-func GetLeaders(ctx context.Context, t *testing.T, client *test.KubeClient, deploymentName, namespace string) ([]string, error) {
+func GetLeaders(ctx context.Context, t *testing.T, client kubernetes.Interface, deploymentName, namespace string) ([]string, error) {
 	leases, err := client.CoordinationV1().Leases(namespace).List(ctx, metav1.ListOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("error getting leases for deployment %q: %w", deploymentName, err)
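Because the helpers now accept `kubernetes.Interface` instead of the concrete wrapper, they can also be exercised against a fake clientset, with no cluster involved. A sketch under that assumption (the lease name, namespace, and test are illustrative, not from this commit):

```go
package ha_test

import (
	"context"
	"testing"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestListLeasesWithFakeClient(t *testing.T) {
	// fake.NewSimpleClientset implements kubernetes.Interface, so anything
	// that takes the interface can run against in-memory objects.
	client := fake.NewSimpleClientset(&coordinationv1.Lease{
		ObjectMeta: metav1.ObjectMeta{Name: "controller-lease", Namespace: "default"},
	})

	leases, err := client.CoordinationV1().Leases("default").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got := len(leases.Items); got != 1 {
		t.Fatalf("expected 1 lease, got %d", got)
	}
}
```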
@@ -73,7 +73,7 @@ func GetLeaders(ctx context.Context, t *testing.T, client *test.KubeClient, depl
 
 // WaitForNewLeaders waits until the collection of current leaders consists of "n" leaders
 // which do not include the specified prior leaders.
-func WaitForNewLeaders(ctx context.Context, t *testing.T, client *test.KubeClient, deploymentName, namespace string, previousLeaders sets.String, n int) (sets.String, error) {
+func WaitForNewLeaders(ctx context.Context, t *testing.T, client kubernetes.Interface, deploymentName, namespace string, previousLeaders sets.String, n int) (sets.String, error) {
 	span := logging.GetEmitableSpan(ctx, "WaitForNewLeaders/"+deploymentName)
 	defer span.End()
 
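For reference, a sketch of how a test might drive the updated signature, capturing the old leaders and then waiting for replacements; the deployment name, namespace, and the `knative.dev/pkg/test/ha` import path are inferred for illustration, not shown in the diff:

```go
package ha_test

import (
	"context"
	"testing"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/kubernetes"

	"knative.dev/pkg/test/ha" // assumed home of GetLeaders/WaitForNewLeaders
)

// waitForFailover records the current leaders and, after the caller has
// disrupted them, waits until the same number of new pods hold the leases.
func waitForFailover(ctx context.Context, t *testing.T, client kubernetes.Interface) (sets.String, error) {
	prev, err := ha.GetLeaders(ctx, t, client, "controller", "knative-system")
	if err != nil {
		return nil, err
	}
	// ... the test would delete the leader pods here to force re-election ...
	return ha.WaitForNewLeaders(ctx, t, client, "controller", "knative-system", sets.NewString(prev...), len(prev))
}
```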
@@ -100,7 +100,7 @@ func WaitForNewLeaders(ctx context.Context, t *testing.T, client *test.KubeClien
 
 // WaitForNewLeader waits until the holder of the given lease is different from the previousLeader.
 // DEPRECATED: Use WaitForNewLeaders.
-func WaitForNewLeader(ctx context.Context, client *test.KubeClient, lease, namespace, previousLeader string) (string, error) {
+func WaitForNewLeader(ctx context.Context, client kubernetes.Interface, lease, namespace, previousLeader string) (string, error) {
 	span := logging.GetEmitableSpan(ctx, "WaitForNewLeader/"+lease)
 	defer span.End()
 	var leader string
@@ -207,11 +207,11 @@ func DeploymentScaledToZeroFunc() func(d *appsv1.Deployment) (bool, error) {
 
 // WaitForLogContent waits until logs for given Pod/Container include the given content.
 // If the content is not present within timeout it returns error.
-func WaitForLogContent(ctx context.Context, client *KubeClient, podName, containerName, namespace, content string) error {
+func WaitForLogContent(ctx context.Context, client kubernetes.Interface, podName, containerName, namespace, content string) error {
 	var logs []byte
 	waitErr := wait.PollImmediate(interval, logTimeout, func() (bool, error) {
 		var err error
-		logs, err = client.PodLogs(ctx, podName, containerName, namespace)
+		logs, err = PodLogs(ctx, client, podName, containerName, namespace)
 		if err != nil {
 			return true, err
 		}
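The last hunk replaces the `client.PodLogs(...)` method call with a package-level `PodLogs(ctx, client, ...)` function that works on any `kubernetes.Interface`. That function's body is not part of this hunk; a minimal sketch of how such a helper can fetch logs through client-go (the name `podLogsSketch` is hypothetical):

```go
package test

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// podLogsSketch fetches one container's logs through any kubernetes.Interface.
// The real PodLogs in this package may differ, e.g. by resolving the pod first.
func podLogsSketch(ctx context.Context, client kubernetes.Interface, podName, containerName, namespace string) ([]byte, error) {
	return client.CoreV1().
		Pods(namespace).
		GetLogs(podName, &corev1.PodLogOptions{Container: containerName}).
		DoRaw(ctx)
}
```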