Merge pull request #285 from mwielgus/loglevel
Set verbosity for each of the glog.Info logs
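
For context (not part of the diff below), a minimal standalone sketch of how glog verbosity gating works, assuming the github.com/golang/glog package that cluster-autoscaler uses: glog.V(n) returns a glog.Verbose value whose Info/Infof methods emit only when the binary runs with -v=n or higher (or a matching -vmodule pattern), while plain glog.Info always logs at INFO severity.

// Illustrative sketch only; not code from this repository.
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// glog registers -v, -vmodule, -logtostderr, etc. on the default FlagSet in its init().
	flag.Parse()
	defer glog.Flush()

	glog.Info("always emitted at INFO severity")
	glog.V(1).Info("emitted only when running with -v=1 or higher")
	// glog.V(4).Infof has the same signature as glog.Infof, so it can also be
	// handed to callers that expect a printf-style logging callback.
	glog.V(4).Infof("emitted only at verbosity >= %d", 4)
}

Because glog.V(4).Infof is signature-compatible with glog.Infof, the event-broadcaster hunk below can swap one for the other without touching the caller.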
commit c0b48e4a15
@@ -103,7 +103,7 @@ func main() {
 	for _, p := range partitions {
 		for _, r := range p.Regions() {
 			url := "https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/" + r.ID() + "/index.json"
-			glog.Infof("fetching %s\n", url)
+			glog.V(1).Infof("fetching %s\n", url)
 			res, err := http.Get(url)
 			if err != nil {
 				glog.Warningf("Error fetching %s skipping...\n", url)
@@ -107,7 +107,7 @@ func (b CloudProviderBuilder) Build(discoveryOpts cloudprovider.NodeGroupDiscove
 	}
 
 	if b.cloudProviderFlag == kubemark.ProviderName {
-		glog.Infof("Building kubemark cloud provider.")
+		glog.V(1).Infof("Building kubemark cloud provider.")
 		externalConfig, err := rest.InClusterConfig()
 		if err != nil {
 			glog.Fatalf("Failed to get kubeclient config for external cluster: %v", err)
@@ -96,11 +96,11 @@ func CreateGceManager(configReader io.Reader, mode GcpCloudProviderMode, cluster
 		if cfg.Global.TokenURL == "" {
 			glog.Warning("Empty tokenUrl in cloud config")
 		} else {
-			glog.Infof("Using TokenSource from config %#v", tokenSource)
+			glog.V(1).Infof("Using TokenSource from config %#v", tokenSource)
 			tokenSource = provider_gce.NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody)
 		}
 	} else {
-		glog.Infof("Using default TokenSource %#v", tokenSource)
+		glog.V(1).Infof("Using default TokenSource %#v", tokenSource)
 	}
 	projectId, zone, err := getProjectAndZone()
 	if err != nil {
@@ -364,7 +364,7 @@ func (sd *ScaleDown) TryToScaleDown(nodes []*apiv1.Node, pods []*apiv1.Pod, pdbs
 		}
 	}
 	if len(candidates) == 0 {
-		glog.Infof("No candidates for scale down")
+		glog.V(1).Infof("No candidates for scale down")
 		return ScaleDownNoUnneeded, nil
 	}
 
@@ -153,7 +153,7 @@ func createAutoscalerOptions() core.AutoscalerOptions {
 
 func createKubeClient() kube_client.Interface {
 	if *kubeConfigFile != "" {
-		glog.Infof("Using kubeconfig file: %s", *kubeConfigFile)
+		glog.V(1).Infof("Using kubeconfig file: %s", *kubeConfigFile)
 		// use the current context in kubeconfig
 		config, err := clientcmd.BuildConfigFromFlags("", *kubeConfigFile)
 		if err != nil {
@@ -181,13 +181,13 @@ func createKubeClient() kube_client.Interface {
 func registerSignalHandlers(autoscaler core.Autoscaler) {
 	sigs := make(chan os.Signal, 1)
 	signal.Notify(sigs, os.Interrupt, os.Kill, syscall.SIGTERM, syscall.SIGQUIT)
-	glog.Info("Registered cleanup signal handler")
+	glog.V(1).Info("Registered cleanup signal handler")
 
 	go func() {
 		<-sigs
-		glog.Info("Received signal, attempting cleanup")
+		glog.V(1).Info("Received signal, attempting cleanup")
 		autoscaler.ExitCleanUp()
-		glog.Info("Cleaned up, exiting...")
+		glog.V(1).Info("Cleaned up, exiting...")
 		glog.Flush()
 		os.Exit(0)
 	}()
@@ -244,7 +244,7 @@ func main() {
 
 	healthCheck := metrics.NewHealthCheck(*maxInactivityTimeFlag, *maxFailingTimeFlag)
 
-	glog.Infof("Cluster Autoscaler %s", ClusterAutoscalerVersion)
+	glog.V(1).Infof("Cluster Autoscaler %s", ClusterAutoscalerVersion)
 
 	correctEstimator := false
 	for _, availableEstimator := range estimator.AvailableEstimators {
@@ -181,7 +181,7 @@ func UpdateDuration(label FunctionLabel, duration time.Duration) {
 	// TODO(maciekpytel): remove second condition if we manage to get
 	// asynchronous node drain
 	if duration > LogLongDurationThreshold && label != ScaleDown {
-		glog.Infof("Function %s took %v to complete", label, duration)
+		glog.V(4).Infof("Function %s took %v to complete", label, duration)
 	}
 	functionDuration.WithLabelValues(string(label)).Observe(duration.Seconds())
 }
@@ -57,7 +57,7 @@ func MarkToBeDeleted(node *apiv1.Node, client kube_client.Interface) error {
 func addToBeDeletedTaint(node *apiv1.Node) (bool, error) {
 	for _, taint := range node.Spec.Taints {
 		if taint.Key == ToBeDeletedTaint {
-			glog.Infof("ToBeDeletedTaint already present on on node %v", taint, node.Name)
+			glog.V(2).Infof("ToBeDeletedTaint already present on on node %v", taint, node.Name)
 			return false, nil
 		}
 	}
@@ -30,7 +30,7 @@ import (
 // CreateEventRecorder creates an event recorder to send custom events to Kubernetes to be recorded for targeted Kubernetes objects
 func CreateEventRecorder(kubeClient clientset.Interface) kube_record.EventRecorder {
 	eventBroadcaster := kube_record.NewBroadcaster()
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(glog.V(4).Infof)
 	if _, isfake := kubeClient.(*fake.Clientset); !isfake {
 		eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
 	}
@@ -82,7 +82,7 @@ func BalanceScaleUpBetweenGroups(groups []cloudprovider.NodeGroup, newNodes int)
 		totalCapacity += maxSize - currentSize
 	}
 	if totalCapacity < newNodes {
-		glog.Infof("Requested scale-up (%v) exceeds node group set capacity, capping to %v", newNodes, totalCapacity)
+		glog.V(2).Infof("Requested scale-up (%v) exceeds node group set capacity, capping to %v", newNodes, totalCapacity)
 		newNodes = totalCapacity
 	}
 
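
Usage note: after this change the messages above are only written when cluster-autoscaler is started with a sufficient glog verbosity, e.g. --v=1 for the startup and scale-down messages and --v=4 for the event-broadcaster and function-duration logs; at glog's default verbosity of 0 they are suppressed.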