mirror of https://github.com/linkerd/linkerd2.git

multicluster: Delete headless services and endpoints when not exported (#8755)

Closes #8475

When a headless service that was exported no longer is, we currently continue to mirror it and its endpoints. The bug is as follows:

1. The service is updated (say with `mirror.linkerd.io/exported: false`) and `createOrUpdateService` is called
2. The service is _not_ exported, so we queue a `RemoteServiceDeleted`
3. The `RemoteServiceDeleted` is processed and the service and its mirror endpoints are deleted
4. The service's endpoints are updated (on the remote cluster with the new label) and `handleCreateOrUpdateEndpoints` is called
5. The endpoints belong to a headless service, so `createOrUpdateHeadlessEndpoints` is called
6. `createOrUpdateHeadlessEndpoints` recreates the just-deleted service and endpoints

We should stop processing the service update after step 3; the service and its endpoints have been properly deleted.

To fix this, we avoid calling `handleCreateOrUpdateEndpoints` when the new endpoints are not exported. Previously we only skipped that call when both the old _and_ new endpoints were not exported. When endpoints go from being exported to not being exported, the old set's status does not matter, so we remove that check and skip `handleCreateOrUpdateEndpoints` whenever the new set is no longer exported.

I've tested this to make sure the issue is fixed. Testing can be done manually (which is what I did) or as explained [here](https://github.com/linkerd/linkerd2/pull/8734#pullrequestreview-1018622066).

Signed-off-by: Kevin Leimkuhler <kleimkuhler@icloud.com>
This commit is contained in:
parent eadfcaa931
commit 8921c89a5f
@@ -836,28 +836,19 @@ func (rcsw *RemoteClusterServiceWatcher) Start(ctx context.Context) error {
 				rcsw.eventsQueue.Add(&OnAddEndpointsCalled{obj.(*corev1.Endpoints)})
 			},
 			// AddFunc relevant for all kind of exported endpoints
-			UpdateFunc: func(old, new interface{}) {
+			UpdateFunc: func(_, new interface{}) {
 				if new.(metav1.Object).GetNamespace() == kubeSystem {
 					return
 				}
 
-				epOld, ok := old.(*corev1.Endpoints)
-				if !ok {
-					rcsw.log.Errorf("error processing endpoints object: got %#v, expected *corev1.Endpoints", epOld)
-					return
-				}
-
 				epNew, ok := new.(*corev1.Endpoints)
 				if !ok {
 					rcsw.log.Errorf("error processing endpoints object: got %#v, expected *corev1.Endpoints", epNew)
 					return
 				}
 
-				if !rcsw.isExported(epOld.Labels) && !rcsw.isExported(epNew.Labels) {
+				if !rcsw.isExported(epNew.Labels) {
 					rcsw.log.Debugf("skipped processing endpoints object %s/%s: missing %s label", epNew.Namespace, epNew.Name, consts.DefaultExportedServiceSelector)
 					return
 				}
 
 				rcsw.eventsQueue.Add(&OnUpdateEndpointsCalled{epNew})
 			},
 		},
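To make the effect of the predicate change in the diff above concrete, here is a minimal, self-contained Go sketch. It is not the linkerd2 implementation: the label key and the value-based `isExported` stand-in are assumptions made for illustration (the real watcher uses `consts.DefaultExportedServiceSelector` and its own matching logic). In the walkthrough above, the old endpoints still carry the export label while the new ones do not, so the old "skip only if both are unexported" check still processes the update and recreates the just-deleted mirror; checking only the new set skips it.

```go
package main

import "fmt"

// exportedLabel stands in for consts.DefaultExportedServiceSelector;
// the exact key and matching rule are assumptions for this sketch.
const exportedLabel = "mirror.linkerd.io/exported"

// isExported is a simplified stand-in for the watcher's label check.
func isExported(labels map[string]string) bool {
	return labels[exportedLabel] == "true"
}

func main() {
	oldLabels := map[string]string{exportedLabel: "true"}  // old set: still exported
	newLabels := map[string]string{exportedLabel: "false"} // new set: export removed

	// Old predicate: skip only when *both* old and new are unexported.
	// Here the old set is still exported, so the update is processed and
	// the just-deleted mirror service would be recreated (the bug).
	oldCheckSkips := !isExported(oldLabels) && !isExported(newLabels)

	// New predicate: skip whenever the new set is unexported, regardless
	// of what the old set looked like.
	newCheckSkips := !isExported(newLabels)

	fmt.Println("old check skips update:", oldCheckSkips) // false -> bug
	fmt.Println("new check skips update:", newCheckSkips) // true  -> fixed
}
```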