Merge pull request #666 from salasberryfin/workflows-bump-golangci-lint
chore: bump golangci-lint-action to v8.0.0
commit 8c30949d43
@@ -22,9 +22,9 @@ jobs:
         with:
           go-version: 1.23.0
       - name: golangci-lint
-        uses: golangci/golangci-lint-action@v7.0.0
+        uses: golangci/golangci-lint-action@v8.0.0
         with:
-          version: v2.0.2
+          version: v2.1.0
           working-directory: ${{matrix.working-directory}}
           args: --timeout=5m0s
           skip-cache: true
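Note: v8.0.0 of the action requires golangci-lint v2.1.0 or newer, which appears to be why the pinned linter version is bumped in the same hunk.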
@@ -54,30 +54,6 @@ type Scope struct {
     ControlPlane *controlplanev1.RKE2ControlPlane
 }
 
-// HasMachineOwner returns true if the RKE2Config is owned by a Machine.
-func (s *Scope) HasMachineOwner() bool {
-    return s.Machine != nil
-}
-
-// HasMachinePoolOwner returns true if the RKE2Config is owned by a MachinePool.
-func (s *Scope) HasMachinePoolOwner() bool {
-    return s.MachinePool != nil
-}
-
-// HasControlPlaneOwner returns true if the RKE2Config is owned by a Machine which is also a ControlPlane.
-func (s *Scope) HasControlPlaneOwner() bool {
-    return s.Machine != nil && s.ControlPlane != nil
-}
-
-// GetDesiredVersion returns the K8S version associated to the RKE2Config owner.
-func (s *Scope) GetDesiredVersion() string {
-    if s.MachinePool != nil {
-        return *s.MachinePool.Spec.Template.Spec.Version
-    }
-
-    return *s.Machine.Spec.Version
-}
-
 // NewScope initializes the RKE2Config scope given a new request.
 func NewScope(ctx context.Context, req ctrl.Request, client client.Client) (*Scope, error) {
     logger := log.FromContext(ctx)
@@ -150,3 +126,27 @@ func NewScope(ctx context.Context, req ctrl.Request, client client.Client) (*Sco
         Cluster: cluster,
     }, nil
 }
+
+// HasMachineOwner returns true if the RKE2Config is owned by a Machine.
+func (s *Scope) HasMachineOwner() bool {
+    return s.Machine != nil
+}
+
+// HasMachinePoolOwner returns true if the RKE2Config is owned by a MachinePool.
+func (s *Scope) HasMachinePoolOwner() bool {
+    return s.MachinePool != nil
+}
+
+// HasControlPlaneOwner returns true if the RKE2Config is owned by a Machine which is also a ControlPlane.
+func (s *Scope) HasControlPlaneOwner() bool {
+    return s.Machine != nil && s.ControlPlane != nil
+}
+
+// GetDesiredVersion returns the K8S version associated to the RKE2Config owner.
+func (s *Scope) GetDesiredVersion() string {
+    if s.MachinePool != nil {
+        return *s.MachinePool.Spec.Template.Spec.Version
+    }
+
+    return *s.Machine.Spec.Version
+}
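All Go hunks in this commit, starting with the two above, are pure moves: every block removed in one place reappears verbatim in another. A plausible reading (not stated in the commit message, so treat it as an inference) is that the funcorder linter shipped with golangci-lint v2.1.0 now flags these files; it expects constructors to sit directly after the type they construct and exported methods to precede unexported ones. A minimal sketch of that ordering, using hypothetical names:

package example

type Widget struct {
    name string
}

// NewWidget is the constructor; funcorder wants it placed
// immediately after the type it constructs.
func NewWidget(name string) *Widget {
    return &Widget{name: name}
}

// Name is exported, so it precedes any unexported methods.
func (w *Widget) Name() string {
    return w.name
}

// reset is unexported and therefore sorts last.
func (w *Widget) reset() {
    w.name = ""
}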
@@ -308,6 +308,28 @@ func (r *RKE2ControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c
     return nil
 }
 
+// ClusterToRKE2ControlPlane is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
+// for RKE2ControlPlane based on updates to a Cluster.
+func (r *RKE2ControlPlaneReconciler) ClusterToRKE2ControlPlane(ctx context.Context) handler.MapFunc {
+    log := log.FromContext(ctx)
+
+    return func(_ context.Context, o client.Object) []ctrl.Request {
+        c, ok := o.(*clusterv1.Cluster)
+        if !ok {
+            log.Error(nil, fmt.Sprintf("Expected a Cluster but got a %T", o))
+
+            return nil
+        }
+
+        controlPlaneRef := c.Spec.ControlPlaneRef
+        if controlPlaneRef != nil && controlPlaneRef.Kind == "RKE2ControlPlane" {
+            return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}}
+        }
+
+        return nil
+    }
+}
+
 // nolint:gocyclo
 func (r *RKE2ControlPlaneReconciler) updateStatus(ctx context.Context, rcp *controlplanev1.RKE2ControlPlane, cluster *clusterv1.Cluster) error {
     logger := log.FromContext(ctx)
@@ -1040,28 +1062,6 @@ func (r *RKE2ControlPlaneReconciler) upgradeControlPlane(
     }
 }
 
-// ClusterToRKE2ControlPlane is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
-// for RKE2ControlPlane based on updates to a Cluster.
-func (r *RKE2ControlPlaneReconciler) ClusterToRKE2ControlPlane(ctx context.Context) handler.MapFunc {
-    log := log.FromContext(ctx)
-
-    return func(_ context.Context, o client.Object) []ctrl.Request {
-        c, ok := o.(*clusterv1.Cluster)
-        if !ok {
-            log.Error(nil, fmt.Sprintf("Expected a Cluster but got a %T", o))
-
-            return nil
-        }
-
-        controlPlaneRef := c.Spec.ControlPlaneRef
-        if controlPlaneRef != nil && controlPlaneRef.Kind == "RKE2ControlPlane" {
-            return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}}
-        }
-
-        return nil
-    }
-}
-
 func (r *RKE2ControlPlaneReconciler) reconcilePreTerminateHook(ctx context.Context, controlPlane *rke2.ControlPlane) (ctrl.Result, error) {
     // Ensure that every active machine has the drain hook set
     patchHookAnnotation := false
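The two hunks above relocate ClusterToRKE2ControlPlane unchanged from below upgradeControlPlane to directly after SetupWithManager, so the exported method now precedes the unexported updateStatus and reconcilePreTerminateHook.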
@@ -34,6 +34,14 @@ type Addr struct {
     identifier uint32
 }
 
+// NewAddrFromConn creates an Addr from the given connection.
+func NewAddrFromConn(c *Conn) Addr {
+    return Addr{
+        port:       c.stream.Headers().Get(corev1.PortHeader),
+        identifier: c.stream.Identifier(),
+    }
+}
+
 // Network returns a fake network.
 func (a Addr) Network() string {
     return portforward.PortForwardProtocolV1Name
@@ -49,11 +57,3 @@ func (a Addr) String() string {
         a.port,
     )
 }
-
-// NewAddrFromConn creates an Addr from the given connection.
-func NewAddrFromConn(c *Conn) Addr {
-    return Addr{
-        port:       c.stream.Headers().Get(corev1.PortHeader),
-        identifier: c.stream.Identifier(),
-    }
-}
@@ -32,6 +32,15 @@ type Conn struct {
     writeDeadline time.Time
 }
 
+// NewConn creates a new net/conn interface based on an underlying Kubernetes
+// API server proxy connection.
+func NewConn(connection httpstream.Connection, stream httpstream.Stream) *Conn {
+    return &Conn{
+        connection: connection,
+        stream:     stream,
+    }
+}
+
 // Read from the connection.
 func (c *Conn) Read(b []byte) (n int, err error) {
     return c.stream.Read(b)
@@ -78,12 +87,3 @@ func (c *Conn) SetReadDeadline(t time.Time) error {
 
     return nil
 }
-
-// NewConn creates a new net/conn interface based on an underlying Kubernetes
-// API server proxy connection.
-func NewConn(connection httpstream.Connection, stream httpstream.Stream) *Conn {
-    return &Conn{
-        connection: connection,
-        stream:     stream,
-    }
-}
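Likewise in the proxy code: the constructors NewAddrFromConn and NewConn move, unchanged, from the bottom of their files to directly after the Addr and Conn types they construct.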
@@ -198,19 +198,6 @@ type ClusterStatus struct {
     HasRKE2ServingSecret bool
 }
 
-func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
-    nodes := &corev1.NodeList{}
-    labels := map[string]string{
-        labelNodeRoleControlPlane: "true",
-    }
-
-    if err := w.List(ctx, nodes, ctrlclient.MatchingLabels(labels)); err != nil {
-        return nil, err
-    }
-
-    return nodes, nil
-}
-
 // PatchNodes patches the nodes in the workload cluster.
 func (w *Workload) PatchNodes(ctx context.Context, cp *ControlPlane) error {
     errList := []error{}
@@ -399,6 +386,60 @@ func (w *Workload) UpdateAgentConditions(controlPlane *ControlPlane) {
     })
 }
 
+// UpdateNodeMetadata is responsible for populating node metadata after
+// it is referenced from machine object.
+func (w *Workload) UpdateNodeMetadata(ctx context.Context, controlPlane *ControlPlane) error {
+    for nodeName, machine := range controlPlane.Machines {
+        if machine.Spec.Bootstrap.ConfigRef == nil {
+            continue
+        }
+
+        if machine.Status.NodeRef != nil {
+            nodeName = machine.Status.NodeRef.Name
+        }
+
+        conditions.MarkTrue(machine, controlplanev1.NodeMetadataUpToDate)
+
+        node, nodeFound := w.Nodes[nodeName]
+        if !nodeFound {
+            conditions.MarkUnknown(
+                machine,
+                controlplanev1.NodeMetadataUpToDate,
+                controlplanev1.NodePatchFailedReason, "associated node not found")
+
+            continue
+        } else if name, ok := node.Annotations[clusterv1.MachineAnnotation]; !ok || name != machine.Name {
+            conditions.MarkUnknown(
+                machine,
+                controlplanev1.NodeMetadataUpToDate,
+                controlplanev1.NodePatchFailedReason, fmt.Sprintf("node object is missing %s annotation", clusterv1.MachineAnnotation))
+
+            continue
+        }
+
+        rkeConfig, found := controlPlane.Rke2Configs[machine.Name]
+        if !found {
+            conditions.MarkUnknown(
+                machine,
+                controlplanev1.NodeMetadataUpToDate,
+                controlplanev1.NodePatchFailedReason, "associated RKE2 config not found")
+
+            continue
+        }
+
+        annotations.AddAnnotations(node, rkeConfig.Spec.AgentConfig.NodeAnnotations)
+    }
+
+    return w.PatchNodes(ctx, controlPlane)
+}
+
+// UpdateEtcdConditions is responsible for updating machine conditions reflecting the status of all the etcd members.
+// This operation is best effort, in the sense that in case of problems in retrieving member status, it sets
+// the condition to Unknown state without returning any error.
+func (w *Workload) UpdateEtcdConditions(controlPlane *ControlPlane) {
+    w.updateManagedEtcdConditions(controlPlane)
+}
+
 type aggregateFromMachinesToRCPInput struct {
     controlPlane      *ControlPlane
     machineConditions []clusterv1.ConditionType
@@ -409,6 +450,19 @@ type aggregateFromMachinesToRCPInput struct {
     note string
 }
 
+func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
+    nodes := &corev1.NodeList{}
+    labels := map[string]string{
+        labelNodeRoleControlPlane: "true",
+    }
+
+    if err := w.List(ctx, nodes, ctrlclient.MatchingLabels(labels)); err != nil {
+        return nil, err
+    }
+
+    return nodes, nil
+}
+
 // aggregateFromMachinesToRCP aggregates a group of conditions from machines to RCP.
 // NOTE: this func follows the same aggregation rules used by conditions.Merge thus giving priority to
 // errors, then warning, info down to unknown.
@@ -510,13 +564,6 @@ func aggregateFromMachinesToRCP(input aggregateFromMachinesToRCPInput) {
     }
 }
 
-// UpdateEtcdConditions is responsible for updating machine conditions reflecting the status of all the etcd members.
-// This operation is best effort, in the sense that in case of problems in retrieving member status, it sets
-// the condition to Unknown state without returning any error.
-func (w *Workload) UpdateEtcdConditions(controlPlane *ControlPlane) {
-    w.updateManagedEtcdConditions(controlPlane)
-}
-
 func (w *Workload) updateManagedEtcdConditions(controlPlane *ControlPlane) {
     // NOTE: This methods uses control plane nodes only to get in contact with etcd but then it relies on etcd
     // as ultimate source of truth for the list of members and for their health.
@@ -552,50 +599,3 @@ func (w *Workload) updateManagedEtcdConditions(controlPlane *ControlPlane) {
         conditions.MarkTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition)
     }
 }
-
-// UpdateNodeMetadata is responsible for populating node metadata after
-// it is referenced from machine object.
-func (w *Workload) UpdateNodeMetadata(ctx context.Context, controlPlane *ControlPlane) error {
-    for nodeName, machine := range controlPlane.Machines {
-        if machine.Spec.Bootstrap.ConfigRef == nil {
-            continue
-        }
-
-        if machine.Status.NodeRef != nil {
-            nodeName = machine.Status.NodeRef.Name
-        }
-
-        conditions.MarkTrue(machine, controlplanev1.NodeMetadataUpToDate)
-
-        node, nodeFound := w.Nodes[nodeName]
-        if !nodeFound {
-            conditions.MarkUnknown(
-                machine,
-                controlplanev1.NodeMetadataUpToDate,
-                controlplanev1.NodePatchFailedReason, "associated node not found")
-
-            continue
-        } else if name, ok := node.Annotations[clusterv1.MachineAnnotation]; !ok || name != machine.Name {
-            conditions.MarkUnknown(
-                machine,
-                controlplanev1.NodeMetadataUpToDate,
-                controlplanev1.NodePatchFailedReason, fmt.Sprintf("node object is missing %s annotation", clusterv1.MachineAnnotation))
-
-            continue
-        }
-
-        rkeConfig, found := controlPlane.Rke2Configs[machine.Name]
-        if !found {
-            conditions.MarkUnknown(
-                machine,
-                controlplanev1.NodeMetadataUpToDate,
-                controlplanev1.NodePatchFailedReason, "associated RKE2 config not found")
-
-            continue
-        }
-
-        annotations.AddAnnotations(node, rkeConfig.Spec.AgentConfig.NodeAnnotations)
-    }
-
-    return w.PatchNodes(ctx, controlPlane)
-}
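As with the earlier files, the six hunks above only reorder the workload-cluster code: the exported UpdateNodeMetadata and UpdateEtcdConditions move up, while the unexported getControlPlaneNodes moves down next to the other unexported helpers; no function body changes.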