Compare commits
183 Commits: v1.9.2-dev...master

SHA1:
a59e184109 f78696c38c a282cfadb0 0be1ae364a 1d74ce371e 5e5e16b491 1d6c6d6eb8 a086a9d539
8bb338dcfe d4d8329218 2d5701ad6d 1b2811f045 95f5703ec2 16b59ba7b3 bdbefab905 03f2e1a0ae
b20aad87ad 5b5a7deb2a a8a4599ffb d34de92bab 3f5d685d77 d39c0fbab5 54c82131cf 55c124d402
4d9e300028 12e3a848bb 9ee7981b29 ad639ef004 f7c5c70e00 d9947c944c ad9abda739 7e5f4ae426
ed745de36a ec4b8579da dfe71fddab 3b32e54db5 3c8aeb9c5b 9411d653d0 5bb438e64f ecfe2d6066
a9209b7177 2ccd199b56 20f2f1f348 8427017648 62481b6e46 815ae7ecca 92ece48e67 c933a9ea14
3eb4fa50f4 73b5809b92 2a4a5fdc5f 509828b975 3fbcf41b83 3a630e37bb d358a6e40c e54c383996
8dba854d20 4593e8ef35 c4f166c295 eb0bc95a65 e72e92747f 3cb638aca9 81714ef130 7d7f645846
73d4a9f938 ed5c43f075 d4b39310f5 ef4983e582 df44f46337 bb84ca8762 ffbc1b8c79 52a99d8901
57b1f20ff8 5e79a8b050 7234295e83 4ddb6cb733 e4e7f35a8e dd3a637970 12563caa8a 00fb7c6e65
15321dc107 d563070822 115f8c18b4 ec2a5a23b8 596d573021 7c8ddac721 9cbb80614f aab5731e08
d2f728f8e2 fd07bfd06f bdaeeeb50a 7ec7390305 be534da84b 16f796214e 982b21fd3a c65035e669
738ea81a98 d75b51f3d7 7d00341c0f e2d1b1a12e 4d5395f41a bda034d48a 3a94c5f436 9a63d5554d
0ba5f41be2 31554ed21f 4f0ab80b29 60936f5404 c124c8c8ca a70050d971 bbfa599009 a0ddfc27b9
16505d0d1a 7fd9cf808a 5482297256 c36b3d5e1c 07d9406437 dc8587e039 ede5b8285c 6fd42d7a12
a56819105f 6ded50f2c9 1c0116d2b2 7038f55f16 3b39ecb92b 3a01e83bdf 7ed2650baa 842a85bc8e
849f677822 b0e446e623 ef6530863f 48390d7e5f 15ce69f519 c5e2988fd3 a79573a761 3236f24926
cae3910641 43e36b45f5 604a88fbb6 9deae1ff71 ebcb3e1b7a 90b5b59cc6 b24de0c817 17697500ae
d401341c50 2f5cbf1a1f a0ddfeb19e 3e835e2a7f 412ede8bba 08801f5a47 635f2a2dea 69f547d951
fd72dff47a 71fc752773 045c08b44e 9a9bf8188d b797aaac95 ff4148dbeb e83d51f56e 42fbb8013b
0c94ce3177 48a757ad4a 523a0aee5d 96d7e664b3 014fd6ea91 3468976c36 2ecaa61b98 bbd62b61be
d5606b2280 da86965486 9d343c59ca ad0cdbf3c8 5b1f5eac87 bb52d1da76 4957d1e62b 9562ce1821
6a9387b33b 40fd3ab08f a51de50092 13816ed654 239730bbae e98d9f0e62 6fff7c3ff8
@@ -7,57 +7,79 @@ on:
- master
- "v*"

workflow_dispatch:
inputs:
pr_number:
description: 'Pull request number from longhorn/longhorn-manager'
required: true
type: number
base_branch:
description: 'Base branch the PR was merged into'
required: true
default: 'master'
type: string

jobs:
create-pull-request:
if: github.event.pull_request.merged == true
if: github.event_name == 'workflow_dispatch' || github.event.pull_request.merged == true
runs-on: ubuntu-latest
steps:
- name: Prepare Packages
run: |
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
- name: Prepare Helm
run: |
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh

- name: Log triggering PR information
shell: bash
run: |
echo "Triggered by PR: #${{ github.event.pull_request.number }}"
echo "PR Title: ${{ github.event.pull_request.title }}"
echo "PR URL: ${{ github.event.pull_request.html_url }}"
echo "PR was merged into branch: ${{ github.event.pull_request.base.ref }}"
- uses: actions/checkout@v4
with:
repository: longhorn/longhorn
ref: ${{ github.event.pull_request.base.ref }}
- name: Determine PR info
id: pr_info
run: |
if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
echo "PR_NUMBER=${{ inputs.pr_number }}" >> $GITHUB_OUTPUT
echo "BASE_BRANCH=${{ inputs.base_branch }}" >> $GITHUB_OUTPUT
else
echo "PR_NUMBER=${{ github.event.pull_request.number }}" >> $GITHUB_OUTPUT
echo "BASE_BRANCH=${{ github.event.pull_request.base.ref }}" >> $GITHUB_OUTPUT
fi

- name: Update crds.yaml and manifests
shell: bash
run: |
curl -L https://github.com/longhorn/longhorn-manager/raw/master/k8s/crds.yaml -o chart/templates/crds.yaml
bash scripts/generate-longhorn-yaml.sh
bash scripts/helm-docs.sh
- name: Log triggering PR information
run: |
echo "Triggered by PR: #${{ steps.pr_info.outputs.PR_NUMBER }}"
echo "Base branch: ${{ steps.pr_info.outputs.BASE_BRANCH }}"

- name: Get Head Commit Name
id: get_head_commit_name
run: echo "::set-output name=commit_name::$(git log -1 --pretty=format:'%an')"
- id: app-token
uses: actions/create-github-app-token@v2
with:
app-id: ${{ secrets.LONGHORN_GITHUB_BOT_APP_ID }}
private-key: ${{ secrets.LONGHORN_GITHUB_BOT_PRIVATE_KEY }}
owner: ${{ github.repository_owner }}
permission-contents: write
permission-pull-requests: write

- name: Get Head Commit Email
id: get_head_commit_email
run: echo "::set-output name=commit_email::$(git log -1 --pretty=format:'%ae')"
- uses: actions/checkout@v4
with:
repository: longhorn/longhorn
token: ${{ steps.app-token.outputs.token }}
ref: ${{ steps.pr_info.outputs.BASE_BRANCH }}

- name: Create Pull Request
id: cpr
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.CUSTOM_GITHUB_TOKEN }}
branch: "update-crds-and-manifests-longhorn-manager-${{ github.event.pull_request.number }}"
delete-branch: true
sign-commits: true
signoff: true
author: ${{ steps.get_head_commit_name.outputs.commit_name }} <${{ steps.get_head_commit_email.outputs.commit_email }}>
committer: ${{ steps.get_head_commit_name.outputs.commit_name }} <${{ steps.get_head_commit_email.outputs.commit_email }}>
commit-message: "chore(crd): update crds.yaml and manifests (PR longhorn/longhorn-manager#${{ github.event.pull_request.number}})"
title: "chore(crd): update crds.yaml and manifests (PR longhorn/longhorn-manager#${{ github.event.pull_request.number}})"
body: |
This PR updates the crds.yaml and manifests.
It was triggered by longhorn/longhorn-manager#${{ github.event.pull_request.number}}.
- name: Update crds.yaml and manifests
run: |
curl -fL "https://github.com/longhorn/longhorn-manager/raw/${{ steps.pr_info.outputs.BASE_BRANCH }}/k8s/crds.yaml" -o chart/templates/crds.yaml
bash scripts/generate-longhorn-yaml.sh
bash scripts/helm-docs.sh

- name: Create Pull Request
id: cpr
uses: peter-evans/create-pull-request@v7
with:
token: ${{ steps.app-token.outputs.token }}
branch: "update-crds-and-manifests-longhorn-manager-${{ steps.pr_info.outputs.PR_NUMBER }}"
delete-branch: true
sign-commits: true
signoff: true
author: Longhorn GitHub Bot <67932897+longhorn-io-github-bot@users.noreply.github.com>
committer: Longhorn GitHub Bot <67932897+longhorn-io-github-bot@users.noreply.github.com>
commit-message: "chore(crd): update crds.yaml and manifests (PR longhorn/longhorn-manager#${{ steps.pr_info.outputs.PR_NUMBER }})"
title: "chore(crd): update crds.yaml and manifests (PR longhorn/longhorn-manager#${{ steps.pr_info.outputs.PR_NUMBER }})"
body: |
This PR updates the crds.yaml and manifests.
It was triggered by longhorn/longhorn-manager#${{ steps.pr_info.outputs.PR_NUMBER }}.
@@ -74,7 +74,7 @@ func (s *Server) EngineImageCreate(rw http.ResponseWriter, req *http.Request) er

func (s *Server) EngineImageDelete(rw http.ResponseWriter, req *http.Request) error {
id := mux.Vars(req)["name"]
if err := s.m.DeleteEngineImageByName(id); err != nil {
if err := s.m.DeleteEngineImage(id); err != nil {
return errors.Wrap(err, "failed to delete engine image")
}
@@ -2,9 +2,11 @@ package api

import (
"fmt"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strconv"

"github.com/gorilla/mux"
"github.com/pkg/errors"

@@ -205,7 +207,7 @@ func UploadParametersForBackingImage(m *manager.VolumeManager) func(req *http.Re
if bids.Status.CurrentState != longhorn.BackingImageStatePending {
return nil, fmt.Errorf("upload server for backing image %s has not been initiated", name)
}
return map[string]string{ParameterKeyAddress: fmt.Sprintf("%s:%d", pod.Status.PodIP, engineapi.BackingImageDataSourceDefaultPort)}, nil
return map[string]string{ParameterKeyAddress: net.JoinHostPort(pod.Status.PodIP, strconv.Itoa(engineapi.BackingImageDataSourceDefaultPort))}, nil
}
}
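The second hunk swaps fmt.Sprintf("%s:%d", ...) for net.JoinHostPort, which brackets IPv6 literals so the resulting address can still be dialed or parsed. A minimal standalone sketch of the difference; the port constant and IPs here are stand-ins, not values taken from this diff:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	const port = 8000 // placeholder for engineapi.BackingImageDataSourceDefaultPort

	for _, ip := range []string{"10.42.0.5", "fd00::5"} {
		// Sprintf yields "fd00::5:8000" for IPv6, which is ambiguous.
		fmt.Printf("%s:%d\n", ip, port)
		// JoinHostPort yields "[fd00::5]:8000", which dial/URL code can parse.
		fmt.Println(net.JoinHostPort(ip, strconv.Itoa(port)))
	}
}
```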
api/model.go (178 changed lines)
@@ -29,39 +29,41 @@ type Empty struct {
type Volume struct {
client.Resource

Name string `json:"name"`
Size string `json:"size"`
Frontend longhorn.VolumeFrontend `json:"frontend"`
DisableFrontend bool `json:"disableFrontend"`
FromBackup string `json:"fromBackup"`
RestoreVolumeRecurringJob longhorn.RestoreVolumeRecurringJobType `json:"restoreVolumeRecurringJob"`
DataSource longhorn.VolumeDataSource `json:"dataSource"`
DataLocality longhorn.DataLocality `json:"dataLocality"`
StaleReplicaTimeout int `json:"staleReplicaTimeout"`
State longhorn.VolumeState `json:"state"`
Robustness longhorn.VolumeRobustness `json:"robustness"`
Image string `json:"image"`
CurrentImage string `json:"currentImage"`
BackingImage string `json:"backingImage"`
Created string `json:"created"`
LastBackup string `json:"lastBackup"`
LastBackupAt string `json:"lastBackupAt"`
LastAttachedBy string `json:"lastAttachedBy"`
Standby bool `json:"standby"`
RestoreRequired bool `json:"restoreRequired"`
RestoreInitiated bool `json:"restoreInitiated"`
RevisionCounterDisabled bool `json:"revisionCounterDisabled"`
SnapshotDataIntegrity longhorn.SnapshotDataIntegrity `json:"snapshotDataIntegrity"`
UnmapMarkSnapChainRemoved longhorn.UnmapMarkSnapChainRemoved `json:"unmapMarkSnapChainRemoved"`
BackupCompressionMethod longhorn.BackupCompressionMethod `json:"backupCompressionMethod"`
ReplicaSoftAntiAffinity longhorn.ReplicaSoftAntiAffinity `json:"replicaSoftAntiAffinity"`
ReplicaZoneSoftAntiAffinity longhorn.ReplicaZoneSoftAntiAffinity `json:"replicaZoneSoftAntiAffinity"`
ReplicaDiskSoftAntiAffinity longhorn.ReplicaDiskSoftAntiAffinity `json:"replicaDiskSoftAntiAffinity"`
DataEngine longhorn.DataEngineType `json:"dataEngine"`
SnapshotMaxCount int `json:"snapshotMaxCount"`
SnapshotMaxSize string `json:"snapshotMaxSize"`
FreezeFilesystemForSnapshot longhorn.FreezeFilesystemForSnapshot `json:"freezeFilesystemForSnapshot"`
BackupTargetName string `json:"backupTargetName"`
Name string `json:"name"`
Size string `json:"size"`
Frontend longhorn.VolumeFrontend `json:"frontend"`
DisableFrontend bool `json:"disableFrontend"`
FromBackup string `json:"fromBackup"`
RestoreVolumeRecurringJob longhorn.RestoreVolumeRecurringJobType `json:"restoreVolumeRecurringJob"`
DataSource longhorn.VolumeDataSource `json:"dataSource"`
DataLocality longhorn.DataLocality `json:"dataLocality"`
StaleReplicaTimeout int `json:"staleReplicaTimeout"`
State longhorn.VolumeState `json:"state"`
Robustness longhorn.VolumeRobustness `json:"robustness"`
Image string `json:"image"`
CurrentImage string `json:"currentImage"`
BackingImage string `json:"backingImage"`
Created string `json:"created"`
LastBackup string `json:"lastBackup"`
LastBackupAt string `json:"lastBackupAt"`
LastAttachedBy string `json:"lastAttachedBy"`
Standby bool `json:"standby"`
RestoreRequired bool `json:"restoreRequired"`
RestoreInitiated bool `json:"restoreInitiated"`
RevisionCounterDisabled bool `json:"revisionCounterDisabled"`
SnapshotDataIntegrity longhorn.SnapshotDataIntegrity `json:"snapshotDataIntegrity"`
UnmapMarkSnapChainRemoved longhorn.UnmapMarkSnapChainRemoved `json:"unmapMarkSnapChainRemoved"`
BackupCompressionMethod longhorn.BackupCompressionMethod `json:"backupCompressionMethod"`
BackupBlockSize string `json:"backupBlockSize"`
ReplicaSoftAntiAffinity longhorn.ReplicaSoftAntiAffinity `json:"replicaSoftAntiAffinity"`
ReplicaZoneSoftAntiAffinity longhorn.ReplicaZoneSoftAntiAffinity `json:"replicaZoneSoftAntiAffinity"`
ReplicaDiskSoftAntiAffinity longhorn.ReplicaDiskSoftAntiAffinity `json:"replicaDiskSoftAntiAffinity"`
DataEngine longhorn.DataEngineType `json:"dataEngine"`
SnapshotMaxCount int `json:"snapshotMaxCount"`
SnapshotMaxSize string `json:"snapshotMaxSize"`
ReplicaRebuildingBandwidthLimit int64 `json:"replicaRebuildingBandwidthLimit"`
FreezeFilesystemForSnapshot longhorn.FreezeFilesystemForSnapshot `json:"freezeFilesystemForSnapshot"`
BackupTargetName string `json:"backupTargetName"`

DiskSelector []string `json:"diskSelector"`
NodeSelector []string `json:"nodeSelector"`

@@ -176,6 +178,7 @@ type Backup struct {
NewlyUploadedDataSize string `json:"newlyUploadDataSize"`
ReUploadedDataSize string `json:"reUploadedDataSize"`
BackupTargetName string `json:"backupTargetName"`
BlockSize string `json:"blockSize"`
}

type BackupBackingImage struct {

@@ -249,6 +252,8 @@ type Attachment struct {
}

type VolumeAttachment struct {
client.Resource

Attachments map[string]Attachment `json:"attachments"`
Volume string `json:"volume"`
}

@@ -368,6 +373,10 @@ type UpdateSnapshotMaxSizeInput struct {
SnapshotMaxSize string `json:"snapshotMaxSize"`
}

type UpdateReplicaRebuildingBandwidthLimitInput struct {
ReplicaRebuildingBandwidthLimit string `json:"replicaRebuildingBandwidthLimit"`
}

type UpdateBackupCompressionMethodInput struct {
BackupCompressionMethod string `json:"backupCompressionMethod"`
}

@@ -396,6 +405,10 @@ type UpdateSnapshotMaxSize struct {
SnapshotMaxSize string `json:"snapshotMaxSize"`
}

type UpdateReplicaRebuildingBandwidthLimit struct {
ReplicaRebuildingBandwidthLimit string `json:"replicaRebuildingBandwidthLimit"`
}

type UpdateFreezeFilesystemForSnapshotInput struct {
FreezeFilesystemForSnapshot string `json:"freezeFilesystemForSnapshot"`
}

@@ -662,6 +675,7 @@ func NewSchema() *client.Schemas {
schemas.AddType("UpdateSnapshotDataIntegrityInput", UpdateSnapshotDataIntegrityInput{})
schemas.AddType("UpdateSnapshotMaxCountInput", UpdateSnapshotMaxCountInput{})
schemas.AddType("UpdateSnapshotMaxSizeInput", UpdateSnapshotMaxSizeInput{})
schemas.AddType("UpdateReplicaRebuildingBandwidthLimitInput", UpdateReplicaRebuildingBandwidthLimitInput{})
schemas.AddType("UpdateBackupCompressionInput", UpdateBackupCompressionMethodInput{})
schemas.AddType("UpdateUnmapMarkSnapChainRemovedInput", UpdateUnmapMarkSnapChainRemovedInput{})
schemas.AddType("UpdateReplicaSoftAntiAffinityInput", UpdateReplicaSoftAntiAffinityInput{})

@@ -1093,6 +1107,10 @@ func volumeSchema(volume *client.Schema) {
Input: "UpdateSnapshotMaxSizeInput",
},

"updateReplicaRebuildingBandwidthLimit": {
Input: "UpdateReplicaRebuildingBandwidthLimitInput",
},

"updateBackupCompressionMethod": {
Input: "UpdateBackupCompressionMethodInput",
},

@@ -1603,30 +1621,32 @@ func toVolumeResource(v *longhorn.Volume, ves []*longhorn.Engine, vrs []*longhor
Actions: map[string]string{},
Links: map[string]string{},
},
Name: v.Name,
Size: strconv.FormatInt(v.Spec.Size, 10),
Frontend: v.Spec.Frontend,
DisableFrontend: v.Spec.DisableFrontend,
LastAttachedBy: v.Spec.LastAttachedBy,
FromBackup: v.Spec.FromBackup,
DataSource: v.Spec.DataSource,
NumberOfReplicas: v.Spec.NumberOfReplicas,
ReplicaAutoBalance: v.Spec.ReplicaAutoBalance,
DataLocality: v.Spec.DataLocality,
SnapshotDataIntegrity: v.Spec.SnapshotDataIntegrity,
SnapshotMaxCount: v.Spec.SnapshotMaxCount,
SnapshotMaxSize: strconv.FormatInt(v.Spec.SnapshotMaxSize, 10),
BackupCompressionMethod: v.Spec.BackupCompressionMethod,
StaleReplicaTimeout: v.Spec.StaleReplicaTimeout,
Created: v.CreationTimestamp.String(),
Image: v.Spec.Image,
BackingImage: v.Spec.BackingImage,
Standby: v.Spec.Standby,
DiskSelector: v.Spec.DiskSelector,
NodeSelector: v.Spec.NodeSelector,
RestoreVolumeRecurringJob: v.Spec.RestoreVolumeRecurringJob,
FreezeFilesystemForSnapshot: v.Spec.FreezeFilesystemForSnapshot,
BackupTargetName: v.Spec.BackupTargetName,
Name: v.Name,
Size: strconv.FormatInt(v.Spec.Size, 10),
Frontend: v.Spec.Frontend,
DisableFrontend: v.Spec.DisableFrontend,
LastAttachedBy: v.Spec.LastAttachedBy,
FromBackup: v.Spec.FromBackup,
DataSource: v.Spec.DataSource,
NumberOfReplicas: v.Spec.NumberOfReplicas,
ReplicaAutoBalance: v.Spec.ReplicaAutoBalance,
DataLocality: v.Spec.DataLocality,
SnapshotDataIntegrity: v.Spec.SnapshotDataIntegrity,
SnapshotMaxCount: v.Spec.SnapshotMaxCount,
SnapshotMaxSize: strconv.FormatInt(v.Spec.SnapshotMaxSize, 10),
ReplicaRebuildingBandwidthLimit: v.Spec.ReplicaRebuildingBandwidthLimit,
BackupCompressionMethod: v.Spec.BackupCompressionMethod,
BackupBlockSize: strconv.FormatInt(v.Spec.BackupBlockSize, 10),
StaleReplicaTimeout: v.Spec.StaleReplicaTimeout,
Created: v.CreationTimestamp.String(),
Image: v.Spec.Image,
BackingImage: v.Spec.BackingImage,
Standby: v.Spec.Standby,
DiskSelector: v.Spec.DiskSelector,
NodeSelector: v.Spec.NodeSelector,
RestoreVolumeRecurringJob: v.Spec.RestoreVolumeRecurringJob,
FreezeFilesystemForSnapshot: v.Spec.FreezeFilesystemForSnapshot,
BackupTargetName: v.Spec.BackupTargetName,

State: v.Status.State,
Robustness: v.Status.Robustness,

@@ -1699,6 +1719,7 @@ func toVolumeResource(v *longhorn.Volume, ves []*longhorn.Engine, vrs []*longhor
actions["updateSnapshotDataIntegrity"] = struct{}{}
actions["updateSnapshotMaxCount"] = struct{}{}
actions["updateSnapshotMaxSize"] = struct{}{}
actions["updateReplicaRebuildingBandwidthLimit"] = struct{}{}
actions["updateBackupCompressionMethod"] = struct{}{}
actions["updateReplicaSoftAntiAffinity"] = struct{}{}
actions["updateReplicaZoneSoftAntiAffinity"] = struct{}{}

@@ -1732,6 +1753,7 @@ func toVolumeResource(v *longhorn.Volume, ves []*longhorn.Engine, vrs []*longhor
actions["updateSnapshotDataIntegrity"] = struct{}{}
actions["updateSnapshotMaxCount"] = struct{}{}
actions["updateSnapshotMaxSize"] = struct{}{}
actions["updateReplicaRebuildingBandwidthLimit"] = struct{}{}
actions["updateBackupCompressionMethod"] = struct{}{}
actions["updateReplicaSoftAntiAffinity"] = struct{}{}
actions["updateReplicaZoneSoftAntiAffinity"] = struct{}{}

@@ -2023,6 +2045,7 @@ func toBackupResource(b *longhorn.Backup) *Backup {
NewlyUploadedDataSize: b.Status.NewlyUploadedDataSize,
ReUploadedDataSize: b.Status.ReUploadedDataSize,
BackupTargetName: backupTargetName,
BlockSize: strconv.FormatInt(b.Spec.BackupBlockSize, 10),
}
// Set the volume name from backup CR's label if it's empty.
// This field is empty probably because the backup state is not Ready

@@ -2414,6 +2437,47 @@ func toOrphanCollection(orphans map[string]*longhorn.Orphan) *client.GenericColl
return &client.GenericCollection{Data: data, Collection: client.Collection{ResourceType: "orphan"}}
}

func toVolumeAttachmentResource(volumeAttachment *longhorn.VolumeAttachment) *VolumeAttachment {
attachments := make(map[string]Attachment)

for ticketName, ticket := range volumeAttachment.Spec.AttachmentTickets {
status := volumeAttachment.Status.AttachmentTicketStatuses[ticketName]

attachment := Attachment{
AttachmentID: ticket.ID,
AttachmentType: string(ticket.Type),
NodeID: ticket.NodeID,
Parameters: ticket.Parameters,
Satisfied: false,
Conditions: nil,
}

if status != nil {
attachment.Satisfied = status.Satisfied
attachment.Conditions = status.Conditions
}

attachments[ticketName] = attachment
}

return &VolumeAttachment{
Resource: client.Resource{
Id: volumeAttachment.Name,
Type: "volumeAttachment",
},
Volume: volumeAttachment.Spec.Volume,
Attachments: attachments,
}
}

func toVolumeAttachmentCollection(attachments []*longhorn.VolumeAttachment, apiContext *api.ApiContext) *client.GenericCollection {
data := []interface{}{}
for _, attachment := range attachments {
data = append(data, toVolumeAttachmentResource(attachment))
}
return &client.GenericCollection{Data: data, Collection: client.Collection{ResourceType: "volumeAttachment"}}
}

func sliceToMap(conditions []longhorn.Condition) map[string]longhorn.Condition {
converted := map[string]longhorn.Condition{}
for _, c := range conditions {
@@ -69,21 +69,22 @@ func NewRouter(s *Server) *mux.Router {
r.Methods("DELETE").Path("/v1/volumes/{name}").Handler(f(schemas, s.VolumeDelete))
r.Methods("POST").Path("/v1/volumes").Handler(f(schemas, s.fwd.Handler(s.fwd.HandleProxyRequestByNodeID, s.fwd.GetHTTPAddressByNodeID(NodeHasDefaultEngineImage(s.m)), s.VolumeCreate)))
volumeActions := map[string]func(http.ResponseWriter, *http.Request) error{
"attach": s.VolumeAttach,
"detach": s.VolumeDetach,
"salvage": s.VolumeSalvage,
"updateDataLocality": s.VolumeUpdateDataLocality,
"updateAccessMode": s.VolumeUpdateAccessMode,
"updateUnmapMarkSnapChainRemoved": s.VolumeUpdateUnmapMarkSnapChainRemoved,
"updateSnapshotMaxCount": s.VolumeUpdateSnapshotMaxCount,
"updateSnapshotMaxSize": s.VolumeUpdateSnapshotMaxSize,
"updateReplicaSoftAntiAffinity": s.VolumeUpdateReplicaSoftAntiAffinity,
"updateReplicaZoneSoftAntiAffinity": s.VolumeUpdateReplicaZoneSoftAntiAffinity,
"updateReplicaDiskSoftAntiAffinity": s.VolumeUpdateReplicaDiskSoftAntiAffinity,
"activate": s.VolumeActivate,
"expand": s.VolumeExpand,
"cancelExpansion": s.VolumeCancelExpansion,
"offlineReplicaRebuilding": s.VolumeOfflineRebuilding,
"attach": s.VolumeAttach,
"detach": s.VolumeDetach,
"salvage": s.VolumeSalvage,
"updateDataLocality": s.VolumeUpdateDataLocality,
"updateAccessMode": s.VolumeUpdateAccessMode,
"updateUnmapMarkSnapChainRemoved": s.VolumeUpdateUnmapMarkSnapChainRemoved,
"updateSnapshotMaxCount": s.VolumeUpdateSnapshotMaxCount,
"updateSnapshotMaxSize": s.VolumeUpdateSnapshotMaxSize,
"updateReplicaRebuildingBandwidthLimit": s.VolumeUpdateReplicaRebuildingBandwidthLimit,
"updateReplicaSoftAntiAffinity": s.VolumeUpdateReplicaSoftAntiAffinity,
"updateReplicaZoneSoftAntiAffinity": s.VolumeUpdateReplicaZoneSoftAntiAffinity,
"updateReplicaDiskSoftAntiAffinity": s.VolumeUpdateReplicaDiskSoftAntiAffinity,
"activate": s.VolumeActivate,
"expand": s.VolumeExpand,
"cancelExpansion": s.VolumeCancelExpansion,
"offlineReplicaRebuilding": s.VolumeOfflineRebuilding,

"updateReplicaCount": s.VolumeUpdateReplicaCount,
"updateReplicaAutoBalance": s.VolumeUpdateReplicaAutoBalance,

@@ -291,5 +292,9 @@ func NewRouter(s *Server) *mux.Router {
r.Path("/v1/ws/events").Handler(f(schemas, eventListStream))
r.Path("/v1/ws/{period}/events").Handler(f(schemas, eventListStream))

// VolumeAttachment routes
r.Methods("GET").Path("/v1/volumeattachments").Handler(f(schemas, s.VolumeAttachmentList))
r.Methods("GET").Path("/v1/volumeattachments/{name}").Handler(f(schemas, s.VolumeAttachmentGet))

return r
}
@@ -172,36 +172,43 @@ func (s *Server) VolumeCreate(rw http.ResponseWriter, req *http.Request) {
return errors.Wrap(err, "failed to parse snapshot max size")
}

backupBlockSize, err := util.ConvertSize(volume.BackupBlockSize)
if err != nil {
return errors.Wrapf(err, "failed to parse backup block size %v", volume.BackupBlockSize)
}

v, err := s.m.Create(volume.Name, &longhorn.VolumeSpec{
Size: size,
AccessMode: volume.AccessMode,
Migratable: volume.Migratable,
Encrypted: volume.Encrypted,
Frontend: volume.Frontend,
FromBackup: volume.FromBackup,
RestoreVolumeRecurringJob: volume.RestoreVolumeRecurringJob,
DataSource: volume.DataSource,
NumberOfReplicas: volume.NumberOfReplicas,
ReplicaAutoBalance: volume.ReplicaAutoBalance,
DataLocality: volume.DataLocality,
StaleReplicaTimeout: volume.StaleReplicaTimeout,
BackingImage: volume.BackingImage,
Standby: volume.Standby,
RevisionCounterDisabled: volume.RevisionCounterDisabled,
DiskSelector: volume.DiskSelector,
NodeSelector: volume.NodeSelector,
SnapshotDataIntegrity: volume.SnapshotDataIntegrity,
SnapshotMaxCount: volume.SnapshotMaxCount,
SnapshotMaxSize: snapshotMaxSize,
BackupCompressionMethod: volume.BackupCompressionMethod,
UnmapMarkSnapChainRemoved: volume.UnmapMarkSnapChainRemoved,
ReplicaSoftAntiAffinity: volume.ReplicaSoftAntiAffinity,
ReplicaZoneSoftAntiAffinity: volume.ReplicaZoneSoftAntiAffinity,
ReplicaDiskSoftAntiAffinity: volume.ReplicaDiskSoftAntiAffinity,
DataEngine: volume.DataEngine,
FreezeFilesystemForSnapshot: volume.FreezeFilesystemForSnapshot,
BackupTargetName: volume.BackupTargetName,
OfflineRebuilding: volume.OfflineRebuilding,
Size: size,
AccessMode: volume.AccessMode,
Migratable: volume.Migratable,
Encrypted: volume.Encrypted,
Frontend: volume.Frontend,
FromBackup: volume.FromBackup,
RestoreVolumeRecurringJob: volume.RestoreVolumeRecurringJob,
DataSource: volume.DataSource,
NumberOfReplicas: volume.NumberOfReplicas,
ReplicaAutoBalance: volume.ReplicaAutoBalance,
DataLocality: volume.DataLocality,
StaleReplicaTimeout: volume.StaleReplicaTimeout,
BackingImage: volume.BackingImage,
Standby: volume.Standby,
RevisionCounterDisabled: volume.RevisionCounterDisabled,
DiskSelector: volume.DiskSelector,
NodeSelector: volume.NodeSelector,
SnapshotDataIntegrity: volume.SnapshotDataIntegrity,
SnapshotMaxCount: volume.SnapshotMaxCount,
SnapshotMaxSize: snapshotMaxSize,
ReplicaRebuildingBandwidthLimit: volume.ReplicaRebuildingBandwidthLimit,
BackupCompressionMethod: volume.BackupCompressionMethod,
BackupBlockSize: backupBlockSize,
UnmapMarkSnapChainRemoved: volume.UnmapMarkSnapChainRemoved,
ReplicaSoftAntiAffinity: volume.ReplicaSoftAntiAffinity,
ReplicaZoneSoftAntiAffinity: volume.ReplicaZoneSoftAntiAffinity,
ReplicaDiskSoftAntiAffinity: volume.ReplicaDiskSoftAntiAffinity,
DataEngine: volume.DataEngine,
FreezeFilesystemForSnapshot: volume.FreezeFilesystemForSnapshot,
BackupTargetName: volume.BackupTargetName,
OfflineRebuilding: volume.OfflineRebuilding,
}, volume.RecurringJobSelector)
if err != nil {
return errors.Wrap(err, "failed to create volume")

@@ -839,6 +846,33 @@ func (s *Server) VolumeUpdateSnapshotMaxSize(rw http.ResponseWriter, req *http.R
return s.responseWithVolume(rw, req, "", v)
}

func (s *Server) VolumeUpdateReplicaRebuildingBandwidthLimit(rw http.ResponseWriter, req *http.Request) error {
var input UpdateReplicaRebuildingBandwidthLimit
id := mux.Vars(req)["name"]

apiContext := api.GetApiContext(req)
if err := apiContext.Read(&input); err != nil {
return errors.Wrap(err, "failed to read ReplicaRebuildingBandwidthLimit input")
}

replicaRebuildingBandwidthLimit, err := util.ConvertSize(input.ReplicaRebuildingBandwidthLimit)
if err != nil {
return fmt.Errorf("failed to parse replica rebuilding bandwidth limit %v", err)
}

obj, err := util.RetryOnConflictCause(func() (interface{}, error) {
return s.m.UpdateReplicaRebuildingBandwidthLimit(id, replicaRebuildingBandwidthLimit)
})
if err != nil {
return err
}
v, ok := obj.(*longhorn.Volume)
if !ok {
return fmt.Errorf("failed to convert to volume %v object", id)
}
return s.responseWithVolume(rw, req, "", v)
}

func (s *Server) VolumeUpdateFreezeFilesystemForSnapshot(rw http.ResponseWriter, req *http.Request) error {
var input UpdateFreezeFilesystemForSnapshotInput
id := mux.Vars(req)["name"]
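The new VolumeUpdateReplicaRebuildingBandwidthLimit handler pairs with the updateReplicaRebuildingBandwidthLimit action registered in api/router.go. A hedged sketch of invoking that action over plain HTTP; the manager address, volume name, and limit value are placeholders, and the ?action= form follows the usual Rancher-style API convention rather than anything spelled out in this diff:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Assumed: a Longhorn manager reachable at this address; "test-vol" is illustrative.
	url := "http://longhorn-backend:9500/v1/volumes/test-vol?action=updateReplicaRebuildingBandwidthLimit"

	// Field name mirrors UpdateReplicaRebuildingBandwidthLimitInput in api/model.go;
	// the handler parses the value with util.ConvertSize, so a byte count is used here.
	body, _ := json.Marshal(map[string]string{
		"replicaRebuildingBandwidthLimit": "104857600",
	})

	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```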
@@ -0,0 +1,34 @@
package api

import (
"net/http"

"github.com/gorilla/mux"
"github.com/pkg/errors"

"github.com/rancher/go-rancher/api"
)

func (s *Server) VolumeAttachmentGet(rw http.ResponseWriter, req *http.Request) error {
apiContext := api.GetApiContext(req)

id := mux.Vars(req)["name"]

volumeAttachment, err := s.m.GetVolumeAttachment(id)
if err != nil {
return errors.Wrapf(err, "failed to get volume attachment '%s'", id)
}
apiContext.Write(toVolumeAttachmentResource(volumeAttachment))
return nil
}

func (s *Server) VolumeAttachmentList(rw http.ResponseWriter, req *http.Request) (err error) {
apiContext := api.GetApiContext(req)

volumeAttachmentList, err := s.m.ListVolumeAttachment()
if err != nil {
return errors.Wrap(err, "failed to list volume attachments")
}
apiContext.Write(toVolumeAttachmentCollection(volumeAttachmentList, apiContext))
return nil
}
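Together with the GET /v1/volumeattachments routes added in api/router.go, these handlers expose read-only VolumeAttachment resources. A small consumer sketch; the service address is a placeholder and the structs only mirror the fields added to api/model.go:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Minimal mirror of the VolumeAttachment resource fields from api/model.go.
type volumeAttachment struct {
	Volume      string                     `json:"volume"`
	Attachments map[string]json.RawMessage `json:"attachments"`
}

type collection struct {
	Data []volumeAttachment `json:"data"`
}

func main() {
	// Assumed manager address; matches the new GET /v1/volumeattachments route.
	resp, err := http.Get("http://longhorn-backend:9500/v1/volumeattachments")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	var col collection
	if err := json.NewDecoder(resp.Body).Decode(&col); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	for _, va := range col.Data {
		fmt.Printf("volume %s has %d attachment ticket(s)\n", va.Volume, len(va.Attachments))
	}
}
```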
@@ -59,6 +59,10 @@ const (
LeaseLockNameWebhook = "longhorn-manager-webhook-lock"
)

const (
enableConversionWebhook = false
)

func DaemonCmd() cli.Command {
return cli.Command{
Name: "daemon",

@@ -151,34 +155,36 @@ func startWebhooksByLeaderElection(ctx context.Context, kubeconfigPath, currentN
}

fnStartWebhook := func(ctx context.Context) error {
// Conversion webhook needs to be started first since we use its port 9501 as readiness port.
// longhorn-manager pod becomes ready only when conversion webhook is running.
// The services in the longhorn-manager can then start to receive the requests.
// Conversion webhook does not use datastore, since it is a prerequisite for
// datastore operation.
clientsWithoutDatastore, err := client.NewClients(kubeconfigPath, false, ctx.Done())
if err != nil {
return err
}
if err := webhook.StartWebhook(ctx, types.WebhookTypeConversion, clientsWithoutDatastore); err != nil {
return err
}
if enableConversionWebhook {
// Conversion webhook needs to be started first since we use its port 9501 as readiness port.
// longhorn-manager pod becomes ready only when conversion webhook is running.
// The services in the longhorn-manager can then start to receive the requests.
// Conversion webhook does not use datastore, since it is a prerequisite for
// datastore operation.
clientsWithoutDatastore, err := client.NewClients(kubeconfigPath, false, ctx.Done())
if err != nil {
return err
}
if err := webhook.StartWebhook(ctx, types.WebhookTypeConversion, clientsWithoutDatastore); err != nil {
return err
}

// This adds the label for the conversion webhook's selector. We do it the hard way without datastore to avoid chicken-and-egg.
pod, err := clientsWithoutDatastore.Clients.K8s.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{})
if err != nil {
return err
}
labels := types.GetConversionWebhookLabel()
for key, value := range labels {
pod.Labels[key] = value
}
_, err = clientsWithoutDatastore.Clients.K8s.CoreV1().Pods(podNamespace).Update(context.Background(), pod, metav1.UpdateOptions{})
if err != nil {
return err
}
if err := webhook.CheckWebhookServiceAvailability(types.WebhookTypeConversion); err != nil {
return err
// This adds the label for the conversion webhook's selector. We do it the hard way without datastore to avoid chicken-and-egg.
pod, err := clientsWithoutDatastore.Clients.K8s.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{})
if err != nil {
return err
}
labels := types.GetConversionWebhookLabel()
for key, value := range labels {
pod.Labels[key] = value
}
_, err = clientsWithoutDatastore.Clients.K8s.CoreV1().Pods(podNamespace).Update(context.Background(), pod, metav1.UpdateOptions{})
if err != nil {
return err
}
if err := webhook.CheckWebhookServiceAvailability(types.WebhookTypeConversion); err != nil {
return err
}
}

clients, err := client.NewClients(kubeconfigPath, true, ctx.Done())
@@ -23,7 +23,7 @@ import (
lhclientset "github.com/longhorn/longhorn-manager/k8s/pkg/client/clientset/versioned"
)

func NewJob(name string, logger logrus.FieldLogger, managerURL string, recurringJob *longhorn.RecurringJob, lhClient *lhclientset.Clientset) (*Job, error) {
func NewJob(name string, logger *logrus.Logger, managerURL string, recurringJob *longhorn.RecurringJob, lhClient *lhclientset.Clientset) (*Job, error) {
namespace := os.Getenv(types.EnvPodNamespace)
if namespace == "" {
return nil, fmt.Errorf("failed detect pod namespace, environment variable %v is missing", types.EnvPodNamespace)
@@ -18,7 +18,7 @@ type Job struct {
lhClient *lhclientset.Clientset // Kubernetes clientset for Longhorn resources.

eventRecorder record.EventRecorder // Used to record events related to the job.
logger logrus.FieldLogger // Log messages related to the job.
logger *logrus.Logger // Log messages related to the job.

name string // Name for the RecurringJob.
namespace string // Kubernetes namespace in which the RecurringJob is running.

@@ -33,7 +33,7 @@ type Job struct {
type VolumeJob struct {
*Job // Embedding the base Job struct.

logger logrus.FieldLogger // Log messages related to the volume job.
logger *logrus.Entry // Log messages related to the volume job.

volumeName string // Name of the volume on which the job operates.
snapshotName string // Name of the snapshot associated with the job.

@@ -47,7 +47,7 @@ type VolumeJob struct {
type SystemBackupJob struct {
*Job // Embedding the base Job struct.

logger logrus.FieldLogger // Log messages related to the volume job.
logger *logrus.Entry // Log messages related to the volume job.

systemBackupName string // Name of the SystemBackup.
volumeBackupPolicy longhorn.SystemBackupCreateVolumeBackupPolicy // backup policy used for the SystemBackup.Spec.
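The recurring-job structs switch from the logrus.FieldLogger interface to the concrete *logrus.Logger (on Job) and *logrus.Entry (on VolumeJob and SystemBackupJob). A minimal illustration of that distinction, with nothing Longhorn-specific:

```go
package main

import "github.com/sirupsen/logrus"

func main() {
	// *logrus.Logger: the root logger, as now stored on Job.
	var base *logrus.Logger = logrus.New()

	// *logrus.Entry: a logger with bound fields, as now stored on
	// VolumeJob and SystemBackupJob.
	var entry *logrus.Entry = base.WithField("volume", "test-vol")

	// Both still satisfy the logrus.FieldLogger interface used before.
	var _ logrus.FieldLogger = base
	var _ logrus.FieldLogger = entry

	entry.Info("Creating volume job")
}
```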
@@ -70,17 +70,17 @@ func StartVolumeJobs(job *Job, recurringJob *longhorn.RecurringJob) error {
func startVolumeJob(job *Job, recurringJob *longhorn.RecurringJob,
volumeName string, concurrentLimiter chan struct{}, jobGroups []string) error {

concurrentLimiter <- struct{}{}
defer func() {
<-concurrentLimiter
}()

volumeJob, err := newVolumeJob(job, recurringJob, volumeName, jobGroups)
if err != nil {
job.logger.WithError(err).Errorf("Failed to initialize job for volume %v", volumeName)
return err
}

concurrentLimiter <- struct{}{}
defer func() {
<-concurrentLimiter
}()

volumeJob.logger.Info("Creating volume job")

err = volumeJob.run()

@@ -96,7 +96,10 @@ func startVolumeJob(job *Job, recurringJob *longhorn.RecurringJob,
func newVolumeJob(job *Job, recurringJob *longhorn.RecurringJob, volumeName string, groups []string) (*VolumeJob, error) {
specLabels := map[string]string{}
if recurringJob.Spec.Labels != nil {
specLabels = recurringJob.Spec.Labels
specLabels = make(map[string]string, len(recurringJob.Spec.Labels))
for k, v := range recurringJob.Spec.Labels {
specLabels[k] = v
}
}
specLabels[types.RecurringJobLabel] = recurringJob.Name
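The newVolumeJob hunk stops aliasing recurringJob.Spec.Labels and copies the map before adding the recurring-job label, so the CR's spec is no longer mutated as a side effect. A small sketch of the aliasing problem being avoided; the keys and values here are illustrative:

```go
package main

import "fmt"

func main() {
	specLabels := map[string]string{"team": "storage"}

	// Old behavior: alias the spec map, then write into it.
	aliased := specLabels
	aliased["recurring-job-name"] = "backup-daily"
	fmt.Println(len(specLabels)) // 2 — the original map was mutated.

	// New behavior: copy first, then write.
	specLabels = map[string]string{"team": "storage"}
	copied := make(map[string]string, len(specLabels))
	for k, v := range specLabels {
		copied[k] = v
	}
	copied["recurring-job-name"] = "backup-daily"
	fmt.Println(len(specLabels)) // 1 — the original map is untouched.
}
```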
@@ -7,8 +7,12 @@ const (
type Backup struct {
Resource `yaml:"-"`

BackupMode string `json:"backupMode,omitempty" yaml:"backup_mode,omitempty"`

BackupTargetName string `json:"backupTargetName,omitempty" yaml:"backup_target_name,omitempty"`

BlockSize string `json:"blockSize,omitempty" yaml:"block_size,omitempty"`

CompressionMethod string `json:"compressionMethod,omitempty" yaml:"compression_method,omitempty"`

Created string `json:"created,omitempty" yaml:"created,omitempty"`

@@ -21,8 +25,12 @@ type Backup struct {

Name string `json:"name,omitempty" yaml:"name,omitempty"`

NewlyUploadDataSize string `json:"newlyUploadDataSize,omitempty" yaml:"newly_upload_data_size,omitempty"`

Progress int64 `json:"progress,omitempty" yaml:"progress,omitempty"`

ReUploadedDataSize string `json:"reUploadedDataSize,omitempty" yaml:"re_uploaded_data_size,omitempty"`

Size string `json:"size,omitempty" yaml:"size,omitempty"`

SnapshotCreated string `json:"snapshotCreated,omitempty" yaml:"snapshot_created,omitempty"`
@@ -7,6 +7,10 @@ const (
type BackupBackingImage struct {
Resource `yaml:"-"`

BackingImageName string `json:"backingImageName,omitempty" yaml:"backing_image_name,omitempty"`

BackupTargetName string `json:"backupTargetName,omitempty" yaml:"backup_target_name,omitempty"`

CompressionMethod string `json:"compressionMethod,omitempty" yaml:"compression_method,omitempty"`

Created string `json:"created,omitempty" yaml:"created,omitempty"`

@@ -23,7 +27,7 @@ type BackupBackingImage struct {

Secret string `json:"secret,omitempty" yaml:"secret,omitempty"`

SecretNamespace string `json:"secretNamespace,omitempty" yaml:"secretNamespace,omitempty"`
SecretNamespace string `json:"secretNamespace,omitempty" yaml:"secret_namespace,omitempty"`

Size int64 `json:"size,omitempty" yaml:"size,omitempty"`
@@ -36,6 +36,10 @@ type BackupTargetOperations interface {
Update(existing *BackupTarget, updates interface{}) (*BackupTarget, error)
ById(id string) (*BackupTarget, error)
Delete(container *BackupTarget) error

ActionBackupTargetSync(*BackupTarget, *SyncBackupResource) (*BackupTargetListOutput, error)

ActionBackupTargetUpdate(*BackupTarget, *BackupTarget) (*BackupTargetListOutput, error)
}

func newBackupTargetClient(rancherClient *RancherClient) *BackupTargetClient {

@@ -87,3 +91,21 @@ func (c *BackupTargetClient) ById(id string) (*BackupTarget, error) {
func (c *BackupTargetClient) Delete(container *BackupTarget) error {
return c.rancherClient.doResourceDelete(BACKUP_TARGET_TYPE, &container.Resource)
}

func (c *BackupTargetClient) ActionBackupTargetSync(resource *BackupTarget, input *SyncBackupResource) (*BackupTargetListOutput, error) {

resp := &BackupTargetListOutput{}

err := c.rancherClient.doAction(BACKUP_TARGET_TYPE, "backupTargetSync", &resource.Resource, input, resp)

return resp, err
}

func (c *BackupTargetClient) ActionBackupTargetUpdate(resource *BackupTarget, input *BackupTarget) (*BackupTargetListOutput, error) {

resp := &BackupTargetListOutput{}

err := c.rancherClient.doAction(BACKUP_TARGET_TYPE, "backupTargetUpdate", &resource.Resource, input, resp)

return resp, err
}
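These methods wire the new backupTargetSync and backupTargetUpdate actions into the generated client. A hedged usage sketch built only from the signatures shown in this change set; the import path, the NewRancherClient/ClientOpts constructor, the endpoint URL, and the target ID are assumptions:

```go
package main

import (
	"fmt"

	longhornclient "github.com/longhorn/longhorn-manager/client"
)

func syncAllTargets(c *longhornclient.RancherClient) error {
	// Fetch a backup target by ID (the ID value is illustrative).
	target, err := c.BackupTarget.ById("default")
	if err != nil || target == nil {
		return fmt.Errorf("failed to get backup target: %v", err)
	}

	// Trigger the new backupTargetSync action with the SyncBackupResource input
	// defined later in this change set.
	out, err := c.BackupTarget.ActionBackupTargetSync(target, &longhornclient.SyncBackupResource{
		SyncAllBackupTargets: true,
	})
	if err != nil {
		return err
	}
	fmt.Printf("synced %d backup target(s)\n", len(out.Data))
	return nil
}

func main() {
	// NewRancherClient/ClientOpts are assumed from the generated go-rancher client;
	// the URL is a placeholder.
	c, err := longhornclient.NewRancherClient(&longhornclient.ClientOpts{
		Url: "http://longhorn-backend:9500/v1",
	})
	if err != nil {
		panic(err)
	}
	if err := syncAllTargets(c); err != nil {
		fmt.Println(err)
	}
}
```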
@@ -0,0 +1,79 @@
package client

const (
BACKUP_TARGET_LIST_OUTPUT_TYPE = "backupTargetListOutput"
)

type BackupTargetListOutput struct {
Resource `yaml:"-"`

Data []BackupTarget `json:"data,omitempty" yaml:"data,omitempty"`
}

type BackupTargetListOutputCollection struct {
Collection
Data []BackupTargetListOutput `json:"data,omitempty"`
client *BackupTargetListOutputClient
}

type BackupTargetListOutputClient struct {
rancherClient *RancherClient
}

type BackupTargetListOutputOperations interface {
List(opts *ListOpts) (*BackupTargetListOutputCollection, error)
Create(opts *BackupTargetListOutput) (*BackupTargetListOutput, error)
Update(existing *BackupTargetListOutput, updates interface{}) (*BackupTargetListOutput, error)
ById(id string) (*BackupTargetListOutput, error)
Delete(container *BackupTargetListOutput) error
}

func newBackupTargetListOutputClient(rancherClient *RancherClient) *BackupTargetListOutputClient {
return &BackupTargetListOutputClient{
rancherClient: rancherClient,
}
}

func (c *BackupTargetListOutputClient) Create(container *BackupTargetListOutput) (*BackupTargetListOutput, error) {
resp := &BackupTargetListOutput{}
err := c.rancherClient.doCreate(BACKUP_TARGET_LIST_OUTPUT_TYPE, container, resp)
return resp, err
}

func (c *BackupTargetListOutputClient) Update(existing *BackupTargetListOutput, updates interface{}) (*BackupTargetListOutput, error) {
resp := &BackupTargetListOutput{}
err := c.rancherClient.doUpdate(BACKUP_TARGET_LIST_OUTPUT_TYPE, &existing.Resource, updates, resp)
return resp, err
}

func (c *BackupTargetListOutputClient) List(opts *ListOpts) (*BackupTargetListOutputCollection, error) {
resp := &BackupTargetListOutputCollection{}
err := c.rancherClient.doList(BACKUP_TARGET_LIST_OUTPUT_TYPE, opts, resp)
resp.client = c
return resp, err
}

func (cc *BackupTargetListOutputCollection) Next() (*BackupTargetListOutputCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &BackupTargetListOutputCollection{}
err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}

func (c *BackupTargetListOutputClient) ById(id string) (*BackupTargetListOutput, error) {
resp := &BackupTargetListOutput{}
err := c.rancherClient.doById(BACKUP_TARGET_LIST_OUTPUT_TYPE, id, resp)
if apiError, ok := err.(*ApiError); ok {
if apiError.StatusCode == 404 {
return nil, nil
}
}
return resp, err
}

func (c *BackupTargetListOutputClient) Delete(container *BackupTargetListOutput) error {
return c.rancherClient.doResourceDelete(BACKUP_TARGET_LIST_OUTPUT_TYPE, &container.Resource)
}
@@ -58,6 +58,8 @@ type BackupVolumeOperations interface {
ActionBackupList(*BackupVolume) (*BackupListOutput, error)

ActionBackupListByVolume(*BackupVolume, *Volume) (*BackupListOutput, error)

ActionBackupVolumeSync(*BackupVolume, *SyncBackupResource) (*BackupVolumeListOutput, error)
}

func newBackupVolumeClient(rancherClient *RancherClient) *BackupVolumeClient {

@@ -145,3 +147,12 @@ func (c *BackupVolumeClient) ActionBackupListByVolume(resource *BackupVolume, in

return resp, err
}

func (c *BackupVolumeClient) ActionBackupVolumeSync(resource *BackupVolume, input *SyncBackupResource) (*BackupVolumeListOutput, error) {

resp := &BackupVolumeListOutput{}

err := c.rancherClient.doAction(BACKUP_VOLUME_TYPE, "backupVolumeSync", &resource.Resource, input, resp)

return resp, err
}
@@ -0,0 +1,79 @@
package client

const (
BACKUP_VOLUME_LIST_OUTPUT_TYPE = "backupVolumeListOutput"
)

type BackupVolumeListOutput struct {
Resource `yaml:"-"`

Data []BackupVolume `json:"data,omitempty" yaml:"data,omitempty"`
}

type BackupVolumeListOutputCollection struct {
Collection
Data []BackupVolumeListOutput `json:"data,omitempty"`
client *BackupVolumeListOutputClient
}

type BackupVolumeListOutputClient struct {
rancherClient *RancherClient
}

type BackupVolumeListOutputOperations interface {
List(opts *ListOpts) (*BackupVolumeListOutputCollection, error)
Create(opts *BackupVolumeListOutput) (*BackupVolumeListOutput, error)
Update(existing *BackupVolumeListOutput, updates interface{}) (*BackupVolumeListOutput, error)
ById(id string) (*BackupVolumeListOutput, error)
Delete(container *BackupVolumeListOutput) error
}

func newBackupVolumeListOutputClient(rancherClient *RancherClient) *BackupVolumeListOutputClient {
return &BackupVolumeListOutputClient{
rancherClient: rancherClient,
}
}

func (c *BackupVolumeListOutputClient) Create(container *BackupVolumeListOutput) (*BackupVolumeListOutput, error) {
resp := &BackupVolumeListOutput{}
err := c.rancherClient.doCreate(BACKUP_VOLUME_LIST_OUTPUT_TYPE, container, resp)
return resp, err
}

func (c *BackupVolumeListOutputClient) Update(existing *BackupVolumeListOutput, updates interface{}) (*BackupVolumeListOutput, error) {
resp := &BackupVolumeListOutput{}
err := c.rancherClient.doUpdate(BACKUP_VOLUME_LIST_OUTPUT_TYPE, &existing.Resource, updates, resp)
return resp, err
}

func (c *BackupVolumeListOutputClient) List(opts *ListOpts) (*BackupVolumeListOutputCollection, error) {
resp := &BackupVolumeListOutputCollection{}
err := c.rancherClient.doList(BACKUP_VOLUME_LIST_OUTPUT_TYPE, opts, resp)
resp.client = c
return resp, err
}

func (cc *BackupVolumeListOutputCollection) Next() (*BackupVolumeListOutputCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &BackupVolumeListOutputCollection{}
err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}

func (c *BackupVolumeListOutputClient) ById(id string) (*BackupVolumeListOutput, error) {
resp := &BackupVolumeListOutput{}
err := c.rancherClient.doById(BACKUP_VOLUME_LIST_OUTPUT_TYPE, id, resp)
if apiError, ok := err.(*ApiError); ok {
if apiError.StatusCode == 404 {
return nil, nil
}
}
return resp, err
}

func (c *BackupVolumeListOutputClient) Delete(container *BackupVolumeListOutput) error {
return c.rancherClient.doResourceDelete(BACKUP_VOLUME_LIST_OUTPUT_TYPE, &container.Resource)
}
@@ -9,10 +9,10 @@ type RancherClient struct {
DetachInput DetachInputOperations
SnapshotInput SnapshotInputOperations
SnapshotCRInput SnapshotCRInputOperations
BackupTarget BackupTargetOperations
Backup BackupOperations
BackupInput BackupInputOperations
BackupStatus BackupStatusOperations
SyncBackupResource SyncBackupResourceOperations
Orphan OrphanOperations
RestoreStatus RestoreStatusOperations
PurgeStatus PurgeStatusOperations

@@ -38,6 +38,8 @@ type RancherClient struct {
UpdateReplicaZoneSoftAntiAffinityInput UpdateReplicaZoneSoftAntiAffinityInputOperations
UpdateReplicaDiskSoftAntiAffinityInput UpdateReplicaDiskSoftAntiAffinityInputOperations
UpdateFreezeFSForSnapshotInput UpdateFreezeFSForSnapshotInputOperations
UpdateBackupTargetInput UpdateBackupTargetInputOperations
UpdateOfflineRebuildingInput UpdateOfflineRebuildingInputOperations
WorkloadStatus WorkloadStatusOperations
CloneStatus CloneStatusOperations
Empty EmptyOperations

@@ -56,13 +58,14 @@ type RancherClient struct {
InstanceManager InstanceManagerOperations
BackingImageDiskFileStatus BackingImageDiskFileStatusOperations
BackingImageCleanupInput BackingImageCleanupInputOperations
BackingImageRestoreInput BackingImageRestoreInputOperations
UpdateMinNumberOfCopiesInput UpdateMinNumberOfCopiesInputOperations
BackingImageRestoreInput BackingImageRestoreInputOperations
Attachment AttachmentOperations
VolumeAttachment VolumeAttachmentOperations
Volume VolumeOperations
Snapshot SnapshotOperations
SnapshotCR SnapshotCROperations
BackupTarget BackupTargetOperations
BackupVolume BackupVolumeOperations
BackupBackingImage BackupBackingImageOperations
Setting SettingOperations

@@ -73,6 +76,8 @@ type RancherClient struct {
DiskUpdateInput DiskUpdateInputOperations
DiskInfo DiskInfoOperations
KubernetesStatus KubernetesStatusOperations
BackupTargetListOutput BackupTargetListOutputOperations
BackupVolumeListOutput BackupVolumeListOutputOperations
BackupListOutput BackupListOutputOperations
SnapshotListOutput SnapshotListOutputOperations
SystemBackup SystemBackupOperations

@@ -91,10 +96,10 @@ func constructClient(rancherBaseClient *RancherBaseClientImpl) *RancherClient {
client.DetachInput = newDetachInputClient(client)
client.SnapshotInput = newSnapshotInputClient(client)
client.SnapshotCRInput = newSnapshotCRInputClient(client)
client.BackupTarget = newBackupTargetClient(client)
client.Backup = newBackupClient(client)
client.BackupInput = newBackupInputClient(client)
client.BackupStatus = newBackupStatusClient(client)
client.SyncBackupResource = newSyncBackupResourceClient(client)
client.Orphan = newOrphanClient(client)
client.RestoreStatus = newRestoreStatusClient(client)
client.PurgeStatus = newPurgeStatusClient(client)

@@ -120,6 +125,8 @@ func constructClient(rancherBaseClient *RancherBaseClientImpl) *RancherClient {
client.UpdateReplicaZoneSoftAntiAffinityInput = newUpdateReplicaZoneSoftAntiAffinityInputClient(client)
client.UpdateReplicaDiskSoftAntiAffinityInput = newUpdateReplicaDiskSoftAntiAffinityInputClient(client)
client.UpdateFreezeFSForSnapshotInput = newUpdateFreezeFSForSnapshotInputClient(client)
client.UpdateBackupTargetInput = newUpdateBackupTargetInputClient(client)
client.UpdateOfflineRebuildingInput = newUpdateOfflineRebuildingInputClient(client)
client.WorkloadStatus = newWorkloadStatusClient(client)
client.CloneStatus = newCloneStatusClient(client)
client.Empty = newEmptyClient(client)

@@ -145,6 +152,7 @@ func constructClient(rancherBaseClient *RancherBaseClientImpl) *RancherClient {
client.Volume = newVolumeClient(client)
client.Snapshot = newSnapshotClient(client)
client.SnapshotCR = newSnapshotCRClient(client)
client.BackupTarget = newBackupTargetClient(client)
client.BackupVolume = newBackupVolumeClient(client)
client.BackupBackingImage = newBackupBackingImageClient(client)
client.Setting = newSettingClient(client)

@@ -155,6 +163,8 @@ func constructClient(rancherBaseClient *RancherBaseClientImpl) *RancherClient {
client.DiskUpdateInput = newDiskUpdateInputClient(client)
client.DiskInfo = newDiskInfoClient(client)
client.KubernetesStatus = newKubernetesStatusClient(client)
client.BackupTargetListOutput = newBackupTargetListOutputClient(client)
client.BackupVolumeListOutput = newBackupVolumeListOutputClient(client)
client.BackupListOutput = newBackupListOutputClient(client)
client.SnapshotListOutput = newSnapshotListOutputClient(client)
client.SystemBackup = newSystemBackupClient(client)
@@ -7,6 +7,10 @@ const (
type CloneStatus struct {
Resource `yaml:"-"`

AttemptCount int64 `json:"attemptCount,omitempty" yaml:"attempt_count,omitempty"`

NextAllowedAttemptAt string `json:"nextAllowedAttemptAt,omitempty" yaml:"next_allowed_attempt_at,omitempty"`

Snapshot string `json:"snapshot,omitempty" yaml:"snapshot,omitempty"`

SourceVolume string `json:"sourceVolume,omitempty" yaml:"source_volume,omitempty"`
@@ -11,6 +11,8 @@ type DiskInfo struct {

Conditions map[string]interface{} `json:"conditions,omitempty" yaml:"conditions,omitempty"`

DiskDriver string `json:"diskDriver,omitempty" yaml:"disk_driver,omitempty"`

DiskType string `json:"diskType,omitempty" yaml:"disk_type,omitempty"`

DiskUUID string `json:"diskUUID,omitempty" yaml:"disk_uuid,omitempty"`
@@ -9,6 +9,8 @@ type DiskUpdate struct {

AllowScheduling bool `json:"allowScheduling,omitempty" yaml:"allow_scheduling,omitempty"`

DiskDriver string `json:"diskDriver,omitempty" yaml:"disk_driver,omitempty"`

DiskType string `json:"diskType,omitempty" yaml:"disk_type,omitempty"`

EvictionRequested bool `json:"evictionRequested,omitempty" yaml:"eviction_requested,omitempty"`
@@ -29,6 +29,8 @@ type EngineImage struct {

Image string `json:"image,omitempty" yaml:"image,omitempty"`

Incompatible bool `json:"incompatible,omitempty" yaml:"incompatible,omitempty"`

Name string `json:"name,omitempty" yaml:"name,omitempty"`

NoRefSince string `json:"noRefSince,omitempty" yaml:"no_ref_since,omitempty"`
@ -7,6 +7,8 @@ const (
type Orphan struct {
Resource `yaml:"-"`

DataEngine string `json:"dataEngine,omitempty" yaml:"data_engine,omitempty"`

Name string `json:"name,omitempty" yaml:"name,omitempty"`

NodeID string `json:"nodeID,omitempty" yaml:"node_id,omitempty"`

@ -19,6 +19,10 @@ type RecurringJob struct {

Name string `json:"name,omitempty" yaml:"name,omitempty"`

OwnerID string `json:"ownerID,omitempty" yaml:"owner_id,omitempty"`

Parameters map[string]string `json:"parameters,omitempty" yaml:"parameters,omitempty"`

Retain int64 `json:"retain,omitempty" yaml:"retain,omitempty"`

Task string `json:"task,omitempty" yaml:"task,omitempty"`

@ -7,6 +7,8 @@ const (
type Setting struct {
Resource `yaml:"-"`

Applied bool `json:"applied,omitempty" yaml:"applied,omitempty"`

Definition SettingDefinition `json:"definition,omitempty" yaml:"definition,omitempty"`

Name string `json:"name,omitempty" yaml:"name,omitempty"`

@ -7,7 +7,7 @@ const (
type SnapshotInput struct {
Resource `yaml:"-"`

BackupMode string `json:"backupMode,omitempty" yaml:"backupMode,omitempty"`
BackupMode string `json:"backupMode,omitempty" yaml:"backup_mode,omitempty"`

Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`

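The only change to SnapshotInput above is the YAML tag on BackupMode, which previously reused the JSON name instead of the snake_case form used everywhere else. A small illustration of the effect, assuming the gopkg.in/yaml.v2 semantics the generated client relies on (the stand-in struct and the "incremental" value are for illustration only):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    // Stand-in for the generated SnapshotInput; the embedded Resource is omitted here.
    type snapshotInput struct {
        BackupMode string            `json:"backupMode,omitempty" yaml:"backup_mode,omitempty"`
        Labels     map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
    }

    func main() {
        out, _ := yaml.Marshal(snapshotInput{BackupMode: "incremental"})
        fmt.Print(string(out)) // backup_mode: incremental
    }
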
@ -0,0 +1,85 @@
|
|||
package client
|
||||
|
||||
const (
|
||||
SYNC_BACKUP_RESOURCE_TYPE = "syncBackupResource"
|
||||
)
|
||||
|
||||
type SyncBackupResource struct {
|
||||
Resource `yaml:"-"`
|
||||
|
||||
SyncAllBackupTargets bool `json:"syncAllBackupTargets,omitempty" yaml:"sync_all_backup_targets,omitempty"`
|
||||
|
||||
SyncAllBackupVolumes bool `json:"syncAllBackupVolumes,omitempty" yaml:"sync_all_backup_volumes,omitempty"`
|
||||
|
||||
SyncBackupTarget bool `json:"syncBackupTarget,omitempty" yaml:"sync_backup_target,omitempty"`
|
||||
|
||||
SyncBackupVolume bool `json:"syncBackupVolume,omitempty" yaml:"sync_backup_volume,omitempty"`
|
||||
}
|
||||
|
||||
type SyncBackupResourceCollection struct {
|
||||
Collection
|
||||
Data []SyncBackupResource `json:"data,omitempty"`
|
||||
client *SyncBackupResourceClient
|
||||
}
|
||||
|
||||
type SyncBackupResourceClient struct {
|
||||
rancherClient *RancherClient
|
||||
}
|
||||
|
||||
type SyncBackupResourceOperations interface {
|
||||
List(opts *ListOpts) (*SyncBackupResourceCollection, error)
|
||||
Create(opts *SyncBackupResource) (*SyncBackupResource, error)
|
||||
Update(existing *SyncBackupResource, updates interface{}) (*SyncBackupResource, error)
|
||||
ById(id string) (*SyncBackupResource, error)
|
||||
Delete(container *SyncBackupResource) error
|
||||
}
|
||||
|
||||
func newSyncBackupResourceClient(rancherClient *RancherClient) *SyncBackupResourceClient {
|
||||
return &SyncBackupResourceClient{
|
||||
rancherClient: rancherClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *SyncBackupResourceClient) Create(container *SyncBackupResource) (*SyncBackupResource, error) {
|
||||
resp := &SyncBackupResource{}
|
||||
err := c.rancherClient.doCreate(SYNC_BACKUP_RESOURCE_TYPE, container, resp)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *SyncBackupResourceClient) Update(existing *SyncBackupResource, updates interface{}) (*SyncBackupResource, error) {
|
||||
resp := &SyncBackupResource{}
|
||||
err := c.rancherClient.doUpdate(SYNC_BACKUP_RESOURCE_TYPE, &existing.Resource, updates, resp)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *SyncBackupResourceClient) List(opts *ListOpts) (*SyncBackupResourceCollection, error) {
|
||||
resp := &SyncBackupResourceCollection{}
|
||||
err := c.rancherClient.doList(SYNC_BACKUP_RESOURCE_TYPE, opts, resp)
|
||||
resp.client = c
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (cc *SyncBackupResourceCollection) Next() (*SyncBackupResourceCollection, error) {
|
||||
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
|
||||
resp := &SyncBackupResourceCollection{}
|
||||
err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
|
||||
resp.client = cc.client
|
||||
return resp, err
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *SyncBackupResourceClient) ById(id string) (*SyncBackupResource, error) {
|
||||
resp := &SyncBackupResource{}
|
||||
err := c.rancherClient.doById(SYNC_BACKUP_RESOURCE_TYPE, id, resp)
|
||||
if apiError, ok := err.(*ApiError); ok {
|
||||
if apiError.StatusCode == 404 {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *SyncBackupResourceClient) Delete(container *SyncBackupResource) error {
|
||||
return c.rancherClient.doResourceDelete(SYNC_BACKUP_RESOURCE_TYPE, &container.Resource)
|
||||
}
|
|
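The SyncBackupResource client above follows the same shape as the other generated Rancher clients. A minimal sketch of how a caller might request a full backup-store resync through it, assuming the type is registered on RancherClient as SyncBackupResource like the rest (the helper name and the chosen field values are illustrative):

    // requestFullBackupResync asks the manager to re-scan every backup target and
    // every backup volume; c is an already constructed *client.RancherClient.
    func requestFullBackupResync(c *client.RancherClient) error {
        _, err := c.SyncBackupResource.Create(&client.SyncBackupResource{
            SyncAllBackupTargets: true,
            SyncAllBackupVolumes: true,
        })
        return err
    }
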
@ -0,0 +1,79 @@
|
|||
package client
|
||||
|
||||
const (
|
||||
UPDATE_BACKUP_TARGET_INPUT_TYPE = "UpdateBackupTargetInput"
|
||||
)
|
||||
|
||||
type UpdateBackupTargetInput struct {
|
||||
Resource `yaml:"-"`
|
||||
|
||||
BackupTargetName string `json:"backupTargetName,omitempty" yaml:"backup_target_name,omitempty"`
|
||||
}
|
||||
|
||||
type UpdateBackupTargetInputCollection struct {
|
||||
Collection
|
||||
Data []UpdateBackupTargetInput `json:"data,omitempty"`
|
||||
client *UpdateBackupTargetInputClient
|
||||
}
|
||||
|
||||
type UpdateBackupTargetInputClient struct {
|
||||
rancherClient *RancherClient
|
||||
}
|
||||
|
||||
type UpdateBackupTargetInputOperations interface {
|
||||
List(opts *ListOpts) (*UpdateBackupTargetInputCollection, error)
|
||||
Create(opts *UpdateBackupTargetInput) (*UpdateBackupTargetInput, error)
|
||||
Update(existing *UpdateBackupTargetInput, updates interface{}) (*UpdateBackupTargetInput, error)
|
||||
ById(id string) (*UpdateBackupTargetInput, error)
|
||||
Delete(container *UpdateBackupTargetInput) error
|
||||
}
|
||||
|
||||
func newUpdateBackupTargetInputClient(rancherClient *RancherClient) *UpdateBackupTargetInputClient {
|
||||
return &UpdateBackupTargetInputClient{
|
||||
rancherClient: rancherClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *UpdateBackupTargetInputClient) Create(container *UpdateBackupTargetInput) (*UpdateBackupTargetInput, error) {
|
||||
resp := &UpdateBackupTargetInput{}
|
||||
err := c.rancherClient.doCreate(UPDATE_BACKUP_TARGET_INPUT_TYPE, container, resp)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *UpdateBackupTargetInputClient) Update(existing *UpdateBackupTargetInput, updates interface{}) (*UpdateBackupTargetInput, error) {
|
||||
resp := &UpdateBackupTargetInput{}
|
||||
err := c.rancherClient.doUpdate(UPDATE_BACKUP_TARGET_INPUT_TYPE, &existing.Resource, updates, resp)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *UpdateBackupTargetInputClient) List(opts *ListOpts) (*UpdateBackupTargetInputCollection, error) {
|
||||
resp := &UpdateBackupTargetInputCollection{}
|
||||
err := c.rancherClient.doList(UPDATE_BACKUP_TARGET_INPUT_TYPE, opts, resp)
|
||||
resp.client = c
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (cc *UpdateBackupTargetInputCollection) Next() (*UpdateBackupTargetInputCollection, error) {
|
||||
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
|
||||
resp := &UpdateBackupTargetInputCollection{}
|
||||
err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
|
||||
resp.client = cc.client
|
||||
return resp, err
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *UpdateBackupTargetInputClient) ById(id string) (*UpdateBackupTargetInput, error) {
|
||||
resp := &UpdateBackupTargetInput{}
|
||||
err := c.rancherClient.doById(UPDATE_BACKUP_TARGET_INPUT_TYPE, id, resp)
|
||||
if apiError, ok := err.(*ApiError); ok {
|
||||
if apiError.StatusCode == 404 {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *UpdateBackupTargetInputClient) Delete(container *UpdateBackupTargetInput) error {
|
||||
return c.rancherClient.doResourceDelete(UPDATE_BACKUP_TARGET_INPUT_TYPE, &container.Resource)
|
||||
}
|
|
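As with the other generated clients, ById above converts a 404 into a (nil, nil) return instead of an error, so callers must nil-check the resource in addition to the error. A short sketch of that pattern (imports omitted; the helper name is illustrative):

    // lookupUpdateBackupTargetInput demonstrates the nil-check required by ById.
    func lookupUpdateBackupTargetInput(c *client.RancherClient, id string) (*client.UpdateBackupTargetInput, error) {
        obj, err := c.UpdateBackupTargetInput.ById(id)
        if err != nil {
            return nil, err
        }
        if obj == nil {
            // A 404 from the API server was swallowed by ById and surfaced as (nil, nil).
            return nil, fmt.Errorf("UpdateBackupTargetInput %q not found", id)
        }
        return obj, nil
    }
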
@ -0,0 +1,79 @@
|
|||
package client
|
||||
|
||||
const (
|
||||
UPDATE_OFFLINE_REBUILDING_INPUT_TYPE = "UpdateOfflineRebuildingInput"
|
||||
)
|
||||
|
||||
type UpdateOfflineRebuildingInput struct {
|
||||
Resource `yaml:"-"`
|
||||
|
||||
OfflineRebuilding string `json:"offlineRebuilding,omitempty" yaml:"offline_rebuilding,omitempty"`
|
||||
}
|
||||
|
||||
type UpdateOfflineRebuildingInputCollection struct {
|
||||
Collection
|
||||
Data []UpdateOfflineRebuildingInput `json:"data,omitempty"`
|
||||
client *UpdateOfflineRebuildingInputClient
|
||||
}
|
||||
|
||||
type UpdateOfflineRebuildingInputClient struct {
|
||||
rancherClient *RancherClient
|
||||
}
|
||||
|
||||
type UpdateOfflineRebuildingInputOperations interface {
|
||||
List(opts *ListOpts) (*UpdateOfflineRebuildingInputCollection, error)
|
||||
Create(opts *UpdateOfflineRebuildingInput) (*UpdateOfflineRebuildingInput, error)
|
||||
Update(existing *UpdateOfflineRebuildingInput, updates interface{}) (*UpdateOfflineRebuildingInput, error)
|
||||
ById(id string) (*UpdateOfflineRebuildingInput, error)
|
||||
Delete(container *UpdateOfflineRebuildingInput) error
|
||||
}
|
||||
|
||||
func newUpdateOfflineRebuildingInputClient(rancherClient *RancherClient) *UpdateOfflineRebuildingInputClient {
|
||||
return &UpdateOfflineRebuildingInputClient{
|
||||
rancherClient: rancherClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *UpdateOfflineRebuildingInputClient) Create(container *UpdateOfflineRebuildingInput) (*UpdateOfflineRebuildingInput, error) {
|
||||
resp := &UpdateOfflineRebuildingInput{}
|
||||
err := c.rancherClient.doCreate(UPDATE_OFFLINE_REBUILDING_INPUT_TYPE, container, resp)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *UpdateOfflineRebuildingInputClient) Update(existing *UpdateOfflineRebuildingInput, updates interface{}) (*UpdateOfflineRebuildingInput, error) {
|
||||
resp := &UpdateOfflineRebuildingInput{}
|
||||
err := c.rancherClient.doUpdate(UPDATE_OFFLINE_REBUILDING_INPUT_TYPE, &existing.Resource, updates, resp)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *UpdateOfflineRebuildingInputClient) List(opts *ListOpts) (*UpdateOfflineRebuildingInputCollection, error) {
|
||||
resp := &UpdateOfflineRebuildingInputCollection{}
|
||||
err := c.rancherClient.doList(UPDATE_OFFLINE_REBUILDING_INPUT_TYPE, opts, resp)
|
||||
resp.client = c
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (cc *UpdateOfflineRebuildingInputCollection) Next() (*UpdateOfflineRebuildingInputCollection, error) {
|
||||
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
|
||||
resp := &UpdateOfflineRebuildingInputCollection{}
|
||||
err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
|
||||
resp.client = cc.client
|
||||
return resp, err
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *UpdateOfflineRebuildingInputClient) ById(id string) (*UpdateOfflineRebuildingInput, error) {
|
||||
resp := &UpdateOfflineRebuildingInput{}
|
||||
err := c.rancherClient.doById(UPDATE_OFFLINE_REBUILDING_INPUT_TYPE, id, resp)
|
||||
if apiError, ok := err.(*ApiError); ok {
|
||||
if apiError.StatusCode == 404 {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *UpdateOfflineRebuildingInputClient) Delete(container *UpdateOfflineRebuildingInput) error {
|
||||
return c.rancherClient.doResourceDelete(UPDATE_OFFLINE_REBUILDING_INPUT_TYPE, &container.Resource)
|
||||
}
|
|
@ -11,6 +11,8 @@ type Volume struct {
|
|||
|
||||
BackingImage string `json:"backingImage,omitempty" yaml:"backing_image,omitempty"`
|
||||
|
||||
BackupBlockSize string `json:"backupBlockSize,omitempty" yaml:"backup_block_size,omitempty"`
|
||||
|
||||
BackupCompressionMethod string `json:"backupCompressionMethod,omitempty" yaml:"backup_compression_method,omitempty"`
|
||||
|
||||
BackupStatus []BackupStatus `json:"backupStatus,omitempty" yaml:"backup_status,omitempty"`
|
||||
|
@ -141,12 +143,12 @@ type VolumeOperations interface {
|
|||
|
||||
ActionCancelExpansion(*Volume) (*Volume, error)
|
||||
|
||||
ActionOfflineReplicaRebuilding(*Volume) (*Volume, error)
|
||||
|
||||
ActionDetach(*Volume, *DetachInput) (*Volume, error)
|
||||
|
||||
ActionExpand(*Volume, *ExpandInput) (*Volume, error)
|
||||
|
||||
ActionOfflineReplicaRebuilding(*Volume, *UpdateOfflineRebuildingInput) (*Volume, error)
|
||||
|
||||
ActionPvCreate(*Volume, *PVCreateInput) (*Volume, error)
|
||||
|
||||
ActionPvcCreate(*Volume, *PVCCreateInput) (*Volume, error)
|
||||
|
@ -265,15 +267,6 @@ func (c *VolumeClient) ActionCancelExpansion(resource *Volume) (*Volume, error)
|
|||
return resp, err
|
||||
}
|
||||
|
||||
func (c *VolumeClient) ActionOfflineReplicaRebuilding(resource *Volume) (*Volume, error) {
|
||||
|
||||
resp := &Volume{}
|
||||
|
||||
err := c.rancherClient.doAction(VOLUME_TYPE, "offlineReplicaRebuilding", &resource.Resource, nil, resp)
|
||||
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *VolumeClient) ActionDetach(resource *Volume, input *DetachInput) (*Volume, error) {
|
||||
|
||||
resp := &Volume{}
|
||||
|
@ -292,6 +285,15 @@ func (c *VolumeClient) ActionExpand(resource *Volume, input *ExpandInput) (*Volu
|
|||
return resp, err
|
||||
}
|
||||
|
||||
func (c *VolumeClient) ActionOfflineReplicaRebuilding(resource *Volume, input *UpdateOfflineRebuildingInput) (*Volume, error) {
|
||||
|
||||
resp := &Volume{}
|
||||
|
||||
err := c.rancherClient.doAction(VOLUME_TYPE, "offlineReplicaRebuilding", &resource.Resource, input, resp)
|
||||
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (c *VolumeClient) ActionPvCreate(resource *Volume, input *PVCreateInput) (*Volume, error) {
|
||||
|
||||
resp := &Volume{}
|
||||
|
|
|
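ActionOfflineReplicaRebuilding now takes an UpdateOfflineRebuildingInput rather than being parameterless, so callers have to state the desired mode explicitly. A sketch of the updated call; the "enabled" string is an assumption about accepted values, not something this diff establishes:

    // vol is a *client.Volume previously obtained via c.Volume.ById(...) or c.Volume.List(...).
    updated, err := c.Volume.ActionOfflineReplicaRebuilding(vol, &client.UpdateOfflineRebuildingInput{
        OfflineRebuilding: "enabled", // illustrative value; check the volume API for the accepted strings
    })
    if err != nil {
        return err
    }
    _ = updated
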
@ -70,4 +70,6 @@ const (
|
|||
EventReasonRolloutSkippedFmt = "RolloutSkipped: %v %v"
|
||||
|
||||
EventReasonMigrationFailed = "MigrationFailed"
|
||||
|
||||
EventReasonOrphanCleanupCompleted = "OrphanCleanupCompleted"
|
||||
)
|
||||
|
|
|
@ -281,7 +281,7 @@ func (bic *BackingImageController) syncBackingImage(key string) (err error) {
|
|||
}()
|
||||
|
||||
if backingImage.DeletionTimestamp != nil {
|
||||
replicas, err := bic.ds.ListReplicasByBackingImage(backingImage.Name)
|
||||
replicas, err := bic.ds.ListReplicasByBackingImage(backingImage.Name, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -345,7 +345,9 @@ func (bic *BackingImageController) syncBackingImage(key string) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
bic.cleanupEvictionRequestedBackingImageCopies(backingImage)
|
||||
if err := bic.cleanupEvictionRequestedBackingImageCopies(backingImage); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if types.IsDataEngineV2(backingImage.Spec.DataEngine) {
|
||||
return bic.handleV2BackingImage(backingImage)
|
||||
|
@ -507,7 +509,7 @@ func (bic *BackingImageController) prepareFirstV2Copy(bi *longhorn.BackingImage)
|
|||
_, err = engineClientProxy.SPDKBackingImageCreate(bi.Name, bi.Status.UUID, firstV2CopyDiskUUID, bi.Status.Checksum, fileDownloadAddress, "", uint64(bi.Status.Size))
|
||||
if err != nil {
|
||||
if types.ErrorAlreadyExists(err) {
|
||||
log.Infof("backing image already exists when preparing first v2 copy on disk %v", firstV2CopyDiskUUID)
|
||||
log.Infof("Backing image already exists when preparing first v2 copy on disk %v", firstV2CopyDiskUUID)
|
||||
}
|
||||
bic.v2CopyBackoff.Next(firstV2CopyDiskUUID, time.Now())
|
||||
return errors.Wrapf(err, "failed to create backing image on disk %v when preparing first v2 copy for backing image %v", firstV2CopyDiskUUID, bi.Name)
|
||||
|
@ -863,7 +865,7 @@ func (bic *BackingImageController) replenishBackingImageCopies(bi *longhorn.Back
|
|||
return nil
|
||||
}
|
||||
|
||||
func (bic *BackingImageController) cleanupEvictionRequestedBackingImageCopies(bi *longhorn.BackingImage) {
|
||||
func (bic *BackingImageController) cleanupEvictionRequestedBackingImageCopies(bi *longhorn.BackingImage) error {
|
||||
log := getLoggerForBackingImage(bic.logger, bi)
|
||||
|
||||
// If there is no non-evicting healthy backing image copy,
|
||||
|
@ -901,9 +903,30 @@ func (bic *BackingImageController) cleanupEvictionRequestedBackingImageCopies(bi
|
|||
// only this controller can gather all the information of all the copies of this backing image at once.
|
||||
// By deleting the disk from the spec, backing image manager controller will delete the copy on that disk.
|
||||
// TODO: introduce a new CRD for the backing image copy so we can delete the copy like volume controller deletes replicas.
|
||||
isUsed, err := bic.isBIDiskFileUsedByReplicas(bi.Name, diskUUID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Failed to check if the backing image copy on disk %v is used by replicas", diskUUID)
|
||||
}
|
||||
if isUsed {
|
||||
log.Debugf("Backing image copy on disk %v is used by replicas. Copy eviction is blocked", diskUUID)
|
||||
continue
|
||||
}
|
||||
delete(bi.Spec.DiskFileSpecMap, diskUUID)
|
||||
log.Infof("Evicted backing image copy on disk %v", diskUUID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bic *BackingImageController) isBIDiskFileUsedByReplicas(biName, diskUUID string) (used bool, err error) {
|
||||
replicas, err := bic.ds.ListReplicasByBackingImage(biName, diskUUID)
|
||||
if err != nil {
|
||||
if datastore.ErrorIsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return len(replicas) > 0, nil
|
||||
}
|
||||
|
||||
func (bic *BackingImageController) IsBackingImageDataSourceCleaned(bi *longhorn.BackingImage) (cleaned bool, err error) {
|
||||
|
@ -1409,7 +1432,7 @@ func (bic *BackingImageController) updateStatusWithFileInfo(bi *longhorn.Backing
|
|||
}
|
||||
|
||||
func (bic *BackingImageController) updateDiskLastReferenceMap(bi *longhorn.BackingImage) error {
|
||||
replicas, err := bic.ds.ListReplicasByBackingImage(bi.Name)
|
||||
replicas, err := bic.ds.ListReplicasByBackingImage(bi.Name, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1527,7 +1550,7 @@ func (bic *BackingImageController) enqueueBackingImageForNodeUpdate(oldObj, curr
|
|||
}
|
||||
}
|
||||
|
||||
diskBackingImageMap, err := bic.ds.GetDiskBackingImageMap()
|
||||
diskBackingImageMap, err := bic.ds.GetCurrentDiskBackingImageMap()
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failed to get disk backing image map when handling node update"))
|
||||
return
|
||||
|
|
|
@ -3,6 +3,7 @@ package controller
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -676,8 +677,8 @@ func (c *BackingImageDataSourceController) generateBackingImageDataSourcePodMani
|
|||
cmd := []string{
|
||||
"backing-image-manager", "--debug",
|
||||
"data-source",
|
||||
"--listen", fmt.Sprintf("%s:%d", "0.0.0.0", engineapi.BackingImageDataSourceDefaultPort),
|
||||
"--sync-listen", fmt.Sprintf("%s:%d", "0.0.0.0", engineapi.BackingImageSyncServerDefaultPort),
|
||||
"--listen", fmt.Sprintf(":%d", engineapi.BackingImageDataSourceDefaultPort),
|
||||
"--sync-listen", fmt.Sprintf(":%d", engineapi.BackingImageSyncServerDefaultPort),
|
||||
"--name", bids.Name,
|
||||
"--uuid", bids.Spec.UUID,
|
||||
"--source-type", string(bids.Spec.SourceType),
|
||||
|
@ -942,7 +943,7 @@ func (c *BackingImageDataSourceController) prepareRunningParametersForExport(bid
|
|||
continue
|
||||
}
|
||||
rAddress := e.Status.CurrentReplicaAddressMap[rName]
|
||||
if rAddress == "" || rAddress != fmt.Sprintf("%s:%d", r.Status.StorageIP, r.Status.Port) {
|
||||
if rAddress == "" || rAddress != net.JoinHostPort(r.Status.StorageIP, strconv.Itoa(r.Status.Port)) {
|
||||
continue
|
||||
}
|
||||
bids.Status.RunningParameters[longhorn.DataSourceTypeExportFromVolumeParameterSenderAddress] = rAddress
|
||||
|
|
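The listen-address and replica-address changes above swap fmt.Sprintf("%s:%d", ...) for the bare ":port" form and for net.JoinHostPort, which matters once IPv6 storage IPs appear because IPv6 literals must be bracketed in host:port strings. A standalone illustration of the difference (the address is an example):

    package main

    import (
        "fmt"
        "net"
        "strconv"
    )

    func main() {
        ip, port := "fd00::1234", 8000
        fmt.Printf("%s:%d\n", ip, port)                       // fd00::1234:8000  (ambiguous for IPv6)
        fmt.Println(net.JoinHostPort(ip, strconv.Itoa(port))) // [fd00::1234]:8000 (correctly bracketed)
    }
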
|
@ -1,9 +1,12 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -64,6 +67,8 @@ type BackingImageManagerController struct {
|
|||
|
||||
replenishLock *sync.Mutex
|
||||
inProgressReplenishingMap map[string]string
|
||||
|
||||
podRecreateBackoff *flowcontrol.Backoff
|
||||
}
|
||||
|
||||
type BackingImageManagerMonitor struct {
|
||||
|
@ -133,6 +138,8 @@ func NewBackingImageManagerController(
|
|||
|
||||
replenishLock: &sync.Mutex{},
|
||||
inProgressReplenishingMap: map[string]string{},
|
||||
|
||||
podRecreateBackoff: newBackoff(context.TODO()),
|
||||
}
|
||||
|
||||
var err error
|
||||
|
@ -322,6 +329,9 @@ func (c *BackingImageManagerController) syncBackingImageManager(key string) (err
|
|||
bim.Status.CurrentState = longhorn.BackingImageManagerStateUnknown
|
||||
c.updateForUnknownBackingImageManager(bim)
|
||||
}
|
||||
if noReadyDisk {
|
||||
return c.evictMissingDiskBackingImageManager(bim)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -381,6 +391,38 @@ func (c *BackingImageManagerController) cleanupBackingImageManager(bim *longhorn
|
|||
return nil
|
||||
}
|
||||
|
||||
// evictMissingDiskBackingImageManager triggers backing image manager eviction for missing disks
|
||||
func (c *BackingImageManagerController) evictMissingDiskBackingImageManager(bim *longhorn.BackingImageManager) error {
|
||||
isDiskExist, err := c.ds.IsNodeHasDiskUUID(bim.Spec.NodeID, bim.Spec.DiskUUID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot check if backing image manager %v is serving on a existing disk %v", bim.Name, bim.Spec.DiskUUID)
|
||||
} else if isDiskExist {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Backing image manager is serving on the disk that no longer belongs to any node. Trigger the manager eviction.
|
||||
for imageName := range bim.Spec.BackingImages {
|
||||
bi, getImageErr := c.ds.GetBackingImageRO(imageName)
|
||||
if getImageErr != nil {
|
||||
if datastore.ErrorIsNotFound(getImageErr) {
|
||||
c.logger.Warnf("No corresponding backing image %v for missing disk backing image manager %v", imageName, bim.Name)
|
||||
continue
|
||||
}
|
||||
return errors.Wrapf(getImageErr, "failed to get backing image %v for missing disk backing image manager %v", bi.Name, bim.Name)
|
||||
}
|
||||
if bi.Spec.DiskFileSpecMap != nil {
|
||||
if bimDiskFileSpec, exist := bi.Spec.DiskFileSpecMap[bim.Spec.DiskUUID]; exist && !bimDiskFileSpec.EvictionRequested {
|
||||
c.logger.Infof("Evicting backing image manager %v because of missing disk %v", bim.Name, bim.Spec.DiskUUID)
|
||||
bimDiskFileSpec.EvictionRequested = true
|
||||
if _, updateErr := c.ds.UpdateBackingImage(bi); updateErr != nil {
|
||||
return errors.Wrapf(updateErr, "failed to evict missing disk backing image manager %v from backing image %v", bim.Name, bi.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *BackingImageManagerController) updateForUnknownBackingImageManager(bim *longhorn.BackingImageManager) {
|
||||
if bim.Status.CurrentState != longhorn.BackingImageManagerStateUnknown {
|
||||
return
|
||||
|
@ -544,9 +586,16 @@ func (c *BackingImageManagerController) syncBackingImageManagerPod(bim *longhorn
|
|||
// Similar to InstanceManagerController.
|
||||
// Longhorn shouldn't create the pod when users set taints with the NoExecute effect on the node the bim prefers.
|
||||
if c.controllerID == bim.Spec.NodeID {
|
||||
log.Info("Creating backing image manager pod")
|
||||
if err := c.createBackingImageManagerPod(bim); err != nil {
|
||||
return err
|
||||
backoffID := bim.Name
|
||||
if c.podRecreateBackoff.IsInBackOffSinceUpdate(backoffID, time.Now()) {
|
||||
log.Infof("Skipping pod creation for backing image manager %s, will retry after backoff of %s", bim.Name, c.podRecreateBackoff.Get(backoffID))
|
||||
} else {
|
||||
log.Infof("Creating pod for backing image manager %s", bim.Name)
|
||||
c.podRecreateBackoff.Next(backoffID, time.Now())
|
||||
|
||||
if err := c.createBackingImageManagerPod(bim); err != nil {
|
||||
return errors.Wrap(err, "failed to create pod for backing image manager")
|
||||
}
|
||||
}
|
||||
bim.Status.CurrentState = longhorn.BackingImageManagerStateStarting
|
||||
c.eventRecorder.Eventf(bim, corev1.EventTypeNormal, constant.EventReasonCreate, "Creating backing image manager pod %v for disk %v on node %v. Backing image manager state will become %v", bim.Name, bim.Spec.DiskUUID, bim.Spec.NodeID, longhorn.BackingImageManagerStateStarting)
|
||||
|
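Pod creation for a backing image manager is now gated by a client-go flowcontrol.Backoff keyed by the manager name, so a crash-looping pod is not recreated on every sync loop. A minimal sketch of that backoff pattern, assuming the same k8s.io/client-go/util/flowcontrol package; the durations are illustrative, not the values Longhorn's newBackoff helper uses:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/client-go/util/flowcontrol"
    )

    func main() {
        // NewBackOff(initial, max): the per-key delay doubles on every Next() call, capped at max.
        backoff := flowcontrol.NewBackOff(time.Second, 5*time.Minute)
        id := "bim-example" // illustrative key; the controller keys the backoff by bim.Name

        if backoff.IsInBackOffSinceUpdate(id, time.Now()) {
            fmt.Println("still in backoff, skip pod creation; remaining:", backoff.Get(id))
            return
        }
        backoff.Next(id, time.Now()) // record this attempt before creating the pod
        fmt.Println("create the backing image manager pod now")
    }
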
@ -709,7 +758,7 @@ func (c *BackingImageManagerController) prepareBackingImageFiles(currentBIM *lon
|
|||
continue
|
||||
}
|
||||
log.Infof("Starting to fetch the data source file from the backing image data source work directory %v", bimtypes.DataSourceDirectoryName)
|
||||
if _, err := cli.Fetch(biRO.Name, biRO.Status.UUID, bids.Status.Checksum, fmt.Sprintf("%s:%d", bids.Status.StorageIP, engineapi.BackingImageDataSourceDefaultPort), bids.Status.Size); err != nil {
|
||||
if _, err := cli.Fetch(biRO.Name, biRO.Status.UUID, bids.Status.Checksum, net.JoinHostPort(bids.Status.StorageIP, strconv.Itoa(engineapi.BackingImageDataSourceDefaultPort)), bids.Status.Size); err != nil {
|
||||
if types.ErrorAlreadyExists(err) {
|
||||
continue
|
||||
}
|
||||
|
@ -873,8 +922,8 @@ func (c *BackingImageManagerController) generateBackingImageManagerPodManifest(b
|
|||
Command: []string{
|
||||
"backing-image-manager", "--debug",
|
||||
"daemon",
|
||||
"--listen", fmt.Sprintf("%s:%d", "0.0.0.0", engineapi.BackingImageManagerDefaultPort),
|
||||
"--sync-listen", fmt.Sprintf("%s:%d", "0.0.0.0", engineapi.BackingImageSyncServerDefaultPort),
|
||||
"--listen", fmt.Sprintf(":%d", engineapi.BackingImageManagerDefaultPort),
|
||||
"--sync-listen", fmt.Sprintf(":%d", engineapi.BackingImageSyncServerDefaultPort),
|
||||
},
|
||||
ReadinessProbe: &corev1.Probe{
|
||||
ProbeHandler: corev1.ProbeHandler{
|
||||
|
|
|
@ -8,7 +8,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rancher/lasso/pkg/log"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
@ -197,7 +196,7 @@ func (bc *BackupBackingImageController) reconcile(backupBackingImageName string)
|
|||
}
|
||||
return err
|
||||
}
|
||||
log.Infof("Backup backing image got new owner %v", bc.controllerID)
|
||||
bc.logger.Infof("Backup backing image got new owner %v", bc.controllerID)
|
||||
}
|
||||
|
||||
log := getLoggerForBackupBackingImage(bc.logger, bbi)
|
||||
|
|
|
@ -402,7 +402,7 @@ func (bc *BackupController) reconcile(backupName string) (err error) {
|
|||
}
|
||||
if backup.Status.State == longhorn.BackupStateCompleted && existingBackupState != backup.Status.State {
|
||||
if err := bc.syncBackupVolume(backupTargetName, canonicalBackupVolumeName); err != nil {
|
||||
log.Warnf("failed to sync backup volume %v for backup target %v", canonicalBackupVolumeName, backupTargetName)
|
||||
log.Warnf("Failed to sync backup volume %v for backup target %v", canonicalBackupVolumeName, backupTargetName)
|
||||
return
|
||||
}
|
||||
if err := bc.deleteSnapshotAfterBackupCompleted(backup); err != nil {
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
|
||||
systembackupstore "github.com/longhorn/backupstore/systembackup"
|
||||
multierr "github.com/longhorn/go-common-libs/multierr"
|
||||
|
||||
"github.com/longhorn/longhorn-manager/datastore"
|
||||
"github.com/longhorn/longhorn-manager/engineapi"
|
||||
|
@ -241,28 +242,19 @@ func getLoggerForBackupTarget(logger logrus.FieldLogger, backupTarget *longhorn.
|
|||
)
|
||||
}
|
||||
|
||||
func getAvailableDataEngine(ds *datastore.DataStore) (longhorn.DataEngineType, error) {
|
||||
func getBackupTarget(nodeID string, backupTarget *longhorn.BackupTarget, ds *datastore.DataStore, log logrus.FieldLogger, proxyConnCounter util.Counter) (engineClientProxy engineapi.EngineClientProxy, backupTargetClient *engineapi.BackupTargetClient, err error) {
|
||||
var instanceManager *longhorn.InstanceManager
|
||||
errs := multierr.NewMultiError()
|
||||
dataEngines := ds.GetDataEngines()
|
||||
if len(dataEngines) > 0 {
|
||||
for _, dataEngine := range []longhorn.DataEngineType{longhorn.DataEngineTypeV2, longhorn.DataEngineTypeV1} {
|
||||
if _, ok := dataEngines[dataEngine]; ok {
|
||||
return dataEngine, nil
|
||||
}
|
||||
for dataEngine := range dataEngines {
|
||||
instanceManager, err = ds.GetRunningInstanceManagerByNodeRO(nodeID, dataEngine)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
errs.Append("errors", errors.Wrapf(err, "failed to get running instance manager for node %v and data engine %v", nodeID, dataEngine))
|
||||
}
|
||||
|
||||
return "", errors.New("no data engine available")
|
||||
}
|
||||
|
||||
func getBackupTarget(controllerID string, backupTarget *longhorn.BackupTarget, ds *datastore.DataStore, log logrus.FieldLogger, proxyConnCounter util.Counter) (engineClientProxy engineapi.EngineClientProxy, backupTargetClient *engineapi.BackupTargetClient, err error) {
|
||||
dataEngine, err := getAvailableDataEngine(ds)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed to get available data engine for getting backup target")
|
||||
}
|
||||
|
||||
instanceManager, err := ds.GetRunningInstanceManagerByNodeRO(controllerID, dataEngine)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed to get running instance manager for proxy client")
|
||||
if instanceManager == nil {
|
||||
return nil, nil, fmt.Errorf("failed to find a running instance manager for node %v: %v", nodeID, errs.Error())
|
||||
}
|
||||
|
||||
engineClientProxy, err = engineapi.NewEngineClientProxy(instanceManager, log, proxyConnCounter, ds)
|
||||
|
@ -433,6 +425,16 @@ func (btc *BackupTargetController) reconcile(name string) (err error) {
|
|||
}
|
||||
}()
|
||||
|
||||
// clean up invalid backup volumes that are created during split-brain
|
||||
// https://github.com/longhorn/longhorn/issues/11154
|
||||
clusterVolumeBVMap, duplicatedBackupVolumeSet, err := btc.getClusterBVsDuplicatedBVs(backupTarget)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := btc.cleanupDuplicateBackupVolumeForBackupTarget(backupTarget, duplicatedBackupVolumeSet); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if backupTarget.Spec.BackupTargetURL == "" {
|
||||
stopTimer(backupTarget.Name)
|
||||
|
||||
|
@ -456,14 +458,20 @@ func (btc *BackupTargetController) reconcile(name string) (err error) {
|
|||
log.WithError(err).Error("Failed to get info from backup store")
|
||||
return nil // Ignore error to allow status update as well as preventing enqueue
|
||||
}
|
||||
|
||||
if !backupTarget.Status.Available {
|
||||
backupTarget.Status.Available = true
|
||||
backupTarget.Status.Conditions = types.SetCondition(backupTarget.Status.Conditions,
|
||||
longhorn.BackupTargetConditionTypeUnavailable, longhorn.ConditionStatusFalse,
|
||||
"", "")
|
||||
// If the controller can communicate with the remote backup target while "backupTarget.Status.Available" is "false",
|
||||
// Longhorn should update the field to "true" first rather than continuing to fetch info from the target.
|
||||
// related issue: https://github.com/longhorn/longhorn/issues/11337
|
||||
return nil
|
||||
}
|
||||
syncTimeRequired = true // Errors beyond this point are NOT backup target related.
|
||||
|
||||
backupTarget.Status.Available = true
|
||||
backupTarget.Status.Conditions = types.SetCondition(backupTarget.Status.Conditions,
|
||||
longhorn.BackupTargetConditionTypeUnavailable, longhorn.ConditionStatusFalse,
|
||||
"", "")
|
||||
|
||||
if err = btc.syncBackupVolume(backupTarget, info.backupStoreBackupVolumeNames, syncTime, log); err != nil {
|
||||
if err = btc.syncBackupVolume(backupTarget, info.backupStoreBackupVolumeNames, clusterVolumeBVMap, syncTime, log); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -530,6 +538,14 @@ func (btc *BackupTargetController) getInfoFromBackupStore(backupTarget *longhorn
|
|||
defer engineClientProxy.Close()
|
||||
|
||||
// Get required information using backup target client.
|
||||
// Get SystemBackups first to update the backup target to `available` while minimizing requests to S3.
|
||||
info.backupStoreSystemBackups, err = backupTargetClient.ListSystemBackup()
|
||||
if err != nil {
|
||||
return backupStoreInfo{}, errors.Wrapf(err, "failed to list system backups in %v", backupTargetClient.URL)
|
||||
}
|
||||
if !backupTarget.Status.Available {
|
||||
return info, nil
|
||||
}
|
||||
info.backupStoreBackupVolumeNames, err = backupTargetClient.BackupVolumeNameList()
|
||||
if err != nil {
|
||||
return backupStoreInfo{}, errors.Wrapf(err, "failed to list backup volumes in %v", backupTargetClient.URL)
|
||||
|
@ -538,28 +554,58 @@ func (btc *BackupTargetController) getInfoFromBackupStore(backupTarget *longhorn
|
|||
if err != nil {
|
||||
return backupStoreInfo{}, errors.Wrapf(err, "failed to list backup backing images in %v", backupTargetClient.URL)
|
||||
}
|
||||
info.backupStoreSystemBackups, err = backupTargetClient.ListSystemBackup()
|
||||
if err != nil {
|
||||
return backupStoreInfo{}, errors.Wrapf(err, "failed to list system backups in %v", backupTargetClient.URL)
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (btc *BackupTargetController) syncBackupVolume(backupTarget *longhorn.BackupTarget, backupStoreBackupVolumeNames []string, syncTime metav1.Time, log logrus.FieldLogger) error {
|
||||
backupStoreBackupVolumes := sets.New[string](backupStoreBackupVolumeNames...)
|
||||
func (btc *BackupTargetController) getClusterBVsDuplicatedBVs(backupTarget *longhorn.BackupTarget) (map[string]*longhorn.BackupVolume, sets.Set[string], error) {
|
||||
log := getLoggerForBackupTarget(btc.logger, backupTarget)
|
||||
backupTargetName := backupTarget.Name
|
||||
|
||||
// Get a list of all the backup volumes that exist as custom resources in the cluster
|
||||
clusterBackupVolumes, err := btc.ds.ListBackupVolumesWithBackupTargetNameRO(backupTarget.Name)
|
||||
// Get a list of the backup volumes of the backup target that exist as custom resources in the cluster
|
||||
backupVolumeList, err := btc.ds.ListBackupVolumesWithBackupTargetNameRO(backupTargetName)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
clusterVolumeBVMap := make(map[string]*longhorn.BackupVolume, len(clusterBackupVolumes))
|
||||
duplicateBackupVolumeSet := sets.New[string]()
|
||||
volumeBVMap := make(map[string]*longhorn.BackupVolume, len(backupVolumeList))
|
||||
for _, bv := range backupVolumeList {
|
||||
if bv.Spec.BackupTargetName == "" {
|
||||
log.WithField("backupVolume", bv.Name).Debug("spec.backupTargetName is empty")
|
||||
duplicateBackupVolumeSet.Insert(bv.Name)
|
||||
continue
|
||||
}
|
||||
if bv.Spec.VolumeName == "" {
|
||||
log.WithField("backupVolume", bv.Name).Debug("spec.volumeName is empty")
|
||||
duplicateBackupVolumeSet.Insert(bv.Name)
|
||||
continue
|
||||
}
|
||||
if bv.Spec.BackupTargetName != backupTargetName {
|
||||
log.WithField("backupVolume", bv.Name).Debugf("spec.backupTargetName %v is different from label backup-target", bv.Spec.BackupTargetName)
|
||||
duplicateBackupVolumeSet.Insert(bv.Name)
|
||||
continue
|
||||
}
|
||||
if existingBV, exists := volumeBVMap[bv.Spec.VolumeName]; exists {
|
||||
if existingBV.CreationTimestamp.Before(&bv.CreationTimestamp) {
|
||||
log.WithField("backupVolume", bv.Name).Warnf("Found duplicated BackupVolume with volume name %s", bv.Spec.VolumeName)
|
||||
duplicateBackupVolumeSet.Insert(bv.Name)
|
||||
continue
|
||||
}
|
||||
log.WithField("backupVolume", existingBV.Name).Warnf("Found duplicated BackupVolume with volume name %s", existingBV.Spec.VolumeName)
|
||||
duplicateBackupVolumeSet.Insert(existingBV.Name)
|
||||
}
|
||||
volumeBVMap[bv.Spec.VolumeName] = bv
|
||||
}
|
||||
|
||||
return volumeBVMap, duplicateBackupVolumeSet, nil
|
||||
}
|
||||
|
||||
func (btc *BackupTargetController) syncBackupVolume(backupTarget *longhorn.BackupTarget, backupStoreBackupVolumeNames []string, clusterVolumeBVMap map[string]*longhorn.BackupVolume, syncTime metav1.Time, log logrus.FieldLogger) error {
|
||||
backupStoreBackupVolumes := sets.New[string](backupStoreBackupVolumeNames...)
|
||||
clusterBackupVolumesSet := sets.New[string]()
|
||||
for _, bv := range clusterBackupVolumes {
|
||||
for _, bv := range clusterVolumeBVMap {
|
||||
clusterBackupVolumesSet.Insert(bv.Spec.VolumeName)
|
||||
clusterVolumeBVMap[bv.Spec.VolumeName] = bv
|
||||
}
|
||||
|
||||
// TODO: add a unit test
|
||||
|
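The duplicate detection above keeps, for each volume name, the BackupVolume CR with the earliest creation timestamp and flags the rest (plus any CR with an empty or mismatched spec) for CR-only deletion. A reduced sketch of the keep-the-oldest rule, using only the timestamps (everything else is stripped away):

    import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    // keepExisting mirrors the rule above: when two BackupVolume CRs reference the same
    // volume, the CR created first survives and the other is flagged as a duplicate.
    func keepExisting(existing, candidate metav1.Time) bool {
        return existing.Before(&candidate)
    }
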
@ -580,11 +626,19 @@ func (btc *BackupTargetController) syncBackupVolume(backupTarget *longhorn.Backu
|
|||
|
||||
// Update the BackupVolume CR spec.syncRequestAt to request the
|
||||
// backup_volume_controller to reconcile the BackupVolume CR
|
||||
for backupVolumeName, backupVolume := range clusterBackupVolumes {
|
||||
backupVolume.Spec.SyncRequestedAt = syncTime
|
||||
if _, err = btc.ds.UpdateBackupVolume(backupVolume); err != nil && !apierrors.IsConflict(errors.Cause(err)) {
|
||||
log.WithError(err).Errorf("Failed to update backup volume %s spec", backupVolumeName)
|
||||
multiError := util.NewMultiError()
|
||||
for volumeName, backupVolume := range clusterVolumeBVMap {
|
||||
if !backupStoreBackupVolumes.Has(volumeName) {
|
||||
continue
|
||||
}
|
||||
backupVolume.Spec.SyncRequestedAt = syncTime
|
||||
if _, err := btc.ds.UpdateBackupVolume(backupVolume); err != nil && !apierrors.IsConflict(errors.Cause(err)) {
|
||||
log.WithError(err).Errorf("Failed to update backup volume %s", backupVolume.Name)
|
||||
multiError.Append(util.NewMultiError(fmt.Sprintf("%v: %v", backupVolume.Name, err)))
|
||||
}
|
||||
}
|
||||
if len(multiError) > 0 {
|
||||
return fmt.Errorf("failed to update backup volumes: %v", multiError.Join())
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -596,7 +650,7 @@ func (btc *BackupTargetController) pullBackupVolumeFromBackupTarget(backupTarget
|
|||
log.Infof("Found %d backup volumes in the backup target that do not exist in the cluster and need to be pulled", count)
|
||||
}
|
||||
for remoteVolumeName := range backupVolumesToPull {
|
||||
backupVolumeName := types.GetBackupVolumeNameFromVolumeName(remoteVolumeName)
|
||||
backupVolumeName := types.GetBackupVolumeNameFromVolumeName(remoteVolumeName, backupTarget.Name)
|
||||
backupVolume := &longhorn.BackupVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: backupVolumeName,
|
||||
|
@ -624,6 +678,7 @@ func (btc *BackupTargetController) cleanupBackupVolumeNotExistOnBackupTarget(clu
|
|||
log.Infof("Found %d backup volumes in the backup target that do not exist in the cluster and need to be deleted from the cluster", count)
|
||||
}
|
||||
|
||||
multiError := util.NewMultiError()
|
||||
for volumeName := range backupVolumesToDelete {
|
||||
bv, exists := clusterVolumeBVMap[volumeName]
|
||||
if !exists {
|
||||
|
@ -633,14 +688,50 @@ func (btc *BackupTargetController) cleanupBackupVolumeNotExistOnBackupTarget(clu
|
|||
|
||||
backupVolumeName := bv.Name
|
||||
log.WithField("backupVolume", backupVolumeName).Info("Deleting BackupVolume not exist in backupstore")
|
||||
if err = datastore.AddBackupVolumeDeleteCustomResourceOnlyLabel(btc.ds, backupVolumeName); err != nil {
|
||||
return errors.Wrapf(err, "failed to add label delete-custom-resource-only to Backupvolume %s", backupVolumeName)
|
||||
}
|
||||
if err = btc.ds.DeleteBackupVolume(backupVolumeName); err != nil {
|
||||
return errors.Wrapf(err, "failed to delete backup volume %s from cluster", backupVolumeName)
|
||||
if err := btc.deleteBackupVolumeCROnly(backupVolumeName, log); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
multiError.Append(util.NewMultiError(fmt.Sprintf("%v: %v", backupVolumeName, err)))
|
||||
}
|
||||
}
|
||||
|
||||
if len(multiError) > 0 {
|
||||
return fmt.Errorf("failed to delete backup volumes from cluster: %v", multiError.Join())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (btc *BackupTargetController) deleteBackupVolumeCROnly(backupVolumeName string, log logrus.FieldLogger) error {
|
||||
if err := datastore.AddBackupVolumeDeleteCustomResourceOnlyLabel(btc.ds, backupVolumeName); err != nil {
|
||||
return errors.Wrapf(err, "failed to add label delete-custom-resource-only to BackupVolume %s", backupVolumeName)
|
||||
}
|
||||
if err := btc.ds.DeleteBackupVolume(backupVolumeName); err != nil {
|
||||
return errors.Wrapf(err, "failed to delete BackupVolume %s", backupVolumeName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (btc *BackupTargetController) cleanupDuplicateBackupVolumeForBackupTarget(backupTarget *longhorn.BackupTarget, duplicateBackupVolumesSet sets.Set[string]) (err error) {
|
||||
log := getLoggerForBackupTarget(btc.logger, backupTarget)
|
||||
if count := duplicateBackupVolumesSet.Len(); count > 0 {
|
||||
log.Infof("Found %d duplicated backup volume CRs for the backup target and need to be deleted from the cluster", count)
|
||||
}
|
||||
|
||||
multiError := util.NewMultiError()
|
||||
for bvName := range duplicateBackupVolumesSet {
|
||||
log.WithField("backupVolume", bvName).Info("Deleting BackupVolume that has duplicate volume name in cluster")
|
||||
if err := btc.deleteBackupVolumeCROnly(bvName, log); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
multiError.Append(util.NewMultiError(fmt.Sprintf("%v: %v", bvName, err)))
|
||||
}
|
||||
}
|
||||
|
||||
if len(multiError) > 0 {
|
||||
return fmt.Errorf("failed to delete backup volumes: %v", multiError.Join())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -24,6 +24,8 @@ import (
|
|||
|
||||
"github.com/longhorn/backupstore"
|
||||
|
||||
lhbackup "github.com/longhorn/go-common-libs/backup"
|
||||
|
||||
"github.com/longhorn/longhorn-manager/datastore"
|
||||
"github.com/longhorn/longhorn-manager/types"
|
||||
"github.com/longhorn/longhorn-manager/util"
|
||||
|
@ -321,6 +323,11 @@ func (bvc *BackupVolumeController) reconcile(backupVolumeName string) (err error
|
|||
backupLabelMap := map[string]string{}
|
||||
|
||||
backupURL := backupstore.EncodeBackupURL(backupName, canonicalBVName, backupTargetClient.URL)
|
||||
|
||||
// If the block size is unavailable from a legacy remote backup, the size falls back to the legacy default value of 2MiB.
|
||||
// If the size value is invalid, it still creates a backup with the invalid block size, but restoring the volume will be rejected by the volume validator.
|
||||
var blockSize = types.BackupBlockSizeInvalid
|
||||
|
||||
if backupInfo, err := backupTargetClient.BackupGet(backupURL, backupTargetClient.Credential); err != nil && !types.ErrorIsNotFound(err) {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"backup": backupName,
|
||||
|
@ -331,6 +338,18 @@ func (bvc *BackupVolumeController) reconcile(backupVolumeName string) (err error
|
|||
if accessMode, exist := backupInfo.Labels[types.GetLonghornLabelKey(types.LonghornLabelVolumeAccessMode)]; exist {
|
||||
backupLabelMap[types.GetLonghornLabelKey(types.LonghornLabelVolumeAccessMode)] = accessMode
|
||||
}
|
||||
backupBlockSizeParam := backupInfo.Parameters[lhbackup.LonghornBackupParameterBackupBlockSize]
|
||||
if blockSizeBytes, convertErr := util.ConvertSize(backupBlockSizeParam); convertErr != nil {
|
||||
log.WithError(convertErr).Warnf("Invalid backup block size string from the remote backup %v: %v", backupName, backupBlockSizeParam)
|
||||
} else if sizeErr := types.ValidateBackupBlockSize(-1, blockSizeBytes); sizeErr != nil {
|
||||
log.WithError(sizeErr).Warnf("Invalid backup block size from the remote backup %v: %v", backupName, backupBlockSizeParam)
|
||||
} else {
|
||||
if blockSizeBytes == 0 {
|
||||
blockSize = types.BackupBlockSize2Mi
|
||||
} else {
|
||||
blockSize = blockSizeBytes
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -343,7 +362,8 @@ func (bvc *BackupVolumeController) reconcile(backupVolumeName string) (err error
|
|||
OwnerReferences: datastore.GetOwnerReferencesForBackupVolume(backupVolume),
|
||||
},
|
||||
Spec: longhorn.BackupSpec{
|
||||
Labels: backupLabelMap,
|
||||
Labels: backupLabelMap,
|
||||
BackupBlockSize: blockSize,
|
||||
},
|
||||
}
|
||||
if _, err = bvc.ds.CreateBackup(backup, canonicalBVName); err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
|
|
|
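The block-size handling above resolves to one of three outcomes: an unparsable or out-of-range parameter leaves the Backup at types.BackupBlockSizeInvalid, a missing/zero value falls back to the 2MiB legacy default, and any other validated value is used as-is. A condensed sketch of that decision chain, assuming util.ConvertSize and types.ValidateBackupBlockSize behave as they are used above and that block sizes are int64:

    // resolveBackupBlockSize condenses the fallback chain used when recreating a Backup
    // CR from a remote backup's parameters.
    func resolveBackupBlockSize(param string) int64 {
        blockSizeBytes, err := util.ConvertSize(param)
        if err != nil {
            return types.BackupBlockSizeInvalid // unparsable string from the backup store
        }
        if err := types.ValidateBackupBlockSize(-1, blockSizeBytes); err != nil {
            return types.BackupBlockSizeInvalid // value outside the allowed range
        }
        if blockSizeBytes == 0 {
            return types.BackupBlockSize2Mi // legacy backup without the parameter
        }
        return blockSizeBytes
    }
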
@ -3,7 +3,6 @@ package controller
|
|||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
@ -259,31 +258,18 @@ func GetInstanceManagerCPURequirement(ds *datastore.DataStore, imName string) (*
|
|||
|
||||
cpuRequest := 0
|
||||
switch im.Spec.DataEngine {
|
||||
case longhorn.DataEngineTypeV1:
|
||||
case longhorn.DataEngineTypeV1, longhorn.DataEngineTypeV2:
|
||||
// TODO: Currently lhNode.Spec.InstanceManagerCPURequest is applied to both v1 and v2 data engines.
|
||||
// In the future, we may want to support different CPU requests for them.
|
||||
cpuRequest = lhNode.Spec.InstanceManagerCPURequest
|
||||
if cpuRequest == 0 {
|
||||
guaranteedCPUSetting, err := ds.GetSettingWithAutoFillingRO(types.SettingNameGuaranteedInstanceManagerCPU)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
guaranteedCPUPercentage, err := strconv.ParseFloat(guaranteedCPUSetting.Value, 64)
|
||||
guaranteedCPUPercentage, err := ds.GetSettingAsFloatByDataEngine(types.SettingNameGuaranteedInstanceManagerCPU, im.Spec.DataEngine)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allocatableMilliCPU := float64(kubeNode.Status.Allocatable.Cpu().MilliValue())
|
||||
cpuRequest = int(math.Round(allocatableMilliCPU * guaranteedCPUPercentage / 100.0))
|
||||
}
|
||||
case longhorn.DataEngineTypeV2:
|
||||
// TODO: Support CPU request per node for v2 volumes
|
||||
guaranteedCPUSetting, err := ds.GetSettingWithAutoFillingRO(types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
guaranteedCPURequest, err := strconv.ParseFloat(guaranteedCPUSetting.Value, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpuRequest = int(guaranteedCPURequest)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown data engine %v", im.Spec.DataEngine)
|
||||
}
|
||||
|
|
|
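After this change both data engines resolve the instance manager CPU request the same way: the node-level lhNode.Spec.InstanceManagerCPURequest wins when set, otherwise the request is a percentage of the node's allocatable CPU taken from the (now per-data-engine) guaranteed instance manager CPU setting. A worked sketch of the fallback arithmetic with made-up numbers:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // Example: 4000m allocatable CPU on the node, a setting value of 12 (%), and the
        // node-level InstanceManagerCPURequest assumed to be 0 (unset).
        allocatableMilliCPU := float64(4000)
        guaranteedCPUPercentage := 12.0

        cpuRequest := int(math.Round(allocatableMilliCPU * guaranteedCPUPercentage / 100.0))
        fmt.Printf("instance manager CPU request: %dm\n", cpuRequest) // 480m
    }
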
@ -620,8 +620,8 @@ func randomPort() int {
|
|||
return rand.Int() % 30000
|
||||
}
|
||||
|
||||
func fakeEngineBinaryChecker(image string) bool {
|
||||
return true
|
||||
func fakeEngineBinaryChecker(image string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fakeEngineImageUpdater(ei *longhorn.EngineImage) error {
|
||||
|
|
|
@ -467,7 +467,7 @@ func (ec *EngineController) CreateInstance(obj interface{}) (*longhorn.InstanceP
|
|||
}
|
||||
}(c)
|
||||
|
||||
engineReplicaTimeout, err := ec.ds.GetSettingAsInt(types.SettingNameEngineReplicaTimeout)
|
||||
engineReplicaTimeout, err := ec.ds.GetSettingAsIntByDataEngine(types.SettingNameEngineReplicaTimeout, e.Spec.DataEngine)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -494,6 +494,11 @@ func (ec *EngineController) CreateInstance(obj interface{}) (*longhorn.InstanceP
|
|||
|
||||
instanceManagerStorageIP := ec.ds.GetStorageIPFromPod(instanceManagerPod)
|
||||
|
||||
e.Status.Starting = true
|
||||
if e, err = ec.ds.UpdateEngineStatus(e); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to update engine %v status.starting to true before sending instance create request", e.Name)
|
||||
}
|
||||
|
||||
return c.EngineInstanceCreate(&engineapi.EngineInstanceCreateRequest{
|
||||
Engine: e,
|
||||
VolumeFrontend: frontend,
|
||||
|
@ -599,7 +604,7 @@ func (ec *EngineController) DeleteInstance(obj interface{}) (err error) {
|
|||
}
|
||||
}(c)
|
||||
|
||||
err = c.InstanceDelete(e.Spec.DataEngine, e.Name, string(longhorn.InstanceManagerTypeEngine), "", true)
|
||||
err = c.InstanceDelete(e.Spec.DataEngine, e.Name, "", string(longhorn.InstanceManagerTypeEngine), "", true)
|
||||
if err != nil && !types.ErrorIsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
@ -944,6 +949,10 @@ func (m *EngineMonitor) refresh(engine *longhorn.Engine) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := m.checkAndApplyRebuildQoS(engine, engineClientProxy, rebuildStatus); err != nil {
|
||||
return err
|
||||
}
|
||||
engine.Status.RebuildStatus = rebuildStatus
|
||||
|
||||
// It's meaningless to sync the trim related field for old engines or engines in old engine instance managers
|
||||
|
@ -1129,6 +1138,66 @@ func (m *EngineMonitor) refresh(engine *longhorn.Engine) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *EngineMonitor) checkAndApplyRebuildQoS(engine *longhorn.Engine, engineClientProxy engineapi.EngineClientProxy, rebuildStatus map[string]*longhorn.RebuildStatus) error {
|
||||
if !types.IsDataEngineV2(engine.Spec.DataEngine) {
|
||||
return nil
|
||||
}
|
||||
|
||||
expectedQoSValue, err := m.getEffectiveRebuildQoS(engine)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for replica, newStatus := range rebuildStatus {
|
||||
if newStatus == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var appliedQoS int64
|
||||
if oldStatus, exists := engine.Status.RebuildStatus[replica]; exists && oldStatus != nil {
|
||||
appliedQoS = oldStatus.AppliedRebuildingMBps
|
||||
}
|
||||
|
||||
if appliedQoS == expectedQoSValue {
|
||||
newStatus.AppliedRebuildingMBps = appliedQoS
|
||||
continue
|
||||
}
|
||||
|
||||
if !newStatus.IsRebuilding {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := engineClientProxy.ReplicaRebuildQosSet(engine, expectedQoSValue); err != nil {
|
||||
m.logger.WithError(err).Warnf("[qos] Failed to set QoS for volume %s, replica %s", engine.Spec.VolumeName, replica)
|
||||
continue
|
||||
}
|
||||
newStatus.AppliedRebuildingMBps = expectedQoSValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *EngineMonitor) getEffectiveRebuildQoS(engine *longhorn.Engine) (int64, error) {
|
||||
if types.IsDataEngineV1(engine.Spec.DataEngine) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
globalQoS, err := m.ds.GetSettingAsIntByDataEngine(types.SettingNameReplicaRebuildingBandwidthLimit, engine.Spec.DataEngine)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
volume, err := m.ds.GetVolumeRO(engine.Spec.VolumeName)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if volume.Spec.ReplicaRebuildingBandwidthLimit > 0 {
|
||||
return volume.Spec.ReplicaRebuildingBandwidthLimit, nil
|
||||
}
|
||||
|
||||
return globalQoS, nil
|
||||
}
|
||||
|
||||
func isBackupRestoreFailed(rsMap map[string]*longhorn.RestoreStatus) bool {
|
||||
for _, status := range rsMap {
|
||||
if status.IsRestoring {
|
||||
|
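getEffectiveRebuildQoS above gives a non-zero volume-level spec.replicaRebuildingBandwidthLimit precedence over the per-data-engine setting, and checkAndApplyRebuildQoS only pushes the value to replicas that are actually rebuilding and whose applied value differs. A condensed sketch of the precedence rule with the datastore lookups replaced by plain integers:

    // effectiveRebuildQoS mirrors the precedence used above: a non-zero volume-level
    // limit wins, otherwise the global (per-data-engine) setting applies.
    func effectiveRebuildQoS(volumeLimitMBps, globalLimitMBps int64) int64 {
        if volumeLimitMBps > 0 {
            return volumeLimitMBps
        }
        return globalLimitMBps
    }
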
@ -1445,7 +1514,7 @@ func (m *EngineMonitor) restoreBackup(engine *longhorn.Engine, rsMap map[string]
|
|||
|
||||
backupTargetClient, err := newBackupTargetClientFromDefaultEngineImage(m.ds, backupTarget)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot get backup target config for backup restoration of engine %v", engine.Name)
|
||||
return errors.Wrapf(err, "failed to get backup target client for backup restoration of engine %v", engine.Name)
|
||||
}
|
||||
|
||||
mlog := m.logger.WithFields(logrus.Fields{
|
||||
|
@ -1457,7 +1526,8 @@ func (m *EngineMonitor) restoreBackup(engine *longhorn.Engine, rsMap map[string]
|
|||
|
||||
concurrentLimit, err := m.ds.GetSettingAsInt(types.SettingNameRestoreConcurrentLimit)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to assert %v value", types.SettingNameRestoreConcurrentLimit)
|
||||
return errors.Wrapf(err, "failed to get %v setting for backup restoration of engine %v",
|
||||
types.SettingNameRestoreConcurrentLimit, engine.Name)
|
||||
}
|
||||
|
||||
mlog.Info("Restoring backup")
|
||||
|
@ -1616,7 +1686,6 @@ func (ec *EngineController) ReconcileEngineState(e *longhorn.Engine) error {
|
|||
if err := ec.rebuildNewReplica(e); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1752,13 +1821,13 @@ func (ec *EngineController) startRebuilding(e *longhorn.Engine, replicaName, add
|
|||
go func() {
|
||||
autoCleanupSystemGeneratedSnapshot, err := ec.ds.GetSettingAsBool(types.SettingNameAutoCleanupSystemGeneratedSnapshot)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Failed to get %v setting", types.SettingDefinitionAutoCleanupSystemGeneratedSnapshot)
|
||||
log.WithError(err).Errorf("Failed to get %v setting", types.SettingNameAutoCleanupSystemGeneratedSnapshot)
|
||||
return
|
||||
}
|
||||
|
||||
fastReplicaRebuild, err := ec.ds.GetSettingAsBool(types.SettingNameFastReplicaRebuildEnabled)
|
||||
fastReplicaRebuild, err := ec.ds.GetSettingAsBoolByDataEngine(types.SettingNameFastReplicaRebuildEnabled, e.Spec.DataEngine)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Failed to get %v setting", types.SettingNameFastReplicaRebuildEnabled)
|
||||
log.WithError(err).Errorf("Failed to get %v setting for data engine %v", types.SettingNameFastReplicaRebuildEnabled, e.Spec.DataEngine)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -2040,8 +2109,8 @@ func getReplicaRebuildFailedReasonFromError(errMsg string) (string, longhorn.Con
|
|||
}
|
||||
}
|
||||
|
||||
func (ec *EngineController) waitForV2EngineRebuild(e *longhorn.Engine, replicaName string, timeout int64) (err error) {
|
||||
if !types.IsDataEngineV2(e.Spec.DataEngine) {
|
||||
func (ec *EngineController) waitForV2EngineRebuild(engine *longhorn.Engine, replicaName string, timeout int64) (err error) {
|
||||
if !types.IsDataEngineV2(engine.Spec.DataEngine) {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -2052,11 +2121,11 @@ func (ec *EngineController) waitForV2EngineRebuild(e *longhorn.Engine, replicaNa
|
|||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
e, err = ec.ds.GetEngineRO(e.Name)
|
||||
e, err := ec.ds.GetEngineRO(engine.Name)
|
||||
if err != nil {
|
||||
// There is no need to continue if the engine is not found
|
||||
if apierrors.IsNotFound(err) {
|
||||
return errors.Wrapf(err, "engine %v not found during v2 replica %s rebuild wait", e.Name, replicaName)
|
||||
return errors.Wrapf(err, "engine %v not found during v2 replica %s rebuild wait", engine.Name, replicaName)
|
||||
}
|
||||
// There may be something wrong with the indexer or the API server, will retry
|
||||
continue
|
||||
|
@ -2159,7 +2228,7 @@ func (ec *EngineController) UpgradeEngineInstance(e *longhorn.Engine, log *logru
|
|||
}
|
||||
}(c)
|
||||
|
||||
engineReplicaTimeout, err := ec.ds.GetSettingAsInt(types.SettingNameEngineReplicaTimeout)
|
||||
engineReplicaTimeout, err := ec.ds.GetSettingAsIntByDataEngine(types.SettingNameEngineReplicaTimeout, e.Spec.DataEngine)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -2201,6 +2270,7 @@ func (ec *EngineController) UpgradeEngineInstance(e *longhorn.Engine, log *logru
|
|||
}
|
||||
|
||||
e.Status.Port = int(engineInstance.Status.PortStart)
|
||||
e.Status.UUID = engineInstance.Status.UUID
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -58,7 +58,7 @@ type EngineImageController struct {
|
|||
|
||||
// for unit test
|
||||
nowHandler func() string
|
||||
engineBinaryChecker func(string) bool
|
||||
engineBinaryChecker func(string) (bool, error)
|
||||
engineImageVersionUpdater func(*longhorn.EngineImage) error
|
||||
}
|
||||
|
||||
|
@ -321,8 +321,9 @@ func (ic *EngineImageController) syncEngineImage(key string) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
if !ic.engineBinaryChecker(engineImage.Spec.Image) {
|
||||
engineImage.Status.Conditions = types.SetCondition(engineImage.Status.Conditions, longhorn.EngineImageConditionTypeReady, longhorn.ConditionStatusFalse, longhorn.EngineImageConditionTypeReadyReasonDaemonSet, "engine binary check failed")
|
||||
ok, err := ic.engineBinaryChecker(engineImage.Spec.Image)
|
||||
if !ok {
|
||||
engineImage.Status.Conditions = types.SetCondition(engineImage.Status.Conditions, longhorn.EngineImageConditionTypeReady, longhorn.ConditionStatusFalse, longhorn.EngineImageConditionTypeReadyReasonDaemonSet, errors.Errorf("engine binary check failed: %v", err).Error())
|
||||
engineImage.Status.State = longhorn.EngineImageStateDeploying
|
||||
return nil
|
||||
}
|
||||
|
|
|
@@ -49,7 +49,7 @@ func NewInstanceHandler(ds *datastore.DataStore, instanceManagerHandler Instance
}
}

func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceManager, instanceName string, spec *longhorn.InstanceSpec, status *longhorn.InstanceStatus, instances map[string]longhorn.InstanceProcess) {
func (h *InstanceHandler) syncStatusWithInstanceManager(log *logrus.Entry, im *longhorn.InstanceManager, instanceName string, spec *longhorn.InstanceSpec, status *longhorn.InstanceStatus, instances map[string]longhorn.InstanceProcess) {
defer func() {
if status.CurrentState == longhorn.InstanceStateStopped {
status.InstanceManagerName = ""

@@ -64,7 +64,7 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
if im == nil || im.Status.CurrentState == longhorn.InstanceManagerStateUnknown || isDelinquent {
if status.Started {
if status.CurrentState != longhorn.InstanceStateUnknown {
logrus.Warnf("Marking the instance as state UNKNOWN since the related node %v of instance %v is down or deleted", spec.NodeID, instanceName)
log.Warnf("Marking the instance as state UNKNOWN since the related node %v of instance %v is down or deleted", spec.NodeID, instanceName)
}
status.CurrentState = longhorn.InstanceStateUnknown
} else {

@@ -75,6 +75,7 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
status.StorageIP = ""
status.Port = 0
status.UblkID = 0
status.UUID = ""
h.resetInstanceErrorCondition(status)
return
}

@@ -95,6 +96,7 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
status.StorageIP = ""
status.Port = 0
status.UblkID = 0
status.UUID = ""
h.resetInstanceErrorCondition(status)
return
}

@@ -110,6 +112,7 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
status.StorageIP = ""
status.Port = 0
status.UblkID = 0
status.UUID = ""
h.resetInstanceErrorCondition(status)
}
return

@@ -119,7 +122,7 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
if !exists {
if status.Started {
if status.CurrentState != longhorn.InstanceStateError {
logrus.Warnf("Marking the instance as state ERROR since failed to find the instance status in instance manager %v for the running instance %v", im.Name, instanceName)
log.Warnf("Marking the instance as state ERROR since failed to find the instance status in instance manager %v for the running instance %v", im.Name, instanceName)
}
status.CurrentState = longhorn.InstanceStateError
} else {

@@ -130,12 +133,13 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
status.StorageIP = ""
status.Port = 0
status.UblkID = 0
status.UUID = ""
h.resetInstanceErrorCondition(status)
return
}

if status.InstanceManagerName != "" && status.InstanceManagerName != im.Name {
logrus.Errorf("The related process of instance %v is found in the instance manager %v, but the instance manager name in the instance status is %v. "+
log.Errorf("The related process of instance %v is found in the instance manager %v, but the instance manager name in the instance status is %v. "+
"The instance manager name shouldn't change except for cleanup",
instanceName, im.Name, status.InstanceManagerName)
}

@@ -153,34 +157,41 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
status.StorageIP = ""
status.Port = 0
status.UblkID = 0
status.UUID = ""
h.resetInstanceErrorCondition(status)
case longhorn.InstanceStateRunning:
status.CurrentState = longhorn.InstanceStateRunning

imPod, err := h.ds.GetPodRO(im.Namespace, im.Name)
if err != nil {
logrus.WithError(err).Errorf("Failed to get instance manager pod from %v", im.Name)
log.WithError(err).Errorf("Failed to get instance manager pod from %v", im.Name)
return
}

if imPod == nil {
logrus.Warnf("Instance manager pod from %v not exist in datastore", im.Name)
log.Warnf("Instance manager pod from %v not exist in datastore", im.Name)
return
}

storageIP := h.ds.GetStorageIPFromPod(imPod)
if status.StorageIP != storageIP {
if status.StorageIP != "" {
log.Warnf("Instance %v is state running in instance manager %s, but its status Storage IP %s does not match the instance manager recorded Storage IP %s", instanceName, im.Name, status.StorageIP, storageIP)
}
status.StorageIP = storageIP
logrus.Warnf("Instance %v starts running, Storage IP %v", instanceName, status.StorageIP)
}

if status.IP != im.Status.IP {
if status.IP != "" {
log.Warnf("Instance %v is state running in instance manager %s, but its status IP %s does not match the instance manager recorded IP %s", instanceName, im.Name, status.IP, im.Status.IP)
}
status.IP = im.Status.IP
logrus.Warnf("Instance %v starts running, IP %v", instanceName, status.IP)
}
if status.Port != int(instance.Status.PortStart) {
if status.Port != 0 {
log.Warnf("Instance %v is state running in instance manager %s, but its status Port %d does not match the instance manager recorded Port %d", instanceName, im.Name, status.Port, instance.Status.PortStart)
}
status.Port = int(instance.Status.PortStart)
logrus.Warnf("Instance %v starts running, Port %d", instanceName, status.Port)
}
if status.UblkID != instance.Status.UblkID {
status.UblkID = instance.Status.UblkID

@@ -190,6 +201,11 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
if status.CurrentImage == "" {
status.CurrentImage = spec.Image
}

if status.UUID != instance.Status.UUID {
status.UUID = instance.Status.UUID
}

h.syncInstanceCondition(instance, status)

case longhorn.InstanceStateStopping:

@@ -203,6 +219,7 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
status.StorageIP = ""
status.Port = 0
status.UblkID = 0
status.UUID = ""
h.resetInstanceErrorCondition(status)
case longhorn.InstanceStateStopped:
if status.Started {

@@ -215,10 +232,11 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
status.StorageIP = ""
status.Port = 0
status.UblkID = 0
status.UUID = ""
h.resetInstanceErrorCondition(status)
default:
if status.CurrentState != longhorn.InstanceStateError {
logrus.Warnf("Instance %v is state %v, error message: %v", instanceName, instance.Status.State, instance.Status.ErrorMsg)
log.Warnf("Instance %v is state %v, error message: %v", instanceName, instance.Status.State, instance.Status.ErrorMsg)
}
status.CurrentState = longhorn.InstanceStateError
status.CurrentImage = ""

@@ -226,6 +244,7 @@ func (h *InstanceHandler) syncStatusWithInstanceManager(im *longhorn.InstanceMan
status.StorageIP = ""
status.Port = 0
status.UblkID = 0
status.UUID = ""
h.resetInstanceErrorCondition(status)
}
}

@@ -265,7 +284,14 @@ func (h *InstanceHandler) ReconcileInstanceState(obj interface{}, spec *longhorn
return err
}

log := logrus.WithField("instance", instanceName)
log := logrus.WithFields(logrus.Fields{"instance": instanceName, "volumeName": spec.VolumeName, "dataEngine": spec.DataEngine, "specNodeID": spec.NodeID})

stateBeforeReconcile := status.CurrentState
defer func() {
if stateBeforeReconcile != status.CurrentState {
log.Infof("Instance %v state is updated from %v to %v", instanceName, stateBeforeReconcile, status.CurrentState)
}
}()

var im *longhorn.InstanceManager
if status.InstanceManagerName != "" {

@@ -297,6 +323,9 @@ func (h *InstanceHandler) ReconcileInstanceState(obj interface{}, spec *longhorn
}
}
}
if im != nil {
log = log.WithFields(logrus.Fields{"instanceManager": im.Name})
}

if spec.LogRequested {
if !status.LogFetched {

@@ -339,6 +368,7 @@ func (h *InstanceHandler) ReconcileInstanceState(obj interface{}, spec *longhorn

if i, exists := instances[instanceName]; exists && i.Status.State == longhorn.InstanceStateRunning {
status.Started = true
status.Starting = false
break
}

@@ -359,23 +389,29 @@ func (h *InstanceHandler) ReconcileInstanceState(obj interface{}, spec *longhorn
}

case longhorn.InstanceStateStopped:
shouldDelete := false
if im != nil && im.DeletionTimestamp == nil {
if _, exists := instances[instanceName]; exists {
shouldDelete = true
}
}
if status.Starting {
shouldDelete = true
}
if shouldDelete {
// there is a delay between deleteInstance() invocation and state/InstanceManager update,
// deleteInstance() may be called multiple times.
if instance, exists := instances[instanceName]; exists {
if shouldDeleteInstance(&instance) {
if err := h.deleteInstance(instanceName, runtimeObj); err != nil {
return err
}
}
if err := h.deleteInstance(instanceName, runtimeObj); err != nil {
return err
}
}
status.Started = false
status.Starting = false
default:
return fmt.Errorf("unknown instance desire state: desire %v", spec.DesireState)
}

h.syncStatusWithInstanceManager(im, instanceName, spec, status, instances)
h.syncStatusWithInstanceManager(log, im, instanceName, spec, status, instances)

switch status.CurrentState {
case longhorn.InstanceStateRunning:

@@ -404,10 +440,10 @@ func (h *InstanceHandler) ReconcileInstanceState(obj interface{}, spec *longhorn
}

if types.IsDataEngineV1(instance.Spec.DataEngine) {
logrus.Warnf("Instance %v crashed on Instance Manager %v at %v, getting log",
log.Warnf("Instance %v crashed on Instance Manager %v at %v, getting log",
instanceName, im.Name, im.Spec.NodeID)
if err := h.printInstanceLogs(instanceName, runtimeObj); err != nil {
logrus.WithError(err).Warnf("failed to get crash log for instance %v on Instance Manager %v at %v",
log.WithError(err).Warnf("failed to get crash log for instance %v on Instance Manager %v at %v",
instanceName, im.Name, im.Spec.NodeID)
}
}

@@ -416,17 +452,6 @@ func (h *InstanceHandler) ReconcileInstanceState(obj interface{}, spec *longhorn
return nil
}

func shouldDeleteInstance(instance *longhorn.InstanceProcess) bool {
// For a replica of a SPDK volume, a stopped replica means the lvol is not exposed,
// but the lvol is still there. We don't need to delete it.
if types.IsDataEngineV2(instance.Spec.DataEngine) {
if instance.Status.State == longhorn.InstanceStateStopped {
return false
}
}
return true
}

func (h *InstanceHandler) getInstancesFromInstanceManager(obj runtime.Object, instanceManager *longhorn.InstanceManager) (map[string]longhorn.InstanceProcess, error) {
switch obj.(type) {
case *longhorn.Engine:
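Throughout the instance handler hunks above, bare logrus calls are replaced with a *logrus.Entry that is built once in ReconcileInstanceState and passed into syncStatusWithInstanceManager, so every message carries the instance, volume, data engine, and node fields. A minimal sketch of that pattern follows; the field values and the syncStatus function are illustrative assumptions, only the logrus API usage mirrors the diff.

package main

import "github.com/sirupsen/logrus"

// syncStatus receives the pre-scoped entry instead of using the global logger,
// so the caller controls which fields every log line carries.
func syncStatus(log *logrus.Entry, currentState string) {
	if currentState == "unknown" {
		log.Warn("Marking the instance as state UNKNOWN")
	}
}

func main() {
	log := logrus.WithFields(logrus.Fields{
		"instance":   "pvc-123-e-0",
		"volumeName": "pvc-123",
		"dataEngine": "v2",
		"specNodeID": "node-1",
	})
	// Per-call refinement, e.g. once the owning instance manager is known.
	log = log.WithFields(logrus.Fields{"instanceManager": "instance-manager-abc"})
	syncStatus(log, "unknown")
}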
@@ -19,6 +19,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/controller"

corev1 "k8s.io/api/core/v1"

@@ -62,6 +63,8 @@ type InstanceManagerController struct {

proxyConnCounter util.Counter

backoff *flowcontrol.Backoff

// for unit test
versionUpdater func(*longhorn.InstanceManager) error
}

@@ -140,6 +143,8 @@ func NewInstanceManagerController(
proxyConnCounter: proxyConnCounter,

versionUpdater: updateInstanceManagerVersion,

backoff: newBackoff(context.TODO()),
}

var err error

@@ -209,7 +214,7 @@ func (imc *InstanceManagerController) isResponsibleForSetting(obj interface{}) b
}

return types.SettingName(setting.Name) == types.SettingNameKubernetesClusterAutoscalerEnabled ||
types.SettingName(setting.Name) == types.SettingNameV2DataEngineCPUMask ||
types.SettingName(setting.Name) == types.SettingNameDataEngineCPUMask ||
types.SettingName(setting.Name) == types.SettingNameOrphanResourceAutoDeletion
}

@@ -460,12 +465,14 @@ func (imc *InstanceManagerController) syncStatusWithPod(im *longhorn.InstanceMan
im.Status.CurrentState = longhorn.InstanceManagerStateStopped
return nil
}
imc.logger.Warnf("Instance manager pod %v is not found, updating the instance manager state from %s to error", im.Name, im.Status.CurrentState)
im.Status.CurrentState = longhorn.InstanceManagerStateError
return nil
}

// By design instance manager pods should not be terminated.
if pod.DeletionTimestamp != nil {
imc.logger.Warnf("Instance manager pod %v is being deleted, updating the instance manager state from %s to error", im.Name, im.Status.CurrentState)
im.Status.CurrentState = longhorn.InstanceManagerStateError
return nil
}

@@ -488,6 +495,7 @@ func (imc *InstanceManagerController) syncStatusWithPod(im *longhorn.InstanceMan
im.Status.CurrentState = longhorn.InstanceManagerStateStarting
}
default:
imc.logger.Warnf("Instance manager pod %v is in phase %s, updating the instance manager state from %s to error", im.Name, pod.Status.Phase, im.Status.CurrentState)
im.Status.CurrentState = longhorn.InstanceManagerStateError
}

@@ -541,12 +549,12 @@ func (imc *InstanceManagerController) isDateEngineCPUMaskApplied(im *longhorn.In
return im.Spec.DataEngineSpec.V2.CPUMask == im.Status.DataEngineStatus.V2.CPUMask, nil
}

setting, err := imc.ds.GetSettingWithAutoFillingRO(types.SettingNameV2DataEngineCPUMask)
value, err := imc.ds.GetSettingValueExistedByDataEngine(types.SettingNameDataEngineCPUMask, im.Spec.DataEngine)
if err != nil {
return true, errors.Wrapf(err, "failed to get %v setting for updating data engine CPU mask", types.SettingNameV2DataEngineCPUMask)
return true, errors.Wrapf(err, "failed to get %v setting for updating data engine CPU mask", types.SettingNameDataEngineCPUMask)
}

return setting.Value == im.Status.DataEngineStatus.V2.CPUMask, nil
return value == im.Status.DataEngineStatus.V2.CPUMask, nil
}

func (imc *InstanceManagerController) syncLogSettingsToInstanceManagerPod(im *longhorn.InstanceManager) error {

@@ -566,36 +574,41 @@ func (imc *InstanceManagerController) syncLogSettingsToInstanceManagerPod(im *lo

settingNames := []types.SettingName{
types.SettingNameLogLevel,
types.SettingNameV2DataEngineLogLevel,
types.SettingNameV2DataEngineLogFlags,
types.SettingNameDataEngineLogLevel,
types.SettingNameDataEngineLogFlags,
}

for _, settingName := range settingNames {
setting, err := imc.ds.GetSettingWithAutoFillingRO(settingName)
if err != nil {
return err
}

switch settingName {
case types.SettingNameLogLevel:
// We use this to set the instance-manager log level, for either engine type.
err = client.LogSetLevel("", "", setting.Value)
value, err := imc.ds.GetSettingValueExisted(settingName)
if err != nil {
return errors.Wrapf(err, "failed to set instance-manager log level to setting %v value: %v", settingName, setting.Value)
return err
}
case types.SettingNameV2DataEngineLogLevel:
// We use this to set the spdk_tgt log level independently of the instance-manager's.
// We use this to set the instance-manager log level, for either engine type.
err = client.LogSetLevel("", "", value)
if err != nil {
return errors.Wrapf(err, "failed to set instance-manager log level to setting %v value: %v", settingName, value)
}
case types.SettingNameDataEngineLogLevel:
// We use this to set the data engine (such as spdk_tgt for v2 data engine) log level independently of the instance-manager's.
if types.IsDataEngineV2(im.Spec.DataEngine) {
err = client.LogSetLevel(longhorn.DataEngineTypeV2, "", setting.Value)
value, err := imc.ds.GetSettingValueExistedByDataEngine(settingName, im.Spec.DataEngine)
if err != nil {
return errors.Wrapf(err, "failed to set spdk_tgt log level to setting %v value: %v", settingName, setting.Value)
return err
}
if err := client.LogSetLevel(longhorn.DataEngineTypeV2, "", value); err != nil {
return errors.Wrapf(err, "failed to set data engine log level to setting %v value: %v", settingName, value)
}
}
case types.SettingNameV2DataEngineLogFlags:
case types.SettingNameDataEngineLogFlags:
if types.IsDataEngineV2(im.Spec.DataEngine) {
err = client.LogSetFlags(longhorn.DataEngineTypeV2, "spdk_tgt", setting.Value)
value, err := imc.ds.GetSettingValueExistedByDataEngine(settingName, im.Spec.DataEngine)
if err != nil {
return errors.Wrapf(err, "failed to set spdk_tgt log flags to setting %v value: %v", settingName, setting.Value)
return err
}
if err := client.LogSetFlags(longhorn.DataEngineTypeV2, "spdk_tgt", value); err != nil {
return errors.Wrapf(err, "failed to set data engine log flags to setting %v value: %v", settingName, value)
}
}
}

@@ -652,8 +665,16 @@ func (imc *InstanceManagerController) handlePod(im *longhorn.InstanceManager) er
return err
}

if err := imc.createInstanceManagerPod(im); err != nil {
return err
backoffID := im.Name
if imc.backoff.IsInBackOffSinceUpdate(backoffID, time.Now()) {
log.Infof("Skipping pod creation for instance manager %s, will retry after backoff of %s", im.Name, imc.backoff.Get(backoffID))
} else {
log.Infof("Creating pod for instance manager %s", im.Name)
imc.backoff.Next(backoffID, time.Now())

if err := imc.createInstanceManagerPod(im); err != nil {
return errors.Wrap(err, "failed to create pod for instance manager")
}
}

return nil

@@ -726,7 +747,7 @@ func (imc *InstanceManagerController) areDangerZoneSettingsSyncedToIMPod(im *lon
isSettingSynced, err = imc.isSettingTaintTolerationSynced(setting, pod)
case types.SettingNameSystemManagedComponentsNodeSelector:
isSettingSynced, err = imc.isSettingNodeSelectorSynced(setting, pod)
case types.SettingNameGuaranteedInstanceManagerCPU, types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU:
case types.SettingNameGuaranteedInstanceManagerCPU:
isSettingSynced, err = imc.isSettingGuaranteedInstanceManagerCPUSynced(setting, pod)
case types.SettingNamePriorityClass:
isSettingSynced, err = imc.isSettingPriorityClassSynced(setting, pod)

@@ -802,7 +823,7 @@ func (imc *InstanceManagerController) isSettingStorageNetworkSynced(setting *lon
func (imc *InstanceManagerController) isSettingDataEngineSynced(settingName types.SettingName, im *longhorn.InstanceManager) (bool, error) {
enabled, err := imc.ds.GetSettingAsBool(settingName)
if err != nil {
return false, errors.Wrapf(err, "failed to get %v setting for updating data engine", settingName)
return false, errors.Wrapf(err, "failed to get %v setting for checking data engine sync", settingName)
}
var dataEngine longhorn.DataEngineType
switch settingName {

@@ -814,6 +835,7 @@ func (imc *InstanceManagerController) isSettingDataEngineSynced(settingName type
if !enabled && im.Spec.DataEngine == dataEngine {
return false, nil
}

return true, nil
}

@@ -1471,24 +1493,27 @@ func (imc *InstanceManagerController) createInstanceManagerPodSpec(im *longhorn.
if types.IsDataEngineV2(dataEngine) {
// spdk_tgt doesn't support log level option, so we don't need to pass the log level to the instance manager.
// The log level will be applied in the reconciliation of instance manager controller.
logFlagsSetting, err := imc.ds.GetSettingWithAutoFillingRO(types.SettingNameV2DataEngineLogFlags)
logFlagsSetting, err := imc.ds.GetSettingValueExistedByDataEngine(types.SettingNameDataEngineLogFlags, dataEngine)
if err != nil {
return nil, err
}

logFlags := "all"
if logFlagsSetting.Value != "" {
logFlags = strings.ToLower(logFlagsSetting.Value)
if logFlagsSetting != "" {
logFlags = strings.ToLower(logFlagsSetting)
}

cpuMask := im.Spec.DataEngineSpec.V2.CPUMask
if cpuMask == "" {
value, err := imc.ds.GetSettingWithAutoFillingRO(types.SettingNameV2DataEngineCPUMask)
value, err := imc.ds.GetSettingValueExistedByDataEngine(types.SettingNameDataEngineCPUMask, dataEngine)
if err != nil {
return nil, err
}

cpuMask = value.Value
cpuMask = value
if cpuMask == "" {
return nil, fmt.Errorf("failed to get CPU mask setting for data engine %v", dataEngine)
}
}

im.Status.DataEngineStatus.V2.CPUMask = cpuMask

@@ -1506,7 +1531,7 @@ func (imc *InstanceManagerController) createInstanceManagerPodSpec(im *longhorn.

podSpec.Spec.Containers[0].Args = args

hugepage, err := imc.ds.GetSettingAsInt(types.SettingNameV2DataEngineHugepageLimit)
hugepage, err := imc.ds.GetSettingAsIntByDataEngine(types.SettingNameDataEngineHugepageLimit, im.Spec.DataEngine)
if err != nil {
return nil, err
}

@@ -1529,7 +1554,7 @@ func (imc *InstanceManagerController) createInstanceManagerPodSpec(im *longhorn.
}
} else {
podSpec.Spec.Containers[0].Args = []string{
"instance-manager", "--debug", "daemon", "--listen", fmt.Sprintf("0.0.0.0:%d", engineapi.InstanceManagerProcessManagerServiceDefaultPort),
"instance-manager", "--debug", "daemon", "--listen", fmt.Sprintf(":%d", engineapi.InstanceManagerProcessManagerServiceDefaultPort),
}
}

@@ -1660,18 +1685,29 @@ func (imc *InstanceManagerController) createInstanceManagerPodSpec(im *longhorn.
return podSpec, nil
}

// deleteOrphans examines existing instance orphan CRs, and initiates CR deletion if needed.
//
// Orphan CRs will be deleted under any of the following conditions:
// - The instance manager is terminating or has terminated, which results in the removal of its managed instances.
// - An instance is missing from the instance manager.
// - An instance has been rescheduled to the instance manager and is no longer considered orphaned.
// - Automatic deletion of orphan resources is enabled.
func (imc *InstanceManagerController) deleteOrphans(im *longhorn.InstanceManager, isInstanceManagerTerminating bool) error {
autoDeletionTypes, err := imc.ds.GetSettingOrphanResourceAutoDeletion()
var autoDeleteEngine = false
var autoDeleteReplica = false
if err != nil {
imc.logger.WithError(err).Warnf("Failed to fetch orphan auto deletion setting, disabled by default")
} else {
autoDeleteEngine = autoDeletionTypes[types.OrphanResourceTypeEngineInstance]
autoDeleteReplica = autoDeletionTypes[types.OrphanResourceTypeReplicaInstance]
return errors.Wrapf(err, "failed to get setting %v", types.SettingNameOrphanResourceAutoDeletion)
}
autoDeleteEnabled, ok := autoDeletionTypes[types.OrphanResourceTypeInstance]
if !ok {
autoDeleteEnabled = false
}

orphanList, err := imc.ds.ListOrphansForEngineAndReplicaInstancesRO(im.Spec.NodeID)
autoDeleteGracePeriod, err := imc.ds.GetSettingAsInt(types.SettingNameOrphanResourceAutoDeletionGracePeriod)
if err != nil {
return errors.Wrapf(err, "failed to get %v setting", types.SettingNameOrphanResourceAutoDeletionGracePeriod)
}

orphanList, err := imc.ds.ListInstanceOrphansByInstanceManagerRO(im.Name)
if err != nil {
return err
}

@@ -1681,97 +1717,110 @@ func (imc *InstanceManagerController) deleteOrphans(im *longhorn.InstanceManager
continue
}

var isDeletable = false
instanceManager := orphan.Spec.Parameters[longhorn.OrphanInstanceManager]
instanceName := orphan.Spec.Parameters[longhorn.OrphanInstanceName]
var instanceExist = false
var instanceCRScheduledBack = false
switch orphan.Spec.Type {
case longhorn.OrphanTypeEngineInstance:
isDeletable, err = imc.isOrphanDeletable(orphan, isInstanceManagerTerminating || autoDeleteEngine, im.Status.InstanceEngines, imc.isEngineScheduledOnCurrentNode)
_, instanceExist = im.Status.InstanceEngines[instanceName]
instanceCRScheduledBack, err = imc.isEngineOnInstanceManager(instanceManager, instanceName)
case longhorn.OrphanTypeReplicaInstance:
isDeletable, err = imc.isOrphanDeletable(orphan, isInstanceManagerTerminating || autoDeleteReplica, im.Status.InstanceReplicas, imc.isReplicaScheduledOnCurrentNode)
_, instanceExist = im.Status.InstanceReplicas[instanceName]
instanceCRScheduledBack, err = imc.isReplicaOnInstanceManager(instanceManager, instanceName)
}
if err != nil {
multiError.Append(util.NewMultiError(fmt.Sprintf("%v: %v", orphan.Name, err)))
} else if isDeletable {
if err := imc.ds.DeleteOrphan(orphan.Name); err != nil && !apierrors.IsNotFound(err) {
continue
}

if imc.canDeleteOrphan(orphan, isInstanceManagerTerminating, autoDeleteEnabled, instanceExist, instanceCRScheduledBack, autoDeleteGracePeriod) {
if err := imc.deleteOrphan(orphan); err != nil {
multiError.Append(util.NewMultiError(fmt.Sprintf("%v: %v", orphan.Name, err)))
}
}
}
if len(multiError) > 0 {
return fmt.Errorf("instance manager failed to delete orphan CR: %v", multiError.Join())
return fmt.Errorf("failed to delete orphans: %v", multiError.Join())
}
return nil
}

func (imc *InstanceManagerController) isOrphanDeletable(orphan *longhorn.Orphan, autoDelete bool, instanceMap instanceProcessMap, isInstanceScheduledOnNode func(node, instance string) (bool, error)) (isDeletable bool, err error) {
if autoDelete {
return true, nil
func (imc *InstanceManagerController) deleteOrphan(orphan *longhorn.Orphan) error {
imc.logger.Infof("Deleting Orphan %v", orphan.Name)
if err := imc.ds.DeleteOrphan(orphan.Name); err != nil {
if datastore.ErrorIsNotFound(err) {
return nil
}
return errors.Wrapf(err, "failed to delete orphan %q", orphan.Name)
}

instanceName := orphan.Spec.Parameters[longhorn.OrphanInstanceName]
instanceProc, instanceExist := instanceMap[instanceName]
instanceCRScheduledBack, err := isInstanceScheduledOnNode(orphan.Spec.NodeID, instanceName)
if err != nil {
return false, errors.Wrapf(err, "failed to check CR scheduled node for %v orphan %v", orphan.Spec.Type, instanceName)
}

var instanceState longhorn.InstanceState
if instanceExist {
instanceState = instanceProc.Status.State
} else {
instanceState = longhorn.InstanceStateTerminated
}

imc.logger.WithFields(logrus.Fields{
"instanceExist": instanceExist,
"instanceState": instanceState,
"instanceCRScheduledBack": instanceCRScheduledBack,
}).Debugf("Checked deletable for %s orphan instance %s", orphan.Spec.Type, orphan.Name)
if !instanceExist || instanceCRScheduledBack {
return true, nil
}
return false, nil
return nil
}

func (imc *InstanceManagerController) isEngineScheduledOnCurrentNode(node string, instance string) (bool, error) {
func (imc *InstanceManagerController) canDeleteOrphan(orphan *longhorn.Orphan, imTerminating, autoDeleteEnabled, instanceExist, instanceCRScheduledBack bool, autoDeleteGracePeriod int64) bool {
autoDeleteAllowed := false
if autoDeleteEnabled {
elapsedTime := time.Since(orphan.CreationTimestamp.Time).Seconds()
if elapsedTime > float64(autoDeleteGracePeriod) {
autoDeleteAllowed = true
}
}

canDelete := imTerminating || autoDeleteAllowed || !instanceExist || instanceCRScheduledBack
if !canDelete {
imc.logger.Debugf("Orphan %v is not ready to be deleted, imTerminating: %v, autoDeleteAllowed: %v, instanceExist: %v, instanceCRScheduledBack: %v", orphan.Name, imTerminating, autoDeleteAllowed, instanceExist, instanceCRScheduledBack)
}

return canDelete
}

func (imc *InstanceManagerController) isEngineOnInstanceManager(instanceManager string, instance string) (bool, error) {
existEngine, err := imc.ds.GetEngineRO(instance)
switch {
case err == nil:
// Engine CR still exists - check the ownership
return imc.isInstanceScheduledOnCurrentNode(node, &existEngine.Spec.InstanceSpec, &existEngine.Status.InstanceStatus), nil
case apierrors.IsNotFound(err):
// Engine CR not found - instance is orphaned
return false, nil
default:
// Unexpected error - unable to check if engine instance is orphaned or not
return false, err
if err != nil {
if apierrors.IsNotFound(err) {
// Engine CR not found - instance is orphaned
return false, nil
}
return false, errors.Wrapf(err, "failed to check if engine instance %q is scheduled on instance manager %q", instance, instanceManager)
}
return imc.isInstanceOnInstanceManager(instanceManager, &existEngine.ObjectMeta, &existEngine.Spec.InstanceSpec, &existEngine.Status.InstanceStatus), nil
}

func (imc *InstanceManagerController) isReplicaScheduledOnCurrentNode(node string, instance string) (bool, error) {
func (imc *InstanceManagerController) isReplicaOnInstanceManager(instanceManager string, instance string) (bool, error) {
existReplica, err := imc.ds.GetReplicaRO(instance)
switch {
case err == nil:
// Replica CR still exists - check the ownership
return imc.isInstanceScheduledOnCurrentNode(node, &existReplica.Spec.InstanceSpec, &existReplica.Status.InstanceStatus), nil
case apierrors.IsNotFound(err):
// Replica CR not found - instance is orphaned
return false, nil
default:
// Unexpected error - unable to check if replica instance is orphaned or not
return false, err
if err != nil {
if apierrors.IsNotFound(err) {
// Replica CR not found - instance is orphaned
return false, nil
}
return false, errors.Wrapf(err, "failed to check if replica instance %q is scheduled on instance manager %q", instance, instanceManager)
}
return imc.isInstanceOnInstanceManager(instanceManager, &existReplica.ObjectMeta, &existReplica.Spec.InstanceSpec, &existReplica.Status.InstanceStatus), nil
}

// isInstanceScheduledOnCurrentNode returns true only when it is very certain that an instance is scheduled on this node
func (imc *InstanceManagerController) isInstanceScheduledOnCurrentNode(node string, spec *longhorn.InstanceSpec, status *longhorn.InstanceStatus) bool {
if status.CurrentState != spec.DesireState {
// isInstanceOnInstanceManager returns true only when it is very certain that an instance is scheduled in a given instance manager
func (imc *InstanceManagerController) isInstanceOnInstanceManager(instanceManager string, meta *metav1.ObjectMeta, spec *longhorn.InstanceSpec, status *longhorn.InstanceStatus) bool {
if !meta.DeletionTimestamp.IsZero() {
imc.logger.Debugf("Skipping check if Instance %q is scheduled on instance manager %q; instance is marked for deletion", meta.Name, instanceManager)
return false
}

if status.CurrentState != spec.DesireState || status.OwnerID != spec.NodeID {
imc.logger.WithFields(logrus.Fields{
"currentState": status.CurrentState,
"desiredState": spec.DesireState,
"currentNode": status.OwnerID,
"desiredNode": spec.NodeID,
}).Debugf("Skipping check if instance %q is scheduled on instance manager %q; instance is in state transition", meta.Name, instanceManager)
return false
}

switch status.CurrentState {
case longhorn.InstanceStateRunning:
return status.OwnerID == spec.NodeID && spec.NodeID == node
return status.InstanceManagerName == instanceManager
case longhorn.InstanceStateStopped:
return spec.NodeID == node
// Instance manager is not assigned in the stopped state. Instance is not expected to live in any instance manager.
return false
default:
return false
}

@@ -2211,7 +2260,7 @@ func (m *InstanceManagerMonitor) StopMonitorWithLock() {

func (m *InstanceManagerMonitor) syncOrphans(im *longhorn.InstanceManager, instanceMap instanceProcessMap) {
engineProcesses, replicaProcesses := m.categorizeProcesses(instanceMap)
existOrphansList, err := m.ds.ListOrphansForEngineAndReplicaInstancesRO(im.Spec.NodeID)
existOrphansList, err := m.ds.ListInstanceOrphansByInstanceManagerRO(im.Name)
if err != nil {
m.logger.WithError(err).Errorf("Failed to list orphans on node %s", im.Spec.NodeID)
return

@@ -2226,62 +2275,93 @@ func (m *InstanceManagerMonitor) syncOrphans(im *longhorn.InstanceManager, insta
m.createOrphanForInstances(existOrphans, im, replicaProcesses, longhorn.OrphanTypeReplicaInstance, m.isReplicaOrphaned)
}

func (m *InstanceManagerMonitor) isEngineOrphaned(instanceName, node string) (bool, error) {
func (m *InstanceManagerMonitor) isEngineOrphaned(instanceName, instanceManager string) (bool, error) {
existEngine, err := m.ds.GetEngineRO(instanceName)
switch {
case err == nil:
// Engine CR still exists - check the ownership
return m.isInstanceOrphanedOnNode(&existEngine.Spec.InstanceSpec, &existEngine.Status.InstanceStatus, node), nil
case apierrors.IsNotFound(err):
// Engine CR not found - instance is orphaned
return true, nil
default:
if err != nil {
if apierrors.IsNotFound(err) {
// Engine CR not found - instance is orphaned
return true, nil
}
// Unexpected error - unable to check if engine instance is orphaned or not
return false, err
}
// Engine CR still exists - check the ownership
return m.isInstanceOrphanedInInstanceManager(&existEngine.ObjectMeta, &existEngine.Spec.InstanceSpec, &existEngine.Status.InstanceStatus, instanceManager), nil
}

func (m *InstanceManagerMonitor) isReplicaOrphaned(instanceName, node string) (bool, error) {
func (m *InstanceManagerMonitor) isReplicaOrphaned(instanceName, instanceManager string) (bool, error) {
existReplica, err := m.ds.GetReplicaRO(instanceName)
switch {
case err == nil:
// Replica CR still exists - check the ownership
return m.isInstanceOrphanedOnNode(&existReplica.Spec.InstanceSpec, &existReplica.Status.InstanceStatus, node), nil
case apierrors.IsNotFound(err):
// Replica CR not found - instance is orphaned
return true, nil
default:
if err != nil {
if apierrors.IsNotFound(err) {
// Replica CR not found - instance is orphaned
return true, nil
}
// Unexpected error - unable to check if replica instance is orphaned or not
return false, err
}

// Replica CR still exists - check the ownership
return m.isInstanceOrphanedInInstanceManager(&existReplica.ObjectMeta, &existReplica.Spec.InstanceSpec, &existReplica.Status.InstanceStatus, instanceManager), nil
}

// isInstanceOrphanedOnNode returns true only when it is very certain that an instance is scheduled on another node
func (m *InstanceManagerMonitor) isInstanceOrphanedOnNode(spec *longhorn.InstanceSpec, status *longhorn.InstanceStatus, node string) bool {
if status.CurrentState != spec.DesireState {
// isInstanceOrphanedInInstanceManager returns true only when it is very certain that an instance is scheduled on another instance manager
func (m *InstanceManagerMonitor) isInstanceOrphanedInInstanceManager(meta *metav1.ObjectMeta, spec *longhorn.InstanceSpec, status *longhorn.InstanceStatus, instanceManager string) bool {
if !meta.DeletionTimestamp.IsZero() {
m.logger.Debugf("Skipping orphan check; Instance %s is marked for deletion", meta.Name)
return false
}

if status.CurrentState != spec.DesireState || status.OwnerID != spec.NodeID {
m.logger.WithFields(logrus.Fields{
"currentState": status.CurrentState,
"desiredState": spec.DesireState,
"currentNode": status.OwnerID,
"desiredNode": spec.NodeID,
}).Debugf("Skipping orphan check; Instance %s is in state transition", meta.Name)
return false
}

switch status.CurrentState {
case longhorn.InstanceStateRunning:
return status.OwnerID == spec.NodeID && spec.NodeID != node
return status.InstanceManagerName != instanceManager
case longhorn.InstanceStateStopped:
return spec.NodeID != node
// Instance manager is not assigned in the stopped state. Instance is not expected to live in any instance manager.
return true
default:
return false
}
}

func (m *InstanceManagerMonitor) createOrphanForInstances(existOrphans map[string]bool, im *longhorn.InstanceManager, instanceMap instanceProcessMap, orphanType longhorn.OrphanType, orphanFilter func(instanceName, node string) (bool, error)) {
func (m *InstanceManagerMonitor) createOrphanForInstances(existOrphans map[string]bool, im *longhorn.InstanceManager, instanceMap instanceProcessMap, orphanType longhorn.OrphanType, orphanFilter func(instanceName, instanceManager string) (bool, error)) {
for instanceName, instance := range instanceMap {
orphanName := types.GetOrphanChecksumNameForOrphanedInstance(instanceName, m.controllerID, string(instance.Spec.DataEngine))
if instance.Status.State == longhorn.InstanceStateStarting ||
instance.Status.State == longhorn.InstanceStateStopping ||
instance.Status.State == longhorn.InstanceStateStopped {
// Starting: Status transitioning. Will handle this after running.
// Stopping, Stopped: Terminating. No orphan CR needed, and the orphaned instances will be cleanup by instance manager after stopped.
continue
}
if instance.Spec.DataEngine != longhorn.DataEngineTypeV1 {
m.logger.Debugf("Skipping orphan creation, instance %s is not data engine v1", instanceName)
continue
}
if instance.Status.UUID == "" {
// skip the instance without UUID to prevent accidental deletion on processes
continue
}

orphanName := types.GetOrphanChecksumNameForOrphanedInstance(instanceName, instance.Status.UUID, im.Name, string(instance.Spec.DataEngine))
if _, isExist := existOrphans[orphanName]; isExist {
continue
}
if isOrphaned, err := orphanFilter(instanceName, im.Spec.NodeID); err != nil {
if isOrphaned, err := orphanFilter(instanceName, im.Name); err != nil {
m.logger.WithError(err).Errorf("Failed to check %v orphan for instance %v", orphanType, instanceName)
} else if isOrphaned {
m.logger.Infof("Instance %v is orphaned", instanceName)
newOrphan, err := m.createOrphan(orphanName, im, instanceName, orphanType, instance.Spec.DataEngine)
m.logger.WithFields(logrus.Fields{
"instanceState": instance.Status.State,
"instanceUUID": instance.Status.UUID,
}).Infof("Creating %s Orphan %v for orphaned instance %v", orphanType, orphanName, instanceName)
newOrphan, err := m.createOrphan(orphanName, im, instanceName, instance.Status.UUID, orphanType, instance.Spec.DataEngine)
if err != nil {
m.logger.WithError(err).Errorf("Failed to create %v orphan for instance %v", orphanType, instanceName)
} else if newOrphan != nil {

@@ -2291,7 +2371,7 @@ func (m *InstanceManagerMonitor) createOrphanForInstances(existOrphans map[strin
}
}

func (m *InstanceManagerMonitor) createOrphan(name string, im *longhorn.InstanceManager, instanceName string, orphanType longhorn.OrphanType, dataEngineType longhorn.DataEngineType) (*longhorn.Orphan, error) {
func (m *InstanceManagerMonitor) createOrphan(name string, im *longhorn.InstanceManager, instanceName, instanceUUID string, orphanType longhorn.OrphanType, dataEngineType longhorn.DataEngineType) (*longhorn.Orphan, error) {
if _, err := m.ds.GetOrphanRO(name); err == nil || !apierrors.IsNotFound(err) {
return nil, err
}

@@ -2299,8 +2379,7 @@ func (m *InstanceManagerMonitor) createOrphan(name string, im *longhorn.Instance
// labels will be attached by mutator webhook
orphan := &longhorn.Orphan{
ObjectMeta: metav1.ObjectMeta{
Name: name,
OwnerReferences: datastore.GetOwnerReferencesForInstanceManager(im),
Name: name,
},
Spec: longhorn.OrphanSpec{
NodeID: m.controllerID,

@@ -2308,6 +2387,7 @@ func (m *InstanceManagerMonitor) createOrphan(name string, im *longhorn.Instance
DataEngine: dataEngineType,
Parameters: map[string]string{
longhorn.OrphanInstanceName: instanceName,
longhorn.OrphanInstanceUUID: instanceUUID,
longhorn.OrphanInstanceManager: im.Name,
},
},
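The instance manager controller hunks above add a *flowcontrol.Backoff so repeated pod-creation failures back off instead of retrying on every reconcile. A minimal sketch of the same client-go primitive follows; the diff's newBackoff wrapper and its actual bounds are not shown in this section, so the one-second/one-minute values below are assumptions for illustration.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Exponential backoff: 1s initial delay, capped at 1m (assumed values).
	backoff := flowcontrol.NewBackOff(1*time.Second, 1*time.Minute)
	backoffID := "instance-manager-abc"

	for attempt := 0; attempt < 3; attempt++ {
		if backoff.IsInBackOffSinceUpdate(backoffID, time.Now()) {
			fmt.Printf("skipping pod creation, retry after %s\n", backoff.Get(backoffID))
			continue
		}
		// Record the attempt so the next failure waits longer.
		backoff.Next(backoffID, time.Now())
		fmt.Printf("creating instance manager pod (attempt %d)\n", attempt)
	}
}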
@@ -26,6 +26,7 @@ import (
"github.com/longhorn/longhorn-manager/constant"
"github.com/longhorn/longhorn-manager/datastore"
"github.com/longhorn/longhorn-manager/types"
"github.com/longhorn/longhorn-manager/util"

longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"
)

@@ -267,6 +268,11 @@ func (kc *KubernetesPodController) handleWorkloadPodDeletionIfCSIPluginPodIsDown
continue
}

if util.IsMigratableVolume(volume) {
_log.Debugf("%s. Volume is migratable RWX volume", logSkip)
continue
}

filteredVolumes = append(filteredVolumes, volume)
}
@@ -32,7 +32,9 @@ import (
)

const (
EnvironmentCheckMonitorSyncPeriod = 1800 * time.Second
environmentCheckMonitorSyncPeriod = 1800 * time.Second

defaultHugePageLimitInMiB = 2048

kernelConfigDir = "/host/boot/"
systemConfigDir = "/host/etc/"

@@ -64,7 +66,7 @@ func NewEnvironmentCheckMonitor(logger logrus.FieldLogger, ds *datastore.DataSto
ctx, quit := context.WithCancel(context.Background())

m := &EnvironmentCheckMonitor{
baseMonitor: newBaseMonitor(ctx, quit, logger, ds, EnvironmentCheckMonitorSyncPeriod),
baseMonitor: newBaseMonitor(ctx, quit, logger, ds, environmentCheckMonitorSyncPeriod),

nodeName: nodeName,

@@ -360,10 +362,11 @@ func (m *EnvironmentCheckMonitor) checkPackageInstalled(packageProbeExecutables
}

func (m *EnvironmentCheckMonitor) checkHugePages(kubeNode *corev1.Node, collectedData *CollectedEnvironmentCheckInfo) {
hugePageLimitInMiB, err := m.ds.GetSettingAsInt(types.SettingNameV2DataEngineHugepageLimit)
hugePageLimitInMiB, err := m.ds.GetSettingAsIntByDataEngine(types.SettingNameDataEngineHugepageLimit, longhorn.DataEngineTypeV2)
if err != nil {
m.logger.Debugf("Failed to fetch v2-data-engine-hugepage-limit setting, using default value: %d", 2048)
hugePageLimitInMiB = 2048
m.logger.Warnf("Failed to get setting %v for data engine %v, using default value %d",
types.SettingNameDataEngineHugepageLimit, longhorn.DataEngineTypeV2, defaultHugePageLimitInMiB)
hugePageLimitInMiB = defaultHugePageLimitInMiB
}

capacity := kubeNode.Status.Capacity
@@ -30,7 +30,7 @@ func NewFakeEnvironmentCheckMonitor(logger logrus.FieldLogger, ds *datastore.Dat
ctx, quit := context.WithCancel(context.Background())

m := &FakeEnvironmentCheckMonitor{
baseMonitor: newBaseMonitor(ctx, quit, logger, ds, EnvironmentCheckMonitorSyncPeriod),
baseMonitor: newBaseMonitor(ctx, quit, logger, ds, environmentCheckMonitorSyncPeriod),

nodeName: nodeName,
@ -8,7 +8,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rancher/lasso/pkg/log"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
@ -239,9 +238,9 @@ func (nc *NodeController) isResponsibleForSnapshot(obj interface{}) bool {
|
|||
}
|
||||
|
||||
func (nc *NodeController) snapshotHashRequired(volume *longhorn.Volume) bool {
|
||||
dataIntegrityImmediateChecking, err := nc.ds.GetSettingAsBool(types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation)
|
||||
dataIntegrityImmediateChecking, err := nc.ds.GetSettingAsBoolByDataEngine(types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation, volume.Spec.DataEngine)
|
||||
if err != nil {
|
||||
nc.logger.WithError(err).Warnf("Failed to get %v setting", types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation)
|
||||
nc.logger.WithError(err).Warnf("Failed to get %v setting for data engine %v", types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation, volume.Spec.DataEngine)
|
||||
return false
|
||||
}
|
||||
if !dataIntegrityImmediateChecking {
|
||||
|
@ -768,6 +767,7 @@ func (nc *NodeController) findNotReadyAndReadyDiskMaps(node *longhorn.Node, coll
|
|||
node.Status.DiskStatus[diskName].DiskDriver = diskInfo.DiskDriver
|
||||
node.Status.DiskStatus[diskName].DiskName = diskInfo.DiskName
|
||||
node.Status.DiskStatus[diskName].DiskPath = diskInfo.Path
|
||||
|
||||
readyDiskInfoMap[diskID][diskName] = diskInfo
|
||||
}
|
||||
}
|
||||
|
@ -835,10 +835,6 @@ func (nc *NodeController) updateDiskStatusSchedulableCondition(node *longhorn.No
|
|||
diskStatusMap := node.Status.DiskStatus
|
||||
|
||||
// update Schedulable condition
|
||||
minimalAvailablePercentage, err := nc.ds.GetSettingAsInt(types.SettingNameStorageMinimalAvailablePercentage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
backingImages, err := nc.ds.ListBackingImagesRO()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -906,17 +902,19 @@ func (nc *NodeController) updateDiskStatusSchedulableCondition(node *longhorn.No
|
|||
diskStatus.StorageScheduled = storageScheduled
|
||||
diskStatus.ScheduledReplica = scheduledReplica
|
||||
diskStatus.ScheduledBackingImage = scheduledBackingImage
|
||||
|
||||
// check disk pressure
|
||||
info, err := nc.scheduler.GetDiskSchedulingInfo(disk, diskStatus)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !nc.scheduler.IsSchedulableToDisk(0, 0, info) {
|
||||
|
||||
isSchedulableToDisk, message := nc.scheduler.IsSchedulableToDisk(0, 0, info)
|
||||
if !isSchedulableToDisk {
|
||||
diskStatus.Conditions = types.SetConditionAndRecord(diskStatus.Conditions,
|
||||
longhorn.DiskConditionTypeSchedulable, longhorn.ConditionStatusFalse,
|
||||
string(longhorn.DiskConditionReasonDiskPressure),
|
||||
fmt.Sprintf("Disk %v (%v) on the node %v has %v available, but requires reserved %v, minimal %v%s to schedule more replicas",
|
||||
diskName, disk.Path, node.Name, diskStatus.StorageAvailable, disk.StorageReserved, minimalAvailablePercentage, "%"),
|
||||
fmt.Sprintf("Disk %v (%v) on the node %v is not schedulable for more replica; %s", diskName, disk.Path, node.Name, message),
|
||||
nc.eventRecorder, node, corev1.EventTypeWarning)
|
||||
} else {
|
||||
diskStatus.Conditions = types.SetConditionAndRecord(diskStatus.Conditions,
|
||||
|
@ -1153,7 +1151,7 @@ func (nc *NodeController) cleanUpBackingImagesInDisks(node *longhorn.Node) error
|
|||
|
||||
settingValue, err := nc.ds.GetSettingAsInt(types.SettingNameBackingImageCleanupWaitInterval)
|
||||
if err != nil {
|
||||
log.WithError(err).Warnf("Failed to get setting %v, won't do cleanup for backing images", types.SettingNameBackingImageCleanupWaitInterval)
|
||||
log.WithError(err).Warnf("Failed to get %v setting, won't do cleanup for backing images", types.SettingNameBackingImageCleanupWaitInterval)
|
||||
return nil
|
||||
}
|
||||
waitInterval := time.Duration(settingValue) * time.Minute
|
||||
|
@ -1301,6 +1299,11 @@ func (nc *NodeController) enqueueNodeForMonitor(key string) {
|
|||
}
|
||||
|
||||
func (nc *NodeController) syncOrphans(node *longhorn.Node, collectedDataInfo map[string]*monitor.CollectedDiskInfo) error {
|
||||
autoDeleteGracePeriod, err := nc.ds.GetSettingAsInt(types.SettingNameOrphanResourceAutoDeletionGracePeriod)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get %v setting", types.SettingNameOrphanResourceAutoDeletionGracePeriod)
|
||||
}
|
||||
|
||||
for diskName, diskInfo := range collectedDataInfo {
|
||||
newOrphanedReplicaDataStores, missingOrphanedReplicaDataStores :=
|
||||
nc.getNewAndMissingOrphanedReplicaDataStores(diskName, diskInfo.DiskUUID, diskInfo.Path, diskInfo.OrphanedReplicaDataStores)
|
||||
|
@ -1308,7 +1311,7 @@ func (nc *NodeController) syncOrphans(node *longhorn.Node, collectedDataInfo map
|
|||
if err := nc.createOrphansForReplicaDataStore(node, diskName, diskInfo, newOrphanedReplicaDataStores); err != nil {
|
||||
return errors.Wrapf(err, "failed to create orphans for disk %v", diskName)
|
||||
}
|
||||
if err := nc.deleteOrphansForReplicaDataStore(node, diskName, diskInfo, missingOrphanedReplicaDataStores); err != nil {
|
||||
if err := nc.deleteOrphansForReplicaDataStore(node, diskName, diskInfo, missingOrphanedReplicaDataStores, autoDeleteGracePeriod); err != nil {
|
||||
return errors.Wrapf(err, "failed to delete orphans for disk %v", diskName)
|
||||
}
|
||||
}
|
||||
|
@ -1383,12 +1386,15 @@ func (nc *NodeController) deleteOrphansForEngineAndReplicaInstances(node *longho
|
|||
return nil
|
||||
}
|
||||
|
||||
func (nc *NodeController) deleteOrphansForReplicaDataStore(node *longhorn.Node, diskName string, diskInfo *monitor.CollectedDiskInfo, missingOrphanedReplicaDataStores map[string]string) error {
|
||||
autoDeletionResourceTypes, err := nc.ds.GetSettingOrphanResourceAutoDeletion()
|
||||
func (nc *NodeController) deleteOrphansForReplicaDataStore(node *longhorn.Node, diskName string, diskInfo *monitor.CollectedDiskInfo, missingOrphanedReplicaDataStores map[string]string, autoDeleteGracePeriod int64) error {
|
||||
autoDeletionTypes, err := nc.ds.GetSettingOrphanResourceAutoDeletion()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get %v setting", types.SettingNameOrphanResourceAutoDeletion)
|
||||
}
|
||||
autoDeletionEnabled := autoDeletionResourceTypes[types.OrphanResourceTypeReplicaData]
|
||||
autoDeleteEnabled, ok := autoDeletionTypes[types.OrphanResourceTypeReplicaData]
|
||||
if !ok {
|
||||
autoDeleteEnabled = false
|
||||
}
|
||||
|
||||
for dataStore := range missingOrphanedReplicaDataStores {
|
||||
orphanName := types.GetOrphanChecksumNameForOrphanedDataStore(node.Name, diskName, diskInfo.Path, diskInfo.DiskUUID, dataStore)
|
||||
|
@ -1403,16 +1409,7 @@ func (nc *NodeController) deleteOrphansForReplicaDataStore(node *longhorn.Node,
|
|||
}
|
||||
|
||||
for _, orphan := range orphans {
|
||||
if orphan.Status.OwnerID != nc.controllerID {
|
||||
continue
|
||||
}
|
||||
|
||||
dataCleanableCondition := types.GetCondition(orphan.Status.Conditions, longhorn.OrphanConditionTypeDataCleanable)
|
||||
if dataCleanableCondition.Status == longhorn.ConditionStatusUnknown {
|
||||
continue
|
||||
}
|
||||
|
||||
if autoDeletionEnabled || dataCleanableCondition.Status == longhorn.ConditionStatusFalse {
|
||||
if nc.canDeleteOrphan(orphan, autoDeleteEnabled, autoDeleteGracePeriod) {
|
||||
if err := nc.ds.DeleteOrphan(orphan.Name); err != nil && !datastore.ErrorIsNotFound(err) {
|
||||
return errors.Wrapf(err, "failed to delete orphan %v", orphan.Name)
|
||||
}
|
||||
|
@ -1421,6 +1418,34 @@ func (nc *NodeController) deleteOrphansForReplicaDataStore(node *longhorn.Node,
|
|||
return nil
|
||||
}
|
||||
|
||||
func (nc *NodeController) canDeleteOrphan(orphan *longhorn.Orphan, autoDeleteEnabled bool, autoDeleteGracePeriod int64) bool {
|
||||
if orphan.Status.OwnerID != nc.controllerID {
|
||||
return false
|
||||
}
|
||||
|
||||
dataCleanableCondition := types.GetCondition(orphan.Status.Conditions, longhorn.OrphanConditionTypeDataCleanable)
|
||||
if dataCleanableCondition.Status == longhorn.ConditionStatusUnknown {
|
||||
return false
|
||||
}
|
||||
|
||||
autoDeleteAllowed := false
|
||||
if autoDeleteEnabled {
|
||||
elapsedTime := time.Since(orphan.CreationTimestamp.Time).Seconds()
|
||||
if elapsedTime > float64(autoDeleteGracePeriod) {
|
||||
autoDeleteAllowed = true
|
||||
}
|
||||
}
|
||||
|
||||
// When dataCleanableCondition is false, it means the associated node is not ready, missing or evicted (check updateDataCleanableCondition()).
|
||||
// In this case, we can delete the orphan directly because the data is not reachable and no need to keep the orphan resource.
|
||||
canDelete := autoDeleteAllowed || dataCleanableCondition.Status == longhorn.ConditionStatusFalse
|
||||
if !canDelete {
|
||||
nc.logger.Debugf("Orphan %v is not ready to be deleted, autoDeleteAllowed: %v, dataCleanableCondition: %v", orphan.Name, autoDeleteAllowed, dataCleanableCondition.Status)
|
||||
}
|
||||
|
||||
return canDelete
|
||||
}
|
||||
|
||||
func (nc *NodeController) createOrphansForReplicaDataStore(node *longhorn.Node, diskName string, diskInfo *monitor.CollectedDiskInfo, newOrphanedReplicaDataStores map[string]string) error {
|
||||
for dataStore := range newOrphanedReplicaDataStores {
|
||||
if err := nc.createOrphan(node, diskName, dataStore, diskInfo); err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
|
@ -1553,7 +1578,7 @@ func (nc *NodeController) alignDiskSpecAndStatus(node *longhorn.Node) {
|
|||
}
|
||||
|
||||
func (nc *NodeController) deleteDisk(diskType longhorn.DiskType, diskName, diskUUID, diskPath, diskDriver string) error {
|
||||
log.Infof("Deleting disk %v with diskUUID %v", diskName, diskUUID)
|
||||
nc.logger.Infof("Deleting disk %v with diskUUID %v", diskName, diskUUID)
|
||||
|
||||
dataEngine := util.GetDataEngineForDiskType(diskType)
|
||||
|
||||
|
@ -1632,7 +1657,7 @@ func (nc *NodeController) syncBackingImageEvictionRequested(node *longhorn.Node)
|
|||
}
|
||||
log := getLoggerForNode(nc.logger, node)
|
||||
|
||||
diskBackingImageMap, err := nc.ds.GetDiskBackingImageMap()
|
||||
diskBackingImageMap, err := nc.ds.GetCurrentDiskBackingImageMap()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1644,26 +1669,26 @@ func (nc *NodeController) syncBackingImageEvictionRequested(node *longhorn.Node)
|
|||
}
|
||||
backingImagesToSync := []backingImageToSync{}
|
||||
|
||||
var diskFileSpecNotSync = false
|
||||
for diskName, diskSpec := range node.Spec.Disks {
|
||||
diskStatus := node.Status.DiskStatus[diskName]
|
||||
diskUUID := diskStatus.DiskUUID
|
||||
|
||||
if diskSpec.EvictionRequested || node.Spec.EvictionRequested {
|
||||
for _, backingImage := range diskBackingImageMap[diskUUID] {
|
||||
// trigger eviction request
|
||||
backingImage.Spec.DiskFileSpecMap[diskUUID].EvictionRequested = true
|
||||
backingImagesToSync = append(backingImagesToSync, backingImageToSync{backingImage, diskUUID, true})
|
||||
}
|
||||
} else {
|
||||
for _, backingImage := range diskBackingImageMap[diskUUID] {
|
||||
if diskFileSpec, ok := backingImage.Spec.DiskFileSpecMap[diskUUID]; ok && diskFileSpec.EvictionRequested {
|
||||
// if it is previously set to true, cancel the eviction request
|
||||
backingImage.Spec.DiskFileSpecMap[diskUUID].EvictionRequested = false
|
||||
backingImagesToSync = append(backingImagesToSync, backingImageToSync{backingImage, diskUUID, false})
|
||||
}
|
||||
requireDiskFileEviction := diskSpec.EvictionRequested || node.Spec.EvictionRequested
|
||||
for _, backingImage := range diskBackingImageMap[diskUUID] {
|
||||
// trigger or cancel the eviction request on disks
|
||||
if diskFileSpec, ok := backingImage.Spec.DiskFileSpecMap[diskUUID]; ok && diskFileSpec.EvictionRequested != requireDiskFileEviction {
|
||||
diskFileSpec.EvictionRequested = requireDiskFileEviction
|
||||
backingImagesToSync = append(backingImagesToSync, backingImageToSync{backingImage, diskUUID, requireDiskFileEviction})
|
||||
} else if !ok {
|
||||
log.Infof("Evicting missing disk %s from backing image %s. Will enqueue then resync the node %s", diskUUID, backingImage.Name, node.Name)
|
||||
diskFileSpecNotSync = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if diskFileSpecNotSync {
|
||||
nc.enqueueNodeRateLimited(node)
|
||||
}
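enqueueNodeRateLimited is not shown in this hunk. The sketch below is the standard client-go pattern such a helper is assumed to follow; the function, package name, and queue parameter are illustrative and not part of this diff.

package controllersketch

import (
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

// enqueueRateLimited derives the cache key for obj and hands it to a rate-limited
// workqueue, so repeated requeues of the same node back off instead of hot-looping.
func enqueueRateLimited(queue workqueue.RateLimitingInterface, obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("failed to get key for object %#v: %v", obj, err))
		return
	}
	queue.AddRateLimited(key)
}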
|
||||
|
||||
for _, backingImageToSync := range backingImagesToSync {
|
||||
backingImageLog := log.WithField("backingimage", backingImageToSync.Name).WithField("disk", backingImageToSync.diskUUID)
|
||||
|
@ -1821,8 +1846,7 @@ func (nc *NodeController) setReadyAndSchedulableConditions(node *longhorn.Node,
|
|||
nc.eventRecorder, node, corev1.EventTypeNormal)
|
||||
}
|
||||
|
||||
disableSchedulingOnCordonedNode, err :=
|
||||
nc.ds.GetSettingAsBool(types.SettingNameDisableSchedulingOnCordonedNode)
|
||||
disableSchedulingOnCordonedNode, err := nc.ds.GetSettingAsBool(types.SettingNameDisableSchedulingOnCordonedNode)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get %v setting", types.SettingNameDisableSchedulingOnCordonedNode)
|
||||
}
|
||||
|
@ -1931,7 +1955,7 @@ func (nc *NodeController) SetSchedulableCondition(node *longhorn.Node, kubeNode
|
|||
func (nc *NodeController) clearDelinquentLeasesIfNodeNotReady(node *longhorn.Node) error {
|
||||
enabled, err := nc.ds.GetSettingAsBool(types.SettingNameRWXVolumeFastFailover)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get setting %v", types.SettingNameRWXVolumeFastFailover)
|
||||
return errors.Wrapf(err, "failed to get %v setting", types.SettingNameRWXVolumeFastFailover)
|
||||
}
|
||||
if !enabled {
|
||||
return nil
|
||||
|
|
|
@ -1364,7 +1364,7 @@ func (s *NodeControllerSuite) TestEventOnNotReady(c *C) {
|
|||
"": {
|
||||
Type: "Warning",
|
||||
Reason: "Schedulable",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 has 0 available, but requires reserved 0, minimal 25% to schedule more replicas",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 is not schedulable for more replica",
|
||||
},
|
||||
"node1-not-ready": {
|
||||
Type: "Warning",
|
||||
|
@ -1453,7 +1453,7 @@ func (s *NodeControllerSuite) TestEventOnDiskPressure(c *C) {
|
|||
"": {
|
||||
Type: "Warning",
|
||||
Reason: "Schedulable",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 has 0 available, but requires reserved 0, minimal 25% to schedule more replicas",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 is not schedulable for more replica",
|
||||
},
|
||||
"node1-disk-pressure": {
|
||||
Type: "Warning",
|
||||
|
@ -1542,7 +1542,7 @@ func (s *NodeControllerSuite) TestEventOnMemoryPressure(c *C) {
|
|||
"": {
|
||||
Type: "Warning",
|
||||
Reason: "Schedulable",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 has 0 available, but requires reserved 0, minimal 25% to schedule more replicas",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 is not schedulable for more replica",
|
||||
},
|
||||
"node1-memory-pressure": {
|
||||
Type: "Warning",
|
||||
|
@ -1631,7 +1631,7 @@ func (s *NodeControllerSuite) TestEventOnPidPressure(c *C) {
|
|||
"": {
|
||||
Type: "Warning",
|
||||
Reason: "Schedulable",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 has 0 available, but requires reserved 0, minimal 25% to schedule more replicas",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 is not schedulable for more replica",
|
||||
},
|
||||
"node1-pid-pressure": {
|
||||
Type: "Warning",
|
||||
|
@ -1720,7 +1720,7 @@ func (s *NodeControllerSuite) TestEventOnNetworkPressure(c *C) {
|
|||
"": {
|
||||
Type: "Warning",
|
||||
Reason: "Schedulable",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 has 0 available, but requires reserved 0, minimal 25% to schedule more replicas",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 is not schedulable for more replica",
|
||||
},
|
||||
"node1-network-pressure": {
|
||||
Type: "Warning",
|
||||
|
@ -1820,7 +1820,7 @@ func (s *NodeControllerSuite) TestNoEventOnUnknownTrueNodeCondition(c *C) {
|
|||
"": {
|
||||
Type: "Warning",
|
||||
Reason: "Schedulable",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 has 0 available, but requires reserved 0, minimal 25% to schedule more replicas",
|
||||
Message: "Disk fsid (/var/lib/longhorn) on the node test-node-name-1 is not schedulable for more replica",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
|
||||
lhns "github.com/longhorn/go-common-libs/ns"
|
||||
|
||||
"github.com/longhorn/longhorn-manager/constant"
|
||||
"github.com/longhorn/longhorn-manager/datastore"
|
||||
"github.com/longhorn/longhorn-manager/engineapi"
|
||||
"github.com/longhorn/longhorn-manager/types"
|
||||
|
@ -157,7 +158,7 @@ func (oc *OrphanController) enqueueForInstanceManager(obj interface{}) {
|
|||
}
|
||||
}
|
||||
|
||||
orphans, err := oc.ds.ListOrphansByNodeRO(im.Spec.NodeID)
|
||||
orphans, err := oc.ds.ListInstanceOrphansByInstanceManagerRO(im.Name)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failed to list orphans on instance manager %v since %v", im.Name, err))
|
||||
return
|
||||
|
@ -261,15 +262,30 @@ func (oc *OrphanController) reconcile(orphanName string) (err error) {
|
|||
log.Infof("Orphan got new owner %v", oc.controllerID)
|
||||
}
|
||||
|
||||
existingOrphan := orphan.DeepCopy()
|
||||
|
||||
if !orphan.DeletionTimestamp.IsZero() {
|
||||
defer func() {
|
||||
if reflect.DeepEqual(existingOrphan.Status, orphan.Status) {
|
||||
return
|
||||
}
|
||||
|
||||
if _, updateStatusErr := oc.ds.UpdateOrphanStatus(orphan); updateStatusErr != nil {
|
||||
log.WithError(updateStatusErr).Errorf("Failed to update condition while cleaning up %v orphan %v", orphan.Spec.Type, orphan.Name)
|
||||
if err == nil {
|
||||
err = updateStatusErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
isCleanupComplete, err := oc.cleanupOrphanedResource(orphan)
|
||||
if isCleanupComplete {
|
||||
oc.eventRecorder.Eventf(orphan, corev1.EventTypeNormal, constant.EventReasonOrphanCleanupCompleted, "Orphan %v cleanup completed", orphan.Name)
|
||||
return oc.ds.RemoveFinalizerForOrphan(orphan)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
existingOrphan := orphan.DeepCopy()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
return
|
||||
|
@ -311,8 +327,9 @@ func (oc *OrphanController) cleanupOrphanedResource(orphan *longhorn.Orphan) (is
|
|||
}
|
||||
|
||||
err = errors.Wrapf(err, "failed to delete %v orphan %v", orphan.Spec.Type, orphan.Name)
|
||||
orphan.Status.Conditions = types.SetCondition(orphan.Status.Conditions,
|
||||
longhorn.OrphanConditionTypeError, longhorn.ConditionStatusTrue, "", err.Error())
|
||||
orphan.Status.Conditions = types.SetConditionAndRecord(orphan.Status.Conditions,
|
||||
longhorn.OrphanConditionTypeError, longhorn.ConditionStatusTrue, "", err.Error(),
|
||||
oc.eventRecorder, orphan, corev1.EventTypeWarning)
|
||||
}()
|
||||
|
||||
// Make sure the orphan's nodeID and the controller ID are the same.
|
||||
|
@ -356,21 +373,18 @@ func (oc *OrphanController) cleanupOrphanedEngineInstance(orphan *longhorn.Orpha
|
|||
}
|
||||
}()
|
||||
|
||||
instance, imName, err := oc.extractOrphanedInstanceInfo(orphan)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
instanceParameters := getOrphanedInstanceParameters(orphan)
|
||||
|
||||
var spec *longhorn.InstanceSpec
|
||||
if engineCR, err := oc.ds.GetEngineRO(instance); err != nil {
|
||||
var status *longhorn.InstanceStatus
|
||||
if engine, err := oc.ds.GetEngineRO(instanceParameters.name); err != nil {
|
||||
if !datastore.ErrorIsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
spec = nil
|
||||
status = nil
|
||||
} else {
|
||||
spec = &engineCR.Spec.InstanceSpec
|
||||
status = &engine.Status.InstanceStatus
|
||||
}
|
||||
oc.cleanupOrphanedInstance(orphan, instance, imName, longhorn.InstanceManagerTypeEngine, spec)
|
||||
oc.cleanupOrphanedInstance(orphan, instanceParameters.name, instanceParameters.uuid, instanceParameters.instanceManager, longhorn.InstanceManagerTypeEngine, status)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
|
@ -381,48 +395,24 @@ func (oc *OrphanController) cleanupOrphanedReplicaInstance(orphan *longhorn.Orph
|
|||
}
|
||||
}()
|
||||
|
||||
instance, imName, err := oc.extractOrphanedInstanceInfo(orphan)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
instanceParameters := getOrphanedInstanceParameters(orphan)
|
||||
|
||||
var spec *longhorn.InstanceSpec
|
||||
if replicaCR, err := oc.ds.GetReplicaRO(instance); err != nil {
|
||||
var status *longhorn.InstanceStatus
|
||||
if replica, err := oc.ds.GetReplicaRO(instanceParameters.name); err != nil {
|
||||
if !datastore.ErrorIsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
spec = nil
|
||||
status = nil
|
||||
} else {
|
||||
spec = &replicaCR.Spec.InstanceSpec
|
||||
status = &replica.Status.InstanceStatus
|
||||
}
|
||||
oc.cleanupOrphanedInstance(orphan, instance, imName, longhorn.InstanceManagerTypeReplica, spec)
|
||||
oc.cleanupOrphanedInstance(orphan, instanceParameters.name, instanceParameters.uuid, instanceParameters.instanceManager, longhorn.InstanceManagerTypeReplica, status)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (oc *OrphanController) extractOrphanedInstanceInfo(orphan *longhorn.Orphan) (name, instanceManager string, err error) {
|
||||
name, ok := orphan.Spec.Parameters[longhorn.OrphanInstanceName]
|
||||
if !ok {
|
||||
return "", "", fmt.Errorf("failed to get instance name for instance orphan %v", orphan.Name)
|
||||
}
|
||||
|
||||
instanceManager, ok = orphan.Spec.Parameters[longhorn.OrphanInstanceManager]
|
||||
if !ok {
|
||||
return "", "", fmt.Errorf("failed to get instance manager for instance orphan %v", orphan.Name)
|
||||
}
|
||||
|
||||
switch orphan.Spec.DataEngine {
|
||||
case longhorn.DataEngineTypeV1, longhorn.DataEngineTypeV2:
|
||||
// supported data engine type
|
||||
default:
|
||||
return "", "", fmt.Errorf("unknown data engine type %v for instance orphan %v", orphan.Spec.DataEngine, orphan.Name)
|
||||
}
|
||||
|
||||
return name, instanceManager, nil
|
||||
}
|
||||
|
||||
func (oc *OrphanController) cleanupOrphanedInstance(orphan *longhorn.Orphan, instance, imName string, imType longhorn.InstanceManagerType, instanceCRSpec *longhorn.InstanceSpec) {
|
||||
if instanceCRSpec != nil && instanceCRSpec.NodeID == orphan.Spec.NodeID {
|
||||
oc.logger.Infof("Orphan instance %v is scheduled back to current node %v. Skip cleaning up the instance resource and finalize the orphan CR.", instance, orphan.Spec.NodeID)
|
||||
func (oc *OrphanController) cleanupOrphanedInstance(orphan *longhorn.Orphan, instanceName, instanceUUID, imName string, imType longhorn.InstanceManagerType, instanceCRStatus *longhorn.InstanceStatus) {
|
||||
if instanceCRStatus != nil && instanceCRStatus.InstanceManagerName == imName {
|
||||
oc.logger.Infof("Orphan instance %v is scheduled back to instance manager %v. Skip cleaning up the instance resource and finalize the orphan CR.", instanceName, imName)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -430,10 +420,10 @@ func (oc *OrphanController) cleanupOrphanedInstance(orphan *longhorn.Orphan, ins
|
|||
// Later if the orphaned instance is still reachable, the orphan will be recreated.
|
||||
imc, err := oc.getRunningInstanceManagerClientForOrphan(orphan, imName)
|
||||
if err != nil {
|
||||
oc.logger.WithError(err).Warnf("Failed to delete orphan instance %v due to instance manager client initialization failure. Continue to finalize orphan %v", instance, orphan.Name)
|
||||
oc.logger.WithError(err).Warnf("Failed to delete orphan instance %v due to instance manager client initialization failure. Continue to finalize orphan %v", instanceName, orphan.Name)
|
||||
return
|
||||
} else if imc == nil {
|
||||
oc.logger.WithField("orphanInstanceNode", orphan.Spec.NodeID).Warnf("No running instance manager for deleting orphan instance %v", orphan.Name)
|
||||
oc.logger.WithField("instanceManager", imName).Warnf("No running instance manager for deleting orphan instance %v", orphan.Name)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
|
@ -442,9 +432,9 @@ func (oc *OrphanController) cleanupOrphanedInstance(orphan *longhorn.Orphan, ins
|
|||
}
|
||||
}()
|
||||
|
||||
err = imc.InstanceDelete(orphan.Spec.DataEngine, instance, string(imType), "", false)
|
||||
err = imc.InstanceDelete(orphan.Spec.DataEngine, instanceName, instanceUUID, string(imType), "", false)
|
||||
if err != nil && !types.ErrorIsNotFound(err) {
|
||||
oc.logger.WithError(err).Warnf("Failed to delete orphan instance %v. Continue to finalize orphan %v", instance, orphan.Name)
|
||||
oc.logger.WithError(err).Warnf("Failed to delete orphan instance %v with UUID %v. Continue to finalize orphan %v", instanceName, instanceUUID, orphan.Name)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -553,8 +543,8 @@ func (oc *OrphanController) updateInstanceStateCondition(orphan *longhorn.Orphan
|
|||
orphan.Status.Conditions = types.SetCondition(orphan.Status.Conditions, longhorn.OrphanConditionTypeInstanceExist, status, string(instanceState), "")
|
||||
}()
|
||||
|
||||
instanceName, instanceManager, err := oc.extractOrphanedInstanceInfo(orphan)
|
||||
im, err := oc.ds.GetInstanceManager(instanceManager)
|
||||
instanceParameter := getOrphanedInstanceParameters(orphan)
|
||||
im, err := oc.ds.GetInstanceManager(instanceParameter.instanceManager)
|
||||
if err != nil {
|
||||
if datastore.ErrorIsNotFound(err) {
|
||||
oc.logger.WithError(err).Infof("No instance manager for node %v for update instance state of orphan instance %v", oc.controllerID, orphan.Name)
|
||||
|
@ -573,13 +563,16 @@ func (oc *OrphanController) updateInstanceStateCondition(orphan *longhorn.Orphan
|
|||
}
|
||||
}()
|
||||
|
||||
instance, err := imc.InstanceGet(orphan.Spec.DataEngine, instanceName, string(instanceType))
|
||||
switch {
|
||||
case err != nil:
|
||||
return errors.Wrapf(err, "failed to get instance %v", instanceName)
|
||||
case instance == nil:
|
||||
instance, err := imc.InstanceGet(orphan.Spec.DataEngine, instanceParameter.name, string(instanceType))
|
||||
if err != nil {
|
||||
if types.ErrorIsNotFound(err) {
|
||||
instanceState = longhorn.InstanceStateTerminated
|
||||
} else {
|
||||
return errors.Wrapf(err, "failed to get instance %v", instanceParameter.name)
|
||||
}
|
||||
} else if instance == nil {
|
||||
instanceState = longhorn.InstanceStateTerminated
|
||||
default:
|
||||
} else {
|
||||
instanceState = instance.Status.State
|
||||
}
|
||||
|
||||
|
@ -656,3 +649,17 @@ func (oc *OrphanController) checkOrphanedReplicaDataCleanable(node *longhorn.Nod
|
|||
|
||||
return ""
|
||||
}
|
||||
|
||||
type orphanedInstanceParameters struct {
|
||||
name string
|
||||
uuid string
|
||||
instanceManager string
|
||||
}
|
||||
|
||||
func getOrphanedInstanceParameters(orphan *longhorn.Orphan) orphanedInstanceParameters {
|
||||
return orphanedInstanceParameters{
|
||||
name: orphan.Spec.Parameters[longhorn.OrphanInstanceName],
|
||||
uuid: orphan.Spec.Parameters[longhorn.OrphanInstanceUUID],
|
||||
instanceManager: orphan.Spec.Parameters[longhorn.OrphanInstanceManager],
|
||||
}
|
||||
}
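Usage note for getOrphanedInstanceParameters: the three well-known parameter keys map one-to-one onto the struct fields, and a missing key simply yields an empty string. The sketch below is illustrative only; the wrapper function and the literal values are made up, while the longhorn types and keys are the ones used above, and the package's existing imports are assumed.

func exampleOrphanedInstanceParameters() {
	orphan := &longhorn.Orphan{
		Spec: longhorn.OrphanSpec{
			Parameters: map[string]string{
				longhorn.OrphanInstanceName:    "demo-volume-e-0",       // hypothetical value
				longhorn.OrphanInstanceUUID:    "0f7a6c1e",              // hypothetical value
				longhorn.OrphanInstanceManager: "instance-manager-demo", // hypothetical value
			},
		},
	}

	params := getOrphanedInstanceParameters(orphan)
	// params.name == "demo-volume-e-0", params.uuid == "0f7a6c1e",
	// params.instanceManager == "instance-manager-demo"
	_ = params
}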
|
||||
|
|
|
@ -380,6 +380,11 @@ func (rc *ReplicaController) CreateInstance(obj interface{}) (*longhorn.Instance
|
|||
return nil, err
|
||||
}
|
||||
|
||||
r.Status.Starting = true
|
||||
if r, err = rc.ds.UpdateReplicaStatus(r); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to update replica %v status.starting to true before sending instance create request", r.Name)
|
||||
}
|
||||
|
||||
return c.ReplicaInstanceCreate(&engineapi.ReplicaInstanceCreateRequest{
|
||||
Replica: r,
|
||||
DiskName: diskName,
|
||||
|
@ -585,7 +590,7 @@ func (rc *ReplicaController) DeleteInstance(obj interface{}) (err error) {
|
|||
|
||||
log.WithField("cleanupRequired", cleanupRequired).Infof("Deleting replica instance on disk %v", r.Spec.DiskPath)
|
||||
|
||||
err = c.InstanceDelete(r.Spec.DataEngine, r.Name, string(longhorn.InstanceManagerTypeReplica), r.Spec.DiskID, cleanupRequired)
|
||||
err = c.InstanceDelete(r.Spec.DataEngine, r.Name, "", string(longhorn.InstanceManagerTypeReplica), r.Spec.DiskID, cleanupRequired)
|
||||
if err != nil && !types.ErrorIsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -256,10 +256,6 @@ func (sc *SettingController) syncNonDangerZoneSettingsForManagedComponents(setti
|
|||
if err := sc.syncDefaultLonghornStaticStorageClass(); err != nil {
|
||||
return err
|
||||
}
|
||||
case types.SettingNameOrphanResourceAutoDeletion:
|
||||
if err := sc.syncOrphanResourceAutoDeletion(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -354,7 +350,6 @@ func (sc *SettingController) syncDangerZoneSettingsForManagedComponents(settingN
|
|||
types.SettingNameV1DataEngine,
|
||||
types.SettingNameV2DataEngine,
|
||||
types.SettingNameGuaranteedInstanceManagerCPU,
|
||||
types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU,
|
||||
}
|
||||
|
||||
if slices.Contains(dangerSettingsRequiringSpecificDataEngineVolumesDetached, settingName) {
|
||||
|
@ -364,14 +359,11 @@ func (sc *SettingController) syncDangerZoneSettingsForManagedComponents(settingN
|
|||
return errors.Wrapf(err, "failed to apply %v setting to Longhorn instance managers when there are attached volumes. "+
|
||||
"It will be eventually applied", settingName)
|
||||
}
|
||||
case types.SettingNameGuaranteedInstanceManagerCPU, types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU:
|
||||
dataEngine := longhorn.DataEngineTypeV1
|
||||
if settingName == types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU {
|
||||
dataEngine = longhorn.DataEngineTypeV2
|
||||
}
|
||||
|
||||
if err := sc.updateInstanceManagerCPURequest(dataEngine); err != nil {
|
||||
return err
|
||||
case types.SettingNameGuaranteedInstanceManagerCPU:
|
||||
for _, dataEngine := range []longhorn.DataEngineType{longhorn.DataEngineTypeV1, longhorn.DataEngineTypeV2} {
|
||||
if err := sc.updateInstanceManagerCPURequest(dataEngine); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -900,32 +892,6 @@ func (sc *SettingController) syncDefaultLonghornStaticStorageClass() error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (sc *SettingController) syncOrphanResourceAutoDeletion() error {
|
||||
setting, err := sc.ds.GetSettingOrphanResourceAutoDeletion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var replicaDataCleanupValue string
|
||||
if setting[types.OrphanResourceTypeReplicaData] {
|
||||
replicaDataCleanupValue = "true"
|
||||
} else {
|
||||
replicaDataCleanupValue = "false"
|
||||
}
|
||||
|
||||
deprecatedSetting, err := sc.ds.GetSetting(types.SettingNameOrphanAutoDeletion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if replicaDataCleanupValue != deprecatedSetting.Value {
|
||||
deprecatedSetting.Value = replicaDataCleanupValue
|
||||
if _, err := sc.ds.UpdateSetting(deprecatedSetting); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// updateDataEngine deletes the corresponding instance manager pods immediately if the data engine setting is disabled.
|
||||
func (sc *SettingController) updateDataEngine(setting types.SettingName) error {
|
||||
enabled, err := sc.ds.GetSettingAsBool(setting)
|
||||
|
@ -1255,10 +1221,6 @@ func (sc *SettingController) enqueueSettingForNode(obj interface{}) {
|
|||
|
||||
// updateInstanceManagerCPURequest deletes all instance manager pods immediately with the updated CPU request.
|
||||
func (sc *SettingController) updateInstanceManagerCPURequest(dataEngine longhorn.DataEngineType) error {
|
||||
settingName := types.SettingNameGuaranteedInstanceManagerCPU
|
||||
if types.IsDataEngineV2(dataEngine) {
|
||||
settingName = types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU
|
||||
}
|
||||
imPodList, err := sc.ds.ListInstanceManagerPodsBy("", "", longhorn.InstanceManagerTypeAllInOne, dataEngine)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to list instance manager pods for toleration update")
|
||||
|
@ -1299,10 +1261,10 @@ func (sc *SettingController) updateInstanceManagerCPURequest(dataEngine longhorn
|
|||
|
||||
stopped, _, err := sc.ds.AreAllEngineInstancesStopped(dataEngine)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to check engine instances for %v setting update", settingName)
|
||||
return errors.Wrapf(err, "failed to check engine instances for %v setting update for data engine %v", types.SettingNameGuaranteedInstanceManagerCPU, dataEngine)
|
||||
}
|
||||
if !stopped {
|
||||
return &types.ErrorInvalidState{Reason: fmt.Sprintf("failed to apply %v setting to Longhorn components when there are running engine instances. It will be eventually applied", settingName)}
|
||||
return &types.ErrorInvalidState{Reason: fmt.Sprintf("failed to apply %v setting for data engine %v to Longhorn components when there are running engine instances. It will be eventually applied", types.SettingNameGuaranteedInstanceManagerCPU, dataEngine)}
|
||||
}
|
||||
|
||||
for _, pod := range notUpdatedPods {
|
||||
|
@ -1423,22 +1385,22 @@ const (
|
|||
ClusterInfoVolumeNumOfReplicas = util.StructName("LonghornVolumeNumberOfReplicas")
|
||||
ClusterInfoVolumeNumOfSnapshots = util.StructName("LonghornVolumeNumberOfSnapshots")
|
||||
|
||||
ClusterInfoPodAvgCPUUsageFmt = "Longhorn%sAverageCpuUsageMilliCores"
|
||||
ClusterInfoPodAvgMemoryUsageFmt = "Longhorn%sAverageMemoryUsageBytes"
|
||||
ClusterInfoSettingFmt = "LonghornSetting%s"
|
||||
ClusterInfoVolumeAccessModeCountFmt = "LonghornVolumeAccessMode%sCount"
|
||||
ClusterInfoVolumeDataEngineCountFmt = "LonghornVolumeDataEngine%sCount"
|
||||
ClusterInfoVolumeDataLocalityCountFmt = "LonghornVolumeDataLocality%sCount"
|
||||
ClusterInfoVolumeEncryptedCountFmt = "LonghornVolumeEncrypted%sCount"
|
||||
ClusterInfoVolumeFrontendCountFmt = "LonghornVolumeFrontend%sCount"
|
||||
ClusterInfoVolumeReplicaAutoBalanceCountFmt = "LonghornVolumeReplicaAutoBalance%sCount"
|
||||
ClusterInfoVolumeReplicaSoftAntiAffinityCountFmt = "LonghornVolumeReplicaSoftAntiAffinity%sCount"
|
||||
ClusterInfoVolumeReplicaZoneSoftAntiAffinityCountFmt = "LonghornVolumeReplicaZoneSoftAntiAffinity%sCount"
|
||||
ClusterInfoVolumeReplicaDiskSoftAntiAffinityCountFmt = "LonghornVolumeReplicaDiskSoftAntiAffinity%sCount"
|
||||
ClusterInfoVolumeRestoreVolumeRecurringJobCountFmt = "LonghornVolumeRestoreVolumeRecurringJob%sCount"
|
||||
ClusterInfoVolumeSnapshotDataIntegrityCountFmt = "LonghornVolumeSnapshotDataIntegrity%sCount"
|
||||
ClusterInfoVolumeUnmapMarkSnapChainRemovedCountFmt = "LonghornVolumeUnmapMarkSnapChainRemoved%sCount"
|
||||
ClusterInfoVolumeFreezeFilesystemForSnapshotCountFmt = "LonghornVolumeFreezeFilesystemForSnapshot%sCount"
|
||||
ClusterInfoPodAvgCPUUsageFmt = "Longhorn%sAverageCpuUsageMilliCores"
|
||||
ClusterInfoPodAvgMemoryUsageFmt = "Longhorn%sAverageMemoryUsageBytes"
|
||||
ClusterInfoSettingFmt = "LonghornSetting%s"
|
||||
ClusterInfoVolumeAccessModeCountFmt = "LonghornVolumeAccessMode%sCount"
|
||||
ClusterInfoVolumeDataEngineCountFmt = "LonghornVolumeDataEngine%sCount"
|
||||
ClusterInfoVolumeDataLocalityCountFmt = "LonghornVolumeDataLocality%sCount"
|
||||
ClusterInfoVolumeEncryptedCountFmt = "LonghornVolumeEncrypted%sCount"
|
||||
ClusterInfoVolumeFrontendCountFmt = "LonghornVolumeFrontend%sCount"
|
||||
ClusterInfoVolumeReplicaAutoBalanceCountFmt = "LonghornVolumeReplicaAutoBalance%sCount"
|
||||
ClusterInfoVolumeReplicaSoftAntiAffinityCountFmt = "LonghornVolumeReplicaSoftAntiAffinity%sCount"
|
||||
ClusterInfoVolumeReplicaZoneSoftAntiAffinityCountFmt = "LonghornVolumeReplicaZoneSoftAntiAffinity%sCount"
|
||||
ClusterInfoVolumeReplicaDiskSoftAntiAffinityCountFmt = "LonghornVolumeReplicaDiskSoftAntiAffinity%sCount"
|
||||
ClusterInfoVolumeRestoreVolumeRecurringJobCountFmt = "LonghornVolumeRestoreVolumeRecurringJob%sCount"
|
||||
ClusterInfoVolumeSnapshotDataIntegrityCountFmt = "LonghornVolumeSnapshotDataIntegrity%sCount"
|
||||
ClusterInfoVolumeUnmapMarkSnapChainRemovedCountFmt = "LonghornVolumeUnmapMarkSnapChainRemoved%sCount"
|
||||
ClusterInfoVolumeFreezeFilesystemForV1DataEngineSnapshotCountFmt = "LonghornVolumeFreezeFilesystemForV1DataEngineSnapshot%sCount"
|
||||
)
|
||||
|
||||
// Node Scope Info: will be sent from all Longhorn cluster nodes
|
||||
|
@ -1614,7 +1576,6 @@ func (info *ClusterInfo) collectSettings() error {
|
|||
types.SettingNameKubernetesClusterAutoscalerEnabled: true,
|
||||
types.SettingNameNodeDownPodDeletionPolicy: true,
|
||||
types.SettingNameNodeDrainPolicy: true,
|
||||
types.SettingNameOrphanAutoDeletion: true,
|
||||
types.SettingNameOrphanResourceAutoDeletion: true,
|
||||
types.SettingNameRecurringFailedJobsHistoryLimit: true,
|
||||
types.SettingNameRecurringSuccessfulJobsHistoryLimit: true,
|
||||
|
@ -1640,7 +1601,6 @@ func (info *ClusterInfo) collectSettings() error {
|
|||
types.SettingNameSystemManagedPodsImagePullPolicy: true,
|
||||
types.SettingNameV1DataEngine: true,
|
||||
types.SettingNameV2DataEngine: true,
|
||||
types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU: true,
|
||||
}
|
||||
|
||||
settings, err := info.ds.ListSettings()
|
||||
|
@ -1698,12 +1658,15 @@ func (info *ClusterInfo) convertSettingValueType(setting *longhorn.Setting) (con
|
|||
|
||||
switch definition.Type {
|
||||
case types.SettingTypeInt:
|
||||
return strconv.ParseInt(setting.Value, 10, 64)
|
||||
if !definition.DataEngineSpecific {
|
||||
return strconv.ParseInt(setting.Value, 10, 64)
|
||||
}
|
||||
case types.SettingTypeBool:
|
||||
return strconv.ParseBool(setting.Value)
|
||||
default:
|
||||
return setting.Value, nil
|
||||
if !definition.DataEngineSpecific {
|
||||
return strconv.ParseBool(setting.Value)
|
||||
}
|
||||
}
|
||||
return setting.Value, nil
|
||||
}
|
||||
|
||||
func (info *ClusterInfo) collectVolumesInfo() error {
|
||||
|
@ -1772,29 +1735,31 @@ func (info *ClusterInfo) collectVolumesInfo() error {
|
|||
frontendCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeFrontendCountFmt, frontend))]++
|
||||
}
|
||||
|
||||
replicaAutoBalance := info.collectSettingInVolume(string(volume.Spec.ReplicaAutoBalance), string(longhorn.ReplicaAutoBalanceIgnored), types.SettingNameReplicaAutoBalance)
|
||||
replicaAutoBalance := info.collectSettingInVolume(string(volume.Spec.ReplicaAutoBalance), string(longhorn.ReplicaAutoBalanceIgnored), volume.Spec.DataEngine, types.SettingNameReplicaAutoBalance)
|
||||
replicaAutoBalanceCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeReplicaAutoBalanceCountFmt, util.ConvertToCamel(string(replicaAutoBalance), "-")))]++
|
||||
|
||||
replicaSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaSoftAntiAffinity), string(longhorn.ReplicaSoftAntiAffinityDefault), types.SettingNameReplicaSoftAntiAffinity)
|
||||
replicaSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaSoftAntiAffinity), string(longhorn.ReplicaSoftAntiAffinityDefault), volume.Spec.DataEngine, types.SettingNameReplicaSoftAntiAffinity)
|
||||
replicaSoftAntiAffinityCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeReplicaSoftAntiAffinityCountFmt, util.ConvertToCamel(string(replicaSoftAntiAffinity), "-")))]++
|
||||
|
||||
replicaZoneSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaZoneSoftAntiAffinity), string(longhorn.ReplicaZoneSoftAntiAffinityDefault), types.SettingNameReplicaZoneSoftAntiAffinity)
|
||||
replicaZoneSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaZoneSoftAntiAffinity), string(longhorn.ReplicaZoneSoftAntiAffinityDefault), volume.Spec.DataEngine, types.SettingNameReplicaZoneSoftAntiAffinity)
|
||||
replicaZoneSoftAntiAffinityCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeReplicaZoneSoftAntiAffinityCountFmt, util.ConvertToCamel(string(replicaZoneSoftAntiAffinity), "-")))]++
|
||||
|
||||
replicaDiskSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaDiskSoftAntiAffinity), string(longhorn.ReplicaDiskSoftAntiAffinityDefault), types.SettingNameReplicaDiskSoftAntiAffinity)
|
||||
replicaDiskSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaDiskSoftAntiAffinity), string(longhorn.ReplicaDiskSoftAntiAffinityDefault), volume.Spec.DataEngine, types.SettingNameReplicaDiskSoftAntiAffinity)
|
||||
replicaDiskSoftAntiAffinityCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeReplicaDiskSoftAntiAffinityCountFmt, util.ConvertToCamel(string(replicaDiskSoftAntiAffinity), "-")))]++
|
||||
|
||||
restoreVolumeRecurringJob := info.collectSettingInVolume(string(volume.Spec.RestoreVolumeRecurringJob), string(longhorn.RestoreVolumeRecurringJobDefault), types.SettingNameRestoreVolumeRecurringJobs)
|
||||
restoreVolumeRecurringJob := info.collectSettingInVolume(string(volume.Spec.RestoreVolumeRecurringJob), string(longhorn.RestoreVolumeRecurringJobDefault), volume.Spec.DataEngine, types.SettingNameRestoreVolumeRecurringJobs)
|
||||
restoreVolumeRecurringJobCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeRestoreVolumeRecurringJobCountFmt, util.ConvertToCamel(string(restoreVolumeRecurringJob), "-")))]++
|
||||
|
||||
snapshotDataIntegrity := info.collectSettingInVolume(string(volume.Spec.SnapshotDataIntegrity), string(longhorn.SnapshotDataIntegrityIgnored), types.SettingNameSnapshotDataIntegrity)
|
||||
snapshotDataIntegrity := info.collectSettingInVolume(string(volume.Spec.SnapshotDataIntegrity), string(longhorn.SnapshotDataIntegrityIgnored), volume.Spec.DataEngine, types.SettingNameSnapshotDataIntegrity)
|
||||
snapshotDataIntegrityCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeSnapshotDataIntegrityCountFmt, util.ConvertToCamel(string(snapshotDataIntegrity), "-")))]++
|
||||
|
||||
unmapMarkSnapChainRemoved := info.collectSettingInVolume(string(volume.Spec.UnmapMarkSnapChainRemoved), string(longhorn.UnmapMarkSnapChainRemovedIgnored), types.SettingNameRemoveSnapshotsDuringFilesystemTrim)
|
||||
unmapMarkSnapChainRemoved := info.collectSettingInVolume(string(volume.Spec.UnmapMarkSnapChainRemoved), string(longhorn.UnmapMarkSnapChainRemovedIgnored), volume.Spec.DataEngine, types.SettingNameRemoveSnapshotsDuringFilesystemTrim)
|
||||
unmapMarkSnapChainRemovedCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeUnmapMarkSnapChainRemovedCountFmt, util.ConvertToCamel(string(unmapMarkSnapChainRemoved), "-")))]++
|
||||
|
||||
freezeFilesystemForSnapshot := info.collectSettingInVolume(string(volume.Spec.FreezeFilesystemForSnapshot), string(longhorn.FreezeFilesystemForSnapshotDefault), types.SettingNameFreezeFilesystemForSnapshot)
|
||||
freezeFilesystemForSnapshotCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeFreezeFilesystemForSnapshotCountFmt, util.ConvertToCamel(string(freezeFilesystemForSnapshot), "-")))]++
|
||||
if types.IsDataEngineV1(volume.Spec.DataEngine) {
|
||||
freezeFilesystemForSnapshot := info.collectSettingInVolume(string(volume.Spec.FreezeFilesystemForSnapshot), string(longhorn.FreezeFilesystemForSnapshotDefault), volume.Spec.DataEngine, types.SettingNameFreezeFilesystemForSnapshot)
|
||||
freezeFilesystemForSnapshotCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeFreezeFilesystemForV1DataEngineSnapshotCountFmt, util.ConvertToCamel(string(freezeFilesystemForSnapshot), "-")))]++
|
||||
}
|
||||
}
|
||||
info.structFields.fields.Append(ClusterInfoVolumeNumOfReplicas, totalVolumeNumOfReplicas)
|
||||
info.structFields.fields.AppendCounted(accessModeCountStruct)
|
||||
|
@ -1845,13 +1810,14 @@ func (info *ClusterInfo) collectVolumesInfo() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (info *ClusterInfo) collectSettingInVolume(volumeSpecValue, ignoredValue string, settingName types.SettingName) string {
|
||||
func (info *ClusterInfo) collectSettingInVolume(volumeSpecValue, ignoredValue string, dataEngine longhorn.DataEngineType, settingName types.SettingName) string {
|
||||
if volumeSpecValue == ignoredValue {
|
||||
globalSetting, err := info.ds.GetSettingWithAutoFillingRO(settingName)
|
||||
globalSettingValue, err := info.ds.GetSettingValueExistedByDataEngine(settingName, dataEngine)
|
||||
if err != nil {
|
||||
info.logger.WithError(err).Warnf("Failed to get Longhorn Setting %v", settingName)
|
||||
}
|
||||
return globalSetting.Value
|
||||
|
||||
return globalSettingValue
|
||||
}
|
||||
return volumeSpecValue
|
||||
}
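The fallback shape in collectSettingInVolume is: use the per-volume value unless it still holds the "ignored" default, in which case read the global, data-engine-scoped setting. A minimal sketch of that shape, with the datastore call stubbed by a closure; every name here is hypothetical.

// resolveEffectiveValue mirrors the fallback above; a closure stands in for
// ds.GetSettingValueExistedByDataEngine so the sketch stays self-contained.
func resolveEffectiveValue(volumeSpecValue, ignoredValue string, lookupGlobal func() (string, error)) string {
	if volumeSpecValue != ignoredValue {
		return volumeSpecValue // an explicit per-volume value always wins
	}
	globalValue, err := lookupGlobal()
	if err != nil {
		// Like the controller, this would log and continue, returning whatever came back.
		return globalValue
	}
	return globalValue
}

// resolveEffectiveValue("ignored", "ignored", lookup)  -> the global setting value
// resolveEffectiveValue("disabled", "ignored", lookup) -> "disabled"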
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -16,6 +17,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
|
||||
|
@ -58,6 +60,8 @@ type ShareManagerController struct {
|
|||
ds *datastore.DataStore
|
||||
|
||||
cacheSyncs []cache.InformerSynced
|
||||
|
||||
backoff *flowcontrol.Backoff
|
||||
}
|
||||
|
||||
func NewShareManagerController(
|
||||
|
@ -87,6 +91,8 @@ func NewShareManagerController(
|
|||
eventRecorder: eventBroadcaster.NewRecorder(scheme, corev1.EventSource{Component: "longhorn-share-manager-controller"}),
|
||||
|
||||
ds: ds,
|
||||
|
||||
backoff: newBackoff(context.TODO()),
|
||||
}
|
||||
|
||||
var err error
|
||||
|
@ -875,8 +881,16 @@ func (c *ShareManagerController) syncShareManagerPod(sm *longhorn.ShareManager)
|
|||
return nil
|
||||
}
|
||||
|
||||
if pod, err = c.createShareManagerPod(sm); err != nil {
|
||||
return errors.Wrap(err, "failed to create pod for share manager")
|
||||
backoffID := sm.Name
|
||||
if c.backoff.IsInBackOffSinceUpdate(backoffID, time.Now()) {
|
||||
log.Infof("Skipping pod creation for share manager %s, will retry after backoff of %s", sm.Name, c.backoff.Get(backoffID))
|
||||
} else {
|
||||
log.Infof("Creating pod for share manager %s", sm.Name)
|
||||
c.backoff.Next(backoffID, time.Now())
|
||||
|
||||
if pod, err = c.createShareManagerPod(sm); err != nil {
|
||||
return errors.Wrap(err, "failed to create pod for share manager")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1249,6 +1263,8 @@ func (c *ShareManagerController) createShareManagerPod(sm *longhorn.ShareManager
|
|||
|
||||
var affinity *corev1.Affinity
|
||||
|
||||
var formatOptions []string
|
||||
|
||||
if pv.Spec.StorageClassName != "" {
|
||||
sc, err := c.ds.GetStorageClass(pv.Spec.StorageClassName)
|
||||
if err != nil {
|
||||
|
@ -1271,6 +1287,9 @@ func (c *ShareManagerController) createShareManagerPod(sm *longhorn.ShareManager
|
|||
}
|
||||
tolerationsFromStorageClass := c.getShareManagerTolerationsFromStorageClass(sc)
|
||||
tolerations = append(tolerations, tolerationsFromStorageClass...)
|
||||
|
||||
// A storage class can override mkfs parameters which need to be passed to the share manager
|
||||
formatOptions = c.splitFormatOptions(sc)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1308,7 +1327,7 @@ func (c *ShareManagerController) createShareManagerPod(sm *longhorn.ShareManager
|
|||
}
|
||||
|
||||
manifest := c.createPodManifest(sm, volume.Spec.DataEngine, annotations, tolerations, affinity, imagePullPolicy, nil, registrySecret,
|
||||
priorityClass, nodeSelector, fsType, mountOptions, cryptoKey, cryptoParams, nfsConfig)
|
||||
priorityClass, nodeSelector, fsType, formatOptions, mountOptions, cryptoKey, cryptoParams, nfsConfig)
|
||||
|
||||
storageNetwork, err := c.ds.GetSettingWithAutoFillingRO(types.SettingNameStorageNetwork)
|
||||
if err != nil {
|
||||
|
@ -1335,6 +1354,29 @@ func (c *ShareManagerController) createShareManagerPod(sm *longhorn.ShareManager
|
|||
return pod, nil
|
||||
}
|
||||
|
||||
func (c *ShareManagerController) splitFormatOptions(sc *storagev1.StorageClass) []string {
	if mkfsParams, ok := sc.Parameters["mkfsParams"]; ok {
		regex, err := regexp.Compile("-[a-zA-Z_]+(?:\\s*=?\\s*(?:\"[^\"]*\"|'[^']*'|[^\\r\\n\\t\\f\\v -]+))?")

		if err != nil {
			c.logger.WithError(err).Warnf("Failed to compile regex for mkfsParams %v, will continue the share manager pod creation", mkfsParams)
			return nil
		}

		matches := regex.FindAllString(mkfsParams, -1)

		if matches == nil {
			c.logger.Warnf("No valid mkfs parameters found in \"%v\", will continue the share manager pod creation", mkfsParams)
			return nil
		}

		return matches
	}

	c.logger.Debug("No mkfs parameters found, will continue the share manager pod creation")
	return nil
}
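A standalone demo of the tokenizer above, reusing the same regular expression on the "-I 256 -b 4096 -O ^metadata_csum,^64bit" input that the unit tests later in this diff also cover:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as splitFormatOptions, written as a raw string literal.
	regex := regexp.MustCompile(`-[a-zA-Z_]+(?:\s*=?\s*(?:"[^"]*"|'[^']*'|[^\r\n\t\f\v -]+))?`)
	fmt.Printf("%q\n", regex.FindAllString("-I 256 -b 4096 -O ^metadata_csum,^64bit", -1))
	// Output: ["-I 256" "-b 4096" "-O ^metadata_csum,^64bit"]
}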
|
||||
|
||||
func (c *ShareManagerController) createServiceManifest(sm *longhorn.ShareManager) *corev1.Service {
|
||||
service := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
@ -1422,7 +1464,7 @@ func (c *ShareManagerController) createLeaseManifest(sm *longhorn.ShareManager)
|
|||
|
||||
func (c *ShareManagerController) createPodManifest(sm *longhorn.ShareManager, dataEngine longhorn.DataEngineType, annotations map[string]string, tolerations []corev1.Toleration,
|
||||
affinity *corev1.Affinity, pullPolicy corev1.PullPolicy, resourceReq *corev1.ResourceRequirements, registrySecret, priorityClass string,
|
||||
nodeSelector map[string]string, fsType string, mountOptions []string, cryptoKey string, cryptoParams *crypto.EncryptParams,
|
||||
nodeSelector map[string]string, fsType string, formatOptions []string, mountOptions []string, cryptoKey string, cryptoParams *crypto.EncryptParams,
|
||||
nfsConfig *nfsServerConfig) *corev1.Pod {
|
||||
|
||||
// command args for the share-manager
|
||||
|
@ -1493,6 +1535,15 @@ func (c *ShareManagerController) createPodManifest(sm *longhorn.ShareManager, da
|
|||
},
|
||||
}
|
||||
|
||||
if len(formatOptions) > 0 {
|
||||
podSpec.Spec.Containers[0].Env = append(podSpec.Spec.Containers[0].Env, []corev1.EnvVar{
|
||||
{
|
||||
Name: "FS_FORMAT_OPTIONS",
|
||||
Value: fmt.Sprint(strings.Join(formatOptions, ":")),
|
||||
},
|
||||
}...)
|
||||
}
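Design note on the FS_FORMAT_OPTIONS environment variable set above: the options are joined with ":" into a single value. A round-trip sketch follows; the producer side mirrors createPodManifest, while the consumer side is an assumption about the share manager image, not code from this repository, and the standard library "strings" and "fmt" packages are assumed.

func formatOptionsRoundTrip() {
	formatOptions := []string{"-I 256", "-b 4096", "-O ^metadata_csum,^64bit"}

	// Producer side, as in createPodManifest above.
	envValue := strings.Join(formatOptions, ":") // "-I 256:-b 4096:-O ^metadata_csum,^64bit"

	// Assumed consumer side inside the share manager pod.
	recovered := strings.Split(envValue, ":")
	fmt.Println(recovered) // [-I 256 -b 4096 -O ^metadata_csum,^64bit]
}

Because ":" is the separator, an mkfs option that itself contained a colon would be ambiguous after splitting; that is a property of the chosen encoding rather than of the tokenizer.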
|
||||
|
||||
// This is an encrypted volume; the cryptoKey is base64 encoded.
|
||||
if len(cryptoKey) > 0 {
|
||||
podSpec.Spec.Containers[0].Env = append(podSpec.Spec.Containers[0].Env, []corev1.EnvVar{
|
||||
|
|
|
@ -0,0 +1,148 @@
|
|||
package controller
|
||||
|
||||
import (
	"reflect"
	"testing"

	"github.com/sirupsen/logrus"
	storagev1 "k8s.io/api/storage/v1"
)
|
||||
|
||||
func TestShareManagerController_splitFormatOptions(t *testing.T) {
|
||||
type args struct {
|
||||
sc *storagev1.StorageClass
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "mkfsParams with no mkfsParams",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{},
|
||||
},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "mkfsParams with empty options",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{
|
||||
Parameters: map[string]string{
|
||||
"mkfsParams": "",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "mkfsParams with multiple options",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{
|
||||
Parameters: map[string]string{
|
||||
"mkfsParams": "-O someopt -L label -n",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"-O someopt", "-L label", "-n"},
|
||||
},
|
||||
{
|
||||
name: "mkfsParams with underscore options",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{
|
||||
Parameters: map[string]string{
|
||||
"mkfsParams": "-O someopt -label_value test -L label -n",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"-O someopt", "-label_value test", "-L label", "-n"},
|
||||
},
|
||||
{
|
||||
name: "mkfsParams with quoted options",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{
|
||||
Parameters: map[string]string{
|
||||
"mkfsParams": "-O someopt -label_value \"test\" -L label -n",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"-O someopt", "-label_value \"test\"", "-L label", "-n"},
|
||||
},
|
||||
{
|
||||
name: "mkfsParams with equal sign quoted options",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{
|
||||
Parameters: map[string]string{
|
||||
"mkfsParams": "-O someopt -label_value=\"test\" -L label -n",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"-O someopt", "-label_value=\"test\"", "-L label", "-n"},
|
||||
},
|
||||
{
|
||||
name: "mkfsParams with equal sign quoted options with spaces",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{
|
||||
Parameters: map[string]string{
|
||||
"mkfsParams": "-O someopt -label_value=\"test label \" -L label -n",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"-O someopt", "-label_value=\"test label \"", "-L label", "-n"},
|
||||
},
|
||||
{
|
||||
name: "mkfsParams with equal sign quoted options and different spacing",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{
|
||||
Parameters: map[string]string{
|
||||
"mkfsParams": "-n -O someopt -label_value=\"test\" -Llabel",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"-n", "-O someopt", "-label_value=\"test\"", "-Llabel"},
|
||||
},
|
||||
{
|
||||
name: "mkfsParams with special characters in options",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{
|
||||
Parameters: map[string]string{
|
||||
"mkfsParams": "-I 256 -b 4096 -O ^metadata_csum,^64bit",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"-I 256", "-b 4096", "-O ^metadata_csum,^64bit"},
|
||||
},
|
||||
{
|
||||
name: "mkfsParams with no spacing in options",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{
|
||||
Parameters: map[string]string{
|
||||
"mkfsParams": "-Osomeopt -Llabel",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"-Osomeopt", "-Llabel"},
|
||||
},
|
||||
{
|
||||
name: "mkfsParams with different spacing between options",
|
||||
args: args{
|
||||
sc: &storagev1.StorageClass{
|
||||
Parameters: map[string]string{
|
||||
"mkfsParams": "-Osomeopt -L label",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"-Osomeopt", "-L label"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := &ShareManagerController{
|
||||
baseController: newBaseController("test-controller", logrus.StandardLogger()),
|
||||
}
|
||||
if got := c.splitFormatOptions(tt.args.sc); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("splitFormatOptions() = %v (len %d), want %v (len %d)",
|
||||
got, len(got), tt.want, len(tt.want))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -100,7 +100,8 @@ func NewSnapshotController(
|
|||
sc.cacheSyncs = append(sc.cacheSyncs, ds.EngineInformer.HasSynced)
|
||||
|
||||
if _, err = ds.VolumeInformer.AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: sc.enqueueVolumeChange,
|
||||
UpdateFunc: sc.enqueueVolumeChange,
|
||||
DeleteFunc: sc.enqueueVolumeDeleted,
|
||||
}, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -211,7 +212,52 @@ func filterSnapshotsForEngineEnqueuing(oldEngine, curEngine *longhorn.Engine, sn
|
|||
return targetSnapshots
|
||||
}
|
||||
|
||||
func (sc *SnapshotController) enqueueVolumeChange(obj interface{}) {
|
||||
// There is a race condition in which no snapshot controller processes some snapshots when the volume owner ID changes.
// https://github.com/longhorn/longhorn/issues/10874#issuecomment-2870915401
// In that case, snapshot controllers should enqueue the affected snapshots again when the volume owner ID changes.
|
||||
func (sc *SnapshotController) enqueueVolumeChange(old, new interface{}) {
|
||||
oldVol, ok := old.(*longhorn.Volume)
|
||||
if !ok {
|
||||
utilruntime.HandleError(fmt.Errorf("received unexpected obj: %#v", old))
|
||||
return
|
||||
}
|
||||
newVol, ok := new.(*longhorn.Volume)
|
||||
if !ok {
|
||||
utilruntime.HandleError(fmt.Errorf("received unexpected obj: %#v", new))
|
||||
return
|
||||
}
|
||||
if !newVol.DeletionTimestamp.IsZero() {
|
||||
return
|
||||
}
|
||||
if oldVol.Status.OwnerID == newVol.Status.OwnerID {
|
||||
return
|
||||
}
|
||||
|
||||
va, err := sc.ds.GetLHVolumeAttachmentByVolumeName(newVol.Name)
|
||||
if err != nil {
|
||||
sc.logger.WithError(err).Warnf("Failed to get volume attachment for volume %s", newVol.Name)
|
||||
return
|
||||
}
|
||||
if va == nil || va.Spec.AttachmentTickets == nil {
|
||||
return
|
||||
}
|
||||
|
||||
snapshots, err := sc.ds.ListVolumeSnapshotsRO(newVol.Name)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("snapshot controller failed to list snapshots when enqueuing volume %v: %v", newVol.Name, err))
|
||||
return
|
||||
}
|
||||
for _, snap := range snapshots {
|
||||
// Longhorn#10874:
|
||||
// Requeue the snapshot if there is an attachment ticket for it to ensure the volumeattachment can be cleaned up after the snapshot is created.
|
||||
attachmentTicketID := longhorn.GetAttachmentTicketID(longhorn.AttacherTypeSnapshotController, snap.Name)
|
||||
if va.Spec.AttachmentTickets[attachmentTicketID] != nil {
|
||||
sc.enqueueSnapshot(snap)
|
||||
}
|
||||
}
|
||||
}
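For readability, the per-snapshot check above can be read as one predicate: does the volume's VolumeAttachment still carry a snapshot-controller ticket for this snapshot? A sketch of that predicate, reusing only the longhorn helpers already called in the handler; the function name is hypothetical and the package's existing imports are assumed.

func hasSnapshotControllerTicket(va *longhorn.VolumeAttachment, snapshotName string) bool {
	if va == nil || va.Spec.AttachmentTickets == nil {
		return false
	}
	ticketID := longhorn.GetAttachmentTicketID(longhorn.AttacherTypeSnapshotController, snapshotName)
	return va.Spec.AttachmentTickets[ticketID] != nil
}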
|
||||
|
||||
func (sc *SnapshotController) enqueueVolumeDeleted(obj interface{}) {
|
||||
vol, ok := obj.(*longhorn.Volume)
|
||||
if !ok {
|
||||
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
|
@ -475,8 +521,15 @@ func (sc *SnapshotController) reconcile(snapshotName string) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
// newly created snapshotCR by user
|
||||
if requestCreateNewSnapshot && !alreadyCreatedBefore {
|
||||
	// Skip handling a new snapshot if it already exists in the engine CR.
	// The engine may be purging, and the snapshot may be deleted mid-reconciliation,
	// potentially leading to a mistaken re-creation.
|
||||
//
|
||||
// https://github.com/longhorn/longhorn/issues/10808
|
||||
snapshotExistInEngine := isSnapshotExistInEngine(snapshotName, engine)
|
||||
|
||||
// Newly created snapshot CR by user
|
||||
if requestCreateNewSnapshot && !alreadyCreatedBefore && !snapshotExistInEngine {
|
||||
if err := sc.handleAttachmentTicketCreation(snapshot, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -201,7 +201,6 @@ func (c *SupportBundleController) handleErr(err error, key interface{}) {
|
|||
}
|
||||
|
||||
func (c *SupportBundleController) handleStatusUpdate(record *supportBundleRecord, supportBundle *longhorn.SupportBundle, existing *longhorn.SupportBundle, log logrus.FieldLogger) {
|
||||
var err error
|
||||
switch record.recordType {
|
||||
case supportBundleRecordError:
|
||||
c.recordErrorState(record, supportBundle, log)
|
||||
|
@ -234,11 +233,19 @@ func (c *SupportBundleController) handleStatusUpdate(record *supportBundleRecord
|
|||
|
||||
isStatusChange := !reflect.DeepEqual(existing.Status, supportBundle.Status)
|
||||
if isStatusChange {
|
||||
supportBundle, err = c.ds.UpdateSupportBundleStatus(supportBundle)
|
||||
if apierrors.IsConflict(errors.Cause(err)) {
|
||||
_supportBundle, err := c.ds.UpdateSupportBundleStatus(supportBundle)
|
||||
if err != nil && apierrors.IsConflict(errors.Cause(err)) {
|
||||
log.WithError(err).Warnf(SupportBundleMsgRequeueOnConflictFmt, supportBundle.Name)
|
||||
c.enqueue(supportBundle)
|
||||
}
|
||||
		// Skip the requeue when the returned object is nil to prevent unnecessary requeue loops.
		// This can happen if the update failed, for example, when the object was
		// deleted during reconciliation.
|
||||
if _supportBundle == nil {
|
||||
log.WithError(err).Warnf("SupportBundle %v is nil after update, skipping requeue", supportBundle.Name)
|
||||
return
|
||||
}
|
||||
supportBundle = _supportBundle
|
||||
|
||||
if supportBundle.Status.State != existing.Status.State {
|
||||
log.Infof(SupportBundleMsgRequeueNextPhaseFmt, supportBundle.Name, supportBundle.Status.State)
|
||||
|
|
|
@ -940,7 +940,8 @@ func (c *SystemBackupController) isVolumeBackupUpToDate(volume *longhorn.Volume,
|
|||
|
||||
lastBackupTime, err := time.Parse(time.RFC3339, lastBackupSnapshot.Status.CreationTime)
|
||||
if err != nil {
|
||||
return false, err
|
||||
log.WithError(err).Warnf("Failed to parse creation time %q for snapshot %v", lastBackupSnapshot.Status.CreationTime, lastBackupSnapshot.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Identify snapshots created after the last backup.
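The hunk above stops treating an unparsable snapshot creation time as a hard error; it logs a warning and reports the backup as not up-to-date so a new backup is taken. A hypothetical helper capturing that policy (the new test cases later in this diff use "" and "null" as the creation time):

package controllersketch

import (
	"time"

	"github.com/sirupsen/logrus"
)

// parseSnapshotCreationTime tolerates empty or malformed CreationTime values;
// the caller treats !ok as "not up to date" and triggers a fresh volume backup.
func parseSnapshotCreationTime(log logrus.FieldLogger, snapshotName, creationTime string) (time.Time, bool) {
	t, err := time.Parse(time.RFC3339, creationTime)
	if err != nil {
		log.WithError(err).Warnf("Failed to parse creation time %q for snapshot %v", creationTime, snapshotName)
		return time.Time{}, false
	}
	return t, true
}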
|
||||
|
|
|
@ -47,6 +47,7 @@ type SystemBackupTestCase struct {
|
|||
existVolumes map[SystemRolloutCRName]*longhorn.Volume
|
||||
existBackingImages map[SystemRolloutCRName]*longhorn.BackingImage
|
||||
existBackups map[string]*longhorn.Backup
|
||||
existSnapshots map[string]*longhorn.Snapshot
|
||||
|
||||
expectError bool
|
||||
expectErrorConditionMessage string
|
||||
|
@ -115,6 +116,70 @@ func (s *TestSuite) TestReconcileSystemBackup(c *C) {
|
|||
expectState: longhorn.SystemBackupStateBackingImageBackup,
|
||||
expectNewVolumBackupCount: 0,
|
||||
},
|
||||
"system backup create volume backup if-not-present when snapshot creationTime is not set": {
|
||||
state: longhorn.SystemBackupStateVolumeBackup,
|
||||
volumeBackupPolicy: longhorn.SystemBackupCreateVolumeBackupPolicyIfNotPresent,
|
||||
existVolumes: map[SystemRolloutCRName]*longhorn.Volume{
|
||||
SystemRolloutCRName(TestVolumeName): {
|
||||
Status: longhorn.VolumeStatus{
|
||||
LastBackup: "exists",
|
||||
},
|
||||
},
|
||||
},
|
||||
existBackups: map[string]*longhorn.Backup{
|
||||
"exists": {
|
||||
Status: longhorn.BackupStatus{
|
||||
State: longhorn.BackupStateCompleted,
|
||||
SnapshotName: "exists",
|
||||
VolumeName: TestVolumeName,
|
||||
},
|
||||
},
|
||||
},
|
||||
existSnapshots: map[string]*longhorn.Snapshot{
|
||||
"exists": {
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "exists"},
|
||||
Spec: longhorn.SnapshotSpec{Volume: TestVolumeName},
|
||||
Status: longhorn.SnapshotStatus{
|
||||
ReadyToUse: true,
|
||||
// CreationTime is not set
|
||||
},
|
||||
},
|
||||
},
|
||||
expectState: longhorn.SystemBackupStateBackingImageBackup,
|
||||
expectNewVolumBackupCount: 1,
|
||||
},
|
||||
"system backup create volume backup if-not-present when snapshot creationTime is null": {
|
||||
state: longhorn.SystemBackupStateVolumeBackup,
|
||||
volumeBackupPolicy: longhorn.SystemBackupCreateVolumeBackupPolicyIfNotPresent,
|
||||
existVolumes: map[SystemRolloutCRName]*longhorn.Volume{
|
||||
SystemRolloutCRName(TestVolumeName): {
|
||||
Status: longhorn.VolumeStatus{
|
||||
LastBackup: "exists",
|
||||
},
|
||||
},
|
||||
},
|
||||
existBackups: map[string]*longhorn.Backup{
|
||||
"exists": {
|
||||
Status: longhorn.BackupStatus{
|
||||
State: longhorn.BackupStateCompleted,
|
||||
SnapshotName: "exists",
|
||||
VolumeName: TestVolumeName,
|
||||
},
|
||||
},
|
||||
},
|
||||
existSnapshots: map[string]*longhorn.Snapshot{
|
||||
"exists": {
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "exists"},
|
||||
Spec: longhorn.SnapshotSpec{Volume: TestVolumeName},
|
||||
Status: longhorn.SnapshotStatus{
|
||||
ReadyToUse: true,
|
||||
CreationTime: "null",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectState: longhorn.SystemBackupStateBackingImageBackup,
|
||||
expectNewVolumBackupCount: 1,
|
||||
},
|
||||
"system backup create volume backup always": {
|
||||
state: longhorn.SystemBackupStateVolumeBackup,
|
||||
volumeBackupPolicy: longhorn.SystemBackupCreateVolumeBackupPolicyAlways,
|
||||
|
@ -276,15 +341,25 @@ func (s *TestSuite) TestReconcileSystemBackup(c *C) {
|
|||
switch systemBackup.Status.State {
|
||||
case longhorn.SystemBackupStateVolumeBackup:
|
||||
if tc.existBackups != nil {
|
||||
existBackupSnap := &longhorn.Snapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "exists"},
|
||||
Spec: longhorn.SnapshotSpec{Volume: TestVolumeName},
|
||||
Status: longhorn.SnapshotStatus{
|
||||
ReadyToUse: true,
|
||||
CreationTime: metav1.Now().Format(time.RFC3339),
|
||||
},
|
||||
existBackupSnapshots := make(map[string]*longhorn.Snapshot)
|
||||
if tc.existSnapshots == nil {
|
||||
for _, backup := range tc.existBackups {
|
||||
existBackupSnapshots[backup.Status.SnapshotName] = &longhorn.Snapshot{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: backup.Status.SnapshotName},
|
||||
Spec: longhorn.SnapshotSpec{Volume: TestVolumeName},
|
||||
Status: longhorn.SnapshotStatus{
|
||||
ReadyToUse: true,
|
||||
CreationTime: metav1.Now().Format(time.RFC3339),
|
||||
},
|
||||
}
|
||||
}
|
||||
} else {
|
||||
existBackupSnapshots = tc.existSnapshots
|
||||
}
|
||||
|
||||
for _, existBackupSnap := range existBackupSnapshots {
|
||||
fakeSystemRolloutSnapshot(existBackupSnap, c, informerFactories.LhInformerFactory, lhClient)
|
||||
}
|
||||
fakeSystemRolloutSnapshot(existBackupSnap, c, informerFactories.LhInformerFactory, lhClient)
|
||||
}
|
||||
backups, _ := systemBackupController.BackupVolumes(systemBackup)
|
||||
|
||||
|
|
|
@ -845,8 +845,21 @@ func (c *UninstallController) deleteEngineImages(engineImages map[string]*longho
|
|||
for _, ei := range engineImages {
|
||||
log := getLoggerForEngineImage(c.logger, ei)
|
||||
|
||||
if ei.Annotations == nil {
|
||||
ei.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
timeout := metav1.NewTime(time.Now().Add(-gracePeriod))
|
||||
if ei.DeletionTimestamp == nil {
|
||||
if defaultImage, errGetSetting := c.ds.GetSettingValueExisted(types.SettingNameDefaultEngineImage); errGetSetting != nil {
|
||||
return errors.Wrap(errGetSetting, "failed to get default engine image setting")
|
||||
} else if ei.Spec.Image == defaultImage {
|
||||
log.Infof("Adding annotation %v to engine image %s to mark for deletion", types.GetLonghornLabelKey(types.DeleteEngineImageFromLonghorn), ei.Name)
|
||||
ei.Annotations[types.GetLonghornLabelKey(types.DeleteEngineImageFromLonghorn)] = ""
|
||||
if _, err := c.ds.UpdateEngineImage(ei); err != nil {
|
||||
return errors.Wrap(err, "failed to update engine image annotations to mark for deletion")
|
||||
}
|
||||
}
|
||||
if errDelete := c.ds.DeleteEngineImage(ei.Name); errDelete != nil {
|
||||
if datastore.ErrorIsNotFound(errDelete) {
|
||||
log.Info("EngineImage is not found")
|
||||
|
@ -891,7 +904,16 @@ func (c *UninstallController) deleteNodes(nodes map[string]*longhorn.Node) (err
|
|||
for _, node := range nodes {
|
||||
log := getLoggerForNode(c.logger, node)
|
||||
|
||||
if node.Annotations == nil {
|
||||
node.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
if node.DeletionTimestamp == nil {
|
||||
log.Infof("Adding annotation %v to node %s to mark for deletion", types.GetLonghornLabelKey(types.DeleteNodeFromLonghorn), node.Name)
|
||||
node.Annotations[types.GetLonghornLabelKey(types.DeleteNodeFromLonghorn)] = ""
|
||||
if _, err := c.ds.UpdateNode(node); err != nil {
|
||||
return errors.Wrap(err, "failed to update node annotations to mark for deletion")
|
||||
}
|
||||
if errDelete := c.ds.DeleteNode(node.Name); errDelete != nil {
|
||||
if datastore.ErrorIsNotFound(errDelete) {
|
||||
log.Info("Node is not found")
|
||||
|
|
|
@ -1,9 +1,13 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
||||
|
@ -13,6 +17,32 @@ import (
|
|||
longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"
|
||||
)
|
||||
|
||||
const (
	podRecreateInitBackoff = 1 * time.Second
	podRecreateMaxBackoff  = 120 * time.Second
	backoffGCPeriod        = 12 * time.Hour
)

// newBackoff returns a flowcontrol.Backoff and starts a background GC loop.
func newBackoff(ctx context.Context) *flowcontrol.Backoff {
	backoff := flowcontrol.NewBackOff(podRecreateInitBackoff, podRecreateMaxBackoff)

	go func() {
		ticker := time.NewTicker(backoffGCPeriod)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				backoff.GC()
			}
		}
	}()

	return backoff
}
|
||||
|
||||
func hasReplicaEvictionRequested(rs map[string]*longhorn.Replica) bool {
|
||||
for _, r := range rs {
|
||||
if r.Spec.EvictionRequested {
|
||||
|
@ -115,3 +145,22 @@ func isBackupTargetAvailable(backupTarget *longhorn.BackupTarget) bool {
|
|||
backupTarget.Spec.BackupTargetURL != "" &&
|
||||
backupTarget.Status.Available
|
||||
}
|
||||
|
||||
// isSnapshotExistInEngine checks if a snapshot with the given name exists in the specified engine.
|
||||
// It returns true if the snapshot is found, otherwise false.
|
||||
func isSnapshotExistInEngine(snapshotName string, engine *longhorn.Engine) bool {
|
||||
if engine == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if engine.Status.Snapshots == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for name := range engine.Status.Snapshots {
|
||||
if name == snapshotName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
|
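For context, the following is a minimal, self-contained sketch (not part of this diff) of how a caller might use the backoff returned by newBackoff above to rate-limit retries; the entry key and the recreatePod helper are placeholders, not longhorn-manager code.

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

// recreatePod stands in for whatever action the controller retries; it is a placeholder.
func recreatePod(name string) error { return errors.New("simulated failure") }

func main() {
	// Same shape as newBackoff above, without the background GC goroutine.
	backoff := flowcontrol.NewBackOff(1*time.Second, 120*time.Second)
	key := "pod/demo" // one backoff entry per retried object

	for i := 0; i < 3; i++ {
		if backoff.IsInBackOffSinceUpdate(key, time.Now()) {
			fmt.Println("still in backoff; a real controller would requeue instead of sleeping")
			time.Sleep(backoff.Get(key))
		}
		if err := recreatePod("demo"); err != nil {
			backoff.Next(key, time.Now()) // record the failure; the next delay grows toward the max
			continue
		}
		backoff.DeleteEntry(key) // success: future failures start again from the initial delay
		break
	}
}
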
@ -345,7 +345,7 @@ func (vac *VolumeAttachmentController) handleNodeCordoned(va *longhorn.VolumeAtt

	detachManuallyAttachedVolumesWhenCordoned, err := vac.ds.GetSettingAsBool(types.SettingNameDetachManuallyAttachedVolumesWhenCordoned)
	if err != nil {
		log.WithError(err).Warnf("Failed to get setting %v", types.SettingNameDetachManuallyAttachedVolumesWhenCordoned)
		log.WithError(err).Warnf("Failed to get %v setting", types.SettingNameDetachManuallyAttachedVolumesWhenCordoned)
		return
	}

@ -528,7 +528,7 @@ func (c *VolumeController) syncVolume(key string) (err error) {
	}

	if err := c.ReconcileBackupVolumeState(volume); err != nil {
		return nil
		return err
	}

	if err := c.ReconcileVolumeState(volume, engines, replicas); err != nil {
@ -890,7 +890,8 @@ func isAutoSalvageNeeded(rs map[string]*longhorn.Replica) bool {
	if isFirstAttachment(rs) {
		return areAllReplicasFailed(rs)
	}
	return getHealthyAndActiveReplicaCount(rs) == 0 && getFailedReplicaCount(rs) > 0
	// We need to auto-salvage if there are no healthy and active replicas including those marked for deletion,
	return getHealthyAndActiveReplicaCount(rs, true) == 0 && getFailedReplicaCount(rs) > 0
}

func areAllReplicasFailed(rs map[string]*longhorn.Replica) bool {
@ -913,7 +914,7 @@ func isFirstAttachment(rs map[string]*longhorn.Replica) bool {
	return true
}

func isHealthyAndActiveReplica(r *longhorn.Replica) bool {
func isHealthyAndActiveReplica(r *longhorn.Replica, includeMarkedForDeletion bool) bool {
	if r.Spec.FailedAt != "" {
		return false
	}
@ -923,6 +924,11 @@ func isHealthyAndActiveReplica(r *longhorn.Replica) bool {
	if !r.Spec.Active {
		return false
	}
	if !includeMarkedForDeletion {
		if !r.DeletionTimestamp.IsZero() {
			return false
		}
	}
	return true
}

@ -937,7 +943,7 @@ func isHealthyAndActiveReplica(r *longhorn.Replica) bool {
// it successfully became read/write in an engine) after spec.LastFailedAt. If the replica does not meet this condition,
// it is not "safe as last replica", and we should not clean up the other replicas for its volume.
func isSafeAsLastReplica(r *longhorn.Replica) bool {
	if !isHealthyAndActiveReplica(r) {
	if !isHealthyAndActiveReplica(r, false) {
		return false
	}
	// We know r.Spec.LastHealthyAt != "" because r.Spec.HealthyAt != "" from isHealthyAndActiveReplica.
@ -953,10 +959,10 @@ func isSafeAsLastReplica(r *longhorn.Replica) bool {
	return true
}

func getHealthyAndActiveReplicaCount(rs map[string]*longhorn.Replica) int {
func getHealthyAndActiveReplicaCount(rs map[string]*longhorn.Replica, includeMarkedForDeletion bool) int {
	count := 0
	for _, r := range rs {
		if isHealthyAndActiveReplica(r) {
		if isHealthyAndActiveReplica(r, includeMarkedForDeletion) {
			count++
		}
	}
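To illustrate the new includeMarkedForDeletion parameter, here is a simplified stand-alone sketch (not the controller's actual types or helpers): a replica that is healthy and active but already carries a deletion timestamp is only counted when the flag is true, which is how isAutoSalvageNeeded now calls it, while the cleanup paths keep passing false.

package main

import (
	"fmt"
	"time"
)

// replica is a simplified stand-in for longhorn.Replica with only the fields the rule above looks at.
type replica struct {
	failedAt          string
	active            bool
	deletionTimestamp *time.Time
}

// healthyAndActive restates the decision from the hunk above in simplified form:
// a replica that is being deleted only counts when includeMarkedForDeletion is true.
func healthyAndActive(r replica, includeMarkedForDeletion bool) bool {
	if r.failedAt != "" || !r.active {
		return false
	}
	if !includeMarkedForDeletion && r.deletionTimestamp != nil {
		return false
	}
	return true
}

func main() {
	now := time.Now()
	deleting := replica{active: true, deletionTimestamp: &now}
	fmt.Println(healthyAndActive(deleting, true))  // true: counted when deciding whether auto-salvage is needed
	fmt.Println(healthyAndActive(deleting, false)) // false: ignored by the replica cleanup paths
}
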
@ -1062,7 +1068,7 @@ func (c *VolumeController) cleanupCorruptedOrStaleReplicas(v *longhorn.Volume, r
}

func (c *VolumeController) cleanupFailedToScheduleReplicas(v *longhorn.Volume, rs map[string]*longhorn.Replica) (err error) {
	healthyCount := getHealthyAndActiveReplicaCount(rs)
	healthyCount := getHealthyAndActiveReplicaCount(rs, false)
	var replicasToCleanUp []*longhorn.Replica

	if hasReplicaEvictionRequested(rs) {
@ -1095,7 +1101,7 @@ func (c *VolumeController) cleanupFailedToScheduleReplicas(v *longhorn.Volume, r
}

func (c *VolumeController) cleanupExtraHealthyReplicas(v *longhorn.Volume, e *longhorn.Engine, rs map[string]*longhorn.Replica) (err error) {
	healthyCount := getHealthyAndActiveReplicaCount(rs)
	healthyCount := getHealthyAndActiveReplicaCount(rs, false)
	if healthyCount <= v.Spec.NumberOfReplicas {
		return nil
	}
@ -1455,13 +1461,10 @@ func (c *VolumeController) ReconcileVolumeState(v *longhorn.Volume, es map[strin
		return nil
	}

	// Reattach volume if
	// - volume is detached unexpectedly and there are still healthy replicas
	// - engine dead unexpectedly and there are still healthy replicas when the volume is not attached
	if e.Status.CurrentState == longhorn.InstanceStateError {
		if v.Status.CurrentNodeID != "" || (v.Spec.NodeID != "" && v.Status.CurrentNodeID == "" && v.Status.State != longhorn.VolumeStateAttached) {
			log.Warn("Reattaching the volume since engine of volume dead unexpectedly")
			msg := fmt.Sprintf("Engine of volume %v dead unexpectedly, reattach the volume", v.Name)
			log.Warn("Engine of volume dead unexpectedly, setting v.Status.Robustness to faulted")
			msg := fmt.Sprintf("Engine of volume %v dead unexpectedly, setting v.Status.Robustness to faulted", v.Name)
			c.eventRecorder.Event(v, corev1.EventTypeWarning, constant.EventReasonDetachedUnexpectedly, msg)
			e.Spec.LogRequested = true
			for _, r := range rs {
@ -1745,12 +1748,17 @@ func (c *VolumeController) reconcileVolumeCondition(v *longhorn.Volume, e *longh
		if r.Spec.NodeID != "" {
			continue
		}
		if v.Spec.DataLocality == longhorn.DataLocalityStrictLocal {
		switch v.Spec.DataLocality {
		case longhorn.DataLocalityStrictLocal:
			if v.Spec.NodeID == "" {
				continue
			}

			r.Spec.HardNodeAffinity = v.Spec.NodeID
		case longhorn.DataLocalityBestEffort:
			// For best-effort locality, wait until the volume is attached to a node before scheduling the replica.
			if v.Spec.NodeID == "" {
				continue
			}
		}
		scheduledReplica, multiError, err := c.scheduler.ScheduleReplica(r, rs, v)
		if err != nil {
@ -1760,12 +1768,12 @@ func (c *VolumeController) reconcileVolumeCondition(v *longhorn.Volume, e *longh

		if scheduledReplica == nil {
			if r.Spec.HardNodeAffinity == "" {
				log.WithField("replica", r.Name).Warn("Failed to schedule replica")
				log.WithField("replica", r.Name).Debug("Failed to schedule replica")
				v.Status.Conditions = types.SetCondition(v.Status.Conditions,
					longhorn.VolumeConditionTypeScheduled, longhorn.ConditionStatusFalse,
					longhorn.VolumeConditionReasonReplicaSchedulingFailure, "")
			} else {
				log.WithField("replica", r.Name).Warnf("Failed to schedule replica of volume with HardNodeAffinity = %v", r.Spec.HardNodeAffinity)
				log.WithField("replica", r.Name).Debugf("Failed to schedule replica of volume with HardNodeAffinity = %v", r.Spec.HardNodeAffinity)
				v.Status.Conditions = types.SetCondition(v.Status.Conditions,
					longhorn.VolumeConditionTypeScheduled, longhorn.ConditionStatusFalse,
					longhorn.VolumeConditionReasonLocalReplicaSchedulingFailure, "")
@ -3431,7 +3439,7 @@ func (c *VolumeController) checkAndFinishVolumeRestore(v *longhorn.Volume, e *lo
	if err != nil {
		return errors.Wrapf(err, "failed to get backup name from volume %s backup URL %v", v.Name, v.Spec.FromBackup)
	}
	bv, err := c.ds.GetBackupVolumeRO(bvName)
	bv, err := c.ds.GetBackupVolumeByBackupTargetAndVolumeRO(v.Spec.BackupTargetName, bvName)
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}
@ -4998,17 +5006,6 @@ func (c *VolumeController) shouldCleanUpFailedReplica(v *longhorn.Volume, r *lon
		return true
	}

	if types.IsDataEngineV2(v.Spec.DataEngine) {
		V2DataEngineFastReplicaRebuilding, err := c.ds.GetSettingAsBool(types.SettingNameV2DataEngineFastReplicaRebuilding)
		if err != nil {
			log.WithError(err).Warnf("Failed to get the setting %v, will consider it as false", types.SettingDefinitionV2DataEngineFastReplicaRebuilding)
			V2DataEngineFastReplicaRebuilding = false
		}
		if !V2DataEngineFastReplicaRebuilding {
			log.Infof("Failed replica %v should be cleaned up blindly since setting %v is not enabled", r.Name, types.SettingNameV2DataEngineFastReplicaRebuilding)
			return true
		}
	}
	// Failed too long ago to be useful during a rebuild.
	if v.Spec.StaleReplicaTimeout > 0 &&
		util.TimestampAfterTimeout(r.Spec.FailedAt, time.Duration(v.Spec.StaleReplicaTimeout)*time.Minute) {

@ -286,66 +286,73 @@ func (vbc *VolumeRebuildingController) reconcile(volName string) (err error) {
|
|||
}
|
||||
}()
|
||||
|
||||
isOfflineRebuildEnabled, err := vbc.isVolumeOfflineRebuildEnabled(vol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isOfflineRebuildEnabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !vol.DeletionTimestamp.IsZero() {
|
||||
vbc.logger.Infof("Volume %v is deleting, skip offline rebuilding", volName)
|
||||
return nil
|
||||
}
|
||||
|
||||
if vol.Status.Robustness == longhorn.VolumeRobustnessFaulted {
|
||||
vbc.logger.Warnf("Volume %v is faulted, skip offline rebuilding", volName)
|
||||
return nil
|
||||
}
|
||||
|
||||
isOfflineRebuildEnabled, err := vbc.isVolumeOfflineRebuildEnabled(vol.Spec.OfflineRebuilding)
|
||||
if err != nil {
|
||||
return err
|
||||
if util.IsHigherPriorityVATicketExisting(va, longhorn.AttacherTypeVolumeRebuildingController) {
|
||||
return nil
|
||||
}
|
||||
if isOfflineRebuildEnabled && !util.IsHigherPriorityVATicketExisting(va, longhorn.AttacherTypeVolumeRebuildingController) {
|
||||
if vol.Status.State == longhorn.VolumeStateDetached {
|
||||
_, err = vbc.syncLHVolumeAttachmentForOfflineRebuild(vol, va, rebuildingAttachmentTicketID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
deleteVATicketRequired = false
|
||||
return nil
|
||||
}
|
||||
if vol.Status.State == longhorn.VolumeStateAttaching {
|
||||
deleteVATicketRequired = false
|
||||
return nil
|
||||
}
|
||||
|
||||
engine, err := vbc.getVolumeEngine(vol)
|
||||
if vol.Status.State == longhorn.VolumeStateDetached {
|
||||
_, err = vbc.syncLHVolumeAttachmentForOfflineRebuild(vol, va, rebuildingAttachmentTicketID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if engine == nil {
|
||||
vbc.logger.Warnf("Volume %v engine not found, skip offline rebuilding", volName)
|
||||
return nil
|
||||
}
|
||||
deleteVATicketRequired = false
|
||||
return nil
|
||||
}
|
||||
if vol.Status.State == longhorn.VolumeStateAttaching {
|
||||
deleteVATicketRequired = false
|
||||
return nil
|
||||
}
|
||||
|
||||
if engine.Status.ReplicaModeMap == nil {
|
||||
// wait for engine status synced
|
||||
deleteVATicketRequired = false
|
||||
return nil
|
||||
}
|
||||
if vbc.isVolumeReplicasRebuilding(vol, engine) {
|
||||
deleteVATicketRequired = false
|
||||
return nil
|
||||
}
|
||||
engine, err := vbc.getVolumeEngine(vol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if engine == nil {
|
||||
vbc.logger.Warnf("Volume %v engine not found, skip offline rebuilding", volName)
|
||||
return nil
|
||||
}
|
||||
|
||||
if engine.Status.ReplicaModeMap == nil {
|
||||
// wait for engine status synced
|
||||
deleteVATicketRequired = false
|
||||
return nil
|
||||
}
|
||||
if vbc.isVolumeReplicasRebuilding(vol, engine) {
|
||||
deleteVATicketRequired = false
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vbc *VolumeRebuildingController) isVolumeOfflineRebuildEnabled(offlineRebuilding longhorn.VolumeOfflineRebuilding) (bool, error) {
|
||||
if offlineRebuilding == longhorn.VolumeOfflineRebuildingEnabled {
|
||||
func (vbc *VolumeRebuildingController) isVolumeOfflineRebuildEnabled(vol *longhorn.Volume) (bool, error) {
|
||||
if vol.Spec.OfflineRebuilding == longhorn.VolumeOfflineRebuildingEnabled {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
globalOfflineRebuildingEnabled, err := vbc.ds.GetSettingAsBool(types.SettingNameOfflineReplicaRebuilding)
|
||||
globalOfflineRebuildingEnabled, err := vbc.ds.GetSettingAsBoolByDataEngine(types.SettingNameOfflineReplicaRebuilding, vol.Spec.DataEngine)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return globalOfflineRebuildingEnabled && offlineRebuilding != longhorn.VolumeOfflineRebuildingDisabled, nil
|
||||
return globalOfflineRebuildingEnabled && vol.Spec.OfflineRebuilding != longhorn.VolumeOfflineRebuildingDisabled, nil
|
||||
}
|
||||
|
||||
func (vbc *VolumeRebuildingController) syncLHVolumeAttachmentForOfflineRebuild(vol *longhorn.Volume, va *longhorn.VolumeAttachment, attachmentID string) (*longhorn.VolumeAttachment, error) {
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
	"encoding/json"
	"fmt"
	"net/url"
	"os"
	"reflect"
	"regexp"
	"strconv"

@ -19,12 +20,18 @@ import (
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/timestamppb"

	"k8s.io/client-go/rest"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/longhorn/longhorn-manager/datastore"
	"github.com/longhorn/longhorn-manager/types"
	"github.com/longhorn/longhorn-manager/util"

	longhornclient "github.com/longhorn/longhorn-manager/client"
	longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"
	lhclientset "github.com/longhorn/longhorn-manager/k8s/pkg/client/clientset/versioned"
)

const (
@ -52,9 +59,26 @@ type ControllerServer struct {
	caps        []*csi.ControllerServiceCapability
	accessModes []*csi.VolumeCapability_AccessMode
	log         *logrus.Entry
	lhClient    lhclientset.Interface
	lhNamespace string
}

func NewControllerServer(apiClient *longhornclient.RancherClient, nodeID string) *ControllerServer {
func NewControllerServer(apiClient *longhornclient.RancherClient, nodeID string) (*ControllerServer, error) {
	lhNamespace := os.Getenv(types.EnvPodNamespace)
	if lhNamespace == "" {
		return nil, fmt.Errorf("failed to detect pod namespace, environment variable %v is missing", types.EnvPodNamespace)
	}

	config, err := rest.InClusterConfig()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get client config")
	}

	lhClient, err := lhclientset.NewForConfig(config)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get longhorn clientset")
	}

	return &ControllerServer{
		apiClient: apiClient,
		nodeID:    nodeID,

@ -65,14 +89,17 @@ func NewControllerServer(apiClient *longhornclient.RancherClient, nodeID string)
			csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
			csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
			csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
			csi.ControllerServiceCapability_RPC_GET_CAPACITY,
		}),
		accessModes: getVolumeCapabilityAccessModes(
			[]csi.VolumeCapability_AccessMode_Mode{
				csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
				csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
			}),
		log: logrus.StandardLogger().WithField("component", "csi-controller-server"),
	}
		log:         logrus.StandardLogger().WithField("component", "csi-controller-server"),
		lhClient:    lhClient,
		lhNamespace: lhNamespace,
	}, nil
}

func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
@ -642,8 +669,118 @@ func (cs *ControllerServer) ListVolumes(context.Context, *csi.ListVolumesRequest
	return nil, status.Error(codes.Unimplemented, "")
}

func (cs *ControllerServer) GetCapacity(context.Context, *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
func (cs *ControllerServer) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
	log := cs.log.WithFields(logrus.Fields{"function": "GetCapacity"})

	log.Infof("GetCapacity is called with req %+v", req)

	var err error
	defer func() {
		if err != nil {
			log.WithError(err).Errorf("Failed to get capacity")
		}
	}()

	scParameters := req.GetParameters()
	if scParameters == nil {
		scParameters = map[string]string{}
	}

	nodeID, err := parseNodeID(req.GetAccessibleTopology())
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "failed to parse node id: %v", err)
	}
	node, err := cs.lhClient.LonghornV1beta2().Nodes(cs.lhNamespace).Get(ctx, nodeID, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil, status.Errorf(codes.NotFound, "node %s not found", nodeID)
		}
		return nil, status.Errorf(codes.Internal, "unexpected error: %v", err)
	}
	if types.GetCondition(node.Status.Conditions, longhorn.NodeConditionTypeReady).Status != longhorn.ConditionStatusTrue {
		return &csi.GetCapacityResponse{}, nil
	}
	if types.GetCondition(node.Status.Conditions, longhorn.NodeConditionTypeSchedulable).Status != longhorn.ConditionStatusTrue {
		return &csi.GetCapacityResponse{}, nil
	}
	if !node.Spec.AllowScheduling || node.Spec.EvictionRequested {
		return &csi.GetCapacityResponse{}, nil
	}

	allowEmptyNodeSelectorVolume, err := cs.getSettingAsBoolean(types.SettingNameAllowEmptyNodeSelectorVolume)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to get setting %v: %v", types.SettingNameAllowEmptyNodeSelectorVolume, err)
	}
	var nodeSelector []string
	if nodeSelectorRaw, ok := scParameters["nodeSelector"]; ok && len(nodeSelectorRaw) > 0 {
		nodeSelector = strings.Split(nodeSelectorRaw, ",")
	}
	if !types.IsSelectorsInTags(node.Spec.Tags, nodeSelector, allowEmptyNodeSelectorVolume) {
		return &csi.GetCapacityResponse{}, nil
	}

	var diskSelector []string
	if diskSelectorRaw, ok := scParameters["diskSelector"]; ok && len(diskSelectorRaw) > 0 {
		diskSelector = strings.Split(diskSelectorRaw, ",")
	}
	allowEmptyDiskSelectorVolume, err := cs.getSettingAsBoolean(types.SettingNameAllowEmptyDiskSelectorVolume)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to get setting %v: %v", types.SettingNameAllowEmptyDiskSelectorVolume, err)
	}

	var v1AvailableCapacity int64 = 0
	var v2AvailableCapacity int64 = 0
	for diskName, diskStatus := range node.Status.DiskStatus {
		diskSpec, exists := node.Spec.Disks[diskName]
		if !exists {
			continue
		}
		if !diskSpec.AllowScheduling || diskSpec.EvictionRequested {
			continue
		}
		if types.GetCondition(diskStatus.Conditions, longhorn.DiskConditionTypeSchedulable).Status != longhorn.ConditionStatusTrue {
			continue
		}
		if !types.IsSelectorsInTags(diskSpec.Tags, diskSelector, allowEmptyDiskSelectorVolume) {
			continue
		}
		storageSchedulable := diskStatus.StorageAvailable - diskSpec.StorageReserved
		if diskStatus.Type == longhorn.DiskTypeFilesystem {
			v1AvailableCapacity = max(v1AvailableCapacity, storageSchedulable)
		}
		if diskStatus.Type == longhorn.DiskTypeBlock {
			v2AvailableCapacity = max(v2AvailableCapacity, storageSchedulable)
		}
	}

	dataEngine, ok := scParameters["dataEngine"]
	if !ok {
		return nil, status.Error(codes.InvalidArgument, "storage class parameters missing 'dataEngine' key")
	}
	rsp := &csi.GetCapacityResponse{}
	switch longhorn.DataEngineType(dataEngine) {
	case longhorn.DataEngineTypeV1:
		rsp.AvailableCapacity = v1AvailableCapacity
	case longhorn.DataEngineTypeV2:
		rsp.AvailableCapacity = v2AvailableCapacity
	default:
		return nil, status.Errorf(codes.InvalidArgument, "unknown data engine type %v", dataEngine)
	}

	log.Infof("Node: %s, DataEngine: %s, v1AvailableCapacity: %d, v2AvailableCapacity: %d", nodeID, dataEngine, v1AvailableCapacity, v2AvailableCapacity)
	return rsp, nil
}

func (cs *ControllerServer) getSettingAsBoolean(name types.SettingName) (bool, error) {
	obj, err := cs.lhClient.LonghornV1beta2().Settings(cs.lhNamespace).Get(context.TODO(), string(name), metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	value, err := strconv.ParseBool(obj.Value)
	if err != nil {
		return false, err
	}
	return value, nil
}

func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {

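The aggregation above reports, per node and per data engine, the largest single schedulable disk (StorageAvailable minus StorageReserved) rather than a sum across disks. Below is a small stand-alone sketch of that rule (not part of the diff), using the same illustrative numbers as the test cases that follow.

package main

import "fmt"

// disk is a simplified stand-in for a Longhorn disk; block disks back the v2 data
// engine, filesystem disks back v1.
type disk struct {
	storageAvailable int64
	storageReserved  int64
	block            bool
}

func main() {
	disks := []disk{
		{storageAvailable: 1450, storageReserved: 300, block: false},
		{storageAvailable: 1000, storageReserved: 500, block: false},
	}
	var v1Capacity, v2Capacity int64
	for _, d := range disks {
		schedulable := d.storageAvailable - d.storageReserved
		if d.block {
			v2Capacity = max(v2Capacity, schedulable)
		} else {
			v1Capacity = max(v1Capacity, schedulable)
		}
	}
	// Prints "1150 0": the largest schedulable disk per engine type, not the sum.
	fmt.Println(v1Capacity, v2Capacity)
}
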
@ -0,0 +1,341 @@
|
|||
package csi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/longhorn/longhorn-manager/types"
|
||||
|
||||
longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"
|
||||
lhfake "github.com/longhorn/longhorn-manager/k8s/pkg/client/clientset/versioned/fake"
|
||||
)
|
||||
|
||||
type disk struct {
|
||||
spec longhorn.DiskSpec
|
||||
status longhorn.DiskStatus
|
||||
}
|
||||
|
||||
func TestGetCapacity(t *testing.T) {
|
||||
cs := &ControllerServer{
|
||||
lhNamespace: "longhorn-system-test",
|
||||
log: logrus.StandardLogger().WithField("component", "test-get-capacity"),
|
||||
}
|
||||
for _, test := range []struct {
|
||||
testName string
|
||||
node *longhorn.Node
|
||||
skipNodeCreation bool
|
||||
skipNodeSettingCreation bool
|
||||
skipDiskSettingCreation bool
|
||||
dataEngine string
|
||||
diskSelector string
|
||||
nodeSelector string
|
||||
availableCapacity int64
|
||||
disks []*disk
|
||||
err error
|
||||
}{
|
||||
{
|
||||
testName: "Node not found",
|
||||
skipNodeCreation: true,
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
err: status.Errorf(codes.NotFound, "node node-0 not found"),
|
||||
},
|
||||
{
|
||||
testName: "Node setting not found",
|
||||
skipNodeSettingCreation: true,
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
err: status.Errorf(codes.Internal, "failed to get setting allow-empty-node-selector-volume: settings.longhorn.io \"allow-empty-node-selector-volume\" not found"),
|
||||
},
|
||||
{
|
||||
testName: "Disk setting not found",
|
||||
skipDiskSettingCreation: true,
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
err: status.Errorf(codes.Internal, "failed to get setting allow-empty-disk-selector-volume: settings.longhorn.io \"allow-empty-disk-selector-volume\" not found"),
|
||||
},
|
||||
{
|
||||
testName: "Missing data engine type",
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
err: status.Errorf(codes.InvalidArgument, "storage class parameters missing 'dataEngine' key"),
|
||||
},
|
||||
{
|
||||
testName: "Unknown data engine type",
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
dataEngine: "v5",
|
||||
err: status.Errorf(codes.InvalidArgument, "unknown data engine type v5"),
|
||||
},
|
||||
{
|
||||
testName: "v1 engine with no disks",
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
dataEngine: "v1",
|
||||
availableCapacity: 0,
|
||||
},
|
||||
{
|
||||
testName: "v2 engine with no disks",
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
dataEngine: "v2",
|
||||
availableCapacity: 0,
|
||||
},
|
||||
{
|
||||
testName: "Node condition is not ready",
|
||||
node: newNode("node-0", "storage", false, true, true, false),
|
||||
dataEngine: "v1",
|
||||
disks: []*disk{newDisk(1450, 300, "ssd", false, true, true, false), newDisk(1000, 500, "", false, true, true, false)},
|
||||
availableCapacity: 0,
|
||||
},
|
||||
{
|
||||
testName: "Node condition is not schedulable",
|
||||
node: newNode("node-0", "storage", true, false, true, false),
|
||||
dataEngine: "v1",
|
||||
disks: []*disk{newDisk(1450, 300, "ssd", false, true, true, false), newDisk(1000, 500, "", false, true, true, false)},
|
||||
availableCapacity: 0,
|
||||
},
|
||||
{
|
||||
testName: "Scheduling not allowed on a node",
|
||||
node: newNode("node-0", "storage", true, true, false, false),
|
||||
dataEngine: "v1",
|
||||
disks: []*disk{newDisk(1450, 300, "ssd", false, true, true, false), newDisk(1000, 500, "", false, true, true, false)},
|
||||
availableCapacity: 0,
|
||||
},
|
||||
{
|
||||
testName: "Node eviction is requested",
|
||||
node: newNode("node-0", "storage", true, true, true, true),
|
||||
dataEngine: "v1",
|
||||
disks: []*disk{newDisk(1450, 300, "ssd", false, true, true, false), newDisk(1000, 500, "", false, true, true, false)},
|
||||
availableCapacity: 0,
|
||||
},
|
||||
{
|
||||
testName: "Node tags don't match node selector",
|
||||
node: newNode("node-0", "large,fast,linux", true, true, true, false),
|
||||
nodeSelector: "fast,storage",
|
||||
dataEngine: "v1",
|
||||
disks: []*disk{newDisk(1450, 300, "ssd", false, true, true, false), newDisk(1000, 500, "", false, true, true, false)},
|
||||
availableCapacity: 0,
|
||||
},
|
||||
{
|
||||
testName: "v1 engine with two valid disks",
|
||||
node: newNode("node-0", "storage,large,fast,linux", true, true, true, false),
|
||||
nodeSelector: "fast,storage",
|
||||
dataEngine: "v1",
|
||||
disks: []*disk{newDisk(1450, 300, "ssd", false, true, true, false), newDisk(1000, 500, "", false, true, true, false)},
|
||||
availableCapacity: 1150,
|
||||
},
|
||||
{
|
||||
testName: "v1 engine with two valid disks and one with mismatched engine type",
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
dataEngine: "v1",
|
||||
availableCapacity: 1150,
|
||||
disks: []*disk{newDisk(1450, 300, "ssd", false, true, true, false), newDisk(1000, 500, "", false, true, true, false), newDisk(2000, 100, "", true, true, true, false)},
|
||||
},
|
||||
{
|
||||
testName: "v2 engine with two valid disks and one with mismatched engine type",
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
dataEngine: "v2",
|
||||
availableCapacity: 1650,
|
||||
disks: []*disk{newDisk(1950, 300, "", true, true, true, false), newDisk(1500, 500, "", true, true, true, false), newDisk(2000, 100, "", false, true, true, false)},
|
||||
},
|
||||
{
|
||||
testName: "v2 engine with one valid disk and two with unmatched tags",
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
dataEngine: "v2",
|
||||
diskSelector: "ssd,fast",
|
||||
availableCapacity: 1000,
|
||||
disks: []*disk{newDisk(1100, 100, "fast,nvmf,ssd,hot", true, true, true, false), newDisk(2500, 500, "ssd,slow,green", true, true, true, false), newDisk(2000, 100, "hdd,fast", true, true, true, false)},
|
||||
},
|
||||
{
|
||||
testName: "v2 engine with one valid disk and one with unhealthy condition",
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
dataEngine: "v2",
|
||||
availableCapacity: 400,
|
||||
disks: []*disk{newDisk(1100, 100, "ssd", true, false, true, false), newDisk(500, 100, "hdd", true, true, true, false)},
|
||||
},
|
||||
{
|
||||
testName: "v2 engine with one valid disk and one with scheduling disabled",
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
dataEngine: "v2",
|
||||
availableCapacity: 400,
|
||||
disks: []*disk{newDisk(1100, 100, "ssd", true, true, false, false), newDisk(500, 100, "hdd", true, true, true, false)},
|
||||
},
|
||||
{
|
||||
testName: "v2 engine with one valid disk and one marked for eviction",
|
||||
node: newNode("node-0", "storage", true, true, true, false),
|
||||
dataEngine: "v2",
|
||||
availableCapacity: 400,
|
||||
disks: []*disk{newDisk(1100, 100, "ssd", true, true, true, true), newDisk(500, 100, "hdd", true, true, true, false)},
|
||||
},
|
||||
} {
|
||||
t.Run(test.testName, func(t *testing.T) {
|
||||
cs.lhClient = lhfake.NewSimpleClientset()
|
||||
if !test.skipNodeCreation {
|
||||
addDisksToNode(test.node, test.disks)
|
||||
_, err := cs.lhClient.LonghornV1beta2().Nodes(cs.lhNamespace).Create(context.TODO(), test.node, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Error("failed to create node")
|
||||
}
|
||||
}
|
||||
if !test.skipNodeSettingCreation {
|
||||
_, err := cs.lhClient.LonghornV1beta2().Settings(cs.lhNamespace).Create(context.TODO(), newSetting(string(types.SettingNameAllowEmptyNodeSelectorVolume), "true"), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("failed to create setting %v", types.SettingNameAllowEmptyNodeSelectorVolume)
|
||||
}
|
||||
}
|
||||
if !test.skipDiskSettingCreation {
|
||||
_, err := cs.lhClient.LonghornV1beta2().Settings(cs.lhNamespace).Create(context.TODO(), newSetting(string(types.SettingNameAllowEmptyDiskSelectorVolume), "true"), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("failed to create setting %v", types.SettingNameAllowEmptyDiskSelectorVolume)
|
||||
}
|
||||
}
|
||||
|
||||
req := &csi.GetCapacityRequest{
|
||||
AccessibleTopology: &csi.Topology{
|
||||
Segments: map[string]string{
|
||||
nodeTopologyKey: test.node.Name,
|
||||
},
|
||||
},
|
||||
Parameters: map[string]string{},
|
||||
}
|
||||
if test.dataEngine != "" {
|
||||
req.Parameters["dataEngine"] = test.dataEngine
|
||||
}
|
||||
req.Parameters["diskSelector"] = test.diskSelector
|
||||
req.Parameters["nodeSelector"] = test.nodeSelector
|
||||
res, err := cs.GetCapacity(context.TODO(), req)
|
||||
|
||||
expectedStatus := status.Convert(test.err)
|
||||
actualStatus := status.Convert(err)
|
||||
if expectedStatus.Code() != actualStatus.Code() {
|
||||
t.Errorf("expected error code: %v, but got: %v", expectedStatus.Code(), actualStatus.Code())
|
||||
} else if expectedStatus.Message() != actualStatus.Message() {
|
||||
t.Errorf("expected error message: '%s', but got: '%s'", expectedStatus.Message(), actualStatus.Message())
|
||||
}
|
||||
if res != nil && res.AvailableCapacity != test.availableCapacity {
|
||||
t.Errorf("expected available capacity: %d, but got: %d", test.availableCapacity, res.AvailableCapacity)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseNodeID(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
topology *csi.Topology
|
||||
err error
|
||||
nodeID string
|
||||
}{
|
||||
{
|
||||
err: fmt.Errorf("missing accessible topology request parameter"),
|
||||
},
|
||||
{
|
||||
topology: &csi.Topology{
|
||||
Segments: nil,
|
||||
},
|
||||
err: fmt.Errorf("missing accessible topology request parameter"),
|
||||
},
|
||||
{
|
||||
topology: &csi.Topology{
|
||||
Segments: map[string]string{
|
||||
"some-key": "some-value",
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("accessible topology request parameter is missing kubernetes.io/hostname key"),
|
||||
},
|
||||
{
|
||||
topology: &csi.Topology{
|
||||
Segments: map[string]string{
|
||||
nodeTopologyKey: "node-0",
|
||||
},
|
||||
},
|
||||
nodeID: "node-0",
|
||||
},
|
||||
} {
|
||||
nodeID, err := parseNodeID(test.topology)
|
||||
checkError(t, test.err, err)
|
||||
if test.nodeID != nodeID {
|
||||
t.Errorf("expected nodeID: %s, but got: %s", test.nodeID, nodeID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkError(t *testing.T, expected, actual error) {
|
||||
if expected == nil {
|
||||
if actual != nil {
|
||||
t.Errorf("expected no error but got: %v", actual)
|
||||
}
|
||||
} else {
|
||||
if actual == nil {
|
||||
t.Errorf("expected error: %v, but got no error", expected)
|
||||
}
|
||||
if expected.Error() != actual.Error() {
|
||||
t.Errorf("expected error: %v, but got: %v", expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newDisk(storageAvailable, storageReserved int64, tags string, isBlockType, isCondOk, allowScheduling, evictionRequested bool) *disk {
|
||||
disk := &disk{
|
||||
spec: longhorn.DiskSpec{
|
||||
StorageReserved: storageReserved,
|
||||
Tags: strings.Split(tags, ","),
|
||||
AllowScheduling: allowScheduling,
|
||||
EvictionRequested: evictionRequested,
|
||||
},
|
||||
status: longhorn.DiskStatus{
|
||||
StorageAvailable: storageAvailable,
|
||||
Type: longhorn.DiskTypeFilesystem,
|
||||
},
|
||||
}
|
||||
if isBlockType {
|
||||
disk.status.Type = longhorn.DiskTypeBlock
|
||||
}
|
||||
if isCondOk {
|
||||
disk.status.Conditions = []longhorn.Condition{{Type: longhorn.DiskConditionTypeSchedulable, Status: longhorn.ConditionStatusTrue}}
|
||||
}
|
||||
return disk
|
||||
}
|
||||
|
||||
func newNode(name, tags string, isCondReady, isCondSchedulable, allowScheduling, evictionRequested bool) *longhorn.Node {
|
||||
node := &longhorn.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: longhorn.NodeSpec{
|
||||
Disks: map[string]longhorn.DiskSpec{},
|
||||
Tags: strings.Split(tags, ","),
|
||||
AllowScheduling: allowScheduling,
|
||||
EvictionRequested: evictionRequested,
|
||||
},
|
||||
Status: longhorn.NodeStatus{
|
||||
DiskStatus: map[string]*longhorn.DiskStatus{},
|
||||
},
|
||||
}
|
||||
if isCondReady {
|
||||
node.Status.Conditions = append(node.Status.Conditions, longhorn.Condition{Type: longhorn.NodeConditionTypeReady, Status: longhorn.ConditionStatusTrue})
|
||||
}
|
||||
if isCondSchedulable {
|
||||
node.Status.Conditions = append(node.Status.Conditions, longhorn.Condition{Type: longhorn.NodeConditionTypeSchedulable, Status: longhorn.ConditionStatusTrue})
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
func addDisksToNode(node *longhorn.Node, disks []*disk) {
|
||||
for i, disk := range disks {
|
||||
name := fmt.Sprintf("disk-%d", i)
|
||||
node.Spec.Disks[name] = disk.spec
|
||||
node.Status.DiskStatus[name] = &disk.status
|
||||
}
|
||||
}
|
||||
|
||||
func newSetting(name, value string) *longhorn.Setting {
|
||||
return &longhorn.Setting{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Value: value,
|
||||
}
|
||||
}
|
|
@ -117,6 +117,8 @@ func NewProvisionerDeployment(namespace, serviceAccount, provisionerImage, rootD
|
|||
"--leader-election",
|
||||
"--leader-election-namespace=$(POD_NAMESPACE)",
|
||||
"--default-fstype=ext4",
|
||||
"--enable-capacity",
|
||||
"--capacity-ownerref-level=2",
|
||||
fmt.Sprintf("--kube-api-qps=%v", types.KubeAPIQPS),
|
||||
fmt.Sprintf("--kube-api-burst=%v", types.KubeAPIBurst),
|
||||
fmt.Sprintf("--http-endpoint=:%v", types.CSISidecarMetricsPort),
|
||||
|
@ -380,6 +382,18 @@ func NewPluginDeployment(namespace, serviceAccount, nodeDriverRegistrarImage, li
|
|||
Protocol: corev1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
StartupProbe: &corev1.Probe{
|
||||
ProbeHandler: corev1.ProbeHandler{
|
||||
HTTPGet: &corev1.HTTPGetAction{
|
||||
Path: "/healthz",
|
||||
Port: intstr.FromInt(DefaultCSILivenessProbePort),
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: datastore.PodProbeInitialDelay,
|
||||
TimeoutSeconds: datastore.PodProbeTimeoutSeconds,
|
||||
PeriodSeconds: datastore.PodProbePeriodSeconds,
|
||||
FailureThreshold: datastore.PodStartupProbeFailureThreshold,
|
||||
},
|
||||
LivenessProbe: &corev1.Probe{
|
||||
ProbeHandler: corev1.ProbeHandler{
|
||||
HTTPGet: &corev1.HTTPGetAction{
|
||||
|
@ -579,13 +593,13 @@ type DriverObjectDeployment struct {
|
|||
}
|
||||
|
||||
func NewCSIDriverObject() *DriverObjectDeployment {
|
||||
falseFlag := true
|
||||
obj := &storagev1.CSIDriver{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: types.LonghornDriverName,
|
||||
},
|
||||
Spec: storagev1.CSIDriverSpec{
|
||||
PodInfoOnMount: &falseFlag,
|
||||
PodInfoOnMount: ptr.To(true),
|
||||
StorageCapacity: ptr.To(true),
|
||||
},
|
||||
}
|
||||
return &DriverObjectDeployment{
|
||||
|
|
|
@ -105,6 +105,24 @@ func getCommonDeployment(commonName, namespace, serviceAccount, image, rootDir s
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
// required by external-provisioner to set owner references for CSIStorageCapacity objects
|
||||
Name: "NAMESPACE",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
// required by external-provisioner to set owner references for CSIStorageCapacity objects
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{
|
||||
|
|
|
@ -41,6 +41,13 @@ func (ids *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.G
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: &csi.PluginCapability_Service_{
|
||||
Service: &csi.PluginCapability_Service{
|
||||
Type: csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: &csi.PluginCapability_VolumeExpansion_{
|
||||
VolumeExpansion: &csi.PluginCapability_VolumeExpansion{
|
||||
|
|
|
@ -1,6 +1,8 @@
package csi

import (
	"time"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"

@ -13,6 +15,9 @@ type Manager struct {
	cs  *ControllerServer
}

// It can take up to 10s for each try. So total retry time would be 180s
const rancherClientInitMaxRetry = 18

func init() {}

func GetCSIManager() *Manager {

@ -24,9 +29,9 @@ func (m *Manager) Run(driverName, nodeID, endpoint, identityVersion, managerURL

	// Longhorn API Client
	clientOpts := &longhornclient.ClientOpts{Url: managerURL}
	apiClient, err := longhornclient.NewRancherClient(clientOpts)
	apiClient, err := initRancherClient(clientOpts)
	if err != nil {
		return errors.Wrap(err, "Failed to initialize Longhorn API client")
		return err
	}

	// Create GRPC servers

@ -36,10 +41,30 @@ func (m *Manager) Run(driverName, nodeID, endpoint, identityVersion, managerURL
		return errors.Wrap(err, "Failed to create CSI node server ")
	}

	m.cs = NewControllerServer(apiClient, nodeID)
	m.cs, err = NewControllerServer(apiClient, nodeID)
	if err != nil {
		return errors.Wrap(err, "failed to create CSI controller server")
	}

	s := NewNonBlockingGRPCServer()
	s.Start(endpoint, m.ids, m.cs, m.ns)
	s.Wait()

	return nil
}

func initRancherClient(clientOpts *longhornclient.ClientOpts) (*longhornclient.RancherClient, error) {
	var lastErr error

	for i := 0; i < rancherClientInitMaxRetry; i++ {
		apiClient, err := longhornclient.NewRancherClient(clientOpts)
		if err == nil {
			return apiClient, nil
		}
		logrus.Warnf("Failed to initialize Longhorn API client %v. Retrying", err)
		lastErr = err
		time.Sleep(time.Second)
	}

	return nil, errors.Wrap(lastErr, "Failed to initialize Longhorn API client")
}

@ -893,6 +893,11 @@ func (ns *NodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoReque
	return &csi.NodeGetInfoResponse{
		NodeId:            ns.nodeID,
		MaxVolumesPerNode: 0, // technically the scsi kernel limit is the max limit of volumes
		AccessibleTopology: &csi.Topology{
			Segments: map[string]string{
				nodeTopologyKey: ns.nodeID,
			},
		},
	}, nil
}

22  csi/util.go
@ -22,6 +22,7 @@ import (
	utilexec "k8s.io/utils/exec"

	"github.com/longhorn/longhorn-manager/types"
	"github.com/longhorn/longhorn-manager/util"

	longhornclient "github.com/longhorn/longhorn-manager/client"
	longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"

@ -35,6 +36,8 @@ const (
	defaultForceUmountTimeout = 30 * time.Second

	tempTestMountPointValidStatusFile = ".longhorn-volume-mount-point-test.tmp"

	nodeTopologyKey = "kubernetes.io/hostname"
)

// NewForcedParamsExec creates a osExecutor that allows for adding additional params to later occurring Run calls

@ -212,6 +215,14 @@ func getVolumeOptions(volumeID string, volOptions map[string]string) (*longhornc
		vol.BackupTargetName = backupTargetName
	}

	if backupBlockSize, ok := volOptions["backupBlockSize"]; ok {
		blockSize, err := util.ConvertSize(backupBlockSize)
		if err != nil {
			return nil, errors.Wrap(err, "invalid parameter backupBlockSize")
		}
		vol.BackupBlockSize = strconv.FormatInt(blockSize, 10)
	}

	if dataSource, ok := volOptions["dataSource"]; ok {
		vol.DataSource = dataSource
	}

@ -466,3 +477,14 @@ func requiresSharedAccess(vol *longhornclient.Volume, cap *csi.VolumeCapability)
func getStageBlockVolumePath(stagingTargetPath, volumeID string) string {
	return filepath.Join(stagingTargetPath, volumeID)
}

func parseNodeID(topology *csi.Topology) (string, error) {
	if topology == nil || topology.Segments == nil {
		return "", fmt.Errorf("missing accessible topology request parameter")
	}
	nodeId, ok := topology.Segments[nodeTopologyKey]
	if !ok {
		return "", fmt.Errorf("accessible topology request parameter is missing %s key", nodeTopologyKey)
	}
	return nodeId, nil
}

@ -43,6 +43,7 @@ const (
	PodProbeTimeoutSeconds           = PodProbePeriodSeconds - 1
	PodProbePeriodSeconds            = 5
	PodLivenessProbeFailureThreshold = 3
	PodStartupProbeFailureThreshold  = 36

	IMPodProbeInitialDelay   = 3
	IMPodProbeTimeoutSeconds = IMPodProbePeriodSeconds - 1

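Note: with PodProbePeriodSeconds = 5 and PodStartupProbeFailureThreshold = 36, the startup probe added to the CSI plugin container earlier in this diff tolerates up to 36 consecutive failures, roughly 180 seconds (36 x 5s) for the plugin to become healthy before the kubelet restarts it.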
File diff suppressed because it is too large
@ -1,7 +1,8 @@
|
|||
package engineapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
bimapi "github.com/longhorn/backing-image-manager/api"
|
||||
bimclient "github.com/longhorn/backing-image-manager/pkg/client"
|
||||
|
@ -30,7 +31,7 @@ type BackingImageDataSourceClient struct {
|
|||
func NewBackingImageDataSourceClient(ip string) *BackingImageDataSourceClient {
|
||||
return &BackingImageDataSourceClient{
|
||||
bimclient.DataSourceClient{
|
||||
Remote: fmt.Sprintf("%s:%d", ip, BackingImageDataSourceDefaultPort),
|
||||
Remote: net.JoinHostPort(ip, strconv.Itoa(BackingImageDataSourceDefaultPort)),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,6 +2,8 @@ package engineapi
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
bimapi "github.com/longhorn/backing-image-manager/api"
|
||||
bimclient "github.com/longhorn/backing-image-manager/pkg/client"
|
||||
|
@ -45,7 +47,7 @@ func NewBackingImageManagerClient(bim *longhorn.BackingImageManager) (*BackingIm
|
|||
ip: bim.Status.IP,
|
||||
apiMinVersion: bim.Status.APIMinVersion,
|
||||
apiVersion: bim.Status.APIVersion,
|
||||
grpcClient: bimclient.NewBackingImageManagerClient(fmt.Sprintf("%s:%d", bim.Status.IP, BackingImageManagerDefaultPort)),
|
||||
grpcClient: bimclient.NewBackingImageManagerClient(net.JoinHostPort(bim.Status.IP, strconv.Itoa(BackingImageManagerDefaultPort))),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -84,7 +86,7 @@ func (c *BackingImageManagerClient) Sync(name, uuid, checksum, fromHost string,
|
|||
if err := CheckBackingImageManagerCompatibility(c.apiMinVersion, c.apiVersion); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := c.grpcClient.Sync(name, uuid, checksum, fmt.Sprintf("%s:%d", fromHost, BackingImageManagerDefaultPort), size)
|
||||
resp, err := c.grpcClient.Sync(name, uuid, checksum, net.JoinHostPort(fromHost, strconv.Itoa(BackingImageManagerDefaultPort)), size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -334,5 +335,6 @@ func (m *BackupMonitor) Close() {
|
|||
func getBackupParameters(backup *longhorn.Backup) map[string]string {
|
||||
parameters := map[string]string{}
|
||||
parameters[lhbackup.LonghornBackupParameterBackupMode] = string(backup.Spec.BackupMode)
|
||||
parameters[lhbackup.LonghornBackupParameterBackupBlockSize] = strconv.FormatInt(backup.Spec.BackupBlockSize, 10)
|
||||
return parameters
|
||||
}
|
||||
|
|
|
@ -84,3 +84,7 @@ func (s *DiskService) DiskReplicaInstanceDelete(diskType, diskName, diskUUID, di
|
|||
func (s *DiskService) GetInstanceManagerName() string {
|
||||
return s.instanceManagerName
|
||||
}
|
||||
|
||||
func (s *DiskService) MetricsGet(diskType, diskName, diskPath, diskDriver string) (*imapi.DiskMetrics, error) {
|
||||
return s.grpcClient.MetricsGet(diskType, diskName, diskPath, diskDriver)
|
||||
}
|
||||
|
|
|
@ -250,6 +250,11 @@ func (e *EngineBinary) ReplicaRebuildStatus(*longhorn.Engine) (map[string]*longh
|
|||
return data, nil
|
||||
}
|
||||
|
||||
func (e *EngineBinary) ReplicaRebuildQosSet(engine *longhorn.Engine, qosLimitMbps int64) error {
|
||||
// NOTE: Not implemented for EngineBinary (deprecated path)
|
||||
return nil
|
||||
}
|
||||
|
||||
// VolumeFrontendStart calls engine binary
|
||||
// TODO: Deprecated, replaced by gRPC proxy
|
||||
func (e *EngineBinary) VolumeFrontendStart(engine *longhorn.Engine) error {
|
||||
|
|
|
@ -240,6 +240,10 @@ func (e *EngineSimulator) ReplicaRebuildStatus(*longhorn.Engine) (map[string]*lo
|
|||
return nil, errors.New(ErrNotImplement)
|
||||
}
|
||||
|
||||
func (e *EngineSimulator) ReplicaRebuildQosSet(engine *longhorn.Engine, qosLimitMbps int64) error {
|
||||
return errors.New(ErrNotImplement)
|
||||
}
|
||||
|
||||
func (e *EngineSimulator) VolumeFrontendStart(*longhorn.Engine) error {
|
||||
return errors.New(ErrNotImplement)
|
||||
}
|
||||
|
|
|
@ -35,7 +35,7 @@ const (
|
|||
DefaultReplicaPortCountV1 = 10
|
||||
DefaultReplicaPortCountV2 = 5
|
||||
|
||||
DefaultPortArg = "--listen,0.0.0.0:"
|
||||
DefaultPortArg = "--listen,:"
|
||||
DefaultTerminateSignal = "SIGHUP"
|
||||
|
||||
// IncompatibleInstanceManagerAPIVersion means the instance manager version in v0.7.0
|
||||
|
@ -292,6 +292,7 @@ func parseInstance(p *imapi.Instance) *longhorn.InstanceProcess {
|
|||
TargetPortStart: p.InstanceStatus.TargetPortStart,
|
||||
TargetPortEnd: p.InstanceStatus.TargetPortEnd,
|
||||
UblkID: p.InstanceStatus.UblkID,
|
||||
UUID: p.InstanceStatus.UUID,
|
||||
|
||||
// FIXME: These fields are not used, maybe we can deprecate them later.
|
||||
Listen: "",
|
||||
|
@ -317,6 +318,7 @@ func parseProcess(p *imapi.Process) *longhorn.InstanceProcess {
|
|||
Conditions: p.ProcessStatus.Conditions,
|
||||
PortStart: p.ProcessStatus.PortStart,
|
||||
PortEnd: p.ProcessStatus.PortEnd,
|
||||
UUID: p.ProcessStatus.UUID,
|
||||
|
||||
// FIXME: These fields are not used, maybe we can deprecate them later.
|
||||
Listen: "",
|
||||
|
@ -587,13 +589,13 @@ func (c *InstanceManagerClient) ReplicaInstanceCreate(req *ReplicaInstanceCreate
|
|||
return parseInstance(instance), nil
|
||||
}
|
||||
|
||||
// InstanceDelete deletes the instance
|
||||
func (c *InstanceManagerClient) InstanceDelete(dataEngine longhorn.DataEngineType, name, kind, diskUUID string, cleanupRequired bool) (err error) {
|
||||
// InstanceDelete deletes the instance by name. UUID will be validated if not empty.
|
||||
func (c *InstanceManagerClient) InstanceDelete(dataEngine longhorn.DataEngineType, name, uuid, kind, diskUUID string, cleanupRequired bool) (err error) {
|
||||
if c.GetAPIVersion() < 4 {
|
||||
/* Fall back to the old way of deleting process */
|
||||
_, err = c.processManagerGrpcClient.ProcessDelete(name)
|
||||
_, err = c.processManagerGrpcClient.ProcessDelete(name, uuid)
|
||||
} else {
|
||||
_, err = c.instanceServiceGrpcClient.InstanceDelete(string(dataEngine), name, kind, diskUUID, cleanupRequired)
|
||||
_, err = c.instanceServiceGrpcClient.InstanceDelete(string(dataEngine), name, uuid, kind, diskUUID, cleanupRequired)
|
||||
}
|
||||
|
||||
return err
|
||||
|
|
|
@ -51,6 +51,11 @@ func (p *Proxy) ReplicaRebuildStatus(e *longhorn.Engine) (status map[string]*lon
|
|||
return status, nil
|
||||
}
|
||||
|
||||
func (p *Proxy) ReplicaRebuildQosSet(e *longhorn.Engine, qosLimitMbps int64) error {
|
||||
return p.grpcClient.ReplicaRebuildingQosSet(string(e.Spec.DataEngine), e.Name, e.Spec.VolumeName,
|
||||
p.DirectToURL(e), qosLimitMbps)
|
||||
}
|
||||
|
||||
func (p *Proxy) ReplicaRebuildVerify(e *longhorn.Engine, replicaName, url string) (err error) {
|
||||
if err := ValidateReplicaURL(url); err != nil {
|
||||
return err
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
package engineapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
|
@ -17,7 +18,7 @@ type ShareManagerClient struct {
|
|||
}
|
||||
|
||||
func NewShareManagerClient(sm *longhorn.ShareManager, pod *corev1.Pod) (*ShareManagerClient, error) {
|
||||
client, err := smclient.NewShareManagerClient(fmt.Sprintf("%s:%d", pod.Status.PodIP, ShareManagerDefaultPort))
|
||||
client, err := smclient.NewShareManagerClient(net.JoinHostPort(pod.Status.PodIP, strconv.Itoa(ShareManagerDefaultPort)))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create Share Manager client for %v", sm.Name)
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package engineapi
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -91,6 +92,7 @@ type EngineClient interface {
|
|||
ReplicaAdd(engine *longhorn.Engine, replicaName, url string, isRestoreVolume, fastSync bool, localSync *etypes.FileLocalSync, replicaFileSyncHTTPClientTimeout, grpcTimeoutSeconds int64) error
|
||||
ReplicaRemove(engine *longhorn.Engine, url, replicaName string) error
|
||||
ReplicaRebuildStatus(*longhorn.Engine) (map[string]*longhorn.RebuildStatus, error)
|
||||
ReplicaRebuildQosSet(engine *longhorn.Engine, qosLimitMbps int64) error
|
||||
ReplicaRebuildVerify(engine *longhorn.Engine, replicaName, url string) error
|
||||
ReplicaModeUpdate(engine *longhorn.Engine, url string, mode string) error
|
||||
|
||||
|
@ -310,7 +312,8 @@ func GetEngineEndpoint(volume *Volume, ip string) (string, error) {
|
|||
|
||||
// it will looks like this in the end
|
||||
// iscsi://10.42.0.12:3260/iqn.2014-09.com.rancher:vol-name/1
|
||||
return EndpointISCSIPrefix + ip + ":" + DefaultISCSIPort + "/" + volume.Endpoint + "/" + DefaultISCSILUN, nil
|
||||
formattedIPPort := net.JoinHostPort(ip, DefaultISCSIPort)
|
||||
return EndpointISCSIPrefix + formattedIPPort + "/" + volume.Endpoint + "/" + DefaultISCSILUN, nil
|
||||
case spdkdevtypes.FrontendSPDKTCPNvmf, spdkdevtypes.FrontendSPDKUblk:
|
||||
return volume.Endpoint, nil
|
||||
}
|
||||
|
|
170  go.mod
@ -2,7 +2,7 @@ module github.com/longhorn/longhorn-manager
|
|||
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.2
|
||||
toolchain go1.24.6
|
||||
|
||||
// Replace directives are required for dependencies in this section because:
|
||||
// - This module imports k8s.io/kubernetes.
|
||||
|
@ -20,101 +20,106 @@ toolchain go1.24.2
|
|||
// necessary. However, it is better to include all of them for consistency.
replace (
    k8s.io/api => k8s.io/api v0.33.0
    k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.33.0
    k8s.io/apimachinery => k8s.io/apimachinery v0.33.0
    k8s.io/apiserver => k8s.io/apiserver v0.33.0
    k8s.io/cli-runtime => k8s.io/cli-runtime v0.33.0
    k8s.io/client-go => k8s.io/client-go v0.33.0
    k8s.io/cloud-provider => k8s.io/cloud-provider v0.33.0
    k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.33.0
    k8s.io/code-generator => k8s.io/code-generator v0.33.0
    k8s.io/component-base => k8s.io/component-base v0.33.0
    k8s.io/component-helpers => k8s.io/component-helpers v0.33.0
    k8s.io/controller-manager => k8s.io/controller-manager v0.33.0
    k8s.io/cri-api => k8s.io/cri-api v0.33.0
    k8s.io/cri-client => k8s.io/cri-client v0.33.0
    k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.0
    k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.33.0
    k8s.io/endpointslice => k8s.io/endpointslice v0.33.0
    k8s.io/externaljwt => k8s.io/externaljwt v0.33.0
    k8s.io/kms => k8s.io/kms v0.33.0
    k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.33.0
    k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.33.0
    k8s.io/kube-proxy => k8s.io/kube-proxy v0.33.0
    k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.0
    k8s.io/kubectl => k8s.io/kubectl v0.33.0
    k8s.io/kubelet => k8s.io/kubelet v0.33.0
    k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.11
    k8s.io/metrics => k8s.io/metrics v0.33.0
    k8s.io/mount-utils => k8s.io/mount-utils v0.33.0
    k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.33.0
    k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.33.0
    k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.33.0
    k8s.io/sample-controller => k8s.io/sample-controller v0.33.0
    k8s.io/api => k8s.io/api v0.33.3
    k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.33.3
    k8s.io/apimachinery => k8s.io/apimachinery v0.33.3
    k8s.io/apiserver => k8s.io/apiserver v0.33.3
    k8s.io/cli-runtime => k8s.io/cli-runtime v0.33.3
    k8s.io/client-go => k8s.io/client-go v0.33.3
    k8s.io/cloud-provider => k8s.io/cloud-provider v0.33.3
    k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.33.3
    k8s.io/code-generator => k8s.io/code-generator v0.33.3
    k8s.io/component-base => k8s.io/component-base v0.33.3
    k8s.io/component-helpers => k8s.io/component-helpers v0.33.3
    k8s.io/controller-manager => k8s.io/controller-manager v0.33.3
    k8s.io/cri-api => k8s.io/cri-api v0.33.3
    k8s.io/cri-client => k8s.io/cri-client v0.33.3
    k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.33.3
    k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.33.3
    k8s.io/endpointslice => k8s.io/endpointslice v0.33.3
    k8s.io/externaljwt => k8s.io/externaljwt v0.33.3
    k8s.io/kms => k8s.io/kms v0.33.3
    k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.33.3
    k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.33.3
    k8s.io/kube-proxy => k8s.io/kube-proxy v0.33.3
    k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.3
    k8s.io/kubectl => k8s.io/kubectl v0.33.3
    k8s.io/kubelet => k8s.io/kubelet v0.33.3
    k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.30.14
    k8s.io/metrics => k8s.io/metrics v0.33.3
    k8s.io/mount-utils => k8s.io/mount-utils v0.33.3
    k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.33.3
    k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.33.3
    k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.33.3
    k8s.io/sample-controller => k8s.io/sample-controller v0.33.3
)
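The replace block above pins every k8s.io staging module to the version matching the k8s.io/kubernetes dependency (bumped here from v0.33.0 to v0.33.3, with legacy-cloud-providers deliberately staying on its own v0.30.x line). A minimal sketch of how that consistency could be verified with golang.org/x/mod/modfile, which is already a direct dependency of this module; the file name and the helper program itself are illustrative and not part of this change:

// checkreplaces.go: hypothetical helper, not part of longhorn-manager.
// It parses go.mod and groups the k8s.io replace targets by version, so any
// module drifting from the common pin shows up as an extra group
// (legacy-cloud-providers is expected to appear in its own group).
package main

import (
    "fmt"
    "os"
    "strings"

    "golang.org/x/mod/modfile"
)

func main() {
    data, err := os.ReadFile("go.mod")
    if err != nil {
        panic(err)
    }
    f, err := modfile.Parse("go.mod", data, nil)
    if err != nil {
        panic(err)
    }
    // version of the replace target -> module paths pinned to it
    versions := map[string][]string{}
    for _, r := range f.Replace {
        if strings.HasPrefix(r.Old.Path, "k8s.io/") {
            versions[r.New.Version] = append(versions[r.New.Version], r.Old.Path)
        }
    }
    for v, mods := range versions {
        fmt.Printf("%s: %d modules (e.g. %s)\n", v, len(mods), mods[0])
    }
}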
require (
|
||||
github.com/cockroachdb/errors v1.12.0
|
||||
github.com/container-storage-interface/spec v1.11.0
|
||||
github.com/docker/go-connections v0.5.0
|
||||
github.com/docker/go-connections v0.6.0
|
||||
github.com/go-co-op/gocron v1.37.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/handlers v1.5.2
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
|
||||
github.com/jinzhu/copier v0.4.0
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.21.0
|
||||
github.com/longhorn/backing-image-manager v1.8.1
|
||||
github.com/longhorn/backupstore v0.0.0-20250421031654-0ef762b84472
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250419062810-2bdafe8e7f4e
|
||||
github.com/longhorn/go-iscsi-helper v0.0.0-20250425050615-1de428a1281a
|
||||
github.com/longhorn/go-spdk-helper v0.0.0-20250422073040-bb5fe0ae1d17
|
||||
github.com/longhorn/longhorn-engine v1.9.0-dev-20250223.0.20250225091521-921f63f3a87d
|
||||
github.com/longhorn/longhorn-instance-manager v1.9.0-dev-20250420.0.20250421223422-3c87f4a9aaa5
|
||||
github.com/longhorn/longhorn-share-manager v1.8.1
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.22.0
|
||||
github.com/longhorn/backing-image-manager v1.9.1
|
||||
github.com/longhorn/backupstore v0.0.0-20250804022317-794abf817297
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250812101836-470cb7301942
|
||||
github.com/longhorn/go-iscsi-helper v0.0.0-20250713130221-69ce6f3960fa
|
||||
github.com/longhorn/go-spdk-helper v0.0.3-0.20250809103353-695fd752a98b
|
||||
github.com/longhorn/longhorn-engine v1.10.0-dev-20250713.0.20250728071833-3932ded2f139
|
||||
github.com/longhorn/longhorn-instance-manager v1.10.0-dev-20250629.0.20250711075830-f3729b840178
|
||||
github.com/longhorn/longhorn-share-manager v1.9.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/rancher/dynamiclistener v0.6.2
|
||||
github.com/prometheus/client_golang v1.23.0
|
||||
github.com/rancher/dynamiclistener v0.7.0
|
||||
github.com/rancher/go-rancher v0.1.1-0.20220412083059-ff12399dd57b
|
||||
github.com/rancher/wrangler/v3 v3.2.0
|
||||
github.com/rancher/wrangler/v3 v3.2.2
|
||||
github.com/robfig/cron v1.2.0
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/urfave/cli v1.22.16
|
||||
golang.org/x/mod v0.24.0
|
||||
golang.org/x/net v0.39.0
|
||||
golang.org/x/sys v0.32.0
|
||||
golang.org/x/time v0.11.0
|
||||
google.golang.org/grpc v1.72.0
|
||||
github.com/urfave/cli v1.22.17
|
||||
golang.org/x/mod v0.27.0
|
||||
golang.org/x/net v0.43.0
|
||||
golang.org/x/sys v0.35.0
|
||||
golang.org/x/time v0.12.0
|
||||
google.golang.org/grpc v1.74.2
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.33.0
|
||||
k8s.io/apiextensions-apiserver v0.33.0
|
||||
k8s.io/apimachinery v0.33.0
|
||||
k8s.io/cli-runtime v0.33.0
|
||||
k8s.io/client-go v0.33.0
|
||||
k8s.io/kubernetes v1.33.0
|
||||
k8s.io/metrics v0.33.0
|
||||
k8s.io/mount-utils v0.33.0
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
|
||||
sigs.k8s.io/controller-runtime v0.20.4
|
||||
k8s.io/api v0.33.3
|
||||
k8s.io/apiextensions-apiserver v0.33.3
|
||||
k8s.io/apimachinery v0.33.3
|
||||
k8s.io/cli-runtime v0.33.3
|
||||
k8s.io/client-go v0.33.3
|
||||
k8s.io/kubernetes v1.33.3
|
||||
k8s.io/metrics v0.33.3
|
||||
k8s.io/mount-utils v0.33.3
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
|
||||
sigs.k8s.io/controller-runtime v0.21.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
|
||||
github.com/cockroachdb/redact v1.1.5 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/getsentry/sentry-go v0.27.0 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/longhorn/types v0.0.0-20250416235128-0c407ad2b792 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/longhorn/types v0.0.0-20250810143617-8a478c078cb8 // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/shirou/gopsutil/v3 v3.24.5 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
|
||||
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
|
@ -127,19 +132,18 @@ require (
|
|||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/gammazero/deque v1.0.0 // indirect
|
||||
github.com/gammazero/workerpool v1.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/gorilla/context v1.1.2 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
|
@ -156,32 +160,32 @@ require (
|
|||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rancher/lasso v0.2.1
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.65.0 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/rancher/lasso v0.2.3 // indirect
|
||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/slok/goresilience v0.2.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
go.opentelemetry.io/otel v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/multierr v1.11.0
|
||||
golang.org/x/crypto v0.37.0 // indirect
|
||||
golang.org/x/oauth2 v0.27.0 // indirect
|
||||
golang.org/x/sync v0.13.0
|
||||
golang.org/x/term v0.31.0 // indirect
|
||||
golang.org/x/text v0.24.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
golang.org/x/crypto v0.41.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.16.0
|
||||
golang.org/x/term v0.34.0 // indirect
|
||||
golang.org/x/text v0.28.0
|
||||
google.golang.org/protobuf v1.36.7
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/apiserver v0.33.0 // indirect
|
||||
k8s.io/component-base v0.33.0 // indirect
|
||||
k8s.io/apiserver v0.33.3 // indirect
|
||||
k8s.io/component-base v0.33.3 // indirect
|
||||
k8s.io/component-helpers v0.33.0 // indirect
|
||||
k8s.io/controller-manager v0.33.0 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-aggregator v0.32.1 // indirect
|
||||
k8s.io/kube-aggregator v0.33.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/kubelet v0.0.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
|
go.sum (243 changes)
@ -1,6 +1,6 @@
|
|||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
|
||||
|
@ -14,10 +14,16 @@ github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 h1:SjZ2GvvOononHOpK
|
|||
github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cockroachdb/errors v1.12.0 h1:d7oCs6vuIMUQRVbi6jWWWEJZahLCfJpnJSVobd1/sUo=
|
||||
github.com/cockroachdb/errors v1.12.0/go.mod h1:SvzfYNNBshAVbZ8wzNc/UPK3w1vf0dKDUP41ucAIf7g=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
|
||||
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/container-storage-interface/spec v1.11.0 h1:H/YKTOeUZwHtyPOr9raR+HgFmGluGCklulxDYxSdVNM=
|
||||
github.com/container-storage-interface/spec v1.11.0/go.mod h1:DtUvaQszPml1YJfIK7c00mlv6/g4wNMLanLgiUbKFRI=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
|
@ -27,8 +33,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
|||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
|
||||
|
@ -41,10 +47,14 @@ github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34
|
|||
github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo=
|
||||
github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q=
|
||||
github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc=
|
||||
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
|
||||
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
|
||||
github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0=
|
||||
github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
|
@ -103,30 +113,30 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.21.0 h1:dUN/iIgXLucAxyML2iPyhniIlACQumIeAJmIzsMBddc=
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.21.0/go.mod h1:ZCVRTYuup+bwX9tOeE5Q3LDw64QvltSwMUQ3M3g2T+Q=
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.22.0 h1:EUAs1+uHGps3OtVj4XVx16urhpI02eu+Z8Vps6plpHY=
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.22.0/go.mod h1:f+PalKyS4Ujsjb9+m6Rj0W6c28y3nfea3paQ/VqjI28=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
|
||||
github.com/longhorn/backing-image-manager v1.8.1 h1:Va/Ncu1YCksUelegLF7HmawgZqt9/lfpLV/Rcbh09O8=
|
||||
github.com/longhorn/backing-image-manager v1.8.1/go.mod h1:SC3vqkxf6ntuMQmZ902IBExROqGy7JSMlqyE4tf4c6o=
|
||||
github.com/longhorn/backupstore v0.0.0-20250421031654-0ef762b84472 h1:nKpF/SLYj9Gw6PJPsuz23OxjHhe7iT8ObIN8NUSZhtE=
|
||||
github.com/longhorn/backupstore v0.0.0-20250421031654-0ef762b84472/go.mod h1:pcMa85HhY/6QuPOMYx/THntdmkxyXa1p1RAOsXFen/8=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250419062810-2bdafe8e7f4e h1:Pw6nJB8C8wB6usM7bSkGhYxbWaa/SNESh1yoFboazuY=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250419062810-2bdafe8e7f4e/go.mod h1:O5c6VGDM2EiYdBmaz9DlVf0o4vvBf1bJaF4m1aQ1kvU=
|
||||
github.com/longhorn/go-iscsi-helper v0.0.0-20250425050615-1de428a1281a h1:rvUgmO0v6LTpIqoGn0n3nLFmchHyvoVZ8EePnt4bEqA=
|
||||
github.com/longhorn/go-iscsi-helper v0.0.0-20250425050615-1de428a1281a/go.mod h1:6otI70HGdPUKJTRHCU9qlcpFJO6b5LXSrVWy3pciNww=
|
||||
github.com/longhorn/go-spdk-helper v0.0.0-20250422073040-bb5fe0ae1d17 h1:zkhXghEfmRNpyI6qZACc2LvpQqF3VEZF2Z40veEeEZY=
|
||||
github.com/longhorn/go-spdk-helper v0.0.0-20250422073040-bb5fe0ae1d17/go.mod h1:92fHXPUhed51zY2ZzlaNDwN5+p3n/iJgBz4C0Y1ADgc=
|
||||
github.com/longhorn/longhorn-engine v1.9.0-dev-20250223.0.20250225091521-921f63f3a87d h1:JdkTIIaKT0uUU80Ncl26JeAqT727PgPJMaqjf0OUfAE=
|
||||
github.com/longhorn/longhorn-engine v1.9.0-dev-20250223.0.20250225091521-921f63f3a87d/go.mod h1:qTsHK0nU6Myh4XGnuqfsPG4FpeLh7Vki2hLl/1P0iuc=
|
||||
github.com/longhorn/longhorn-instance-manager v1.9.0-dev-20250420.0.20250421223422-3c87f4a9aaa5 h1:T4qxmnrWHtv+GXTradhuf8z+5o9p6W2HvnUrmgRf88s=
|
||||
github.com/longhorn/longhorn-instance-manager v1.9.0-dev-20250420.0.20250421223422-3c87f4a9aaa5/go.mod h1:F/3UgM1ms5B+SfbOY4qWMqYwW+8NNiaVs2FrotTsiJc=
|
||||
github.com/longhorn/longhorn-share-manager v1.8.1 h1:WJkn4kLXWcROTOeZ17zVe6MxwFkzF6KaEKq7ZelobZM=
|
||||
github.com/longhorn/longhorn-share-manager v1.8.1/go.mod h1:3GpCj+P339UeCUsAvinlmq2tn8+qD7yaq6I/El/STfQ=
|
||||
github.com/longhorn/types v0.0.0-20250416235128-0c407ad2b792 h1:+KWYQQKGCBnoy7Our2ryJ9HtUksKf3OPaLVMfsxPzMk=
|
||||
github.com/longhorn/types v0.0.0-20250416235128-0c407ad2b792/go.mod h1:3jHuVDtpkXQzpnp4prguDBskVRric2kmF8aSPkRJ4jw=
|
||||
github.com/longhorn/backing-image-manager v1.9.1 h1:amT5BDkBJnnmlJYfPfA2m0o3zdvArf7e/DSsbgOquX0=
|
||||
github.com/longhorn/backing-image-manager v1.9.1/go.mod h1:a9UGK3bsd1Gj0kbN5tKev5/uaSwjOvoHqZzLqMMqnU0=
|
||||
github.com/longhorn/backupstore v0.0.0-20250804022317-794abf817297 h1:KVnOHFT3wuwgyhV7/Rue8NMt13NkpIkZ3B7eVR0C8yM=
|
||||
github.com/longhorn/backupstore v0.0.0-20250804022317-794abf817297/go.mod h1:j5TiUyvRBYaSaPY/p6GIFOk1orfWcngk9hIWxDDJ5mg=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250812101836-470cb7301942 h1:H9hPMP02ZJSzXa7/0TOG3HQAhieDAGQuqnePSlj+BbQ=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250812101836-470cb7301942/go.mod h1:fuYzrb6idZgLrh8yePy6fA+LVB+z5fl4zZbBAU09+0g=
|
||||
github.com/longhorn/go-iscsi-helper v0.0.0-20250713130221-69ce6f3960fa h1:J0DyOSate7Vf+zlHYB5WrCTWJfshEsSJDp161GjBmhI=
|
||||
github.com/longhorn/go-iscsi-helper v0.0.0-20250713130221-69ce6f3960fa/go.mod h1:fN9H878mLjAqSbPxEXpOCwvTlt43h+/CZxXrQlX/iMQ=
|
||||
github.com/longhorn/go-spdk-helper v0.0.3-0.20250809103353-695fd752a98b h1:IzKSLNxFgDA/5ZtVJnt5CkgAfjyendEJsr2+fRMAa18=
|
||||
github.com/longhorn/go-spdk-helper v0.0.3-0.20250809103353-695fd752a98b/go.mod h1:ypwTG96myWDEkea5PxNzzii9CPm/TI8duZwPGxUNsvo=
|
||||
github.com/longhorn/longhorn-engine v1.10.0-dev-20250713.0.20250728071833-3932ded2f139 h1:qeR/Rt/Mmahgzf2Df2m00BLZibYZYs5+iTTvbHFfAXA=
|
||||
github.com/longhorn/longhorn-engine v1.10.0-dev-20250713.0.20250728071833-3932ded2f139/go.mod h1:kl2QVpLZeMoYYSAVOd2IDiP7JeLQFX/fujLA9MdyK6o=
|
||||
github.com/longhorn/longhorn-instance-manager v1.10.0-dev-20250629.0.20250711075830-f3729b840178 h1:JO7uffDjHufJZZxvXLdoLpIWUl1/QszoZlx9dzCRNKY=
|
||||
github.com/longhorn/longhorn-instance-manager v1.10.0-dev-20250629.0.20250711075830-f3729b840178/go.mod h1:dLZTouISlm8sUpSDDb4xbnSEbZOBnKCVFMf46Ybpr44=
|
||||
github.com/longhorn/longhorn-share-manager v1.9.1 h1:ObRP8lnNOncRg9podwrPrqObBXJsQDlPfNwslxkBRhM=
|
||||
github.com/longhorn/longhorn-share-manager v1.9.1/go.mod h1:vYqc2o+6xTlgdlweIeED4Do/n+0/4I3AbD6jQ5OHfcg=
|
||||
github.com/longhorn/types v0.0.0-20250810143617-8a478c078cb8 h1:NkYbz5Bs+zNW7l3lS9xG9ktUPcNCgmG1tEYzOCk7rdM=
|
||||
github.com/longhorn/types v0.0.0-20250810143617-8a478c078cb8/go.mod h1:jbvGQ66V//M9Jp2DC6k+BR74QxSK0Hp/L2FRJ/SBxFA=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
|
@ -155,6 +165,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
|||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
|
||||
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -164,25 +176,25 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
|
|||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
|
||||
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rancher/dynamiclistener v0.6.2 h1:F0SEJhvO2aFe0eTvKGlQoy5x7HtwK8oJbyITVfBSb90=
|
||||
github.com/rancher/dynamiclistener v0.6.2/go.mod h1:ncmVR7qR8kR1o6xNkTcVS2mZ9WtlljimBilIlNjdyzc=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/rancher/dynamiclistener v0.7.0 h1:+jyfZ4lVamc1UbKWo8V8dhSPtCgRZYaY8nm7wiHeko4=
|
||||
github.com/rancher/dynamiclistener v0.7.0/go.mod h1:Q2YA42xp7Xc69JiSlJ8GpvLvze261T0iQ/TP4RdMCYk=
|
||||
github.com/rancher/go-rancher v0.1.1-0.20220412083059-ff12399dd57b h1:so40GMVZOZkQeIbAzaZRq6wDrMErvRLuXNsGTRZUpg8=
|
||||
github.com/rancher/go-rancher v0.1.1-0.20220412083059-ff12399dd57b/go.mod h1:7oQvGNiJsGvrUgB+7AH8bmdzuR0uhULfwKb43Ht0hUk=
|
||||
github.com/rancher/lasso v0.2.1 h1:SZTqMVQn8cAOqvwGBd1/EYOIJ/MGN+UfJrOWvHd4jHU=
|
||||
github.com/rancher/lasso v0.2.1/go.mod h1:KSV3jBXfdXqdCuMm2uC8kKB9q/wuDYb3h0eHZoRjShM=
|
||||
github.com/rancher/wrangler/v3 v3.2.0 h1:fZmhSOczW+pxAhyOaGG+9xbEwETPGA5gbS0x0Im2zWs=
|
||||
github.com/rancher/wrangler/v3 v3.2.0/go.mod h1:0C5QyvSrQOff8gQQzpB/L/FF03EQycjR3unSJcKCHno=
|
||||
github.com/rancher/lasso v0.2.3 h1:74/z/C/O3ykhyMrRuEgc9kVyYiSoS7kp5BAijlcyXDg=
|
||||
github.com/rancher/lasso v0.2.3/go.mod h1:G+KeeOaKRjp+qGp0bV6VbLhYrq1vHbJPbDh40ejg5yE=
|
||||
github.com/rancher/wrangler/v3 v3.2.2 h1:IK1/v8n8gaZSB4izmJhGFXJt38Z8gkbwzl3Lo/e2jQc=
|
||||
github.com/rancher/wrangler/v3 v3.2.2/go.mod h1:TA1QuuQxrtn/kmJbBLW/l24IcfHBmSXBa9an3IRlqQQ=
|
||||
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
|
||||
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
|
@ -219,11 +231,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ=
|
||||
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
|
||||
github.com/urfave/cli v1.22.17 h1:SYzXoiPfQjHBbkYxbew5prZHS1TOLT3ierW8SYLqtVQ=
|
||||
github.com/urfave/cli v1.22.17/go.mod h1:b0ht0aqgH/6pBYzzxURyrM4xXNgsoT/n2ZzwQiEhNVo=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
|
@ -232,51 +243,51 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo
|
|||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
|
||||
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
|
||||
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
|
||||
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
|
||||
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
|
||||
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
|
||||
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
|
||||
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
|
||||
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
|
||||
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
|
||||
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
|
||||
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
|
||||
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
|
||||
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
|
||||
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
|
||||
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
|
||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a h1:Y+7uR/b1Mw2iSXZ3G//1haIiSElDQZ8KWh0h+sZPG90=
|
||||
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
|
||||
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -285,32 +296,32 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
|
||||
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
|
||||
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
|
||||
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
|
||||
golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
|
||||
google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
|
||||
google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4=
|
||||
google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM=
|
||||
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
|
||||
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
|
@ -325,42 +336,42 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
|||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
|
||||
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
|
||||
k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs=
|
||||
k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc=
|
||||
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
|
||||
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||
k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc=
|
||||
k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8=
|
||||
k8s.io/cli-runtime v0.33.0 h1:Lbl/pq/1o8BaIuyn+aVLdEPHVN665tBAXUePs8wjX7c=
|
||||
k8s.io/cli-runtime v0.33.0/go.mod h1:QcA+r43HeUM9jXFJx7A+yiTPfCooau/iCcP1wQh4NFw=
|
||||
k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98=
|
||||
k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg=
|
||||
k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk=
|
||||
k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU=
|
||||
k8s.io/component-helpers v0.33.0 h1:0AdW0A0mIgljLgtG0hJDdJl52PPqTrtMgOgtm/9i/Ys=
|
||||
k8s.io/component-helpers v0.33.0/go.mod h1:9SRiXfLldPw9lEEuSsapMtvT8j/h1JyFFapbtybwKvU=
|
||||
k8s.io/controller-manager v0.33.0 h1:O9LnTjffOe62d66gMcKLuPXsBjY5sqETWEIzg+DVL8w=
|
||||
k8s.io/controller-manager v0.33.0/go.mod h1:vQwAQnroav4+UyE2acW1Rj6CSsHPzr2/018kgRLYqlI=
|
||||
k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8=
|
||||
k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE=
|
||||
k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs=
|
||||
k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8=
|
||||
k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA=
|
||||
k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||
k8s.io/apiserver v0.33.3 h1:Wv0hGc+QFdMJB4ZSiHrCgN3zL3QRatu56+rpccKC3J4=
|
||||
k8s.io/apiserver v0.33.3/go.mod h1:05632ifFEe6TxwjdAIrwINHWE2hLwyADFk5mBsQa15E=
|
||||
k8s.io/cli-runtime v0.33.3 h1:Dgy4vPjNIu8LMJBSvs8W0LcdV0PX/8aGG1DA1W8lklA=
|
||||
k8s.io/cli-runtime v0.33.3/go.mod h1:yklhLklD4vLS8HNGgC9wGiuHWze4g7x6XQZ+8edsKEo=
|
||||
k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA=
|
||||
k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg=
|
||||
k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA=
|
||||
k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4=
|
||||
k8s.io/component-helpers v0.33.3 h1:fjWVORSQfI0WKzPeIFSju/gMD9sybwXBJ7oPbqQu6eM=
|
||||
k8s.io/component-helpers v0.33.3/go.mod h1:7iwv+Y9Guw6X4RrnNQOyQlXcvJrVjPveHVqUA5dm31c=
|
||||
k8s.io/controller-manager v0.33.3 h1:OItg5te3ixRw9MFko5KW2ed4ogBbwnJfrS4mCXixbsg=
|
||||
k8s.io/controller-manager v0.33.3/go.mod h1:sH/I5CXliIc+3bnEjdalgSTJ/3fJhIHrDA3sOwTNgxM=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-aggregator v0.33.0 h1:jTjEe/DqpJcaPp4x1CjNaMb1XPD+H8SSf/yVpC8coFg=
|
||||
k8s.io/kube-aggregator v0.33.0/go.mod h1:6BRnSnWzh6nWUxjQhNwGP9gMnPfSW0WsFeOZGMHtvZw=
|
||||
k8s.io/kube-aggregator v0.33.3 h1:Pa6hQpKJMX0p0D2wwcxXJgu02++gYcGWXoW1z1ZJDfo=
|
||||
k8s.io/kube-aggregator v0.33.3/go.mod h1:hwvkUoQ8q6gv0+SgNnlmQ3eUue1zHhJKTHsX7BwxwSE=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/kubelet v0.33.0 h1:4pJA2Ge6Rp0kDNV76KH7pTBiaV2T1a1874QHMcubuSU=
|
||||
k8s.io/kubelet v0.33.0/go.mod h1:iDnxbJQMy9DUNaML5L/WUlt3uJtNLWh7ZAe0JSp4Yi0=
|
||||
k8s.io/kubernetes v1.33.0 h1:BP5Y5yIzUZVeBuE/ESZvnw6TNxjXbLsCckIkljE+R0U=
|
||||
k8s.io/kubernetes v1.33.0/go.mod h1:2nWuPk0seE4+6sd0x60wQ6rYEXcV7SoeMbU0YbFm/5k=
|
||||
k8s.io/metrics v0.33.0 h1:sKe5sC9qb1RakMhs8LWYNuN2ne6OTCWexj8Jos3rO2Y=
|
||||
k8s.io/metrics v0.33.0/go.mod h1:XewckTFXmE2AJiP7PT3EXaY7hi7bler3t2ZLyOdQYzU=
|
||||
k8s.io/mount-utils v0.33.0 h1:hH6EcCcax4lFNIERaGMj6d7oGMW1qW3eTCwHUuLtLog=
|
||||
k8s.io/mount-utils v0.33.0/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE=
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
|
||||
sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
|
||||
k8s.io/kubelet v0.33.3 h1:Cvy8+7Lq9saZds2ib7YBXbKvkMMJu3f5mzucmhSIJno=
|
||||
k8s.io/kubelet v0.33.3/go.mod h1:Q1Cfr6VQq1m9v9XsE/mDmhTxPdN6NPU6Ug0e6mAqi58=
|
||||
k8s.io/kubernetes v1.33.3 h1:dBx5Z2ZhR8kNzAwCoCz4j1niUbUrNUDVxeSj4/Ienu0=
|
||||
k8s.io/kubernetes v1.33.3/go.mod h1:nrt8sldmckKz2fCZhgRX3SKfS2e+CzXATPv6ITNkU00=
|
||||
k8s.io/metrics v0.33.3 h1:9CcqBz15JZfISqwca33gdHS8I6XfsK1vA8WUdEnG70g=
|
||||
k8s.io/metrics v0.33.3/go.mod h1:Aw+cdg4AYHw0HvUY+lCyq40FOO84awrqvJRTw0cmXDs=
|
||||
k8s.io/mount-utils v0.33.3 h1:Q1jsnqdS4LdtJSYSXgiQv/XNrRHQncLk3gMYjKNSZrE=
|
||||
k8s.io/mount-utils v0.33.3/go.mod h1:1JR4rKymg8B8bCPo618hpSAdrpO6XLh0Acqok/xVwPE=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
|
||||
sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
|
|
k8s/crds.yaml (912 changes; file diff suppressed because it is too large)
@ -11,7 +11,7 @@ LH_MANAGER_PKG="github.com/longhorn/longhorn-manager"
OUTPUT_PKG="${LH_MANAGER_PKG}/k8s/pkg/client"
APIS_PATH="k8s/pkg/apis"
APIS_DIR="${SCRIPT_ROOT}/${APIS_PATH}"
GROUP_VERSION="longhorn:v1beta1,v1beta2"
GROUP_VERSION="longhorn:v1beta2"
CODE_GENERATOR_VERSION="v0.32.1"
CRDS_DIR="crds"
CONTROLLER_TOOLS_VERSION="v0.17.1"
@ -1,11 +0,0 @@
# This is one of seven CRDs that were originally apiextensions.k8s.io/v1beta1. If Longhorn and Kubernetes are upgraded
# in a particular order from Longhorn v1.0.2- to the latest, preserveUnknownFields may remain true, even though the
# default (and intended value) is false.
# https://github.com/longhorn/longhorn/discussions/4198
# https://github.com/longhorn/longhorn/issues/7887
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: engines.longhorn.io
spec:
  preserveUnknownFields: false

@ -1,11 +0,0 @@
# This is one of seven CRDs that were originally apiextensions.k8s.io/v1beta1. If Longhorn and Kubernetes are upgraded
# in a particular order from Longhorn v1.0.2- to the latest, preserveUnknownFields may remain true, even though the
# default (and intended value) is false.
# https://github.com/longhorn/longhorn/discussions/4198
# https://github.com/longhorn/longhorn/issues/7887
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: engineimages.longhorn.io
spec:
  preserveUnknownFields: false

@ -1,11 +0,0 @@
# This is one of seven CRDs that were originally apiextensions.k8s.io/v1beta1. If Longhorn and Kubernetes are upgraded
# in a particular order from Longhorn v1.0.2- to the latest, preserveUnknownFields may remain true, even though the
# default (and intended value) is false.
# https://github.com/longhorn/longhorn/discussions/4198
# https://github.com/longhorn/longhorn/issues/7887
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: instancemanagers.longhorn.io
spec:
  preserveUnknownFields: false

@ -1,11 +0,0 @@
# This is one of seven CRDs that were originally apiextensions.k8s.io/v1beta1. If Longhorn and Kubernetes are upgraded
# in a particular order from Longhorn v1.0.2- to the latest, preserveUnknownFields may remain true, even though the
# default (and intended value) is false.
# https://github.com/longhorn/longhorn/discussions/4198
# https://github.com/longhorn/longhorn/issues/7887
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: nodes.longhorn.io
spec:
  preserveUnknownFields: false

@ -1,11 +0,0 @@
# This is one of seven CRDs that were originally apiextensions.k8s.io/v1beta1. If Longhorn and Kubernetes are upgraded
# in a particular order from Longhorn v1.0.2- to the latest, preserveUnknownFields may remain true, even though the
# default (and intended value) is false.
# https://github.com/longhorn/longhorn/discussions/4198
# https://github.com/longhorn/longhorn/issues/7887
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: replicas.longhorn.io
spec:
  preserveUnknownFields: false

@ -1,11 +0,0 @@
# This is one of seven CRDs that were originally apiextensions.k8s.io/v1beta1. If Longhorn and Kubernetes are upgraded
# in a particular order from Longhorn v1.0.2- to the latest, preserveUnknownFields may remain true, even though the
# default (and intended value) is false.
# https://github.com/longhorn/longhorn/discussions/4198
# https://github.com/longhorn/longhorn/issues/7887
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: settings.longhorn.io
spec:
  preserveUnknownFields: false

@ -1,11 +0,0 @@
# This is one of seven CRDs that were originally apiextensions.k8s.io/v1beta1. If Longhorn and Kubernetes are upgraded
# in a particular order from Longhorn v1.0.2- to the latest, preserveUnknownFields may remain true, even though the
# default (and intended value) is false.
# https://github.com/longhorn/longhorn/discussions/4198
# https://github.com/longhorn/longhorn/issues/7887
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: volumes.longhorn.io
spec:
  preserveUnknownFields: false
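The seven deleted patches above all force preserveUnknownFields to false on CRDs that started life as apiextensions.k8s.io/v1beta1. A minimal sketch, assuming a kubeconfig at the default location, of how the same condition can be checked against a live cluster with the upstream apiextensions client; the program is illustrative and not part of this change:

// checkpuf.go: hypothetical helper that reads each Longhorn CRD and reports
// whether spec.preserveUnknownFields is still set.
package main

import (
    "context"
    "fmt"

    clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client, err := clientset.NewForConfig(config)
    if err != nil {
        panic(err)
    }
    names := []string{
        "engines.longhorn.io", "engineimages.longhorn.io", "instancemanagers.longhorn.io",
        "nodes.longhorn.io", "replicas.longhorn.io", "settings.longhorn.io", "volumes.longhorn.io",
    }
    for _, name := range names {
        crd, err := client.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s: preserveUnknownFields=%v\n", name, crd.Spec.PreserveUnknownFields)
    }
}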
@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: backingimages.longhorn.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      conversionReviewVersions: ["v1beta2","v1beta1"]
      clientConfig:
        service:
          namespace: longhorn-system
          name: longhorn-conversion-webhook
          path: /v1/webhook/conversion
          port: 9501

@ -1,15 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: backuptargets.longhorn.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      conversionReviewVersions: ["v1beta2","v1beta1"]
      clientConfig:
        service:
          namespace: longhorn-system
          name: longhorn-conversion-webhook
          path: /v1/webhook/conversion
          port: 9501

@ -1,16 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: engineimages.longhorn.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      conversionReviewVersions: ["v1beta2","v1beta1"]
      clientConfig:
        service:
          namespace: longhorn-system
          name: longhorn-conversion-webhook
          path: /v1/webhook/conversion
          port: 9501

@ -1,16 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: nodes.longhorn.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      conversionReviewVersions: ["v1beta2","v1beta1"]
      clientConfig:
        service:
          namespace: longhorn-system
          name: longhorn-conversion-webhook
          path: /v1/webhook/conversion
          port: 9501

@ -1,16 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: volumes.longhorn.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      conversionReviewVersions: ["v1beta2","v1beta1"]
      clientConfig:
        service:
          namespace: longhorn-system
          name: longhorn-conversion-webhook
          path: /v1/webhook/conversion
          port: 9501
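The five removed patches above route CRD conversion requests to the longhorn-conversion-webhook service at /v1/webhook/conversion on port 9501. For orientation only, a hedged skeleton of the ConversionReview round trip such an endpoint handles; this is not Longhorn's implementation, it serves plain HTTP and merely echoes the objects where the real code would convert them (for example via the ConvertTo/ConvertFrom methods shown below):

// conversionwebhook.go: hypothetical skeleton of an apiextensions.k8s.io/v1
// conversion webhook handler.
package main

import (
    "encoding/json"
    "net/http"

    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func convertHandler(w http.ResponseWriter, r *http.Request) {
    review := &apiextensionsv1.ConversionReview{}
    if err := json.NewDecoder(r.Body).Decode(review); err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    resp := &apiextensionsv1.ConversionResponse{
        UID:    review.Request.UID,
        Result: metav1.Status{Status: metav1.StatusSuccess},
    }
    // A real webhook converts each object in review.Request.Objects to
    // review.Request.DesiredAPIVersion and appends it to ConvertedObjects.
    resp.ConvertedObjects = review.Request.Objects // placeholder: echo unchanged
    review.Response = resp
    review.Request = nil
    _ = json.NewEncoder(w).Encode(review)
}

func main() {
    http.HandleFunc("/v1/webhook/conversion", convertHandler)
    _ = http.ListenAndServe(":9501", nil) // illustration only; the real service terminates TLS
}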
@ -1,141 +0,0 @@
package v1beta1

import (
    "fmt"

    "github.com/jinzhu/copier"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/conversion"

    "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"
)

// BackingImageDownloadState is replaced by BackingImageState.
type BackingImageDownloadState string

type BackingImageState string

const (
    BackingImageStatePending          = BackingImageState("pending")
    BackingImageStateStarting         = BackingImageState("starting")
    BackingImageStateReadyForTransfer = BackingImageState("ready-for-transfer")
    BackingImageStateReady            = BackingImageState("ready")
    BackingImageStateInProgress       = BackingImageState("in-progress")
    BackingImageStateFailed           = BackingImageState("failed")
    BackingImageStateUnknown          = BackingImageState("unknown")
)

type BackingImageDiskFileStatus struct {
    State                   BackingImageState `json:"state"`
    Progress                int               `json:"progress"`
    Message                 string            `json:"message"`
    LastStateTransitionTime string            `json:"lastStateTransitionTime"`
}

// BackingImageSpec defines the desired state of the Longhorn backing image
type BackingImageSpec struct {
    Disks            map[string]struct{}        `json:"disks"`
    Checksum         string                     `json:"checksum"`
    SourceType       BackingImageDataSourceType `json:"sourceType"`
    SourceParameters map[string]string          `json:"sourceParameters"`

    // Deprecated: This kind of info will be included in the related BackingImageDataSource.
    ImageURL string `json:"imageURL"`
}

// BackingImageStatus defines the observed state of the Longhorn backing image status
type BackingImageStatus struct {
    OwnerID           string                                 `json:"ownerID"`
    UUID              string                                 `json:"uuid"`
    Size              int64                                  `json:"size"`
    Checksum          string                                 `json:"checksum"`
    DiskFileStatusMap map[string]*BackingImageDiskFileStatus `json:"diskFileStatusMap"`
    DiskLastRefAtMap  map[string]string                      `json:"diskLastRefAtMap"`

    // Deprecated: Replaced by field `State` in `DiskFileStatusMap`.
    DiskDownloadStateMap map[string]BackingImageDownloadState `json:"diskDownloadStateMap"`
    // Deprecated: Replaced by field `Progress` in `DiskFileStatusMap`.
    DiskDownloadProgressMap map[string]int `json:"diskDownloadProgressMap"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:shortName=lhbi
// +kubebuilder:unservedversion
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion
// +kubebuilder:deprecatedversion:warning="longhorn.io/v1beta1 BackingImage is deprecated; use longhorn.io/v1beta2 BackingImage instead"
// +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.image`,description="The backing image name"
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`

// BackingImage is where Longhorn stores backing image object.
type BackingImage struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    // +kubebuilder:validation:Schemaless
    // +kubebuilder:pruning:PreserveUnknownFields
    Spec BackingImageSpec `json:"spec,omitempty"`
    // +kubebuilder:validation:Schemaless
    // +kubebuilder:pruning:PreserveUnknownFields
    Status BackingImageStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// BackingImageList is a list of BackingImages.
type BackingImageList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []BackingImage `json:"items"`
}

// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
func (bi *BackingImage) ConvertTo(dst conversion.Hub) error {
    switch t := dst.(type) {
    case *v1beta2.BackingImage:
        biV1beta2 := dst.(*v1beta2.BackingImage)
        biV1beta2.ObjectMeta = bi.ObjectMeta
        if err := copier.Copy(&biV1beta2.Spec, &bi.Spec); err != nil {
            return err
        }
        if err := copier.Copy(&biV1beta2.Status, &bi.Status); err != nil {
            return err
        }

        // Copy spec.disks from map[string]struct{} to map[string]string
        biV1beta2.Spec.Disks = make(map[string]string)
        for name := range bi.Spec.Disks {
            biV1beta2.Spec.Disks[name] = ""
            biV1beta2.Spec.DiskFileSpecMap[name] = &v1beta2.BackingImageDiskFileSpec{}
        }
        return nil
    default:
        return fmt.Errorf("unsupported type %v", t)
    }
}

// ConvertFrom converts from hub version (v1beta2) to spoke version (v1beta1)
func (bi *BackingImage) ConvertFrom(src conversion.Hub) error {
    switch t := src.(type) {
    case *v1beta2.BackingImage:
        biV1beta2 := src.(*v1beta2.BackingImage)
        bi.ObjectMeta = biV1beta2.ObjectMeta
        if err := copier.Copy(&bi.Spec, &biV1beta2.Spec); err != nil {
            return err
        }
        if err := copier.Copy(&bi.Status, &biV1beta2.Status); err != nil {
            return err
        }

        // Copy spec.disks from map[string]string to map[string]struct{}
        bi.Spec.Disks = make(map[string]struct{})
        for name := range biV1beta2.Spec.Disks {
            bi.Spec.Disks[name] = struct{}{}
        }
        return nil
    default:
        return fmt.Errorf("unsupported type %v", t)
    }
}
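For context on how this deleted spoke type was exercised before the change: controller-runtime calls ConvertTo/ConvertFrom when translating between stored and requested CRD versions, and the same round trip can be driven directly. A minimal, illustrative sketch, not code from the repository; it assumes *v1beta2.BackingImage is the conversion hub and pre-initializes DiskFileSpecMap, which ConvertTo writes into:

// roundtrip.go: hypothetical usage of the removed v1beta1 conversion methods.
package main

import (
    "fmt"

    "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta1"
    "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"
)

func main() {
    src := &v1beta1.BackingImage{
        Spec: v1beta1.BackingImageSpec{
            Disks: map[string]struct{}{"disk-1": {}},
        },
    }

    dst := &v1beta2.BackingImage{}
    // ConvertTo populates DiskFileSpecMap per disk, so provide the map up front.
    dst.Spec.DiskFileSpecMap = map[string]*v1beta2.BackingImageDiskFileSpec{}
    // Upgrade the spoke (v1beta1) object into the hub (v1beta2) shape; spec.disks
    // is translated from map[string]struct{} to map[string]string.
    if err := src.ConvertTo(dst); err != nil {
        fmt.Println("conversion failed:", err)
        return
    }
    fmt.Println("v1beta2 disks:", dst.Spec.Disks)

    back := &v1beta1.BackingImage{}
    // ConvertFrom performs the downgrade in the other direction.
    if err := back.ConvertFrom(dst); err != nil {
        fmt.Println("conversion failed:", err)
        return
    }
    fmt.Println("v1beta1 disks:", back.Spec.Disks)
}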
Some files were not shown because too many files have changed in this diff.