Compare commits
41 Commits
v1.10.0-de ... master

SHA1:
a59e184109
f78696c38c
a282cfadb0
0be1ae364a
1d74ce371e
5e5e16b491
1d6c6d6eb8
a086a9d539
8bb338dcfe
d4d8329218
2d5701ad6d
1b2811f045
95f5703ec2
16b59ba7b3
bdbefab905
03f2e1a0ae
b20aad87ad
5b5a7deb2a
a8a4599ffb
d34de92bab
3f5d685d77
d39c0fbab5
54c82131cf
55c124d402
4d9e300028
12e3a848bb
9ee7981b29
ad639ef004
f7c5c70e00
d9947c944c
ad9abda739
7e5f4ae426
ed745de36a
ec4b8579da
dfe71fddab
3b32e54db5
3c8aeb9c5b
9411d653d0
5bb438e64f
ecfe2d6066
a9209b7177

api/model.go (178 changed lines)

@@ -29,39 +29,41 @@ type Empty struct {
type Volume struct {
client.Resource

Name string `json:"name"`
Size string `json:"size"`
Frontend longhorn.VolumeFrontend `json:"frontend"`
DisableFrontend bool `json:"disableFrontend"`
FromBackup string `json:"fromBackup"`
RestoreVolumeRecurringJob longhorn.RestoreVolumeRecurringJobType `json:"restoreVolumeRecurringJob"`
DataSource longhorn.VolumeDataSource `json:"dataSource"`
DataLocality longhorn.DataLocality `json:"dataLocality"`
StaleReplicaTimeout int `json:"staleReplicaTimeout"`
State longhorn.VolumeState `json:"state"`
Robustness longhorn.VolumeRobustness `json:"robustness"`
Image string `json:"image"`
CurrentImage string `json:"currentImage"`
BackingImage string `json:"backingImage"`
Created string `json:"created"`
LastBackup string `json:"lastBackup"`
LastBackupAt string `json:"lastBackupAt"`
LastAttachedBy string `json:"lastAttachedBy"`
Standby bool `json:"standby"`
RestoreRequired bool `json:"restoreRequired"`
RestoreInitiated bool `json:"restoreInitiated"`
RevisionCounterDisabled bool `json:"revisionCounterDisabled"`
SnapshotDataIntegrity longhorn.SnapshotDataIntegrity `json:"snapshotDataIntegrity"`
UnmapMarkSnapChainRemoved longhorn.UnmapMarkSnapChainRemoved `json:"unmapMarkSnapChainRemoved"`
BackupCompressionMethod longhorn.BackupCompressionMethod `json:"backupCompressionMethod"`
ReplicaSoftAntiAffinity longhorn.ReplicaSoftAntiAffinity `json:"replicaSoftAntiAffinity"`
ReplicaZoneSoftAntiAffinity longhorn.ReplicaZoneSoftAntiAffinity `json:"replicaZoneSoftAntiAffinity"`
ReplicaDiskSoftAntiAffinity longhorn.ReplicaDiskSoftAntiAffinity `json:"replicaDiskSoftAntiAffinity"`
DataEngine longhorn.DataEngineType `json:"dataEngine"`
SnapshotMaxCount int `json:"snapshotMaxCount"`
SnapshotMaxSize string `json:"snapshotMaxSize"`
FreezeFilesystemForSnapshot longhorn.FreezeFilesystemForSnapshot `json:"freezeFilesystemForSnapshot"`
BackupTargetName string `json:"backupTargetName"`
Name string `json:"name"`
Size string `json:"size"`
Frontend longhorn.VolumeFrontend `json:"frontend"`
DisableFrontend bool `json:"disableFrontend"`
FromBackup string `json:"fromBackup"`
RestoreVolumeRecurringJob longhorn.RestoreVolumeRecurringJobType `json:"restoreVolumeRecurringJob"`
DataSource longhorn.VolumeDataSource `json:"dataSource"`
DataLocality longhorn.DataLocality `json:"dataLocality"`
StaleReplicaTimeout int `json:"staleReplicaTimeout"`
State longhorn.VolumeState `json:"state"`
Robustness longhorn.VolumeRobustness `json:"robustness"`
Image string `json:"image"`
CurrentImage string `json:"currentImage"`
BackingImage string `json:"backingImage"`
Created string `json:"created"`
LastBackup string `json:"lastBackup"`
LastBackupAt string `json:"lastBackupAt"`
LastAttachedBy string `json:"lastAttachedBy"`
Standby bool `json:"standby"`
RestoreRequired bool `json:"restoreRequired"`
RestoreInitiated bool `json:"restoreInitiated"`
RevisionCounterDisabled bool `json:"revisionCounterDisabled"`
SnapshotDataIntegrity longhorn.SnapshotDataIntegrity `json:"snapshotDataIntegrity"`
UnmapMarkSnapChainRemoved longhorn.UnmapMarkSnapChainRemoved `json:"unmapMarkSnapChainRemoved"`
BackupCompressionMethod longhorn.BackupCompressionMethod `json:"backupCompressionMethod"`
BackupBlockSize string `json:"backupBlockSize"`
ReplicaSoftAntiAffinity longhorn.ReplicaSoftAntiAffinity `json:"replicaSoftAntiAffinity"`
ReplicaZoneSoftAntiAffinity longhorn.ReplicaZoneSoftAntiAffinity `json:"replicaZoneSoftAntiAffinity"`
ReplicaDiskSoftAntiAffinity longhorn.ReplicaDiskSoftAntiAffinity `json:"replicaDiskSoftAntiAffinity"`
DataEngine longhorn.DataEngineType `json:"dataEngine"`
SnapshotMaxCount int `json:"snapshotMaxCount"`
SnapshotMaxSize string `json:"snapshotMaxSize"`
ReplicaRebuildingBandwidthLimit int64 `json:"replicaRebuildingBandwidthLimit"`
FreezeFilesystemForSnapshot longhorn.FreezeFilesystemForSnapshot `json:"freezeFilesystemForSnapshot"`
BackupTargetName string `json:"backupTargetName"`

DiskSelector []string `json:"diskSelector"`
NodeSelector []string `json:"nodeSelector"`

@@ -176,6 +178,7 @@ type Backup struct {
NewlyUploadedDataSize string `json:"newlyUploadDataSize"`
ReUploadedDataSize string `json:"reUploadedDataSize"`
BackupTargetName string `json:"backupTargetName"`
BlockSize string `json:"blockSize"`
}

type BackupBackingImage struct {

@@ -249,6 +252,8 @@ type Attachment struct {
}

type VolumeAttachment struct {
client.Resource

Attachments map[string]Attachment `json:"attachments"`
Volume string `json:"volume"`
}

@@ -368,6 +373,10 @@ type UpdateSnapshotMaxSizeInput struct {
SnapshotMaxSize string `json:"snapshotMaxSize"`
}

type UpdateReplicaRebuildingBandwidthLimitInput struct {
ReplicaRebuildingBandwidthLimit string `json:"replicaRebuildingBandwidthLimit"`
}

type UpdateBackupCompressionMethodInput struct {
BackupCompressionMethod string `json:"backupCompressionMethod"`
}

@@ -396,6 +405,10 @@ type UpdateSnapshotMaxSize struct {
SnapshotMaxSize string `json:"snapshotMaxSize"`
}

type UpdateReplicaRebuildingBandwidthLimit struct {
ReplicaRebuildingBandwidthLimit string `json:"replicaRebuildingBandwidthLimit"`
}

type UpdateFreezeFilesystemForSnapshotInput struct {
FreezeFilesystemForSnapshot string `json:"freezeFilesystemForSnapshot"`
}

@@ -662,6 +675,7 @@ func NewSchema() *client.Schemas {
schemas.AddType("UpdateSnapshotDataIntegrityInput", UpdateSnapshotDataIntegrityInput{})
schemas.AddType("UpdateSnapshotMaxCountInput", UpdateSnapshotMaxCountInput{})
schemas.AddType("UpdateSnapshotMaxSizeInput", UpdateSnapshotMaxSizeInput{})
schemas.AddType("UpdateReplicaRebuildingBandwidthLimitInput", UpdateReplicaRebuildingBandwidthLimitInput{})
schemas.AddType("UpdateBackupCompressionInput", UpdateBackupCompressionMethodInput{})
schemas.AddType("UpdateUnmapMarkSnapChainRemovedInput", UpdateUnmapMarkSnapChainRemovedInput{})
schemas.AddType("UpdateReplicaSoftAntiAffinityInput", UpdateReplicaSoftAntiAffinityInput{})

@@ -1093,6 +1107,10 @@ func volumeSchema(volume *client.Schema) {
Input: "UpdateSnapshotMaxSizeInput",
},

"updateReplicaRebuildingBandwidthLimit": {
Input: "UpdateReplicaRebuildingBandwidthLimitInput",
},

"updateBackupCompressionMethod": {
Input: "UpdateBackupCompressionMethodInput",
},

@@ -1603,30 +1621,32 @@ func toVolumeResource(v *longhorn.Volume, ves []*longhorn.Engine, vrs []*longhor
Actions: map[string]string{},
Links: map[string]string{},
},
Name: v.Name,
Size: strconv.FormatInt(v.Spec.Size, 10),
Frontend: v.Spec.Frontend,
DisableFrontend: v.Spec.DisableFrontend,
LastAttachedBy: v.Spec.LastAttachedBy,
FromBackup: v.Spec.FromBackup,
DataSource: v.Spec.DataSource,
NumberOfReplicas: v.Spec.NumberOfReplicas,
ReplicaAutoBalance: v.Spec.ReplicaAutoBalance,
DataLocality: v.Spec.DataLocality,
SnapshotDataIntegrity: v.Spec.SnapshotDataIntegrity,
SnapshotMaxCount: v.Spec.SnapshotMaxCount,
SnapshotMaxSize: strconv.FormatInt(v.Spec.SnapshotMaxSize, 10),
BackupCompressionMethod: v.Spec.BackupCompressionMethod,
StaleReplicaTimeout: v.Spec.StaleReplicaTimeout,
Created: v.CreationTimestamp.String(),
Image: v.Spec.Image,
BackingImage: v.Spec.BackingImage,
Standby: v.Spec.Standby,
DiskSelector: v.Spec.DiskSelector,
NodeSelector: v.Spec.NodeSelector,
RestoreVolumeRecurringJob: v.Spec.RestoreVolumeRecurringJob,
FreezeFilesystemForSnapshot: v.Spec.FreezeFilesystemForSnapshot,
BackupTargetName: v.Spec.BackupTargetName,
Name: v.Name,
Size: strconv.FormatInt(v.Spec.Size, 10),
Frontend: v.Spec.Frontend,
DisableFrontend: v.Spec.DisableFrontend,
LastAttachedBy: v.Spec.LastAttachedBy,
FromBackup: v.Spec.FromBackup,
DataSource: v.Spec.DataSource,
NumberOfReplicas: v.Spec.NumberOfReplicas,
ReplicaAutoBalance: v.Spec.ReplicaAutoBalance,
DataLocality: v.Spec.DataLocality,
SnapshotDataIntegrity: v.Spec.SnapshotDataIntegrity,
SnapshotMaxCount: v.Spec.SnapshotMaxCount,
SnapshotMaxSize: strconv.FormatInt(v.Spec.SnapshotMaxSize, 10),
ReplicaRebuildingBandwidthLimit: v.Spec.ReplicaRebuildingBandwidthLimit,
BackupCompressionMethod: v.Spec.BackupCompressionMethod,
BackupBlockSize: strconv.FormatInt(v.Spec.BackupBlockSize, 10),
StaleReplicaTimeout: v.Spec.StaleReplicaTimeout,
Created: v.CreationTimestamp.String(),
Image: v.Spec.Image,
BackingImage: v.Spec.BackingImage,
Standby: v.Spec.Standby,
DiskSelector: v.Spec.DiskSelector,
NodeSelector: v.Spec.NodeSelector,
RestoreVolumeRecurringJob: v.Spec.RestoreVolumeRecurringJob,
FreezeFilesystemForSnapshot: v.Spec.FreezeFilesystemForSnapshot,
BackupTargetName: v.Spec.BackupTargetName,

State: v.Status.State,
Robustness: v.Status.Robustness,

@@ -1699,6 +1719,7 @@ func toVolumeResource(v *longhorn.Volume, ves []*longhorn.Engine, vrs []*longhor
actions["updateSnapshotDataIntegrity"] = struct{}{}
actions["updateSnapshotMaxCount"] = struct{}{}
actions["updateSnapshotMaxSize"] = struct{}{}
actions["updateReplicaRebuildingBandwidthLimit"] = struct{}{}
actions["updateBackupCompressionMethod"] = struct{}{}
actions["updateReplicaSoftAntiAffinity"] = struct{}{}
actions["updateReplicaZoneSoftAntiAffinity"] = struct{}{}

@@ -1732,6 +1753,7 @@ func toVolumeResource(v *longhorn.Volume, ves []*longhorn.Engine, vrs []*longhor
actions["updateSnapshotDataIntegrity"] = struct{}{}
actions["updateSnapshotMaxCount"] = struct{}{}
actions["updateSnapshotMaxSize"] = struct{}{}
actions["updateReplicaRebuildingBandwidthLimit"] = struct{}{}
actions["updateBackupCompressionMethod"] = struct{}{}
actions["updateReplicaSoftAntiAffinity"] = struct{}{}
actions["updateReplicaZoneSoftAntiAffinity"] = struct{}{}

@@ -2023,6 +2045,7 @@ func toBackupResource(b *longhorn.Backup) *Backup {
NewlyUploadedDataSize: b.Status.NewlyUploadedDataSize,
ReUploadedDataSize: b.Status.ReUploadedDataSize,
BackupTargetName: backupTargetName,
BlockSize: strconv.FormatInt(b.Spec.BackupBlockSize, 10),
}
// Set the volume name from backup CR's label if it's empty.
// This field is empty probably because the backup state is not Ready

@@ -2414,6 +2437,47 @@ func toOrphanCollection(orphans map[string]*longhorn.Orphan) *client.GenericColl
return &client.GenericCollection{Data: data, Collection: client.Collection{ResourceType: "orphan"}}
}

func toVolumeAttachmentResource(volumeAttachment *longhorn.VolumeAttachment) *VolumeAttachment {
attachments := make(map[string]Attachment)

for ticketName, ticket := range volumeAttachment.Spec.AttachmentTickets {
status := volumeAttachment.Status.AttachmentTicketStatuses[ticketName]

attachment := Attachment{
AttachmentID: ticket.ID,
AttachmentType: string(ticket.Type),
NodeID: ticket.NodeID,
Parameters: ticket.Parameters,
Satisfied: false,
Conditions: nil,
}

if status != nil {
attachment.Satisfied = status.Satisfied
attachment.Conditions = status.Conditions
}

attachments[ticketName] = attachment
}

return &VolumeAttachment{
Resource: client.Resource{
Id: volumeAttachment.Name,
Type: "volumeAttachment",
},
Volume: volumeAttachment.Spec.Volume,
Attachments: attachments,
}
}

func toVolumeAttachmentCollection(attachments []*longhorn.VolumeAttachment, apiContext *api.ApiContext) *client.GenericCollection {
data := []interface{}{}
for _, attachment := range attachments {
data = append(data, toVolumeAttachmentResource(attachment))
}
return &client.GenericCollection{Data: data, Collection: client.Collection{ResourceType: "volumeAttachment"}}
}

func sliceToMap(conditions []longhorn.Condition) map[string]longhorn.Condition {
converted := map[string]longhorn.Condition{}
for _, c := range conditions {

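For reference, the new action inputs added above are plain one-field JSON objects. A minimal standalone sketch of the wire format follows; the struct is copied from the diff and redefined locally so the snippet compiles on its own, and the value 104857600 is only an illustrative byte count, not a value taken from this change.

package main

import (
	"encoding/json"
	"fmt"
)

// Copied from the UpdateReplicaRebuildingBandwidthLimitInput type added in api/model.go above.
type UpdateReplicaRebuildingBandwidthLimitInput struct {
	ReplicaRebuildingBandwidthLimit string `json:"replicaRebuildingBandwidthLimit"`
}

func main() {
	// Marshal the input to show the JSON body a caller would send for the new action.
	in := UpdateReplicaRebuildingBandwidthLimitInput{ReplicaRebuildingBandwidthLimit: "104857600"}
	b, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // prints {"replicaRebuildingBandwidthLimit":"104857600"}
}
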
@@ -69,21 +69,22 @@ func NewRouter(s *Server) *mux.Router {
r.Methods("DELETE").Path("/v1/volumes/{name}").Handler(f(schemas, s.VolumeDelete))
r.Methods("POST").Path("/v1/volumes").Handler(f(schemas, s.fwd.Handler(s.fwd.HandleProxyRequestByNodeID, s.fwd.GetHTTPAddressByNodeID(NodeHasDefaultEngineImage(s.m)), s.VolumeCreate)))
volumeActions := map[string]func(http.ResponseWriter, *http.Request) error{
"attach": s.VolumeAttach,
"detach": s.VolumeDetach,
"salvage": s.VolumeSalvage,
"updateDataLocality": s.VolumeUpdateDataLocality,
"updateAccessMode": s.VolumeUpdateAccessMode,
"updateUnmapMarkSnapChainRemoved": s.VolumeUpdateUnmapMarkSnapChainRemoved,
"updateSnapshotMaxCount": s.VolumeUpdateSnapshotMaxCount,
"updateSnapshotMaxSize": s.VolumeUpdateSnapshotMaxSize,
"updateReplicaSoftAntiAffinity": s.VolumeUpdateReplicaSoftAntiAffinity,
"updateReplicaZoneSoftAntiAffinity": s.VolumeUpdateReplicaZoneSoftAntiAffinity,
"updateReplicaDiskSoftAntiAffinity": s.VolumeUpdateReplicaDiskSoftAntiAffinity,
"activate": s.VolumeActivate,
"expand": s.VolumeExpand,
"cancelExpansion": s.VolumeCancelExpansion,
"offlineReplicaRebuilding": s.VolumeOfflineRebuilding,
"attach": s.VolumeAttach,
"detach": s.VolumeDetach,
"salvage": s.VolumeSalvage,
"updateDataLocality": s.VolumeUpdateDataLocality,
"updateAccessMode": s.VolumeUpdateAccessMode,
"updateUnmapMarkSnapChainRemoved": s.VolumeUpdateUnmapMarkSnapChainRemoved,
"updateSnapshotMaxCount": s.VolumeUpdateSnapshotMaxCount,
"updateSnapshotMaxSize": s.VolumeUpdateSnapshotMaxSize,
"updateReplicaRebuildingBandwidthLimit": s.VolumeUpdateReplicaRebuildingBandwidthLimit,
"updateReplicaSoftAntiAffinity": s.VolumeUpdateReplicaSoftAntiAffinity,
"updateReplicaZoneSoftAntiAffinity": s.VolumeUpdateReplicaZoneSoftAntiAffinity,
"updateReplicaDiskSoftAntiAffinity": s.VolumeUpdateReplicaDiskSoftAntiAffinity,
"activate": s.VolumeActivate,
"expand": s.VolumeExpand,
"cancelExpansion": s.VolumeCancelExpansion,
"offlineReplicaRebuilding": s.VolumeOfflineRebuilding,

"updateReplicaCount": s.VolumeUpdateReplicaCount,
"updateReplicaAutoBalance": s.VolumeUpdateReplicaAutoBalance,

@@ -291,5 +292,9 @@ func NewRouter(s *Server) *mux.Router {
r.Path("/v1/ws/events").Handler(f(schemas, eventListStream))
r.Path("/v1/ws/{period}/events").Handler(f(schemas, eventListStream))

// VolumeAttachment routes
r.Methods("GET").Path("/v1/volumeattachments").Handler(f(schemas, s.VolumeAttachmentList))
r.Methods("GET").Path("/v1/volumeattachments/{name}").Handler(f(schemas, s.VolumeAttachmentGet))

return r
}

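A quick way to exercise the new read-only VolumeAttachment routes is a plain HTTP GET against the manager API. This is only a sketch: the address and port below are placeholders for the longhorn-manager service, and it assumes the usual unauthenticated in-cluster access to that API.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder address; in a cluster this would point at the longhorn-manager service.
	base := "http://longhorn-backend:9500"

	// List all VolumeAttachment resources (route added in NewRouter above).
	resp, err := http.Get(base + "/v1/volumeattachments")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
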
@@ -172,36 +172,43 @@ func (s *Server) VolumeCreate(rw http.ResponseWriter, req *http.Request) error {
return errors.Wrap(err, "failed to parse snapshot max size")
}

backupBlockSize, err := util.ConvertSize(volume.BackupBlockSize)
if err != nil {
return errors.Wrapf(err, "failed to parse backup block size %v", volume.BackupBlockSize)
}

v, err := s.m.Create(volume.Name, &longhorn.VolumeSpec{
Size: size,
AccessMode: volume.AccessMode,
Migratable: volume.Migratable,
Encrypted: volume.Encrypted,
Frontend: volume.Frontend,
FromBackup: volume.FromBackup,
RestoreVolumeRecurringJob: volume.RestoreVolumeRecurringJob,
DataSource: volume.DataSource,
NumberOfReplicas: volume.NumberOfReplicas,
ReplicaAutoBalance: volume.ReplicaAutoBalance,
DataLocality: volume.DataLocality,
StaleReplicaTimeout: volume.StaleReplicaTimeout,
BackingImage: volume.BackingImage,
Standby: volume.Standby,
RevisionCounterDisabled: volume.RevisionCounterDisabled,
DiskSelector: volume.DiskSelector,
NodeSelector: volume.NodeSelector,
SnapshotDataIntegrity: volume.SnapshotDataIntegrity,
SnapshotMaxCount: volume.SnapshotMaxCount,
SnapshotMaxSize: snapshotMaxSize,
BackupCompressionMethod: volume.BackupCompressionMethod,
UnmapMarkSnapChainRemoved: volume.UnmapMarkSnapChainRemoved,
ReplicaSoftAntiAffinity: volume.ReplicaSoftAntiAffinity,
ReplicaZoneSoftAntiAffinity: volume.ReplicaZoneSoftAntiAffinity,
ReplicaDiskSoftAntiAffinity: volume.ReplicaDiskSoftAntiAffinity,
DataEngine: volume.DataEngine,
FreezeFilesystemForSnapshot: volume.FreezeFilesystemForSnapshot,
BackupTargetName: volume.BackupTargetName,
OfflineRebuilding: volume.OfflineRebuilding,
Size: size,
AccessMode: volume.AccessMode,
Migratable: volume.Migratable,
Encrypted: volume.Encrypted,
Frontend: volume.Frontend,
FromBackup: volume.FromBackup,
RestoreVolumeRecurringJob: volume.RestoreVolumeRecurringJob,
DataSource: volume.DataSource,
NumberOfReplicas: volume.NumberOfReplicas,
ReplicaAutoBalance: volume.ReplicaAutoBalance,
DataLocality: volume.DataLocality,
StaleReplicaTimeout: volume.StaleReplicaTimeout,
BackingImage: volume.BackingImage,
Standby: volume.Standby,
RevisionCounterDisabled: volume.RevisionCounterDisabled,
DiskSelector: volume.DiskSelector,
NodeSelector: volume.NodeSelector,
SnapshotDataIntegrity: volume.SnapshotDataIntegrity,
SnapshotMaxCount: volume.SnapshotMaxCount,
SnapshotMaxSize: snapshotMaxSize,
ReplicaRebuildingBandwidthLimit: volume.ReplicaRebuildingBandwidthLimit,
BackupCompressionMethod: volume.BackupCompressionMethod,
BackupBlockSize: backupBlockSize,
UnmapMarkSnapChainRemoved: volume.UnmapMarkSnapChainRemoved,
ReplicaSoftAntiAffinity: volume.ReplicaSoftAntiAffinity,
ReplicaZoneSoftAntiAffinity: volume.ReplicaZoneSoftAntiAffinity,
ReplicaDiskSoftAntiAffinity: volume.ReplicaDiskSoftAntiAffinity,
DataEngine: volume.DataEngine,
FreezeFilesystemForSnapshot: volume.FreezeFilesystemForSnapshot,
BackupTargetName: volume.BackupTargetName,
OfflineRebuilding: volume.OfflineRebuilding,
}, volume.RecurringJobSelector)
if err != nil {
return errors.Wrap(err, "failed to create volume")

@@ -839,6 +846,33 @@ func (s *Server) VolumeUpdateSnapshotMaxSize(rw http.ResponseWriter, req *http.R
return s.responseWithVolume(rw, req, "", v)
}

func (s *Server) VolumeUpdateReplicaRebuildingBandwidthLimit(rw http.ResponseWriter, req *http.Request) error {
var input UpdateReplicaRebuildingBandwidthLimit
id := mux.Vars(req)["name"]

apiContext := api.GetApiContext(req)
if err := apiContext.Read(&input); err != nil {
return errors.Wrap(err, "failed to read ReplicaRebuildingBandwidthLimit input")
}

replicaRebuildingBandwidthLimit, err := util.ConvertSize(input.ReplicaRebuildingBandwidthLimit)
if err != nil {
return fmt.Errorf("failed to parse replica rebuilding bandwidth limit %v", err)
}

obj, err := util.RetryOnConflictCause(func() (interface{}, error) {
return s.m.UpdateReplicaRebuildingBandwidthLimit(id, replicaRebuildingBandwidthLimit)
})
if err != nil {
return err
}
v, ok := obj.(*longhorn.Volume)
if !ok {
return fmt.Errorf("failed to convert to volume %v object", id)
}
return s.responseWithVolume(rw, req, "", v)
}

func (s *Server) VolumeUpdateFreezeFilesystemForSnapshot(rw http.ResponseWriter, req *http.Request) error {
var input UpdateFreezeFilesystemForSnapshotInput
id := mux.Vars(req)["name"]

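The new handler reads a one-field JSON body and parses the value with util.ConvertSize, so callers supply the limit as a string. A hedged sketch of invoking it over HTTP follows; it assumes the Rancher-style "?action=updateReplicaRebuildingBandwidthLimit" convention used for the other volume actions, a placeholder manager address, and an illustrative volume name. The plain byte-count string avoids assuming which unit suffixes ConvertSize accepts.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder manager address and volume name; the ?action= form is an assumption
	// based on how the other volume actions in this API are normally invoked.
	url := "http://longhorn-backend:9500/v1/volumes/vol-1?action=updateReplicaRebuildingBandwidthLimit"

	// Body matches UpdateReplicaRebuildingBandwidthLimit{ReplicaRebuildingBandwidthLimit string}.
	body := []byte(`{"replicaRebuildingBandwidthLimit":"104857600"}`)

	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
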
@@ -0,0 +1,34 @@
package api

import (
"net/http"

"github.com/gorilla/mux"
"github.com/pkg/errors"

"github.com/rancher/go-rancher/api"
)

func (s *Server) VolumeAttachmentGet(rw http.ResponseWriter, req *http.Request) error {
apiContext := api.GetApiContext(req)

id := mux.Vars(req)["name"]

volumeAttachment, err := s.m.GetVolumeAttachment(id)
if err != nil {
return errors.Wrapf(err, "failed to get volume attachment '%s'", id)
}
apiContext.Write(toVolumeAttachmentResource(volumeAttachment))
return nil
}

func (s *Server) VolumeAttachmentList(rw http.ResponseWriter, req *http.Request) (err error) {
apiContext := api.GetApiContext(req)

volumeAttachmentList, err := s.m.ListVolumeAttachment()
if err != nil {
return errors.Wrap(err, "failed to list volume attachments")
}
apiContext.Write(toVolumeAttachmentCollection(volumeAttachmentList, apiContext))
return nil
}

@@ -7,8 +7,12 @@ const (
type Backup struct {
Resource `yaml:"-"`

BackupMode string `json:"backupMode,omitempty" yaml:"backup_mode,omitempty"`

BackupTargetName string `json:"backupTargetName,omitempty" yaml:"backup_target_name,omitempty"`

BlockSize string `json:"blockSize,omitempty" yaml:"block_size,omitempty"`

CompressionMethod string `json:"compressionMethod,omitempty" yaml:"compression_method,omitempty"`

Created string `json:"created,omitempty" yaml:"created,omitempty"`

@@ -21,8 +25,12 @@ type Backup struct {

Name string `json:"name,omitempty" yaml:"name,omitempty"`

NewlyUploadDataSize string `json:"newlyUploadDataSize,omitempty" yaml:"newly_upload_data_size,omitempty"`

Progress int64 `json:"progress,omitempty" yaml:"progress,omitempty"`

ReUploadedDataSize string `json:"reUploadedDataSize,omitempty" yaml:"re_uploaded_data_size,omitempty"`

Size string `json:"size,omitempty" yaml:"size,omitempty"`

SnapshotCreated string `json:"snapshotCreated,omitempty" yaml:"snapshot_created,omitempty"`

@@ -7,6 +7,10 @@ const (
type BackupBackingImage struct {
Resource `yaml:"-"`

BackingImageName string `json:"backingImageName,omitempty" yaml:"backing_image_name,omitempty"`

BackupTargetName string `json:"backupTargetName,omitempty" yaml:"backup_target_name,omitempty"`

CompressionMethod string `json:"compressionMethod,omitempty" yaml:"compression_method,omitempty"`

Created string `json:"created,omitempty" yaml:"created,omitempty"`

@@ -23,7 +27,7 @@ type BackupBackingImage struct {

Secret string `json:"secret,omitempty" yaml:"secret,omitempty"`

SecretNamespace string `json:"secretNamespace,omitempty" yaml:"secretNamespace,omitempty"`
SecretNamespace string `json:"secretNamespace,omitempty" yaml:"secret_namespace,omitempty"`

Size int64 `json:"size,omitempty" yaml:"size,omitempty"`

@@ -36,6 +36,8 @@ type BackupTargetOperations interface {
Update(existing *BackupTarget, updates interface{}) (*BackupTarget, error)
ById(id string) (*BackupTarget, error)
Delete(container *BackupTarget) error

ActionBackupTargetSync(*BackupTarget, *SyncBackupResource) (*BackupTargetListOutput, error)

ActionBackupTargetUpdate(*BackupTarget, *BackupTarget) (*BackupTargetListOutput, error)
}

func newBackupTargetClient(rancherClient *RancherClient) *BackupTargetClient {

@@ -87,3 +91,21 @@ func (c *BackupTargetClient) ById(id string) (*BackupTarget, error) {
func (c *BackupTargetClient) Delete(container *BackupTarget) error {
return c.rancherClient.doResourceDelete(BACKUP_TARGET_TYPE, &container.Resource)
}

func (c *BackupTargetClient) ActionBackupTargetSync(resource *BackupTarget, input *SyncBackupResource) (*BackupTargetListOutput, error) {

resp := &BackupTargetListOutput{}

err := c.rancherClient.doAction(BACKUP_TARGET_TYPE, "backupTargetSync", &resource.Resource, input, resp)

return resp, err
}

func (c *BackupTargetClient) ActionBackupTargetUpdate(resource *BackupTarget, input *BackupTarget) (*BackupTargetListOutput, error) {

resp := &BackupTargetListOutput{}

err := c.rancherClient.doAction(BACKUP_TARGET_TYPE, "backupTargetUpdate", &resource.Resource, input, resp)

return resp, err
}

@@ -0,0 +1,79 @@
package client

const (
BACKUP_TARGET_LIST_OUTPUT_TYPE = "backupTargetListOutput"
)

type BackupTargetListOutput struct {
Resource `yaml:"-"`

Data []BackupTarget `json:"data,omitempty" yaml:"data,omitempty"`
}

type BackupTargetListOutputCollection struct {
Collection
Data []BackupTargetListOutput `json:"data,omitempty"`
client *BackupTargetListOutputClient
}

type BackupTargetListOutputClient struct {
rancherClient *RancherClient
}

type BackupTargetListOutputOperations interface {
List(opts *ListOpts) (*BackupTargetListOutputCollection, error)
Create(opts *BackupTargetListOutput) (*BackupTargetListOutput, error)
Update(existing *BackupTargetListOutput, updates interface{}) (*BackupTargetListOutput, error)
ById(id string) (*BackupTargetListOutput, error)
Delete(container *BackupTargetListOutput) error
}

func newBackupTargetListOutputClient(rancherClient *RancherClient) *BackupTargetListOutputClient {
return &BackupTargetListOutputClient{
rancherClient: rancherClient,
}
}

func (c *BackupTargetListOutputClient) Create(container *BackupTargetListOutput) (*BackupTargetListOutput, error) {
resp := &BackupTargetListOutput{}
err := c.rancherClient.doCreate(BACKUP_TARGET_LIST_OUTPUT_TYPE, container, resp)
return resp, err
}

func (c *BackupTargetListOutputClient) Update(existing *BackupTargetListOutput, updates interface{}) (*BackupTargetListOutput, error) {
resp := &BackupTargetListOutput{}
err := c.rancherClient.doUpdate(BACKUP_TARGET_LIST_OUTPUT_TYPE, &existing.Resource, updates, resp)
return resp, err
}

func (c *BackupTargetListOutputClient) List(opts *ListOpts) (*BackupTargetListOutputCollection, error) {
resp := &BackupTargetListOutputCollection{}
err := c.rancherClient.doList(BACKUP_TARGET_LIST_OUTPUT_TYPE, opts, resp)
resp.client = c
return resp, err
}

func (cc *BackupTargetListOutputCollection) Next() (*BackupTargetListOutputCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &BackupTargetListOutputCollection{}
err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}

func (c *BackupTargetListOutputClient) ById(id string) (*BackupTargetListOutput, error) {
resp := &BackupTargetListOutput{}
err := c.rancherClient.doById(BACKUP_TARGET_LIST_OUTPUT_TYPE, id, resp)
if apiError, ok := err.(*ApiError); ok {
if apiError.StatusCode == 404 {
return nil, nil
}
}
return resp, err
}

func (c *BackupTargetListOutputClient) Delete(container *BackupTargetListOutput) error {
return c.rancherClient.doResourceDelete(BACKUP_TARGET_LIST_OUTPUT_TYPE, &container.Resource)
}

@@ -58,6 +58,8 @@ type BackupVolumeOperations interface {
ActionBackupList(*BackupVolume) (*BackupListOutput, error)

ActionBackupListByVolume(*BackupVolume, *Volume) (*BackupListOutput, error)

ActionBackupVolumeSync(*BackupVolume, *SyncBackupResource) (*BackupVolumeListOutput, error)
}

func newBackupVolumeClient(rancherClient *RancherClient) *BackupVolumeClient {

@@ -145,3 +147,12 @@ func (c *BackupVolumeClient) ActionBackupListByVolume(resource *BackupVolume, in

return resp, err
}

func (c *BackupVolumeClient) ActionBackupVolumeSync(resource *BackupVolume, input *SyncBackupResource) (*BackupVolumeListOutput, error) {

resp := &BackupVolumeListOutput{}

err := c.rancherClient.doAction(BACKUP_VOLUME_TYPE, "backupVolumeSync", &resource.Resource, input, resp)

return resp, err
}

@@ -0,0 +1,79 @@
package client

const (
BACKUP_VOLUME_LIST_OUTPUT_TYPE = "backupVolumeListOutput"
)

type BackupVolumeListOutput struct {
Resource `yaml:"-"`

Data []BackupVolume `json:"data,omitempty" yaml:"data,omitempty"`
}

type BackupVolumeListOutputCollection struct {
Collection
Data []BackupVolumeListOutput `json:"data,omitempty"`
client *BackupVolumeListOutputClient
}

type BackupVolumeListOutputClient struct {
rancherClient *RancherClient
}

type BackupVolumeListOutputOperations interface {
List(opts *ListOpts) (*BackupVolumeListOutputCollection, error)
Create(opts *BackupVolumeListOutput) (*BackupVolumeListOutput, error)
Update(existing *BackupVolumeListOutput, updates interface{}) (*BackupVolumeListOutput, error)
ById(id string) (*BackupVolumeListOutput, error)
Delete(container *BackupVolumeListOutput) error
}

func newBackupVolumeListOutputClient(rancherClient *RancherClient) *BackupVolumeListOutputClient {
return &BackupVolumeListOutputClient{
rancherClient: rancherClient,
}
}

func (c *BackupVolumeListOutputClient) Create(container *BackupVolumeListOutput) (*BackupVolumeListOutput, error) {
resp := &BackupVolumeListOutput{}
err := c.rancherClient.doCreate(BACKUP_VOLUME_LIST_OUTPUT_TYPE, container, resp)
return resp, err
}

func (c *BackupVolumeListOutputClient) Update(existing *BackupVolumeListOutput, updates interface{}) (*BackupVolumeListOutput, error) {
resp := &BackupVolumeListOutput{}
err := c.rancherClient.doUpdate(BACKUP_VOLUME_LIST_OUTPUT_TYPE, &existing.Resource, updates, resp)
return resp, err
}

func (c *BackupVolumeListOutputClient) List(opts *ListOpts) (*BackupVolumeListOutputCollection, error) {
resp := &BackupVolumeListOutputCollection{}
err := c.rancherClient.doList(BACKUP_VOLUME_LIST_OUTPUT_TYPE, opts, resp)
resp.client = c
return resp, err
}

func (cc *BackupVolumeListOutputCollection) Next() (*BackupVolumeListOutputCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &BackupVolumeListOutputCollection{}
err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}

func (c *BackupVolumeListOutputClient) ById(id string) (*BackupVolumeListOutput, error) {
resp := &BackupVolumeListOutput{}
err := c.rancherClient.doById(BACKUP_VOLUME_LIST_OUTPUT_TYPE, id, resp)
if apiError, ok := err.(*ApiError); ok {
if apiError.StatusCode == 404 {
return nil, nil
}
}
return resp, err
}

func (c *BackupVolumeListOutputClient) Delete(container *BackupVolumeListOutput) error {
return c.rancherClient.doResourceDelete(BACKUP_VOLUME_LIST_OUTPUT_TYPE, &container.Resource)
}

@@ -9,10 +9,10 @@ type RancherClient struct {
DetachInput DetachInputOperations
SnapshotInput SnapshotInputOperations
SnapshotCRInput SnapshotCRInputOperations
BackupTarget BackupTargetOperations
Backup BackupOperations
BackupInput BackupInputOperations
BackupStatus BackupStatusOperations
SyncBackupResource SyncBackupResourceOperations
Orphan OrphanOperations
RestoreStatus RestoreStatusOperations
PurgeStatus PurgeStatusOperations

@@ -38,6 +38,8 @@ type RancherClient struct {
UpdateReplicaZoneSoftAntiAffinityInput UpdateReplicaZoneSoftAntiAffinityInputOperations
UpdateReplicaDiskSoftAntiAffinityInput UpdateReplicaDiskSoftAntiAffinityInputOperations
UpdateFreezeFSForSnapshotInput UpdateFreezeFSForSnapshotInputOperations
UpdateBackupTargetInput UpdateBackupTargetInputOperations
UpdateOfflineRebuildingInput UpdateOfflineRebuildingInputOperations
WorkloadStatus WorkloadStatusOperations
CloneStatus CloneStatusOperations
Empty EmptyOperations

@@ -56,13 +58,14 @@ type RancherClient struct {
InstanceManager InstanceManagerOperations
BackingImageDiskFileStatus BackingImageDiskFileStatusOperations
BackingImageCleanupInput BackingImageCleanupInputOperations
BackingImageRestoreInput BackingImageRestoreInputOperations
UpdateMinNumberOfCopiesInput UpdateMinNumberOfCopiesInputOperations
BackingImageRestoreInput BackingImageRestoreInputOperations
Attachment AttachmentOperations
VolumeAttachment VolumeAttachmentOperations
Volume VolumeOperations
Snapshot SnapshotOperations
SnapshotCR SnapshotCROperations
BackupTarget BackupTargetOperations
BackupVolume BackupVolumeOperations
BackupBackingImage BackupBackingImageOperations
Setting SettingOperations

@@ -73,6 +76,8 @@ type RancherClient struct {
DiskUpdateInput DiskUpdateInputOperations
DiskInfo DiskInfoOperations
KubernetesStatus KubernetesStatusOperations
BackupTargetListOutput BackupTargetListOutputOperations
BackupVolumeListOutput BackupVolumeListOutputOperations
BackupListOutput BackupListOutputOperations
SnapshotListOutput SnapshotListOutputOperations
SystemBackup SystemBackupOperations

@@ -91,10 +96,10 @@ func constructClient(rancherBaseClient *RancherBaseClientImpl) *RancherClient {
client.DetachInput = newDetachInputClient(client)
client.SnapshotInput = newSnapshotInputClient(client)
client.SnapshotCRInput = newSnapshotCRInputClient(client)
client.BackupTarget = newBackupTargetClient(client)
client.Backup = newBackupClient(client)
client.BackupInput = newBackupInputClient(client)
client.BackupStatus = newBackupStatusClient(client)
client.SyncBackupResource = newSyncBackupResourceClient(client)
client.Orphan = newOrphanClient(client)
client.RestoreStatus = newRestoreStatusClient(client)
client.PurgeStatus = newPurgeStatusClient(client)

@@ -120,6 +125,8 @@ func constructClient(rancherBaseClient *RancherBaseClientImpl) *RancherClient {
client.UpdateReplicaZoneSoftAntiAffinityInput = newUpdateReplicaZoneSoftAntiAffinityInputClient(client)
client.UpdateReplicaDiskSoftAntiAffinityInput = newUpdateReplicaDiskSoftAntiAffinityInputClient(client)
client.UpdateFreezeFSForSnapshotInput = newUpdateFreezeFSForSnapshotInputClient(client)
client.UpdateBackupTargetInput = newUpdateBackupTargetInputClient(client)
client.UpdateOfflineRebuildingInput = newUpdateOfflineRebuildingInputClient(client)
client.WorkloadStatus = newWorkloadStatusClient(client)
client.CloneStatus = newCloneStatusClient(client)
client.Empty = newEmptyClient(client)

@@ -145,6 +152,7 @@ func constructClient(rancherBaseClient *RancherBaseClientImpl) *RancherClient {
client.Volume = newVolumeClient(client)
client.Snapshot = newSnapshotClient(client)
client.SnapshotCR = newSnapshotCRClient(client)
client.BackupTarget = newBackupTargetClient(client)
client.BackupVolume = newBackupVolumeClient(client)
client.BackupBackingImage = newBackupBackingImageClient(client)
client.Setting = newSettingClient(client)

@@ -155,6 +163,8 @@ func constructClient(rancherBaseClient *RancherBaseClientImpl) *RancherClient {
client.DiskUpdateInput = newDiskUpdateInputClient(client)
client.DiskInfo = newDiskInfoClient(client)
client.KubernetesStatus = newKubernetesStatusClient(client)
client.BackupTargetListOutput = newBackupTargetListOutputClient(client)
client.BackupVolumeListOutput = newBackupVolumeListOutputClient(client)
client.BackupListOutput = newBackupListOutputClient(client)
client.SnapshotListOutput = newSnapshotListOutputClient(client)
client.SystemBackup = newSystemBackupClient(client)

@@ -7,6 +7,10 @@ const (
type CloneStatus struct {
Resource `yaml:"-"`

AttemptCount int64 `json:"attemptCount,omitempty" yaml:"attempt_count,omitempty"`

NextAllowedAttemptAt string `json:"nextAllowedAttemptAt,omitempty" yaml:"next_allowed_attempt_at,omitempty"`

Snapshot string `json:"snapshot,omitempty" yaml:"snapshot,omitempty"`

SourceVolume string `json:"sourceVolume,omitempty" yaml:"source_volume,omitempty"`

@@ -11,6 +11,8 @@ type DiskInfo struct {

Conditions map[string]interface{} `json:"conditions,omitempty" yaml:"conditions,omitempty"`

DiskDriver string `json:"diskDriver,omitempty" yaml:"disk_driver,omitempty"`

DiskType string `json:"diskType,omitempty" yaml:"disk_type,omitempty"`

DiskUUID string `json:"diskUUID,omitempty" yaml:"disk_uuid,omitempty"`

@@ -9,6 +9,8 @@ type DiskUpdate struct {

AllowScheduling bool `json:"allowScheduling,omitempty" yaml:"allow_scheduling,omitempty"`

DiskDriver string `json:"diskDriver,omitempty" yaml:"disk_driver,omitempty"`

DiskType string `json:"diskType,omitempty" yaml:"disk_type,omitempty"`

EvictionRequested bool `json:"evictionRequested,omitempty" yaml:"eviction_requested,omitempty"`

@@ -29,6 +29,8 @@ type EngineImage struct {

Image string `json:"image,omitempty" yaml:"image,omitempty"`

Incompatible bool `json:"incompatible,omitempty" yaml:"incompatible,omitempty"`

Name string `json:"name,omitempty" yaml:"name,omitempty"`

NoRefSince string `json:"noRefSince,omitempty" yaml:"no_ref_since,omitempty"`

@@ -7,6 +7,8 @@ const (
type Orphan struct {
Resource `yaml:"-"`

DataEngine string `json:"dataEngine,omitempty" yaml:"data_engine,omitempty"`

Name string `json:"name,omitempty" yaml:"name,omitempty"`

NodeID string `json:"nodeID,omitempty" yaml:"node_id,omitempty"`

@@ -19,6 +19,10 @@ type RecurringJob struct {

Name string `json:"name,omitempty" yaml:"name,omitempty"`

OwnerID string `json:"ownerID,omitempty" yaml:"owner_id,omitempty"`

Parameters map[string]string `json:"parameters,omitempty" yaml:"parameters,omitempty"`

Retain int64 `json:"retain,omitempty" yaml:"retain,omitempty"`

Task string `json:"task,omitempty" yaml:"task,omitempty"`

@@ -7,6 +7,8 @@ const (
type Setting struct {
Resource `yaml:"-"`

Applied bool `json:"applied,omitempty" yaml:"applied,omitempty"`

Definition SettingDefinition `json:"definition,omitempty" yaml:"definition,omitempty"`

Name string `json:"name,omitempty" yaml:"name,omitempty"`

@@ -7,7 +7,7 @@ const (
type SnapshotInput struct {
Resource `yaml:"-"`

BackupMode string `json:"backupMode,omitempty" yaml:"backupMode,omitempty"`
BackupMode string `json:"backupMode,omitempty" yaml:"backup_mode,omitempty"`

Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`

@@ -0,0 +1,85 @@
package client

const (
SYNC_BACKUP_RESOURCE_TYPE = "syncBackupResource"
)

type SyncBackupResource struct {
Resource `yaml:"-"`

SyncAllBackupTargets bool `json:"syncAllBackupTargets,omitempty" yaml:"sync_all_backup_targets,omitempty"`

SyncAllBackupVolumes bool `json:"syncAllBackupVolumes,omitempty" yaml:"sync_all_backup_volumes,omitempty"`

SyncBackupTarget bool `json:"syncBackupTarget,omitempty" yaml:"sync_backup_target,omitempty"`

SyncBackupVolume bool `json:"syncBackupVolume,omitempty" yaml:"sync_backup_volume,omitempty"`
}

type SyncBackupResourceCollection struct {
Collection
Data []SyncBackupResource `json:"data,omitempty"`
client *SyncBackupResourceClient
}

type SyncBackupResourceClient struct {
rancherClient *RancherClient
}

type SyncBackupResourceOperations interface {
List(opts *ListOpts) (*SyncBackupResourceCollection, error)
Create(opts *SyncBackupResource) (*SyncBackupResource, error)
Update(existing *SyncBackupResource, updates interface{}) (*SyncBackupResource, error)
ById(id string) (*SyncBackupResource, error)
Delete(container *SyncBackupResource) error
}

func newSyncBackupResourceClient(rancherClient *RancherClient) *SyncBackupResourceClient {
return &SyncBackupResourceClient{
rancherClient: rancherClient,
}
}

func (c *SyncBackupResourceClient) Create(container *SyncBackupResource) (*SyncBackupResource, error) {
resp := &SyncBackupResource{}
err := c.rancherClient.doCreate(SYNC_BACKUP_RESOURCE_TYPE, container, resp)
return resp, err
}

func (c *SyncBackupResourceClient) Update(existing *SyncBackupResource, updates interface{}) (*SyncBackupResource, error) {
resp := &SyncBackupResource{}
err := c.rancherClient.doUpdate(SYNC_BACKUP_RESOURCE_TYPE, &existing.Resource, updates, resp)
return resp, err
}

func (c *SyncBackupResourceClient) List(opts *ListOpts) (*SyncBackupResourceCollection, error) {
resp := &SyncBackupResourceCollection{}
err := c.rancherClient.doList(SYNC_BACKUP_RESOURCE_TYPE, opts, resp)
resp.client = c
return resp, err
}

func (cc *SyncBackupResourceCollection) Next() (*SyncBackupResourceCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &SyncBackupResourceCollection{}
err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}

func (c *SyncBackupResourceClient) ById(id string) (*SyncBackupResource, error) {
resp := &SyncBackupResource{}
err := c.rancherClient.doById(SYNC_BACKUP_RESOURCE_TYPE, id, resp)
if apiError, ok := err.(*ApiError); ok {
if apiError.StatusCode == 404 {
return nil, nil
}
}
return resp, err
}

func (c *SyncBackupResourceClient) Delete(container *SyncBackupResource) error {
return c.rancherClient.doResourceDelete(SYNC_BACKUP_RESOURCE_TYPE, &container.Resource)
}

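The new SyncBackupResource type pairs with the ActionBackupTargetSync and ActionBackupVolumeSync client actions added elsewhere in this compare. Below is only a sketch: it assumes the generated client keeps its usual NewRancherClient/ClientOpts constructor and lives at the repository's client package path, the endpoint URL is a placeholder, and the backup target name "default" is illustrative.

package main

import (
	"fmt"

	client "github.com/longhorn/longhorn-manager/client"
)

func main() {
	// Assumed standard constructor for the generated client; the URL is a placeholder.
	c, err := client.NewRancherClient(&client.ClientOpts{Url: "http://longhorn-backend:9500/v1"})
	if err != nil {
		panic(err)
	}

	// Look up a backup target by ID (illustrative name).
	bt, err := c.BackupTarget.ById("default")
	if err != nil || bt == nil {
		panic(fmt.Errorf("backup target not found: %v", err))
	}

	// Ask the manager to resync this backup target from the backup store,
	// using the action signature introduced in this compare.
	out, err := c.BackupTarget.ActionBackupTargetSync(bt, &client.SyncBackupResource{
		SyncBackupTarget: true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("sync returned %d backup target entries\n", len(out.Data))
}
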
@@ -0,0 +1,79 @@
package client

const (
UPDATE_BACKUP_TARGET_INPUT_TYPE = "UpdateBackupTargetInput"
)

type UpdateBackupTargetInput struct {
Resource `yaml:"-"`

BackupTargetName string `json:"backupTargetName,omitempty" yaml:"backup_target_name,omitempty"`
}

type UpdateBackupTargetInputCollection struct {
Collection
Data []UpdateBackupTargetInput `json:"data,omitempty"`
client *UpdateBackupTargetInputClient
}

type UpdateBackupTargetInputClient struct {
rancherClient *RancherClient
}

type UpdateBackupTargetInputOperations interface {
List(opts *ListOpts) (*UpdateBackupTargetInputCollection, error)
Create(opts *UpdateBackupTargetInput) (*UpdateBackupTargetInput, error)
Update(existing *UpdateBackupTargetInput, updates interface{}) (*UpdateBackupTargetInput, error)
ById(id string) (*UpdateBackupTargetInput, error)
Delete(container *UpdateBackupTargetInput) error
}

func newUpdateBackupTargetInputClient(rancherClient *RancherClient) *UpdateBackupTargetInputClient {
return &UpdateBackupTargetInputClient{
rancherClient: rancherClient,
}
}

func (c *UpdateBackupTargetInputClient) Create(container *UpdateBackupTargetInput) (*UpdateBackupTargetInput, error) {
resp := &UpdateBackupTargetInput{}
err := c.rancherClient.doCreate(UPDATE_BACKUP_TARGET_INPUT_TYPE, container, resp)
return resp, err
}

func (c *UpdateBackupTargetInputClient) Update(existing *UpdateBackupTargetInput, updates interface{}) (*UpdateBackupTargetInput, error) {
resp := &UpdateBackupTargetInput{}
err := c.rancherClient.doUpdate(UPDATE_BACKUP_TARGET_INPUT_TYPE, &existing.Resource, updates, resp)
return resp, err
}

func (c *UpdateBackupTargetInputClient) List(opts *ListOpts) (*UpdateBackupTargetInputCollection, error) {
resp := &UpdateBackupTargetInputCollection{}
err := c.rancherClient.doList(UPDATE_BACKUP_TARGET_INPUT_TYPE, opts, resp)
resp.client = c
return resp, err
}

func (cc *UpdateBackupTargetInputCollection) Next() (*UpdateBackupTargetInputCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &UpdateBackupTargetInputCollection{}
err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}

func (c *UpdateBackupTargetInputClient) ById(id string) (*UpdateBackupTargetInput, error) {
resp := &UpdateBackupTargetInput{}
err := c.rancherClient.doById(UPDATE_BACKUP_TARGET_INPUT_TYPE, id, resp)
if apiError, ok := err.(*ApiError); ok {
if apiError.StatusCode == 404 {
return nil, nil
}
}
return resp, err
}

func (c *UpdateBackupTargetInputClient) Delete(container *UpdateBackupTargetInput) error {
return c.rancherClient.doResourceDelete(UPDATE_BACKUP_TARGET_INPUT_TYPE, &container.Resource)
}

@@ -0,0 +1,79 @@
package client

const (
UPDATE_OFFLINE_REBUILDING_INPUT_TYPE = "UpdateOfflineRebuildingInput"
)

type UpdateOfflineRebuildingInput struct {
Resource `yaml:"-"`

OfflineRebuilding string `json:"offlineRebuilding,omitempty" yaml:"offline_rebuilding,omitempty"`
}

type UpdateOfflineRebuildingInputCollection struct {
Collection
Data []UpdateOfflineRebuildingInput `json:"data,omitempty"`
client *UpdateOfflineRebuildingInputClient
}

type UpdateOfflineRebuildingInputClient struct {
rancherClient *RancherClient
}

type UpdateOfflineRebuildingInputOperations interface {
List(opts *ListOpts) (*UpdateOfflineRebuildingInputCollection, error)
Create(opts *UpdateOfflineRebuildingInput) (*UpdateOfflineRebuildingInput, error)
Update(existing *UpdateOfflineRebuildingInput, updates interface{}) (*UpdateOfflineRebuildingInput, error)
ById(id string) (*UpdateOfflineRebuildingInput, error)
Delete(container *UpdateOfflineRebuildingInput) error
}

func newUpdateOfflineRebuildingInputClient(rancherClient *RancherClient) *UpdateOfflineRebuildingInputClient {
return &UpdateOfflineRebuildingInputClient{
rancherClient: rancherClient,
}
}

func (c *UpdateOfflineRebuildingInputClient) Create(container *UpdateOfflineRebuildingInput) (*UpdateOfflineRebuildingInput, error) {
resp := &UpdateOfflineRebuildingInput{}
err := c.rancherClient.doCreate(UPDATE_OFFLINE_REBUILDING_INPUT_TYPE, container, resp)
return resp, err
}

func (c *UpdateOfflineRebuildingInputClient) Update(existing *UpdateOfflineRebuildingInput, updates interface{}) (*UpdateOfflineRebuildingInput, error) {
resp := &UpdateOfflineRebuildingInput{}
err := c.rancherClient.doUpdate(UPDATE_OFFLINE_REBUILDING_INPUT_TYPE, &existing.Resource, updates, resp)
return resp, err
}

func (c *UpdateOfflineRebuildingInputClient) List(opts *ListOpts) (*UpdateOfflineRebuildingInputCollection, error) {
resp := &UpdateOfflineRebuildingInputCollection{}
err := c.rancherClient.doList(UPDATE_OFFLINE_REBUILDING_INPUT_TYPE, opts, resp)
resp.client = c
return resp, err
}

func (cc *UpdateOfflineRebuildingInputCollection) Next() (*UpdateOfflineRebuildingInputCollection, error) {
if cc != nil && cc.Pagination != nil && cc.Pagination.Next != "" {
resp := &UpdateOfflineRebuildingInputCollection{}
err := cc.client.rancherClient.doNext(cc.Pagination.Next, resp)
resp.client = cc.client
return resp, err
}
return nil, nil
}

func (c *UpdateOfflineRebuildingInputClient) ById(id string) (*UpdateOfflineRebuildingInput, error) {
resp := &UpdateOfflineRebuildingInput{}
err := c.rancherClient.doById(UPDATE_OFFLINE_REBUILDING_INPUT_TYPE, id, resp)
if apiError, ok := err.(*ApiError); ok {
if apiError.StatusCode == 404 {
return nil, nil
}
}
return resp, err
}

func (c *UpdateOfflineRebuildingInputClient) Delete(container *UpdateOfflineRebuildingInput) error {
return c.rancherClient.doResourceDelete(UPDATE_OFFLINE_REBUILDING_INPUT_TYPE, &container.Resource)
}

@@ -11,6 +11,8 @@ type Volume struct {

BackingImage string `json:"backingImage,omitempty" yaml:"backing_image,omitempty"`

BackupBlockSize string `json:"backupBlockSize,omitempty" yaml:"backup_block_size,omitempty"`

BackupCompressionMethod string `json:"backupCompressionMethod,omitempty" yaml:"backup_compression_method,omitempty"`

BackupStatus []BackupStatus `json:"backupStatus,omitempty" yaml:"backup_status,omitempty"`

@@ -141,12 +143,12 @@ type VolumeOperations interface {

ActionCancelExpansion(*Volume) (*Volume, error)

ActionOfflineReplicaRebuilding(*Volume) (*Volume, error)

ActionDetach(*Volume, *DetachInput) (*Volume, error)

ActionExpand(*Volume, *ExpandInput) (*Volume, error)

ActionOfflineReplicaRebuilding(*Volume, *UpdateOfflineRebuildingInput) (*Volume, error)

ActionPvCreate(*Volume, *PVCreateInput) (*Volume, error)

ActionPvcCreate(*Volume, *PVCCreateInput) (*Volume, error)

@@ -265,15 +267,6 @@ func (c *VolumeClient) ActionCancelExpansion(resource *Volume) (*Volume, error)
return resp, err
}

func (c *VolumeClient) ActionOfflineReplicaRebuilding(resource *Volume) (*Volume, error) {

resp := &Volume{}

err := c.rancherClient.doAction(VOLUME_TYPE, "offlineReplicaRebuilding", &resource.Resource, nil, resp)

return resp, err
}

func (c *VolumeClient) ActionDetach(resource *Volume, input *DetachInput) (*Volume, error) {

resp := &Volume{}

@@ -292,6 +285,15 @@ func (c *VolumeClient) ActionExpand(resource *Volume, input *ExpandInput) (*Volu
return resp, err
}

func (c *VolumeClient) ActionOfflineReplicaRebuilding(resource *Volume, input *UpdateOfflineRebuildingInput) (*Volume, error) {

resp := &Volume{}

err := c.rancherClient.doAction(VOLUME_TYPE, "offlineReplicaRebuilding", &resource.Resource, input, resp)

return resp, err
}

func (c *VolumeClient) ActionPvCreate(resource *Volume, input *PVCreateInput) (*Volume, error) {

resp := &Volume{}

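ActionOfflineReplicaRebuilding now takes an UpdateOfflineRebuildingInput instead of no input, so existing callers need an updated call site. A sketch follows, with the same client-construction assumptions as the earlier example; the volume name is illustrative, and the "enabled" value is an assumption about what the server accepts rather than something this diff shows.

package main

import (
	"fmt"

	client "github.com/longhorn/longhorn-manager/client"
)

func main() {
	// Assumed standard constructor and placeholder URL, as in the earlier sketch.
	c, err := client.NewRancherClient(&client.ClientOpts{Url: "http://longhorn-backend:9500/v1"})
	if err != nil {
		panic(err)
	}

	vol, err := c.Volume.ById("vol-1") // illustrative volume name
	if err != nil || vol == nil {
		panic(fmt.Errorf("volume not found: %v", err))
	}

	// New signature: the action now carries an explicit input payload.
	updated, err := c.Volume.ActionOfflineReplicaRebuilding(vol, &client.UpdateOfflineRebuildingInput{
		OfflineRebuilding: "enabled", // assumed value; accepted strings are not shown in this diff
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("offline rebuilding setting updated for", updated.Name)
}
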
@@ -329,6 +329,9 @@ func (c *BackingImageManagerController) syncBackingImageManager(key string) (err
bim.Status.CurrentState = longhorn.BackingImageManagerStateUnknown
c.updateForUnknownBackingImageManager(bim)
}
if noReadyDisk {
return c.evictMissingDiskBackingImageManager(bim)
}
return nil
}

@@ -388,6 +391,38 @@ func (c *BackingImageManagerController) cleanupBackingImageManager(bim *longhorn
return nil
}

// evictMissingDiskBackingImageManager trigger image manager eviction for missing disks
func (c *BackingImageManagerController) evictMissingDiskBackingImageManager(bim *longhorn.BackingImageManager) error {
isDiskExist, err := c.ds.IsNodeHasDiskUUID(bim.Spec.NodeID, bim.Spec.DiskUUID)
if err != nil {
return errors.Wrapf(err, "cannot check if backing image manager %v is serving on a existing disk %v", bim.Name, bim.Spec.DiskUUID)
} else if isDiskExist {
return nil
}

// Backing image manager is serving on the disk that no longer belongs to any node. Trigger the manager eviction.
for imageName := range bim.Spec.BackingImages {
bi, getImageErr := c.ds.GetBackingImageRO(imageName)
if getImageErr != nil {
if datastore.ErrorIsNotFound(getImageErr) {
c.logger.Warnf("No corresponding backing image %v for missing disk backing image manager %v", imageName, bim.Name)
continue
}
return errors.Wrapf(getImageErr, "failed to get backing image %v for missing disk backing image manager %v", bi.Name, bim.Name)
}
if bi.Spec.DiskFileSpecMap != nil {
if bimDiskFileSpec, exist := bi.Spec.DiskFileSpecMap[bim.Spec.DiskUUID]; exist && !bimDiskFileSpec.EvictionRequested {
c.logger.Infof("Evicting backing image manager %v because of missing disk %v", bim.Name, bim.Spec.DiskUUID)
bimDiskFileSpec.EvictionRequested = true
if _, updateErr := c.ds.UpdateBackingImage(bi); updateErr != nil {
return errors.Wrapf(updateErr, "failed to evict missing disk backing image manager %v from backing image %v", bim.Name, bi.Name)
}
}
}
}
return nil
}

func (c *BackingImageManagerController) updateForUnknownBackingImageManager(bim *longhorn.BackingImageManager) {
if bim.Status.CurrentState != longhorn.BackingImageManagerStateUnknown {
return

@@ -26,6 +26,7 @@ import (
    v1core "k8s.io/client-go/kubernetes/typed/core/v1"

    systembackupstore "github.com/longhorn/backupstore/systembackup"
    multierr "github.com/longhorn/go-common-libs/multierr"

    "github.com/longhorn/longhorn-manager/datastore"
    "github.com/longhorn/longhorn-manager/engineapi"

@@ -241,28 +242,19 @@ func getLoggerForBackupTarget(logger logrus.FieldLogger, backupTarget *longhorn.
    )
}

func getAvailableDataEngine(ds *datastore.DataStore) (longhorn.DataEngineType, error) {
func getBackupTarget(nodeID string, backupTarget *longhorn.BackupTarget, ds *datastore.DataStore, log logrus.FieldLogger, proxyConnCounter util.Counter) (engineClientProxy engineapi.EngineClientProxy, backupTargetClient *engineapi.BackupTargetClient, err error) {
    var instanceManager *longhorn.InstanceManager
    errs := multierr.NewMultiError()
    dataEngines := ds.GetDataEngines()
    if len(dataEngines) > 0 {
        for _, dataEngine := range []longhorn.DataEngineType{longhorn.DataEngineTypeV2, longhorn.DataEngineTypeV1} {
            if _, ok := dataEngines[dataEngine]; ok {
                return dataEngine, nil
            }
        for dataEngine := range dataEngines {
            instanceManager, err = ds.GetRunningInstanceManagerByNodeRO(nodeID, dataEngine)
            if err == nil {
                break
            }
            errs.Append("errors", errors.Wrapf(err, "failed to get running instance manager for node %v and data engine %v", nodeID, dataEngine))
        }

    return "", errors.New("no data engine available")
}

func getBackupTarget(controllerID string, backupTarget *longhorn.BackupTarget, ds *datastore.DataStore, log logrus.FieldLogger, proxyConnCounter util.Counter) (engineClientProxy engineapi.EngineClientProxy, backupTargetClient *engineapi.BackupTargetClient, err error) {
    dataEngine, err := getAvailableDataEngine(ds)
    if err != nil {
        return nil, nil, errors.Wrap(err, "failed to get available data engine for getting backup target")
    }

    instanceManager, err := ds.GetRunningInstanceManagerByNodeRO(controllerID, dataEngine)
    if err != nil {
        return nil, nil, errors.Wrap(err, "failed to get running instance manager for proxy client")
    if instanceManager == nil {
        return nil, nil, fmt.Errorf("failed to find a running instance manager for node %v: %v", nodeID, errs.Error())
    }

    engineClientProxy, err = engineapi.NewEngineClientProxy(instanceManager, log, proxyConnCounter, ds)

@@ -466,12 +458,18 @@ func (btc *BackupTargetController) reconcile(name string) (err error) {
        log.WithError(err).Error("Failed to get info from backup store")
        return nil // Ignore error to allow status update as well as preventing enqueue
    }
    syncTimeRequired = true // Errors beyond this point are NOT backup target related.

    backupTarget.Status.Available = true
    backupTarget.Status.Conditions = types.SetCondition(backupTarget.Status.Conditions,
        longhorn.BackupTargetConditionTypeUnavailable, longhorn.ConditionStatusFalse,
        "", "")
    if !backupTarget.Status.Available {
        backupTarget.Status.Available = true
        backupTarget.Status.Conditions = types.SetCondition(backupTarget.Status.Conditions,
            longhorn.BackupTargetConditionTypeUnavailable, longhorn.ConditionStatusFalse,
            "", "")
        // If the controller can communicate with the remote backup target while "backupTarget.Status.Available" is "false",
        // Longhorn should update the field to "true" first rather than continuing to fetch info from the target.
        // related issue: https://github.com/longhorn/longhorn/issues/11337
        return nil
    }
    syncTimeRequired = true // Errors beyond this point are NOT backup target related.

    if err = btc.syncBackupVolume(backupTarget, info.backupStoreBackupVolumeNames, clusterVolumeBVMap, syncTime, log); err != nil {
        return err

@@ -540,6 +538,14 @@ func (btc *BackupTargetController) getInfoFromBackupStore(backupTarget *longhorn
    defer engineClientProxy.Close()

    // Get required information using backup target client.
    // Get SystemBackups first to update the backup target to `available` while minimizing requests to S3.
    info.backupStoreSystemBackups, err = backupTargetClient.ListSystemBackup()
    if err != nil {
        return backupStoreInfo{}, errors.Wrapf(err, "failed to list system backups in %v", backupTargetClient.URL)
    }
    if !backupTarget.Status.Available {
        return info, nil
    }
    info.backupStoreBackupVolumeNames, err = backupTargetClient.BackupVolumeNameList()
    if err != nil {
        return backupStoreInfo{}, errors.Wrapf(err, "failed to list backup volumes in %v", backupTargetClient.URL)

@@ -548,10 +554,6 @@ func (btc *BackupTargetController) getInfoFromBackupStore(backupTarget *longhorn
    if err != nil {
        return backupStoreInfo{}, errors.Wrapf(err, "failed to list backup backing images in %v", backupTargetClient.URL)
    }
    info.backupStoreSystemBackups, err = backupTargetClient.ListSystemBackup()
    if err != nil {
        return backupStoreInfo{}, errors.Wrapf(err, "failed to list system backups in %v", backupTargetClient.URL)
    }

    return info, nil
}

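The reworked getBackupTarget above no longer picks a single data engine up front; it walks every data engine reported by the datastore, keeps the first one whose instance manager is running on the node, and only fails when none succeeds, surfacing the accumulated per-engine errors. A minimal sketch of that aggregation pattern using the standard library's errors.Join as an analogue of the go-common-libs multierr helper used in the hunk (the lookup function here is hypothetical, not Longhorn API):

package main

import (
    "errors"
    "fmt"
)

// findRunningManager tries each data engine in turn and fails only when every
// attempt errors out. lookup stands in for ds.GetRunningInstanceManagerByNodeRO.
func findRunningManager(engines []string, lookup func(engine string) (string, error)) (string, error) {
    var errs []error
    for _, engine := range engines {
        im, err := lookup(engine)
        if err == nil {
            return im, nil // first running instance manager wins
        }
        errs = append(errs, fmt.Errorf("data engine %s: %w", engine, err))
    }
    return "", fmt.Errorf("no running instance manager found: %w", errors.Join(errs...))
}

func main() {
    im, err := findRunningManager([]string{"v1", "v2"}, func(engine string) (string, error) {
        if engine == "v2" {
            return "instance-manager-v2-abc", nil
        }
        return "", errors.New("not running")
    })
    fmt.Println(im, err)
}
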
@@ -24,6 +24,8 @@ import (

    "github.com/longhorn/backupstore"

    lhbackup "github.com/longhorn/go-common-libs/backup"

    "github.com/longhorn/longhorn-manager/datastore"
    "github.com/longhorn/longhorn-manager/types"
    "github.com/longhorn/longhorn-manager/util"

@@ -321,6 +323,11 @@ func (bvc *BackupVolumeController) reconcile(backupVolumeName string) (err error
        backupLabelMap := map[string]string{}

        backupURL := backupstore.EncodeBackupURL(backupName, canonicalBVName, backupTargetClient.URL)

        // If the block size is unavailable from a legacy remote backup, the size falls back to the legacy default value of 2MiB.
        // If the size value is invalid, the backup is still created with the invalid block size, but restoring the volume will be rejected by the volume validator.
        var blockSize = types.BackupBlockSizeInvalid

        if backupInfo, err := backupTargetClient.BackupGet(backupURL, backupTargetClient.Credential); err != nil && !types.ErrorIsNotFound(err) {
            log.WithError(err).WithFields(logrus.Fields{
                "backup": backupName,

@@ -331,6 +338,18 @@ func (bvc *BackupVolumeController) reconcile(backupVolumeName string) (err error
            if accessMode, exist := backupInfo.Labels[types.GetLonghornLabelKey(types.LonghornLabelVolumeAccessMode)]; exist {
                backupLabelMap[types.GetLonghornLabelKey(types.LonghornLabelVolumeAccessMode)] = accessMode
            }
            backupBlockSizeParam := backupInfo.Parameters[lhbackup.LonghornBackupParameterBackupBlockSize]
            if blockSizeBytes, convertErr := util.ConvertSize(backupBlockSizeParam); convertErr != nil {
                log.WithError(convertErr).Warnf("Invalid backup block size string from the remote backup %v: %v", backupName, backupBlockSizeParam)
            } else if sizeErr := types.ValidateBackupBlockSize(-1, blockSizeBytes); sizeErr != nil {
                log.WithError(sizeErr).Warnf("Invalid backup block size from the remote backup %v: %v", backupName, backupBlockSizeParam)
            } else {
                if blockSizeBytes == 0 {
                    blockSize = types.BackupBlockSize2Mi
                } else {
                    blockSize = blockSizeBytes
                }
            }
        }
    }
}

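A condensed restatement of the block-size fallback above as a self-contained helper. The sentinel byte values are assumptions for illustration (the real code uses types.BackupBlockSizeInvalid and types.BackupBlockSize2Mi), and parse/validate below stand in for util.ConvertSize and types.ValidateBackupBlockSize:

package main

import (
    "fmt"
    "strconv"
)

const (
    blockSizeInvalid = int64(-1)      // assumed stand-in for types.BackupBlockSizeInvalid
    blockSize2Mi     = int64(2 << 20) // assumed stand-in for types.BackupBlockSize2Mi (2 MiB)
)

// resolveBackupBlockSize mirrors the fallback above: empty or zero means
// "legacy backup, use the 2 MiB default"; an unparsable or out-of-range value
// keeps the invalid sentinel so the volume validator can reject a restore later.
func resolveBackupBlockSize(param string, valid func(int64) bool) int64 {
    if param == "" {
        return blockSize2Mi
    }
    bytes, err := strconv.ParseInt(param, 10, 64)
    if err != nil || !valid(bytes) {
        return blockSizeInvalid
    }
    if bytes == 0 {
        return blockSize2Mi
    }
    return bytes
}

func main() {
    isValid := func(b int64) bool { return b == 0 || b%4096 == 0 } // hypothetical validity rule
    fmt.Println(resolveBackupBlockSize("", isValid))        // 2097152
    fmt.Println(resolveBackupBlockSize("2097152", isValid)) // 2097152
    fmt.Println(resolveBackupBlockSize("abc", isValid))     // -1
}
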
@@ -343,7 +362,8 @@ func (bvc *BackupVolumeController) reconcile(backupVolumeName string) (err error
            OwnerReferences: datastore.GetOwnerReferencesForBackupVolume(backupVolume),
        },
        Spec: longhorn.BackupSpec{
            Labels: backupLabelMap,
            Labels:          backupLabelMap,
            BackupBlockSize: blockSize,
        },
    }
    if _, err = bvc.ds.CreateBackup(backup, canonicalBVName); err != nil && !apierrors.IsAlreadyExists(err) {

@@ -3,7 +3,6 @@ package controller

import (
    "fmt"
    "math"
    "strconv"
    "time"

    "github.com/pkg/errors"

@@ -259,31 +258,18 @@ func GetInstanceManagerCPURequirement(ds *datastore.DataStore, imName string) (*

    cpuRequest := 0
    switch im.Spec.DataEngine {
    case longhorn.DataEngineTypeV1:
    case longhorn.DataEngineTypeV1, longhorn.DataEngineTypeV2:
        // TODO: Currently lhNode.Spec.InstanceManagerCPURequest is applied to both v1 and v2 data engines.
        // In the future, we may want to support different CPU requests for them.
        cpuRequest = lhNode.Spec.InstanceManagerCPURequest
        if cpuRequest == 0 {
            guaranteedCPUSetting, err := ds.GetSettingWithAutoFillingRO(types.SettingNameGuaranteedInstanceManagerCPU)
            if err != nil {
                return nil, err
            }
            guaranteedCPUPercentage, err := strconv.ParseFloat(guaranteedCPUSetting.Value, 64)
            guaranteedCPUPercentage, err := ds.GetSettingAsFloatByDataEngine(types.SettingNameGuaranteedInstanceManagerCPU, im.Spec.DataEngine)
            if err != nil {
                return nil, err
            }
            allocatableMilliCPU := float64(kubeNode.Status.Allocatable.Cpu().MilliValue())
            cpuRequest = int(math.Round(allocatableMilliCPU * guaranteedCPUPercentage / 100.0))
        }
    case longhorn.DataEngineTypeV2:
        // TODO: Support CPU request per node for v2 volumes
        guaranteedCPUSetting, err := ds.GetSettingWithAutoFillingRO(types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU)
        if err != nil {
            return nil, err
        }
        guaranteedCPURequest, err := strconv.ParseFloat(guaranteedCPUSetting.Value, 64)
        if err != nil {
            return nil, err
        }
        cpuRequest = int(guaranteedCPURequest)
    default:
        return nil, fmt.Errorf("unknown data engine %v", im.Spec.DataEngine)
    }

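The merged v1/v2 branch above derives the instance-manager CPU request as a percentage of the node's allocatable CPU. A worked example of that formula; the 12% figure and the 4-core node are illustrative, not defaults taken from this diff:

package main

import (
    "fmt"
    "math"
)

func main() {
    // Node with 4 allocatable CPUs, i.e. 4000 milli-CPU.
    allocatableMilliCPU := float64(4000)
    // guaranteed-instance-manager-cpu expressed as a percentage of allocatable CPU.
    guaranteedCPUPercentage := 12.0

    // Same rounding as the hunk above: 4000 * 12 / 100 = 480m for the instance manager pod.
    cpuRequest := int(math.Round(allocatableMilliCPU * guaranteedCPUPercentage / 100.0))
    fmt.Printf("%dm\n", cpuRequest) // 480m
}
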
@@ -467,7 +467,7 @@ func (ec *EngineController) CreateInstance(obj interface{}) (*longhorn.InstanceP
        }
    }(c)

    engineReplicaTimeout, err := ec.ds.GetSettingAsInt(types.SettingNameEngineReplicaTimeout)
    engineReplicaTimeout, err := ec.ds.GetSettingAsIntByDataEngine(types.SettingNameEngineReplicaTimeout, e.Spec.DataEngine)
    if err != nil {
        return nil, err
    }

@@ -494,6 +494,11 @@ func (ec *EngineController) CreateInstance(obj interface{}) (*longhorn.InstanceP

    instanceManagerStorageIP := ec.ds.GetStorageIPFromPod(instanceManagerPod)

    e.Status.Starting = true
    if e, err = ec.ds.UpdateEngineStatus(e); err != nil {
        return nil, errors.Wrapf(err, "failed to update engine %v status.starting to true before sending instance create request", e.Name)
    }

    return c.EngineInstanceCreate(&engineapi.EngineInstanceCreateRequest{
        Engine:         e,
        VolumeFrontend: frontend,

@@ -1172,7 +1177,11 @@ func (m *EngineMonitor) checkAndApplyRebuildQoS(engine *longhorn.Engine, engineC
}

func (m *EngineMonitor) getEffectiveRebuildQoS(engine *longhorn.Engine) (int64, error) {
    globalQoS, err := m.ds.GetSettingAsInt(types.SettingNameReplicaRebuildingBandwidthLimit)
    if types.IsDataEngineV1(engine.Spec.DataEngine) {
        return 0, nil
    }

    globalQoS, err := m.ds.GetSettingAsIntByDataEngine(types.SettingNameReplicaRebuildingBandwidthLimit, engine.Spec.DataEngine)
    if err != nil {
        return 0, err
    }

@@ -1505,7 +1514,7 @@ func (m *EngineMonitor) restoreBackup(engine *longhorn.Engine, rsMap map[string]

    backupTargetClient, err := newBackupTargetClientFromDefaultEngineImage(m.ds, backupTarget)
    if err != nil {
        return errors.Wrapf(err, "cannot get backup target config for backup restoration of engine %v", engine.Name)
        return errors.Wrapf(err, "failed to get backup target client for backup restoration of engine %v", engine.Name)
    }

    mlog := m.logger.WithFields(logrus.Fields{

@@ -1517,7 +1526,8 @@ func (m *EngineMonitor) restoreBackup(engine *longhorn.Engine, rsMap map[string]

    concurrentLimit, err := m.ds.GetSettingAsInt(types.SettingNameRestoreConcurrentLimit)
    if err != nil {
        return errors.Wrapf(err, "failed to assert %v value", types.SettingNameRestoreConcurrentLimit)
        return errors.Wrapf(err, "failed to get %v setting for backup restoration of engine %v",
            types.SettingNameRestoreConcurrentLimit, engine.Name)
    }

    mlog.Info("Restoring backup")

@@ -1811,13 +1821,13 @@ func (ec *EngineController) startRebuilding(e *longhorn.Engine, replicaName, add
    go func() {
        autoCleanupSystemGeneratedSnapshot, err := ec.ds.GetSettingAsBool(types.SettingNameAutoCleanupSystemGeneratedSnapshot)
        if err != nil {
            log.WithError(err).Errorf("Failed to get %v setting", types.SettingDefinitionAutoCleanupSystemGeneratedSnapshot)
            log.WithError(err).Errorf("Failed to get %v setting", types.SettingNameAutoCleanupSystemGeneratedSnapshot)
            return
        }

        fastReplicaRebuild, err := ec.ds.GetSettingAsBool(types.SettingNameFastReplicaRebuildEnabled)
        fastReplicaRebuild, err := ec.ds.GetSettingAsBoolByDataEngine(types.SettingNameFastReplicaRebuildEnabled, e.Spec.DataEngine)
        if err != nil {
            log.WithError(err).Errorf("Failed to get %v setting", types.SettingNameFastReplicaRebuildEnabled)
            log.WithError(err).Errorf("Failed to get %v setting for data engine %v", types.SettingNameFastReplicaRebuildEnabled, e.Spec.DataEngine)
            return
        }

@@ -2218,7 +2228,7 @@ func (ec *EngineController) UpgradeEngineInstance(e *longhorn.Engine, log *logru
        }
    }(c)

    engineReplicaTimeout, err := ec.ds.GetSettingAsInt(types.SettingNameEngineReplicaTimeout)
    engineReplicaTimeout, err := ec.ds.GetSettingAsIntByDataEngine(types.SettingNameEngineReplicaTimeout, e.Spec.DataEngine)
    if err != nil {
        return err
    }

@@ -286,6 +286,13 @@ func (h *InstanceHandler) ReconcileInstanceState(obj interface{}, spec *longhorn

    log := logrus.WithFields(logrus.Fields{"instance": instanceName, "volumeName": spec.VolumeName, "dataEngine": spec.DataEngine, "specNodeID": spec.NodeID})

    stateBeforeReconcile := status.CurrentState
    defer func() {
        if stateBeforeReconcile != status.CurrentState {
            log.Infof("Instance %v state is updated from %v to %v", instanceName, stateBeforeReconcile, status.CurrentState)
        }
    }()

    var im *longhorn.InstanceManager
    if status.InstanceManagerName != "" {
        im, err = h.ds.GetInstanceManagerRO(status.InstanceManagerName)

@@ -361,6 +368,7 @@ func (h *InstanceHandler) ReconcileInstanceState(obj interface{}, spec *longhorn

        if i, exists := instances[instanceName]; exists && i.Status.State == longhorn.InstanceStateRunning {
            status.Started = true
            status.Starting = false
            break
        }

@@ -381,18 +389,24 @@ func (h *InstanceHandler) ReconcileInstanceState(obj interface{}, spec *longhorn
        }

    case longhorn.InstanceStateStopped:
        shouldDelete := false
        if im != nil && im.DeletionTimestamp == nil {
            if _, exists := instances[instanceName]; exists {
                shouldDelete = true
            }
        }
        if status.Starting {
            shouldDelete = true
        }
        if shouldDelete {
            // There is a delay between the deleteInstance() invocation and the state/InstanceManager update,
            // so deleteInstance() may be called multiple times.
            if instance, exists := instances[instanceName]; exists {
                if shouldDeleteInstance(&instance) {
                    if err := h.deleteInstance(instanceName, runtimeObj); err != nil {
                        return err
                    }
                }
            if err := h.deleteInstance(instanceName, runtimeObj); err != nil {
                return err
            }
        }
        status.Started = false
        status.Starting = false
    default:
        return fmt.Errorf("unknown instance desire state: desire %v", spec.DesireState)
    }

@@ -438,17 +452,6 @@ func (h *InstanceHandler) ReconcileInstanceState(obj interface{}, spec *longhorn
    return nil
}

func shouldDeleteInstance(instance *longhorn.InstanceProcess) bool {
    // For a replica of an SPDK volume, a stopped replica means the lvol is not exposed,
    // but the lvol is still there. We don't need to delete it.
    if types.IsDataEngineV2(instance.Spec.DataEngine) {
        if instance.Status.State == longhorn.InstanceStateStopped {
            return false
        }
    }
    return true
}

func (h *InstanceHandler) getInstancesFromInstanceManager(obj runtime.Object, instanceManager *longhorn.InstanceManager) (map[string]longhorn.InstanceProcess, error) {
    switch obj.(type) {
    case *longhorn.Engine:

@@ -214,7 +214,7 @@ func (imc *InstanceManagerController) isResponsibleForSetting(obj interface{}) b
    }

    return types.SettingName(setting.Name) == types.SettingNameKubernetesClusterAutoscalerEnabled ||
        types.SettingName(setting.Name) == types.SettingNameV2DataEngineCPUMask ||
        types.SettingName(setting.Name) == types.SettingNameDataEngineCPUMask ||
        types.SettingName(setting.Name) == types.SettingNameOrphanResourceAutoDeletion
}

@@ -549,12 +549,12 @@ func (imc *InstanceManagerController) isDateEngineCPUMaskApplied(im *longhorn.In
        return im.Spec.DataEngineSpec.V2.CPUMask == im.Status.DataEngineStatus.V2.CPUMask, nil
    }

    setting, err := imc.ds.GetSettingWithAutoFillingRO(types.SettingNameV2DataEngineCPUMask)
    value, err := imc.ds.GetSettingValueExistedByDataEngine(types.SettingNameDataEngineCPUMask, im.Spec.DataEngine)
    if err != nil {
        return true, errors.Wrapf(err, "failed to get %v setting for updating data engine CPU mask", types.SettingNameV2DataEngineCPUMask)
        return true, errors.Wrapf(err, "failed to get %v setting for updating data engine CPU mask", types.SettingNameDataEngineCPUMask)
    }

    return setting.Value == im.Status.DataEngineStatus.V2.CPUMask, nil
    return value == im.Status.DataEngineStatus.V2.CPUMask, nil
}

func (imc *InstanceManagerController) syncLogSettingsToInstanceManagerPod(im *longhorn.InstanceManager) error {

@@ -574,36 +574,41 @@ func (imc *InstanceManagerController) syncLogSettingsToInstanceManagerPod(im *lo

    settingNames := []types.SettingName{
        types.SettingNameLogLevel,
        types.SettingNameV2DataEngineLogLevel,
        types.SettingNameV2DataEngineLogFlags,
        types.SettingNameDataEngineLogLevel,
        types.SettingNameDataEngineLogFlags,
    }

    for _, settingName := range settingNames {
        setting, err := imc.ds.GetSettingWithAutoFillingRO(settingName)
        if err != nil {
            return err
        }

        switch settingName {
        case types.SettingNameLogLevel:
            // We use this to set the instance-manager log level, for either engine type.
            err = client.LogSetLevel("", "", setting.Value)
            value, err := imc.ds.GetSettingValueExisted(settingName)
            if err != nil {
                return errors.Wrapf(err, "failed to set instance-manager log level to setting %v value: %v", settingName, setting.Value)
                return err
            }
        case types.SettingNameV2DataEngineLogLevel:
            // We use this to set the spdk_tgt log level independently of the instance-manager's.
            // We use this to set the instance-manager log level, for either engine type.
            err = client.LogSetLevel("", "", value)
            if err != nil {
                return errors.Wrapf(err, "failed to set instance-manager log level to setting %v value: %v", settingName, value)
            }
        case types.SettingNameDataEngineLogLevel:
            // We use this to set the data engine (such as spdk_tgt for the v2 data engine) log level independently of the instance-manager's.
            if types.IsDataEngineV2(im.Spec.DataEngine) {
                err = client.LogSetLevel(longhorn.DataEngineTypeV2, "", setting.Value)
                value, err := imc.ds.GetSettingValueExistedByDataEngine(settingName, im.Spec.DataEngine)
                if err != nil {
                    return errors.Wrapf(err, "failed to set spdk_tgt log level to setting %v value: %v", settingName, setting.Value)
                    return err
                }
                if err := client.LogSetLevel(longhorn.DataEngineTypeV2, "", value); err != nil {
                    return errors.Wrapf(err, "failed to set data engine log level to setting %v value: %v", settingName, value)
                }
            }
        case types.SettingNameV2DataEngineLogFlags:
        case types.SettingNameDataEngineLogFlags:
            if types.IsDataEngineV2(im.Spec.DataEngine) {
                err = client.LogSetFlags(longhorn.DataEngineTypeV2, "spdk_tgt", setting.Value)
                value, err := imc.ds.GetSettingValueExistedByDataEngine(settingName, im.Spec.DataEngine)
                if err != nil {
                    return errors.Wrapf(err, "failed to set spdk_tgt log flags to setting %v value: %v", settingName, setting.Value)
                    return err
                }
                if err := client.LogSetFlags(longhorn.DataEngineTypeV2, "spdk_tgt", value); err != nil {
                    return errors.Wrapf(err, "failed to set data engine log flags to setting %v value: %v", settingName, value)
                }
            }
        }

@@ -742,7 +747,7 @@ func (imc *InstanceManagerController) areDangerZoneSettingsSyncedToIMPod(im *lon
        isSettingSynced, err = imc.isSettingTaintTolerationSynced(setting, pod)
    case types.SettingNameSystemManagedComponentsNodeSelector:
        isSettingSynced, err = imc.isSettingNodeSelectorSynced(setting, pod)
    case types.SettingNameGuaranteedInstanceManagerCPU, types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU:
    case types.SettingNameGuaranteedInstanceManagerCPU:
        isSettingSynced, err = imc.isSettingGuaranteedInstanceManagerCPUSynced(setting, pod)
    case types.SettingNamePriorityClass:
        isSettingSynced, err = imc.isSettingPriorityClassSynced(setting, pod)

@@ -818,7 +823,7 @@ func (imc *InstanceManagerController) isSettingStorageNetworkSynced(setting *lon
func (imc *InstanceManagerController) isSettingDataEngineSynced(settingName types.SettingName, im *longhorn.InstanceManager) (bool, error) {
    enabled, err := imc.ds.GetSettingAsBool(settingName)
    if err != nil {
        return false, errors.Wrapf(err, "failed to get %v setting for updating data engine", settingName)
        return false, errors.Wrapf(err, "failed to get %v setting for checking data engine sync", settingName)
    }
    var dataEngine longhorn.DataEngineType
    switch settingName {

@@ -830,6 +835,7 @@ func (imc *InstanceManagerController) isSettingDataEngineSynced(settingName type
    if !enabled && im.Spec.DataEngine == dataEngine {
        return false, nil
    }

    return true, nil
}

@@ -1487,24 +1493,27 @@ func (imc *InstanceManagerController) createInstanceManagerPodSpec(im *longhorn.
    if types.IsDataEngineV2(dataEngine) {
        // spdk_tgt doesn't support a log level option, so we don't need to pass the log level to the instance manager.
        // The log level will be applied in the reconciliation of the instance manager controller.
        logFlagsSetting, err := imc.ds.GetSettingWithAutoFillingRO(types.SettingNameV2DataEngineLogFlags)
        logFlagsSetting, err := imc.ds.GetSettingValueExistedByDataEngine(types.SettingNameDataEngineLogFlags, dataEngine)
        if err != nil {
            return nil, err
        }

        logFlags := "all"
        if logFlagsSetting.Value != "" {
            logFlags = strings.ToLower(logFlagsSetting.Value)
        if logFlagsSetting != "" {
            logFlags = strings.ToLower(logFlagsSetting)
        }

        cpuMask := im.Spec.DataEngineSpec.V2.CPUMask
        if cpuMask == "" {
            value, err := imc.ds.GetSettingWithAutoFillingRO(types.SettingNameV2DataEngineCPUMask)
            value, err := imc.ds.GetSettingValueExistedByDataEngine(types.SettingNameDataEngineCPUMask, dataEngine)
            if err != nil {
                return nil, err
            }

            cpuMask = value.Value
            cpuMask = value
            if cpuMask == "" {
                return nil, fmt.Errorf("failed to get CPU mask setting for data engine %v", dataEngine)
            }
        }

        im.Status.DataEngineStatus.V2.CPUMask = cpuMask

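The CPU mask resolved above (from the instance manager spec or the consolidated data-engine-cpu-mask setting) is handed to the v2 data engine. Assuming it follows SPDK's hexadecimal core-mask convention, which this hunk does not spell out, a value such as "0x3" selects cores 0 and 1. A small decoding sketch:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// coresFromMask lists the CPU cores selected by a hexadecimal core mask,
// assuming SPDK's convention: bit i set means core i is used.
func coresFromMask(mask string) ([]int, error) {
    bits, err := strconv.ParseUint(strings.TrimPrefix(strings.ToLower(mask), "0x"), 16, 64)
    if err != nil {
        return nil, fmt.Errorf("invalid CPU mask %q: %w", mask, err)
    }
    var cores []int
    for i := 0; i < 64; i++ {
        if bits&(1<<uint(i)) != 0 {
            cores = append(cores, i)
        }
    }
    return cores, nil
}

func main() {
    cores, _ := coresFromMask("0x3")
    fmt.Println(cores) // [0 1]
}
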
@@ -1522,7 +1531,7 @@ func (imc *InstanceManagerController) createInstanceManagerPodSpec(im *longhorn.

    podSpec.Spec.Containers[0].Args = args

    hugepage, err := imc.ds.GetSettingAsInt(types.SettingNameV2DataEngineHugepageLimit)
    hugepage, err := imc.ds.GetSettingAsIntByDataEngine(types.SettingNameDataEngineHugepageLimit, im.Spec.DataEngine)
    if err != nil {
        return nil, err
    }

@@ -1695,7 +1704,7 @@ func (imc *InstanceManagerController) deleteOrphans(im *longhorn.InstanceManager

    autoDeleteGracePeriod, err := imc.ds.GetSettingAsInt(types.SettingNameOrphanResourceAutoDeletionGracePeriod)
    if err != nil {
        return errors.Wrapf(err, "failed to get setting %v", types.SettingNameOrphanResourceAutoDeletionGracePeriod)
        return errors.Wrapf(err, "failed to get %v setting", types.SettingNameOrphanResourceAutoDeletionGracePeriod)
    }

    orphanList, err := imc.ds.ListInstanceOrphansByInstanceManagerRO(im.Name)

@@ -32,7 +32,9 @@ import (
)

const (
    EnvironmentCheckMonitorSyncPeriod = 1800 * time.Second
    environmentCheckMonitorSyncPeriod = 1800 * time.Second

    defaultHugePageLimitInMiB = 2048

    kernelConfigDir = "/host/boot/"
    systemConfigDir = "/host/etc/"

@@ -64,7 +66,7 @@ func NewEnvironmentCheckMonitor(logger logrus.FieldLogger, ds *datastore.DataSto
    ctx, quit := context.WithCancel(context.Background())

    m := &EnvironmentCheckMonitor{
        baseMonitor: newBaseMonitor(ctx, quit, logger, ds, EnvironmentCheckMonitorSyncPeriod),
        baseMonitor: newBaseMonitor(ctx, quit, logger, ds, environmentCheckMonitorSyncPeriod),

        nodeName: nodeName,

@@ -360,10 +362,11 @@ func (m *EnvironmentCheckMonitor) checkPackageInstalled(packageProbeExecutables
}

func (m *EnvironmentCheckMonitor) checkHugePages(kubeNode *corev1.Node, collectedData *CollectedEnvironmentCheckInfo) {
    hugePageLimitInMiB, err := m.ds.GetSettingAsInt(types.SettingNameV2DataEngineHugepageLimit)
    hugePageLimitInMiB, err := m.ds.GetSettingAsIntByDataEngine(types.SettingNameDataEngineHugepageLimit, longhorn.DataEngineTypeV2)
    if err != nil {
        m.logger.Debugf("Failed to fetch v2-data-engine-hugepage-limit setting, using default value: %d", 2048)
        hugePageLimitInMiB = 2048
        m.logger.Warnf("Failed to get setting %v for data engine %v, using default value %d",
            types.SettingNameDataEngineHugepageLimit, longhorn.DataEngineTypeV2, defaultHugePageLimitInMiB)
        hugePageLimitInMiB = defaultHugePageLimitInMiB
    }

    capacity := kubeNode.Status.Capacity

@@ -30,7 +30,7 @@ func NewFakeEnvironmentCheckMonitor(logger logrus.FieldLogger, ds *datastore.Dat
    ctx, quit := context.WithCancel(context.Background())

    m := &FakeEnvironmentCheckMonitor{
        baseMonitor: newBaseMonitor(ctx, quit, logger, ds, EnvironmentCheckMonitorSyncPeriod),
        baseMonitor: newBaseMonitor(ctx, quit, logger, ds, environmentCheckMonitorSyncPeriod),

        nodeName: nodeName,

@@ -238,9 +238,9 @@ func (nc *NodeController) isResponsibleForSnapshot(obj interface{}) bool {
}

func (nc *NodeController) snapshotHashRequired(volume *longhorn.Volume) bool {
    dataIntegrityImmediateChecking, err := nc.ds.GetSettingAsBool(types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation)
    dataIntegrityImmediateChecking, err := nc.ds.GetSettingAsBoolByDataEngine(types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation, volume.Spec.DataEngine)
    if err != nil {
        nc.logger.WithError(err).Warnf("Failed to get %v setting", types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation)
        nc.logger.WithError(err).Warnf("Failed to get %v setting for data engine %v", types.SettingNameSnapshotDataIntegrityImmediateCheckAfterSnapshotCreation, volume.Spec.DataEngine)
        return false
    }
    if !dataIntegrityImmediateChecking {

@@ -902,6 +902,7 @@ func (nc *NodeController) updateDiskStatusSchedulableCondition(node *longhorn.No
    diskStatus.StorageScheduled = storageScheduled
    diskStatus.ScheduledReplica = scheduledReplica
    diskStatus.ScheduledBackingImage = scheduledBackingImage

    // check disk pressure
    info, err := nc.scheduler.GetDiskSchedulingInfo(disk, diskStatus)
    if err != nil {

@@ -1150,7 +1151,7 @@ func (nc *NodeController) cleanUpBackingImagesInDisks(node *longhorn.Node) error

    settingValue, err := nc.ds.GetSettingAsInt(types.SettingNameBackingImageCleanupWaitInterval)
    if err != nil {
        log.WithError(err).Warnf("Failed to get setting %v, won't do cleanup for backing images", types.SettingNameBackingImageCleanupWaitInterval)
        log.WithError(err).Warnf("Failed to get %v setting, won't do cleanup for backing images", types.SettingNameBackingImageCleanupWaitInterval)
        return nil
    }
    waitInterval := time.Duration(settingValue) * time.Minute

@@ -1300,7 +1301,7 @@ func (nc *NodeController) enqueueNodeForMonitor(key string) {
func (nc *NodeController) syncOrphans(node *longhorn.Node, collectedDataInfo map[string]*monitor.CollectedDiskInfo) error {
    autoDeleteGracePeriod, err := nc.ds.GetSettingAsInt(types.SettingNameOrphanResourceAutoDeletionGracePeriod)
    if err != nil {
        return errors.Wrapf(err, "failed to get setting %v", types.SettingNameOrphanResourceAutoDeletionGracePeriod)
        return errors.Wrapf(err, "failed to get %v setting", types.SettingNameOrphanResourceAutoDeletionGracePeriod)
    }

    for diskName, diskInfo := range collectedDataInfo {

@@ -1845,8 +1846,7 @@ func (nc *NodeController) setReadyAndSchedulableConditions(node *longhorn.Node,
        nc.eventRecorder, node, corev1.EventTypeNormal)
    }

    disableSchedulingOnCordonedNode, err :=
        nc.ds.GetSettingAsBool(types.SettingNameDisableSchedulingOnCordonedNode)
    disableSchedulingOnCordonedNode, err := nc.ds.GetSettingAsBool(types.SettingNameDisableSchedulingOnCordonedNode)
    if err != nil {
        return errors.Wrapf(err, "failed to get %v setting", types.SettingNameDisableSchedulingOnCordonedNode)
    }

@@ -1955,7 +1955,7 @@ func (nc *NodeController) SetSchedulableCondition(node *longhorn.Node, kubeNode
func (nc *NodeController) clearDelinquentLeasesIfNodeNotReady(node *longhorn.Node) error {
    enabled, err := nc.ds.GetSettingAsBool(types.SettingNameRWXVolumeFastFailover)
    if err != nil {
        return errors.Wrapf(err, "failed to get setting %v", types.SettingNameRWXVolumeFastFailover)
        return errors.Wrapf(err, "failed to get %v setting", types.SettingNameRWXVolumeFastFailover)
    }
    if !enabled {
        return nil

@@ -380,6 +380,11 @@ func (rc *ReplicaController) CreateInstance(obj interface{}) (*longhorn.Instance
        return nil, err
    }

    r.Status.Starting = true
    if r, err = rc.ds.UpdateReplicaStatus(r); err != nil {
        return nil, errors.Wrapf(err, "failed to update replica %v status.starting to true before sending instance create request", r.Name)
    }

    return c.ReplicaInstanceCreate(&engineapi.ReplicaInstanceCreateRequest{
        Replica:  r,
        DiskName: diskName,

@@ -350,7 +350,6 @@ func (sc *SettingController) syncDangerZoneSettingsForManagedComponents(settingN
        types.SettingNameV1DataEngine,
        types.SettingNameV2DataEngine,
        types.SettingNameGuaranteedInstanceManagerCPU,
        types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU,
    }

    if slices.Contains(dangerSettingsRequiringSpecificDataEngineVolumesDetached, settingName) {

@@ -360,14 +359,11 @@ func (sc *SettingController) syncDangerZoneSettingsForManagedComponents(settingN
            return errors.Wrapf(err, "failed to apply %v setting to Longhorn instance managers when there are attached volumes. "+
                "It will be eventually applied", settingName)
        }
    case types.SettingNameGuaranteedInstanceManagerCPU, types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU:
        dataEngine := longhorn.DataEngineTypeV1
        if settingName == types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU {
            dataEngine = longhorn.DataEngineTypeV2
        }

        if err := sc.updateInstanceManagerCPURequest(dataEngine); err != nil {
            return err
    case types.SettingNameGuaranteedInstanceManagerCPU:
        for _, dataEngine := range []longhorn.DataEngineType{longhorn.DataEngineTypeV1, longhorn.DataEngineTypeV2} {
            if err := sc.updateInstanceManagerCPURequest(dataEngine); err != nil {
                return err
            }
        }
    }
}

@@ -1225,10 +1221,6 @@ func (sc *SettingController) enqueueSettingForNode(obj interface{}) {

// updateInstanceManagerCPURequest deletes all instance manager pods immediately with the updated CPU request.
func (sc *SettingController) updateInstanceManagerCPURequest(dataEngine longhorn.DataEngineType) error {
    settingName := types.SettingNameGuaranteedInstanceManagerCPU
    if types.IsDataEngineV2(dataEngine) {
        settingName = types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU
    }
    imPodList, err := sc.ds.ListInstanceManagerPodsBy("", "", longhorn.InstanceManagerTypeAllInOne, dataEngine)
    if err != nil {
        return errors.Wrap(err, "failed to list instance manager pods for toleration update")

@@ -1269,10 +1261,10 @@ func (sc *SettingController) updateInstanceManagerCPURequest(dataEngine longhorn

    stopped, _, err := sc.ds.AreAllEngineInstancesStopped(dataEngine)
    if err != nil {
        return errors.Wrapf(err, "failed to check engine instances for %v setting update", settingName)
        return errors.Wrapf(err, "failed to check engine instances for %v setting update for data engine %v", types.SettingNameGuaranteedInstanceManagerCPU, dataEngine)
    }
    if !stopped {
        return &types.ErrorInvalidState{Reason: fmt.Sprintf("failed to apply %v setting to Longhorn components when there are running engine instances. It will be eventually applied", settingName)}
        return &types.ErrorInvalidState{Reason: fmt.Sprintf("failed to apply %v setting for data engine %v to Longhorn components when there are running engine instances. It will be eventually applied", types.SettingNameGuaranteedInstanceManagerCPU, dataEngine)}
    }

    for _, pod := range notUpdatedPods {

@@ -1393,22 +1385,22 @@ const (
    ClusterInfoVolumeNumOfReplicas  = util.StructName("LonghornVolumeNumberOfReplicas")
    ClusterInfoVolumeNumOfSnapshots = util.StructName("LonghornVolumeNumberOfSnapshots")

    ClusterInfoPodAvgCPUUsageFmt = "Longhorn%sAverageCpuUsageMilliCores"
    ClusterInfoPodAvgMemoryUsageFmt = "Longhorn%sAverageMemoryUsageBytes"
    ClusterInfoSettingFmt = "LonghornSetting%s"
    ClusterInfoVolumeAccessModeCountFmt = "LonghornVolumeAccessMode%sCount"
    ClusterInfoVolumeDataEngineCountFmt = "LonghornVolumeDataEngine%sCount"
    ClusterInfoVolumeDataLocalityCountFmt = "LonghornVolumeDataLocality%sCount"
    ClusterInfoVolumeEncryptedCountFmt = "LonghornVolumeEncrypted%sCount"
    ClusterInfoVolumeFrontendCountFmt = "LonghornVolumeFrontend%sCount"
    ClusterInfoVolumeReplicaAutoBalanceCountFmt = "LonghornVolumeReplicaAutoBalance%sCount"
    ClusterInfoVolumeReplicaSoftAntiAffinityCountFmt = "LonghornVolumeReplicaSoftAntiAffinity%sCount"
    ClusterInfoVolumeReplicaZoneSoftAntiAffinityCountFmt = "LonghornVolumeReplicaZoneSoftAntiAffinity%sCount"
    ClusterInfoVolumeReplicaDiskSoftAntiAffinityCountFmt = "LonghornVolumeReplicaDiskSoftAntiAffinity%sCount"
    ClusterInfoVolumeRestoreVolumeRecurringJobCountFmt = "LonghornVolumeRestoreVolumeRecurringJob%sCount"
    ClusterInfoVolumeSnapshotDataIntegrityCountFmt = "LonghornVolumeSnapshotDataIntegrity%sCount"
    ClusterInfoVolumeUnmapMarkSnapChainRemovedCountFmt = "LonghornVolumeUnmapMarkSnapChainRemoved%sCount"
    ClusterInfoVolumeFreezeFilesystemForSnapshotCountFmt = "LonghornVolumeFreezeFilesystemForSnapshot%sCount"
    ClusterInfoPodAvgCPUUsageFmt = "Longhorn%sAverageCpuUsageMilliCores"
    ClusterInfoPodAvgMemoryUsageFmt = "Longhorn%sAverageMemoryUsageBytes"
    ClusterInfoSettingFmt = "LonghornSetting%s"
    ClusterInfoVolumeAccessModeCountFmt = "LonghornVolumeAccessMode%sCount"
    ClusterInfoVolumeDataEngineCountFmt = "LonghornVolumeDataEngine%sCount"
    ClusterInfoVolumeDataLocalityCountFmt = "LonghornVolumeDataLocality%sCount"
    ClusterInfoVolumeEncryptedCountFmt = "LonghornVolumeEncrypted%sCount"
    ClusterInfoVolumeFrontendCountFmt = "LonghornVolumeFrontend%sCount"
    ClusterInfoVolumeReplicaAutoBalanceCountFmt = "LonghornVolumeReplicaAutoBalance%sCount"
    ClusterInfoVolumeReplicaSoftAntiAffinityCountFmt = "LonghornVolumeReplicaSoftAntiAffinity%sCount"
    ClusterInfoVolumeReplicaZoneSoftAntiAffinityCountFmt = "LonghornVolumeReplicaZoneSoftAntiAffinity%sCount"
    ClusterInfoVolumeReplicaDiskSoftAntiAffinityCountFmt = "LonghornVolumeReplicaDiskSoftAntiAffinity%sCount"
    ClusterInfoVolumeRestoreVolumeRecurringJobCountFmt = "LonghornVolumeRestoreVolumeRecurringJob%sCount"
    ClusterInfoVolumeSnapshotDataIntegrityCountFmt = "LonghornVolumeSnapshotDataIntegrity%sCount"
    ClusterInfoVolumeUnmapMarkSnapChainRemovedCountFmt = "LonghornVolumeUnmapMarkSnapChainRemoved%sCount"
    ClusterInfoVolumeFreezeFilesystemForV1DataEngineSnapshotCountFmt = "LonghornVolumeFreezeFilesystemForV1DataEngineSnapshot%sCount"
)

// Node Scope Info: will be sent from all Longhorn cluster nodes

@@ -1609,7 +1601,6 @@ func (info *ClusterInfo) collectSettings() error {
        types.SettingNameSystemManagedPodsImagePullPolicy: true,
        types.SettingNameV1DataEngine: true,
        types.SettingNameV2DataEngine: true,
        types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU: true,
    }

    settings, err := info.ds.ListSettings()

@@ -1667,12 +1658,15 @@ func (info *ClusterInfo) convertSettingValueType(setting *longhorn.Setting) (con

    switch definition.Type {
    case types.SettingTypeInt:
        return strconv.ParseInt(setting.Value, 10, 64)
        if !definition.DataEngineSpecific {
            return strconv.ParseInt(setting.Value, 10, 64)
        }
    case types.SettingTypeBool:
        return strconv.ParseBool(setting.Value)
    default:
        return setting.Value, nil
        if !definition.DataEngineSpecific {
            return strconv.ParseBool(setting.Value)
        }
    }
    return setting.Value, nil
}

func (info *ClusterInfo) collectVolumesInfo() error {

@@ -1741,29 +1735,31 @@ func (info *ClusterInfo) collectVolumesInfo() error {
        frontendCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeFrontendCountFmt, frontend))]++
    }

    replicaAutoBalance := info.collectSettingInVolume(string(volume.Spec.ReplicaAutoBalance), string(longhorn.ReplicaAutoBalanceIgnored), types.SettingNameReplicaAutoBalance)
    replicaAutoBalance := info.collectSettingInVolume(string(volume.Spec.ReplicaAutoBalance), string(longhorn.ReplicaAutoBalanceIgnored), volume.Spec.DataEngine, types.SettingNameReplicaAutoBalance)
    replicaAutoBalanceCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeReplicaAutoBalanceCountFmt, util.ConvertToCamel(string(replicaAutoBalance), "-")))]++

    replicaSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaSoftAntiAffinity), string(longhorn.ReplicaSoftAntiAffinityDefault), types.SettingNameReplicaSoftAntiAffinity)
    replicaSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaSoftAntiAffinity), string(longhorn.ReplicaSoftAntiAffinityDefault), volume.Spec.DataEngine, types.SettingNameReplicaSoftAntiAffinity)
    replicaSoftAntiAffinityCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeReplicaSoftAntiAffinityCountFmt, util.ConvertToCamel(string(replicaSoftAntiAffinity), "-")))]++

    replicaZoneSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaZoneSoftAntiAffinity), string(longhorn.ReplicaZoneSoftAntiAffinityDefault), types.SettingNameReplicaZoneSoftAntiAffinity)
    replicaZoneSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaZoneSoftAntiAffinity), string(longhorn.ReplicaZoneSoftAntiAffinityDefault), volume.Spec.DataEngine, types.SettingNameReplicaZoneSoftAntiAffinity)
    replicaZoneSoftAntiAffinityCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeReplicaZoneSoftAntiAffinityCountFmt, util.ConvertToCamel(string(replicaZoneSoftAntiAffinity), "-")))]++

    replicaDiskSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaDiskSoftAntiAffinity), string(longhorn.ReplicaDiskSoftAntiAffinityDefault), types.SettingNameReplicaDiskSoftAntiAffinity)
    replicaDiskSoftAntiAffinity := info.collectSettingInVolume(string(volume.Spec.ReplicaDiskSoftAntiAffinity), string(longhorn.ReplicaDiskSoftAntiAffinityDefault), volume.Spec.DataEngine, types.SettingNameReplicaDiskSoftAntiAffinity)
    replicaDiskSoftAntiAffinityCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeReplicaDiskSoftAntiAffinityCountFmt, util.ConvertToCamel(string(replicaDiskSoftAntiAffinity), "-")))]++

    restoreVolumeRecurringJob := info.collectSettingInVolume(string(volume.Spec.RestoreVolumeRecurringJob), string(longhorn.RestoreVolumeRecurringJobDefault), types.SettingNameRestoreVolumeRecurringJobs)
    restoreVolumeRecurringJob := info.collectSettingInVolume(string(volume.Spec.RestoreVolumeRecurringJob), string(longhorn.RestoreVolumeRecurringJobDefault), volume.Spec.DataEngine, types.SettingNameRestoreVolumeRecurringJobs)
    restoreVolumeRecurringJobCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeRestoreVolumeRecurringJobCountFmt, util.ConvertToCamel(string(restoreVolumeRecurringJob), "-")))]++

    snapshotDataIntegrity := info.collectSettingInVolume(string(volume.Spec.SnapshotDataIntegrity), string(longhorn.SnapshotDataIntegrityIgnored), types.SettingNameSnapshotDataIntegrity)
    snapshotDataIntegrity := info.collectSettingInVolume(string(volume.Spec.SnapshotDataIntegrity), string(longhorn.SnapshotDataIntegrityIgnored), volume.Spec.DataEngine, types.SettingNameSnapshotDataIntegrity)
    snapshotDataIntegrityCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeSnapshotDataIntegrityCountFmt, util.ConvertToCamel(string(snapshotDataIntegrity), "-")))]++

    unmapMarkSnapChainRemoved := info.collectSettingInVolume(string(volume.Spec.UnmapMarkSnapChainRemoved), string(longhorn.UnmapMarkSnapChainRemovedIgnored), types.SettingNameRemoveSnapshotsDuringFilesystemTrim)
    unmapMarkSnapChainRemoved := info.collectSettingInVolume(string(volume.Spec.UnmapMarkSnapChainRemoved), string(longhorn.UnmapMarkSnapChainRemovedIgnored), volume.Spec.DataEngine, types.SettingNameRemoveSnapshotsDuringFilesystemTrim)
    unmapMarkSnapChainRemovedCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeUnmapMarkSnapChainRemovedCountFmt, util.ConvertToCamel(string(unmapMarkSnapChainRemoved), "-")))]++

    freezeFilesystemForSnapshot := info.collectSettingInVolume(string(volume.Spec.FreezeFilesystemForSnapshot), string(longhorn.FreezeFilesystemForSnapshotDefault), types.SettingNameFreezeFilesystemForSnapshot)
    freezeFilesystemForSnapshotCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeFreezeFilesystemForSnapshotCountFmt, util.ConvertToCamel(string(freezeFilesystemForSnapshot), "-")))]++
    if types.IsDataEngineV1(volume.Spec.DataEngine) {
        freezeFilesystemForSnapshot := info.collectSettingInVolume(string(volume.Spec.FreezeFilesystemForSnapshot), string(longhorn.FreezeFilesystemForSnapshotDefault), volume.Spec.DataEngine, types.SettingNameFreezeFilesystemForSnapshot)
        freezeFilesystemForSnapshotCountStruct[util.StructName(fmt.Sprintf(ClusterInfoVolumeFreezeFilesystemForV1DataEngineSnapshotCountFmt, util.ConvertToCamel(string(freezeFilesystemForSnapshot), "-")))]++
    }
}
info.structFields.fields.Append(ClusterInfoVolumeNumOfReplicas, totalVolumeNumOfReplicas)
info.structFields.fields.AppendCounted(accessModeCountStruct)

@@ -1814,13 +1810,14 @@ func (info *ClusterInfo) collectVolumesInfo() error {
    return nil
}

func (info *ClusterInfo) collectSettingInVolume(volumeSpecValue, ignoredValue string, settingName types.SettingName) string {
func (info *ClusterInfo) collectSettingInVolume(volumeSpecValue, ignoredValue string, dataEngine longhorn.DataEngineType, settingName types.SettingName) string {
    if volumeSpecValue == ignoredValue {
        globalSetting, err := info.ds.GetSettingWithAutoFillingRO(settingName)
        globalSettingValue, err := info.ds.GetSettingValueExistedByDataEngine(settingName, dataEngine)
        if err != nil {
            info.logger.WithError(err).Warnf("Failed to get Longhorn Setting %v", settingName)
        }
        return globalSetting.Value

        return globalSettingValue
    }
    return volumeSpecValue
}

@@ -345,7 +345,7 @@ func (vac *VolumeAttachmentController) handleNodeCordoned(va *longhorn.VolumeAtt

    detachManuallyAttachedVolumesWhenCordoned, err := vac.ds.GetSettingAsBool(types.SettingNameDetachManuallyAttachedVolumesWhenCordoned)
    if err != nil {
        log.WithError(err).Warnf("Failed to get setting %v", types.SettingNameDetachManuallyAttachedVolumesWhenCordoned)
        log.WithError(err).Warnf("Failed to get %v setting", types.SettingNameDetachManuallyAttachedVolumesWhenCordoned)
        return
    }

@@ -5006,17 +5006,6 @@ func (c *VolumeController) shouldCleanUpFailedReplica(v *longhorn.Volume, r *lon
        return true
    }

    if types.IsDataEngineV2(v.Spec.DataEngine) {
        V2DataEngineFastReplicaRebuilding, err := c.ds.GetSettingAsBool(types.SettingNameV2DataEngineFastReplicaRebuilding)
        if err != nil {
            log.WithError(err).Warnf("Failed to get the setting %v, will consider it as false", types.SettingDefinitionV2DataEngineFastReplicaRebuilding)
            V2DataEngineFastReplicaRebuilding = false
        }
        if !V2DataEngineFastReplicaRebuilding {
            log.Infof("Failed replica %v should be cleaned up blindly since setting %v is not enabled", r.Name, types.SettingNameV2DataEngineFastReplicaRebuilding)
            return true
        }
    }
    // Failed too long ago to be useful during a rebuild.
    if v.Spec.StaleReplicaTimeout > 0 &&
        util.TimestampAfterTimeout(r.Spec.FailedAt, time.Duration(v.Spec.StaleReplicaTimeout)*time.Minute) {

@@ -286,7 +286,7 @@ func (vbc *VolumeRebuildingController) reconcile(volName string) (err error) {
        }
    }()

    isOfflineRebuildEnabled, err := vbc.isVolumeOfflineRebuildEnabled(vol.Spec.OfflineRebuilding)
    isOfflineRebuildEnabled, err := vbc.isVolumeOfflineRebuildEnabled(vol)
    if err != nil {
        return err
    }

@@ -343,16 +343,16 @@ func (vbc *VolumeRebuildingController) reconcile(volName string) (err error) {
    return nil
}

func (vbc *VolumeRebuildingController) isVolumeOfflineRebuildEnabled(offlineRebuilding longhorn.VolumeOfflineRebuilding) (bool, error) {
    if offlineRebuilding == longhorn.VolumeOfflineRebuildingEnabled {
func (vbc *VolumeRebuildingController) isVolumeOfflineRebuildEnabled(vol *longhorn.Volume) (bool, error) {
    if vol.Spec.OfflineRebuilding == longhorn.VolumeOfflineRebuildingEnabled {
        return true, nil
    }

    globalOfflineRebuildingEnabled, err := vbc.ds.GetSettingAsBool(types.SettingNameOfflineReplicaRebuilding)
    globalOfflineRebuildingEnabled, err := vbc.ds.GetSettingAsBoolByDataEngine(types.SettingNameOfflineReplicaRebuilding, vol.Spec.DataEngine)
    if err != nil {
        return false, err
    }
    return globalOfflineRebuildingEnabled && offlineRebuilding != longhorn.VolumeOfflineRebuildingDisabled, nil
    return globalOfflineRebuildingEnabled && vol.Spec.OfflineRebuilding != longhorn.VolumeOfflineRebuildingDisabled, nil
}

func (vbc *VolumeRebuildingController) syncLHVolumeAttachmentForOfflineRebuild(vol *longhorn.Volume, va *longhorn.VolumeAttachment, attachmentID string) (*longhorn.VolumeAttachment, error) {

@@ -22,6 +22,7 @@ import (
    utilexec "k8s.io/utils/exec"

    "github.com/longhorn/longhorn-manager/types"
    "github.com/longhorn/longhorn-manager/util"

    longhornclient "github.com/longhorn/longhorn-manager/client"
    longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"

@@ -214,6 +215,14 @@ func getVolumeOptions(volumeID string, volOptions map[string]string) (*longhornc
        vol.BackupTargetName = backupTargetName
    }

    if backupBlockSize, ok := volOptions["backupBlockSize"]; ok {
        blockSize, err := util.ConvertSize(backupBlockSize)
        if err != nil {
            return nil, errors.Wrap(err, "invalid parameter backupBlockSize")
        }
        vol.BackupBlockSize = strconv.FormatInt(blockSize, 10)
    }

    if dataSource, ok := volOptions["dataSource"]; ok {
        vol.DataSource = dataSource
    }

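getVolumeOptions above accepts a human-readable backupBlockSize parameter (for example from a StorageClass) and stores it as a byte-count string. A rough illustration of that normalization using apimachinery's resource.Quantity as a stand-in for util.ConvertSize; the exact unit handling of ConvertSize is an assumption here:

package main

import (
    "fmt"
    "strconv"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // A StorageClass-style parameter value such as "2Mi".
    param := "2Mi"

    // Parse the human-readable size into bytes, as util.ConvertSize does in the hunk above.
    quantity := resource.MustParse(param)
    blockSizeBytes := quantity.Value()

    // Stored on the volume create request as a decimal string, e.g. "2097152".
    fmt.Println(strconv.FormatInt(blockSizeBytes, 10))
}
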
@ -2,6 +2,7 @@ package datastore
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/bits"
|
||||
"math/rand"
|
||||
|
@ -9,7 +10,6 @@ import (
|
|||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -23,6 +23,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
"k8s.io/client-go/util/retry"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
@ -76,6 +77,10 @@ func (s *DataStore) UpdateCustomizedSettings(defaultImages map[types.SettingName
|
|||
return err
|
||||
}
|
||||
|
||||
if err := s.syncConsolidatedV2DataEngineSettings(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.createNonExistingSettingCRsWithDefaultSetting(defaultSettingCM.ResourceVersion); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -166,11 +171,11 @@ func (s *DataStore) syncSettingsWithDefaultImages(defaultImages map[types.Settin
|
|||
|
||||
func (s *DataStore) syncSettingOrphanResourceAutoDeletionSettings() error {
|
||||
oldOrphanReplicaDataAutoDeletionSettingRO, err := s.getSettingRO(string(types.SettingNameOrphanAutoDeletion))
|
||||
switch {
|
||||
case ErrorIsNotFound(err):
|
||||
logrus.Infof("No old setting %v to be replaced.", types.SettingNameOrphanAutoDeletion)
|
||||
return nil
|
||||
case err != nil:
|
||||
if err != nil {
|
||||
if ErrorIsNotFound(err) {
|
||||
logrus.Debugf("No old setting %v to be replaced.", types.SettingNameOrphanAutoDeletion)
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "failed to get replaced setting %v", types.SettingNameOrphanAutoDeletion)
|
||||
}
|
||||
|
||||
|
@ -189,6 +194,36 @@ func (s *DataStore) syncSettingOrphanResourceAutoDeletionSettings() error {
|
|||
return s.createOrUpdateSetting(types.SettingNameOrphanResourceAutoDeletion, value, "")
|
||||
}
|
||||
|
||||
func (s *DataStore) syncConsolidatedV2DataEngineSetting(oldSettingName, newSettingName types.SettingName) error {
|
||||
oldSetting, err := s.getSettingRO(string(oldSettingName))
|
||||
if err != nil {
|
||||
if ErrorIsNotFound(err) {
|
||||
logrus.Debugf("No old setting %v to be replaced.", oldSettingName)
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "failed to get old setting %v", oldSettingName)
|
||||
}
|
||||
|
||||
return s.createOrUpdateSetting(newSettingName, oldSetting.Value, "")
|
||||
}
|
||||
|
||||
func (s *DataStore) syncConsolidatedV2DataEngineSettings() error {
|
||||
settings := map[types.SettingName]types.SettingName{
|
||||
types.SettingNameV2DataEngineHugepageLimit: types.SettingNameDataEngineHugepageLimit,
|
||||
types.SettingNameV2DataEngineCPUMask: types.SettingNameDataEngineCPUMask,
|
||||
types.SettingNameV2DataEngineLogLevel: types.SettingNameDataEngineLogLevel,
|
||||
types.SettingNameV2DataEngineLogFlags: types.SettingNameDataEngineLogFlags,
|
||||
}
|
||||
|
||||
for oldSettingName, newSettingName := range settings {
|
||||
if err := s.syncConsolidatedV2DataEngineSetting(oldSettingName, newSettingName); err != nil {
|
||||
return errors.Wrapf(err, "failed to sync consolidated v2 data engine setting %v to %v", oldSettingName, newSettingName)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *DataStore) createOrUpdateSetting(name types.SettingName, value, defaultSettingCMResourceVersion string) error {
|
||||
setting, err := s.GetSettingExact(name)
|
||||
if err != nil {
|
||||
|
@ -237,7 +272,12 @@ func (s *DataStore) applyCustomizedDefaultSettingsToDefinitions(customizedDefaul
|
|||
continue
|
||||
}
|
||||
|
||||
if value, ok := customizedDefaultSettings[string(sName)]; ok {
|
||||
if raw, ok := customizedDefaultSettings[string(sName)]; ok {
|
||||
value, err := GetSettingValidValue(definition, raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Setting %v default value is updated to a customized value %v (raw value %v)", sName, value, raw)
|
||||
definition.Default = value
|
||||
types.SetSettingDefinition(sName, definition)
|
||||
}
|
||||
|
@ -245,6 +285,75 @@ func (s *DataStore) applyCustomizedDefaultSettingsToDefinitions(customizedDefaul
|
|||
return nil
|
||||
}
|
||||
|
||||
func GetSettingValidValue(definition types.SettingDefinition, value string) (string, error) {
|
||||
if !definition.DataEngineSpecific {
|
||||
return value, nil
|
||||
}
|
||||
|
||||
if !types.IsJSONFormat(definition.Default) {
|
||||
return "", fmt.Errorf("setting %v is data engine specific but default value %v is not in JSON-formatted string", definition.DisplayName, definition.Default)
|
||||
}
|
||||
|
||||
var values map[longhorn.DataEngineType]any
|
||||
var err error
|
||||
|
||||
// Get default values from definition
|
||||
defaultValues, err := types.ParseDataEngineSpecificSetting(definition, definition.Default)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Get values from customized value
|
||||
if types.IsJSONFormat(value) {
|
||||
values, err = types.ParseDataEngineSpecificSetting(definition, value)
|
||||
} else {
|
||||
values, err = types.ParseSettingSingleValue(definition, value)
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Remove any data engine types that are not in the default values
|
||||
for dataEngine := range values {
|
||||
if _, ok := defaultValues[dataEngine]; !ok {
|
||||
delete(values, dataEngine)
|
||||
}
|
||||
}
|
||||
|
||||
return convertDataEngineValuesToJSONString(values)
|
||||
}

func convertDataEngineValuesToJSONString(values map[longhorn.DataEngineType]any) (string, error) {
    converted := make(map[longhorn.DataEngineType]string)

    for dataEngine, raw := range values {
        var value string
        switch v := raw.(type) {
        case string:
            value = v
        case bool:
            value = strconv.FormatBool(v)
        case int:
            value = strconv.Itoa(v)
        case int64:
            value = strconv.FormatInt(v, 10)
        case float64:
            value = strconv.FormatFloat(v, 'f', -1, 64)
        default:
            return "", fmt.Errorf("unsupported value type: %T", v)
        }

        converted[dataEngine] = value
    }

    jsonBytes, err := json.Marshal(converted)
    if err != nil {
        return "", err
    }

    return string(jsonBytes), nil
}

func (s *DataStore) syncSettingCRsWithCustomizedDefaultSettings(customizedDefaultSettings map[string]string, defaultSettingCMResourceVersion string) error {
    for _, sName := range types.SettingNameList {
        definition, ok := types.GetSettingDefinition(sName)

@ -304,8 +413,16 @@ func (s *DataStore) UpdateSetting(setting *longhorn.Setting) (*longhorn.Setting,
        return nil, err
    }

    delete(obj.Annotations, types.GetLonghornLabelKey(types.UpdateSettingFromLonghorn))
    obj, err = s.lhClient.LonghornV1beta2().Settings(s.namespace).Update(context.TODO(), obj, metav1.UpdateOptions{})
    err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
        latest, getErr := s.lhClient.LonghornV1beta2().Settings(s.namespace).Get(context.TODO(), setting.Name, metav1.GetOptions{})
        if getErr != nil {
            return getErr
        }

        delete(latest.Annotations, types.GetLonghornLabelKey(types.UpdateSettingFromLonghorn))
        obj, err = s.lhClient.LonghornV1beta2().Settings(s.namespace).Update(context.TODO(), latest, metav1.UpdateOptions{})
        return err
    })
    if err != nil {
        return nil, err
    }
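UpdateSetting now wraps the write in retry.RetryOnConflict, re-reading the object before each attempt so a stale resourceVersion no longer fails the call outright. The general client-go pattern looks like this sketch (shown on a ConfigMap purely for illustration; the names here are placeholders, not this repository's API):

package example

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/util/retry"
)

func setLabelWithRetry(ctx context.Context, c kubernetes.Interface, ns, name, key, value string) error {
    return retry.RetryOnConflict(retry.DefaultRetry, func() error {
        // Re-read the latest copy so the Update carries a fresh resourceVersion.
        latest, err := c.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        if latest.Labels == nil {
            latest.Labels = map[string]string{}
        }
        latest.Labels[key] = value
        _, err = c.CoreV1().ConfigMaps(ns).Update(ctx, latest, metav1.UpdateOptions{})
        return err
    })
}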

@ -335,30 +452,21 @@ func (s *DataStore) deleteSetting(name string) error {

// ValidateSetting checks the given setting value types and condition
func (s *DataStore) ValidateSetting(name, value string) (err error) {
    defer func() {
        err = errors.Wrapf(err, "failed to set the setting %v with invalid value %v", name, value)
        err = errors.Wrapf(err, "failed to validate setting %v with invalid value %v", name, value)
    }()
    sName := types.SettingName(name)

    if err := types.ValidateSetting(name, value); err != nil {
        return err
    }

    switch sName {
    switch types.SettingName(name) {
    case types.SettingNamePriorityClass:
        if value != "" {
            if _, err := s.GetPriorityClass(value); err != nil {
                return errors.Wrapf(err, "failed to get priority class %v before modifying priority class setting", value)
            }
        }
    case types.SettingNameGuaranteedInstanceManagerCPU, types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU:
        guaranteedInstanceManagerCPU, err := s.GetSettingWithAutoFillingRO(sName)
        if err != nil {
            return err
        }
        guaranteedInstanceManagerCPU.Value = value
        if err := types.ValidateCPUReservationValues(sName, guaranteedInstanceManagerCPU.Value); err != nil {
            return err
        }

    case types.SettingNameV1DataEngine:
        old, err := s.GetSettingWithAutoFillingRO(types.SettingNameV1DataEngine)
        if err != nil {

@ -394,13 +502,49 @@ func (s *DataStore) ValidateSetting(name, value string) (err error) {
            return err
        }
    }
    case types.SettingNameV2DataEngineCPUMask:
        if value == "" {
            return errors.Errorf("cannot set %v setting to empty value", name)

    case types.SettingNameDataEngineCPUMask:
        definition, ok := types.GetSettingDefinition(types.SettingNameDataEngineCPUMask)
        if !ok {
            return fmt.Errorf("setting %v is not found", types.SettingNameDataEngineCPUMask)
        }
        if err := s.ValidateCPUMask(value); err != nil {
            return err
        var values map[longhorn.DataEngineType]any
        if types.IsJSONFormat(value) {
            values, err = types.ParseDataEngineSpecificSetting(definition, value)
        } else {
            values, err = types.ParseSettingSingleValue(definition, value)
        }
        if err != nil {
            return errors.Wrapf(err, "failed to parse value %v for setting %v", value, types.SettingNameDataEngineCPUMask)
        }
        for dataEngine, raw := range values {
            cpuMask, ok := raw.(string)
            if !ok {
                return fmt.Errorf("setting %v value %v is not a string for data engine %v", types.SettingNameDataEngineCPUMask, raw, dataEngine)
            }

            lhNodes, err := s.ListNodesRO()
            if err != nil {
                return errors.Wrapf(err, "failed to list nodes for %v setting validation for data engine %v", types.SettingNameDataEngineCPUMask, dataEngine)
            }

            // Ensure the CPU mask can be satisfied on each node
            for _, lhnode := range lhNodes {
                kubeNode, err := s.GetKubernetesNodeRO(lhnode.Name)
                if err != nil {
                    if apierrors.IsNotFound(err) {
                        logrus.Warnf("Kubernetes node %s not found, skipping CPU mask validation for this node for data engine %v", lhnode.Name, dataEngine)
                        continue
                    }
                    return errors.Wrapf(err, "failed to get Kubernetes node %s for %v setting validation for data engine %v", lhnode.Name, types.SettingNameDataEngineCPUMask, dataEngine)
                }

                if err := s.ValidateCPUMask(kubeNode, cpuMask); err != nil {
                    return err
                }
            }
        }

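The new DataEngineCPUMask case accepts either a single mask applied to every data engine or a JSON map keyed by data engine, then checks the mask against each node. A self-contained sketch of the parsing and format check (the regular expression is the one used by ValidateCPUMask further down; the "v1"/"v2" keys are illustrative):

package main

import (
    "encoding/json"
    "fmt"
    "regexp"
    "strings"
)

var cpuMaskRegex = regexp.MustCompile(`^0x[1-9a-fA-F][0-9a-fA-F]*$`)

// parseCPUMaskSetting mirrors the dual format accepted above: a JSON map keyed
// by data engine, or a single mask string applied to every listed engine.
func parseCPUMaskSetting(value string, engines []string) (map[string]string, error) {
    masks := map[string]string{}
    if strings.HasPrefix(strings.TrimSpace(value), "{") {
        if err := json.Unmarshal([]byte(value), &masks); err != nil {
            return nil, err
        }
    } else {
        for _, e := range engines {
            masks[e] = value
        }
    }
    for engine, mask := range masks {
        if !cpuMaskRegex.MatchString(mask) {
            return nil, fmt.Errorf("invalid CPU mask %q for data engine %v", mask, engine)
        }
    }
    return masks, nil
}

func main() {
    masks, err := parseCPUMaskSetting(`{"v2":"0x3"}`, []string{"v1", "v2"})
    fmt.Println(masks, err) // map[v2:0x3] <nil>
}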
case types.SettingNameAutoCleanupSystemGeneratedSnapshot:
|
||||
disablePurgeValue, err := s.GetSettingAsBool(types.SettingNameDisableSnapshotPurge)
|
||||
if err != nil {
|
||||
|
@ -409,6 +553,7 @@ func (s *DataStore) ValidateSetting(name, value string) (err error) {
|
|||
if value == "true" && disablePurgeValue {
|
||||
return errors.Errorf("cannot set %v setting to true when %v setting is true", name, types.SettingNameDisableSnapshotPurge)
|
||||
}
|
||||
|
||||
case types.SettingNameDisableSnapshotPurge:
|
||||
autoCleanupValue, err := s.GetSettingAsBool(types.SettingNameAutoCleanupSystemGeneratedSnapshot)
|
||||
if err != nil {
|
||||
|
@ -417,6 +562,7 @@ func (s *DataStore) ValidateSetting(name, value string) (err error) {
|
|||
if value == "true" && autoCleanupValue {
|
||||
return errors.Errorf("cannot set %v setting to true when %v setting is true", name, types.SettingNameAutoCleanupSystemGeneratedSnapshot)
|
||||
}
|
||||
|
||||
case types.SettingNameSnapshotMaxCount:
|
||||
v, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
|
@ -425,6 +571,7 @@ func (s *DataStore) ValidateSetting(name, value string) (err error) {
|
|||
if v < 2 || v > 250 {
|
||||
return fmt.Errorf("%s should be between 2 and 250", name)
|
||||
}
|
||||
|
||||
case types.SettingNameDefaultLonghornStaticStorageClass:
|
||||
definition, ok := types.GetSettingDefinition(types.SettingNameDefaultLonghornStaticStorageClass)
|
||||
if !ok {
|
||||
|
@ -476,45 +623,53 @@ func (s *DataStore) ValidateV2DataEngineEnabled(dataEngineEnabled bool) (ims []*
|
|||
}
|
||||
|
||||
// Check if there is enough hugepages-2Mi capacity for all nodes
|
||||
hugepageRequestedInMiB, err := s.GetSettingWithAutoFillingRO(types.SettingNameV2DataEngineHugepageLimit)
|
||||
hugepageRequestedInMiB, err := s.GetSettingAsIntByDataEngine(types.SettingNameDataEngineHugepageLimit, longhorn.DataEngineTypeV2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
{
|
||||
hugepageRequested := resource.MustParse(hugepageRequestedInMiB.Value + "Mi")
|
||||
// hugepageRequestedInMiB is an integer
|
||||
hugepageRequested, err := resource.ParseQuantity(fmt.Sprintf("%dMi", hugepageRequestedInMiB))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse hugepage value %qMi", hugepageRequestedInMiB)
|
||||
}
|
||||
|
||||
_ims, err := s.ListInstanceManagersRO()
|
||||
_ims, err := s.ListInstanceManagersRO()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to list instance managers for %v setting update", types.SettingNameV2DataEngine)
|
||||
}
|
||||
|
||||
for _, im := range _ims {
|
||||
if types.IsDataEngineV1(im.Spec.DataEngine) {
|
||||
continue
|
||||
}
|
||||
node, err := s.GetKubernetesNodeRO(im.Spec.NodeID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to list instance managers for %v setting update", types.SettingNameV2DataEngine)
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return nil, errors.Wrapf(err, "failed to get Kubernetes node %v for %v setting update", im.Spec.NodeID, types.SettingNameV2DataEngine)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
for _, im := range _ims {
|
||||
node, err := s.GetKubernetesNodeRO(im.Spec.NodeID)
|
||||
if val, ok := node.Labels[types.NodeDisableV2DataEngineLabelKey]; ok && val == types.NodeDisableV2DataEngineLabelKeyTrue {
|
||||
// V2 data engine is disabled on this node, don't worry about hugepages
|
||||
continue
|
||||
}
|
||||
|
||||
if dataEngineEnabled {
|
||||
capacity, ok := node.Status.Capacity["hugepages-2Mi"]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("failed to get hugepages-2Mi capacity for node %v", node.Name)
|
||||
}
|
||||
|
||||
hugepageCapacity, err := resource.ParseQuantity(capacity.String())
|
||||
if err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return nil, errors.Wrapf(err, "failed to get Kubernetes node %v for %v setting update", im.Spec.NodeID, types.SettingNameV2DataEngine)
|
||||
}
|
||||
|
||||
continue
|
||||
return nil, errors.Wrapf(err, "failed to parse hugepage value %qMi", hugepageRequestedInMiB)
|
||||
}
|
||||
|
||||
if val, ok := node.Labels[types.NodeDisableV2DataEngineLabelKey]; ok && val == types.NodeDisableV2DataEngineLabelKeyTrue {
|
||||
// V2 data engine is disabled on this node, don't worry about hugepages
|
||||
continue
|
||||
}
|
||||
|
||||
if dataEngineEnabled {
|
||||
capacity, ok := node.Status.Capacity["hugepages-2Mi"]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("failed to get hugepages-2Mi capacity for node %v", node.Name)
|
||||
}
|
||||
|
||||
hugepageCapacity := resource.MustParse(capacity.String())
|
||||
|
||||
if hugepageCapacity.Cmp(hugepageRequested) < 0 {
|
||||
return nil, errors.Errorf("not enough hugepages-2Mi capacity for node %v, requested %v, capacity %v", node.Name, hugepageRequested.String(), hugepageCapacity.String())
|
||||
}
|
||||
if hugepageCapacity.Cmp(hugepageRequested) < 0 {
|
||||
return nil, errors.Errorf("not enough hugepages-2Mi capacity for node %v, requested %v, capacity %v", node.Name, hugepageRequested.String(), hugepageCapacity.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -522,7 +677,11 @@ func (s *DataStore) ValidateV2DataEngineEnabled(dataEngineEnabled bool) (ims []*
|
|||
return
|
||||
}
|
||||
|
||||
func (s *DataStore) ValidateCPUMask(value string) error {
|
||||
func (s *DataStore) ValidateCPUMask(kubeNode *corev1.Node, value string) error {
|
||||
if value == "" {
|
||||
return fmt.Errorf("failed to validate CPU mask: cannot be empty")
|
||||
}
|
||||
|
||||
// CPU mask must start with 0x
|
||||
cpuMaskRegex := regexp.MustCompile(`^0x[1-9a-fA-F][0-9a-fA-F]*$`)
|
||||
if !cpuMaskRegex.MatchString(value) {
|
||||
|
@ -531,30 +690,76 @@ func (s *DataStore) ValidateCPUMask(value string) error {
|
|||
|
||||
maskValue, err := strconv.ParseUint(value[2:], 16, 64) // skip 0x prefix
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse CPU mask: %s", value)
|
||||
return errors.Wrapf(err, "failed to parse CPU mask %v", value)
|
||||
}
|
||||
|
||||
// Validate the mask value is not larger than the number of available CPUs
|
||||
numCPUs := runtime.NumCPU()
|
||||
numCPUs, err := s.getMinNumCPUsFromAvailableNodes()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get minimum number of CPUs for CPU mask validation")
|
||||
}
|
||||
|
||||
maxCPUMaskValue := (1 << numCPUs) - 1
|
||||
if maskValue > uint64(maxCPUMaskValue) {
|
||||
return fmt.Errorf("CPU mask exceeds the maximum allowed value %v for the current system: %s", maxCPUMaskValue, value)
|
||||
}
|
||||
|
||||
guaranteedInstanceManagerCPU, err := s.GetSettingAsInt(types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU)
|
||||
// CPU mask currently only supports v2 data engine
|
||||
guaranteedInstanceManagerCPUInPercentage, err := s.GetSettingAsFloatByDataEngine(types.SettingNameGuaranteedInstanceManagerCPU, longhorn.DataEngineTypeV2)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get %v setting for CPU mask validation", types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU)
|
||||
return errors.Wrapf(err, "failed to get %v setting for guaranteed instance manager CPU validation for data engine %v",
|
||||
types.SettingNameGuaranteedInstanceManagerCPU, longhorn.DataEngineTypeV2)
|
||||
}
|
||||
|
||||
guaranteedInstanceManagerCPU := float64(kubeNode.Status.Allocatable.Cpu().MilliValue()) * guaranteedInstanceManagerCPUInPercentage
|
||||
|
||||
numMilliCPUsRequrestedByMaskValue := calculateMilliCPUs(maskValue)
|
||||
if numMilliCPUsRequrestedByMaskValue > int(guaranteedInstanceManagerCPU) {
|
||||
return fmt.Errorf("number of CPUs (%v) requested by CPU mask (%v) is larger than the %v setting value (%v)",
|
||||
numMilliCPUsRequrestedByMaskValue, value, types.SettingNameV2DataEngineGuaranteedInstanceManagerCPU, guaranteedInstanceManagerCPU)
|
||||
numMilliCPUsRequrestedByMaskValue, value, types.SettingNameGuaranteedInstanceManagerCPU, guaranteedInstanceManagerCPU)
|
||||
}
|
||||
|
||||
return nil
|
||||
}

func (s *DataStore) getMinNumCPUsFromAvailableNodes() (int64, error) {
    kubeNodes, err := s.ListKubeNodesRO()
    if err != nil {
        return -1, errors.Wrapf(err, "failed to list Kubernetes nodes")
    }

    // Initialize minNumCPUs to the maximum int64 value
    minNumCPUs := int64(^uint64(0) >> 1)
    for _, kubeNode := range kubeNodes {
        lhNode, err := s.GetNodeRO(kubeNode.Name)
        if err != nil {
            if apierrors.IsNotFound(err) {
                continue
            }
            return -1, errors.Wrapf(err, "failed to get Longhorn node %v", kubeNode.Name)
        }
        // Skip nodes that are down, deleted, or missing the manager
        if isUnavailable, err := s.IsNodeDownOrDeletedOrMissingManager(lhNode.Name); err != nil {
            return -1, errors.Wrapf(err, "failed to check if node %v is down or deleted", lhNode.Name)
        } else if isUnavailable {
            continue
        }
        // Skip nodes that disable the v2 data engine
        if val, ok := kubeNode.Labels[types.NodeDisableV2DataEngineLabelKey]; ok {
            if val == types.NodeDisableV2DataEngineLabelKeyTrue {
                continue
            }
        }

        numCPUs := kubeNode.Status.Allocatable.Cpu().Value()
        if numCPUs < minNumCPUs {
            minNumCPUs = numCPUs
        }
    }

    return minNumCPUs, nil
}

func calculateMilliCPUs(mask uint64) int {
    // Count the number of set bits in the mask
    setBits := bits.OnesCount64(mask)
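calculateMilliCPUs is cut off by the next hunk, but counting the set bits and charging one full core (1000m) per selected CPU is the natural reading of how the mask is compared against the guaranteed instance manager CPU above; the factor of 1000 in this sketch is an assumption for illustration, not confirmed by the diff:

package main

import (
    "fmt"
    "math/bits"
    "strconv"
)

// milliCPUsForMask assumes one full core (1000m) per bit set in the mask.
func milliCPUsForMask(mask uint64) int {
    return bits.OnesCount64(mask) * 1000
}

func main() {
    mask, _ := strconv.ParseUint("3", 16, 64) // "0x3" with the 0x prefix stripped: two cores
    requested := milliCPUsForMask(mask)
    budget := 2500 // e.g. an allocatable-CPU budget in milliCPU
    fmt.Printf("requested %dm, within budget: %v\n", requested, requested <= budget)
}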
|
@ -654,6 +859,11 @@ func (s *DataStore) getSettingRO(name string) (*longhorn.Setting, error) {
|
|||
return s.settingLister.Settings(s.namespace).Get(name)
|
||||
}
|
||||
|
||||
// GetSettingWithAutoFillingRO retrieves a read-only setting from the datastore by its name.
|
||||
// If the setting does not exist, it automatically constructs and returns a default setting
|
||||
// object using the predefined default value from the setting's definition. If the setting
|
||||
// name is not recognized or an unexpected error occurs during retrieval, the function
|
||||
// returns an error.
|
||||
func (s *DataStore) GetSettingWithAutoFillingRO(sName types.SettingName) (*longhorn.Setting, error) {
|
||||
definition, ok := types.GetSettingDefinition(sName)
|
||||
if !ok {
|
||||
|
@ -727,6 +937,44 @@ func (s *DataStore) GetSettingValueExisted(sName types.SettingName) (string, err
    return setting.Value, nil
}

// GetSettingValueExistedByDataEngine returns the value of the given setting name for a specific data engine.
// Returns error if the setting does not have a value for the given data engine.
func (s *DataStore) GetSettingValueExistedByDataEngine(settingName types.SettingName, dataEngine longhorn.DataEngineType) (string, error) {
    definition, ok := types.GetSettingDefinition(settingName)
    if !ok {
        return "", fmt.Errorf("setting %v is not supported", settingName)
    }

    if !definition.DataEngineSpecific {
        return s.GetSettingValueExisted(settingName)
    }

    if !types.IsJSONFormat(definition.Default) {
        return "", fmt.Errorf("setting %v does not have a JSON-formatted default value", settingName)
    }

    setting, err := s.GetSettingWithAutoFillingRO(settingName)
    if err != nil {
        return "", err
    }

    values, err := types.ParseDataEngineSpecificSetting(definition, setting.Value)
    if err != nil {
        return "", err
    }

    value, ok := values[dataEngine]
    if ok {
        if strValue, ok := value.(string); ok {
            return strValue, nil
        } else {
            return fmt.Sprintf("%v", value), nil
        }
    }

    return "", fmt.Errorf("setting %v does not have a value for data engine %v", settingName, dataEngine)
}
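Callers such as GetAutoBalancedReplicasSetting and GetVolumeSnapshotDataIntegrity below switch to this per-engine lookup. A stripped-down sketch of the lookup using plain JSON decoding (stand-in keys and values, no types helpers):

package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

// valueForEngine mirrors the lookup above: JSON values are keyed by engine,
// anything non-string is stringified, plain values apply to all engines.
func valueForEngine(stored, engine string) (string, error) {
    if !strings.HasPrefix(strings.TrimSpace(stored), "{") {
        return stored, nil // not engine-specific
    }
    values := map[string]any{}
    if err := json.Unmarshal([]byte(stored), &values); err != nil {
        return "", err
    }
    v, ok := values[engine]
    if !ok {
        return "", fmt.Errorf("no value for data engine %v", engine)
    }
    if s, ok := v.(string); ok {
        return s, nil
    }
    return fmt.Sprintf("%v", v), nil
}

func main() {
    v, _ := valueForEngine(`{"v1":"fast-check","v2":"disabled"}`, "v2")
    fmt.Println(v) // disabled
}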
|
||||
|
||||
// ListSettings lists all Settings in the namespace, and fill with default
|
||||
// values of any missing entry
|
||||
func (s *DataStore) ListSettings() (map[types.SettingName]*longhorn.Setting, error) {
|
||||
|
@ -785,7 +1033,7 @@ func (s *DataStore) GetAutoBalancedReplicasSetting(volume *longhorn.Volume, logg
|
|||
|
||||
var err error
|
||||
if setting == "" {
|
||||
globalSetting, _ := s.GetSettingValueExisted(types.SettingNameReplicaAutoBalance)
|
||||
globalSetting, _ := s.GetSettingValueExistedByDataEngine(types.SettingNameReplicaAutoBalance, volume.Spec.DataEngine)
|
||||
|
||||
if globalSetting == string(longhorn.ReplicaAutoBalanceIgnored) {
|
||||
globalSetting = string(longhorn.ReplicaAutoBalanceDisabled)
|
||||
|
@ -812,20 +1060,9 @@ func (s *DataStore) GetVolumeSnapshotDataIntegrity(volumeName string) (longhorn.
|
|||
return volume.Spec.SnapshotDataIntegrity, nil
|
||||
}
|
||||
|
||||
var dataIntegrity string
|
||||
switch volume.Spec.DataEngine {
|
||||
case longhorn.DataEngineTypeV1:
|
||||
dataIntegrity, err = s.GetSettingValueExisted(types.SettingNameSnapshotDataIntegrity)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed to assert %v value", types.SettingNameSnapshotDataIntegrity)
|
||||
}
|
||||
case longhorn.DataEngineTypeV2:
|
||||
dataIntegrity, err = s.GetSettingValueExisted(types.SettingNameV2DataEngineSnapshotDataIntegrity)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed to assert %v value", types.SettingNameV2DataEngineSnapshotDataIntegrity)
|
||||
}
|
||||
default:
|
||||
return "", fmt.Errorf("unknown data engine type %v for snapshot data integrity get", volume.Spec.DataEngine)
|
||||
dataIntegrity, err := s.GetSettingValueExistedByDataEngine(types.SettingNameSnapshotDataIntegrity, volume.Spec.DataEngine)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed to assert %v value for data engine %v", types.SettingNameSnapshotDataIntegrity, volume.Spec.DataEngine)
|
||||
}
|
||||
|
||||
return longhorn.SnapshotDataIntegrity(dataIntegrity), nil
|
||||
|
@ -3167,6 +3404,22 @@ func (s *DataStore) IsNodeSchedulable(name string) bool {
|
|||
return nodeSchedulableCondition.Status == longhorn.ConditionStatusTrue
|
||||
}
|
||||
|
||||
func (s *DataStore) IsNodeHasDiskUUID(nodeName, diskUUID string) (bool, error) {
|
||||
node, err := s.GetNodeRO(nodeName)
|
||||
if err != nil {
|
||||
if ErrorIsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
for _, diskStatus := range node.Status.DiskStatus {
|
||||
if diskStatus.DiskUUID == diskUUID {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func getNodeSelector(nodeName string) (labels.Selector, error) {
|
||||
return metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
|
@ -3515,6 +3768,81 @@ func GetOwnerReferencesForNode(node *longhorn.Node) []metav1.OwnerReference {
|
|||
}
|
||||
}
|
||||
|
||||
// GetSettingAsFloat gets the setting for the given name, returns as float
|
||||
// Returns error if the definition type is not float
|
||||
func (s *DataStore) GetSettingAsFloat(settingName types.SettingName) (float64, error) {
|
||||
definition, ok := types.GetSettingDefinition(settingName)
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("setting %v is not supported", settingName)
|
||||
}
|
||||
settings, err := s.GetSettingWithAutoFillingRO(settingName)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
value := settings.Value
|
||||
|
||||
if definition.Type == types.SettingTypeFloat {
|
||||
result, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
return -1, fmt.Errorf("the %v setting value couldn't change to float, value is %v ", string(settingName), value)
|
||||
}
|
||||
|
||||
// GetSettingAsFloatByDataEngine retrieves the float64 value of the given setting for the specified
|
||||
// DataEngineType. If the setting is not data-engine-specific, it falls back to GetSettingAsFloat.
|
||||
// For data-engine-specific settings, it expects the setting value to be in JSON format mapping
|
||||
// data engine types to float values.
|
||||
//
|
||||
// If the setting is not defined, not in the expected format, or the value for the given data engine
|
||||
// is missing or not a float, an error is returned.
|
||||
//
|
||||
// Example JSON format for a data-engine-specific setting:
|
||||
//
|
||||
// {"v1": 50.0, "v2": 100.0}
|
||||
//
|
||||
// Returns the float value for the provided data engine type, or an error if validation or parsing fails.
|
||||
func (s *DataStore) GetSettingAsFloatByDataEngine(settingName types.SettingName, dataEngine longhorn.DataEngineType) (float64, error) {
|
||||
definition, ok := types.GetSettingDefinition(settingName)
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("setting %v is not supported", settingName)
|
||||
}
|
||||
|
||||
if !definition.DataEngineSpecific {
|
||||
return s.GetSettingAsFloat(settingName)
|
||||
}
|
||||
if !types.IsJSONFormat(definition.Default) {
|
||||
return -1, fmt.Errorf("setting %v does not have a JSON-formatted default value", settingName)
|
||||
}
|
||||
|
||||
// Get the setting value, which may be auto-filled
|
||||
setting, err := s.GetSettingWithAutoFillingRO(settingName)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
// Parse the setting value as a map of floats: map[dataEngine]float64
|
||||
values, err := types.ParseDataEngineSpecificSetting(definition, setting.Value)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
value, ok := values[dataEngine]
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("the %v setting value for data engine %v is not defined, value is %v", string(settingName), dataEngine, values)
|
||||
}
|
||||
|
||||
floatValue, ok := value.(float64)
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("the %v setting value for data engine %v is not a float, value is %v", string(settingName), dataEngine, value)
|
||||
}
|
||||
|
||||
return floatValue, nil
|
||||
}
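One detail worth noting for the float path: if the engine-specific map is ultimately decoded with encoding/json (an assumption about ParseDataEngineSpecificSetting, which is not shown in this diff), JSON numbers arrive as float64, which is exactly what the type assertion above expects:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // encoding/json decodes JSON numbers into float64 when the target is `any`,
    // so a value like {"v1": 50.0, "v2": 100.0} supports a direct float64 assertion.
    values := map[string]any{}
    _ = json.Unmarshal([]byte(`{"v1": 50.0, "v2": 100.0}`), &values)
    v2, ok := values["v2"].(float64)
    fmt.Println(v2, ok) // 100 true
}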
|
||||
|
||||
// GetSettingAsInt gets the setting for the given name, returns as integer
|
||||
// Returns error if the definition type is not integer
|
||||
func (s *DataStore) GetSettingAsInt(settingName types.SettingName) (int64, error) {
|
||||
|
@ -3539,6 +3867,55 @@ func (s *DataStore) GetSettingAsInt(settingName types.SettingName) (int64, error
|
|||
return -1, fmt.Errorf("the %v setting value couldn't change to integer, value is %v ", string(settingName), value)
|
||||
}
|
||||
|
||||
// GetSettingAsIntByDataEngine retrieves the int64 value of the given setting for the specified
|
||||
// DataEngineType. If the setting is not data-engine-specific, it falls back to GetSettingAsInt.
|
||||
// For data-engine-specific settings, it expects the setting value to be in JSON format mapping
|
||||
// data engine types to integer values.
|
||||
//
|
||||
// If the setting is not defined, not in the expected format, or the value for the given data engine
|
||||
// is missing or not an integer, an error is returned.
|
||||
//
|
||||
// Example JSON format for a data-engine-specific setting:
|
||||
//
|
||||
// {"v1": 1024, "v2": 2048}
|
||||
//
|
||||
// Returns the int64 value for the provided data engine type, or an error if validation or parsing fails.
|
||||
func (s *DataStore) GetSettingAsIntByDataEngine(settingName types.SettingName, dataEngine longhorn.DataEngineType) (int64, error) {
|
||||
definition, ok := types.GetSettingDefinition(settingName)
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("setting %v is not supported", settingName)
|
||||
}
|
||||
|
||||
if !definition.DataEngineSpecific {
|
||||
return s.GetSettingAsInt(settingName)
|
||||
}
|
||||
if !types.IsJSONFormat(definition.Default) {
|
||||
return -1, fmt.Errorf("setting %v does not have a JSON-formatted default value", settingName)
|
||||
}
|
||||
|
||||
setting, err := s.GetSettingWithAutoFillingRO(settingName)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
values, err := types.ParseDataEngineSpecificSetting(definition, setting.Value)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
value, ok := values[dataEngine]
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("the %v setting value for data engine %v is not defined, value is %v", string(settingName), dataEngine, values)
|
||||
}
|
||||
|
||||
intValue, ok := value.(int64)
|
||||
if !ok {
|
||||
return -1, fmt.Errorf("the %v setting value for data engine %v is not an integer, value is %v", string(settingName), dataEngine, value)
|
||||
}
|
||||
|
||||
return intValue, nil
|
||||
}
|
||||
|
||||
// GetSettingAsBool gets the setting for the given name, returns as boolean
|
||||
// Returns error if the definition type is not boolean
|
||||
func (s *DataStore) GetSettingAsBool(settingName types.SettingName) (bool, error) {
|
||||
|
@ -3546,11 +3923,11 @@ func (s *DataStore) GetSettingAsBool(settingName types.SettingName) (bool, error
|
|||
if !ok {
|
||||
return false, fmt.Errorf("setting %v is not supported", settingName)
|
||||
}
|
||||
settings, err := s.GetSettingWithAutoFillingRO(settingName)
|
||||
setting, err := s.GetSettingWithAutoFillingRO(settingName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
value := settings.Value
|
||||
value := setting.Value
|
||||
|
||||
if definition.Type == types.SettingTypeBool {
|
||||
result, err := strconv.ParseBool(value)
|
||||
|
@ -3563,6 +3940,55 @@ func (s *DataStore) GetSettingAsBool(settingName types.SettingName) (bool, error
|
|||
return false, fmt.Errorf("the %v setting value couldn't be converted to bool, value is %v ", string(settingName), value)
|
||||
}
|
||||
|
||||
// GetSettingAsBoolByDataEngine retrieves the bool value of the given setting for the specified
|
||||
// DataEngineType. If the setting is not data-engine-specific, it falls back to GetSettingAsBool.
|
||||
// For data-engine-specific settings, it expects the setting value to be in JSON format mapping
|
||||
// data engine types to boolean values.
|
||||
//
|
||||
// If the setting is not defined, not in the expected format, or the value for the given data engine
|
||||
// is missing or not a boolean, an error is returned.
|
||||
//
|
||||
// Example JSON format for a data-engine-specific setting:
|
||||
//
|
||||
// {"v1": true, "v2": false}
|
||||
//
|
||||
// Returns the boolean value for the provided data engine type, or an error if validation or parsing fails.
|
||||
func (s *DataStore) GetSettingAsBoolByDataEngine(settingName types.SettingName, dataEngine longhorn.DataEngineType) (bool, error) {
|
||||
definition, ok := types.GetSettingDefinition(settingName)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("setting %v is not supported", settingName)
|
||||
}
|
||||
|
||||
if !definition.DataEngineSpecific {
|
||||
return s.GetSettingAsBool(settingName)
|
||||
}
|
||||
if !types.IsJSONFormat(definition.Default) {
|
||||
return false, fmt.Errorf("setting %v does not have a JSON-formatted default value", settingName)
|
||||
}
|
||||
|
||||
setting, err := s.GetSettingWithAutoFillingRO(settingName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
values, err := types.ParseDataEngineSpecificSetting(definition, setting.Value)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
value, ok := values[dataEngine]
|
||||
if !ok {
|
||||
return false, fmt.Errorf("the %v setting value for data engine %v is not defined, value is %v", string(settingName), dataEngine, values)
|
||||
}
|
||||
|
||||
boolValue, ok := value.(bool)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("the %v setting value for data engine %v is not an boolean, value is %v", string(settingName), dataEngine, value)
|
||||
}
|
||||
|
||||
return boolValue, nil
|
||||
}
|
||||
|
||||
// GetSettingImagePullPolicy get the setting and return one of Kubernetes ImagePullPolicy definition
|
||||
// Returns error if the ImagePullPolicy is invalid
|
||||
func (s *DataStore) GetSettingImagePullPolicy() (corev1.PullPolicy, error) {
|
||||
|
@ -5446,6 +5872,21 @@ func (s *DataStore) GetLHVolumeAttachmentByVolumeName(volName string) (*longhorn
|
|||
return s.GetLHVolumeAttachment(vaName)
|
||||
}
|
||||
|
||||
// ListLHVolumeAttachments returns all VolumeAttachments in the cluster
|
||||
func (s *DataStore) ListLHVolumeAttachments() ([]*longhorn.VolumeAttachment, error) {
|
||||
vaList, err := s.lhVolumeAttachmentLister.VolumeAttachments(s.namespace).List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := make([]*longhorn.VolumeAttachment, 0, len(vaList))
|
||||
for _, va := range vaList {
|
||||
result = append(result, va.DeepCopy())
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ListSupportBundles returns an object contains all SupportBundles
|
||||
func (s *DataStore) ListSupportBundles() (map[string]*longhorn.SupportBundle, error) {
|
||||
itemMap := make(map[string]*longhorn.SupportBundle)
|
||||
|
@ -6190,6 +6631,10 @@ func (s *DataStore) GetRunningInstanceManagerByNodeRO(node string, dataEngine lo
|
|||
}
|
||||
|
||||
func (s *DataStore) GetFreezeFilesystemForSnapshotSetting(e *longhorn.Engine) (bool, error) {
|
||||
if types.IsDataEngineV2(e.Spec.DataEngine) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
volume, err := s.GetVolumeRO(e.Spec.VolumeName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
@ -6199,7 +6644,7 @@ func (s *DataStore) GetFreezeFilesystemForSnapshotSetting(e *longhorn.Engine) (b
|
|||
return volume.Spec.FreezeFilesystemForSnapshot == longhorn.FreezeFilesystemForSnapshotEnabled, nil
|
||||
}
|
||||
|
||||
return s.GetSettingAsBool(types.SettingNameFreezeFilesystemForSnapshot)
|
||||
return s.GetSettingAsBoolByDataEngine(types.SettingNameFreezeFilesystemForSnapshot, e.Spec.DataEngine)
|
||||
}
|
||||
|
||||
func (s *DataStore) CanPutBackingImageOnDisk(backingImage *longhorn.BackingImage, diskUUID string) (bool, error) {
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -334,5 +335,6 @@ func (m *BackupMonitor) Close() {
|
|||
func getBackupParameters(backup *longhorn.Backup) map[string]string {
|
||||
parameters := map[string]string{}
|
||||
parameters[lhbackup.LonghornBackupParameterBackupMode] = string(backup.Spec.BackupMode)
|
||||
parameters[lhbackup.LonghornBackupParameterBackupBlockSize] = strconv.FormatInt(backup.Spec.BackupBlockSize, 10)
|
||||
return parameters
|
||||
}
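getBackupParameters passes everything as strings, so the int64 block size is rendered with strconv.FormatInt; the consuming side (sketched here as an assumption, not code from this changeset) would parse it back and treat 0 as the legacy 2MiB default:

package main

import (
    "fmt"
    "strconv"
)

func main() {
    // Producer side: the int64 block size becomes a string parameter.
    param := strconv.FormatInt(2097152, 10)

    // Consumer side (illustrative): parse it back, mapping 0 to the legacy 2MiB.
    blockSize, err := strconv.ParseInt(param, 10, 64)
    if err != nil {
        panic(err)
    }
    if blockSize == 0 {
        blockSize = 2 * 1024 * 1024
    }
    fmt.Println(blockSize) // 2097152
}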
|
||||
|
|
36
go.mod
36
go.mod
|
@ -2,7 +2,7 @@ module github.com/longhorn/longhorn-manager
|
|||
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.5
|
||||
toolchain go1.24.6
|
||||
|
||||
// Replace directives are required for dependencies in this section because:
|
||||
// - This module imports k8s.io/kubernetes.
|
||||
|
@ -55,8 +55,9 @@ replace (
|
|||
)
|
||||
|
||||
require (
|
||||
github.com/cockroachdb/errors v1.12.0
|
||||
github.com/container-storage-interface/spec v1.11.0
|
||||
github.com/docker/go-connections v0.5.0
|
||||
github.com/docker/go-connections v0.6.0
|
||||
github.com/go-co-op/gocron v1.37.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/handlers v1.5.2
|
||||
|
@ -65,11 +66,11 @@ require (
|
|||
github.com/jinzhu/copier v0.4.0
|
||||
github.com/kubernetes-csi/csi-lib-utils v0.22.0
|
||||
github.com/longhorn/backing-image-manager v1.9.1
|
||||
github.com/longhorn/backupstore v0.0.0-20250728024545-3d2abce3e32f
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250725014231-2c0bac610814
|
||||
github.com/longhorn/backupstore v0.0.0-20250804022317-794abf817297
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250812101836-470cb7301942
|
||||
github.com/longhorn/go-iscsi-helper v0.0.0-20250713130221-69ce6f3960fa
|
||||
github.com/longhorn/go-spdk-helper v0.0.3-0.20250728015531-59051d40ad88
|
||||
github.com/longhorn/longhorn-engine v1.9.1
|
||||
github.com/longhorn/go-spdk-helper v0.0.3-0.20250809103353-695fd752a98b
|
||||
github.com/longhorn/longhorn-engine v1.10.0-dev-20250713.0.20250728071833-3932ded2f139
|
||||
github.com/longhorn/longhorn-instance-manager v1.10.0-dev-20250629.0.20250711075830-f3729b840178
|
||||
github.com/longhorn/longhorn-share-manager v1.9.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
|
@ -81,9 +82,9 @@ require (
|
|||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/urfave/cli v1.22.17
|
||||
golang.org/x/mod v0.26.0
|
||||
golang.org/x/net v0.42.0
|
||||
golang.org/x/sys v0.34.0
|
||||
golang.org/x/mod v0.27.0
|
||||
golang.org/x/net v0.43.0
|
||||
golang.org/x/sys v0.35.0
|
||||
golang.org/x/time v0.12.0
|
||||
google.golang.org/grpc v1.74.2
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
|
@ -102,18 +103,22 @@ require (
|
|||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
|
||||
github.com/cockroachdb/redact v1.1.5 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/getsentry/sentry-go v0.27.0 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/longhorn/types v0.0.0-20250710112743-e3a1e9e2a9c1 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/longhorn/types v0.0.0-20250810143617-8a478c078cb8 // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/shirou/gopsutil/v3 v3.24.5 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
|
||||
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
@ -139,7 +144,6 @@ require (
|
|||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/gorilla/context v1.1.2 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
|
@ -169,12 +173,12 @@ require (
|
|||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/multierr v1.11.0
|
||||
golang.org/x/crypto v0.40.0 // indirect
|
||||
golang.org/x/crypto v0.41.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.16.0
|
||||
golang.org/x/term v0.33.0 // indirect
|
||||
golang.org/x/text v0.27.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
golang.org/x/term v0.34.0 // indirect
|
||||
golang.org/x/text v0.28.0
|
||||
google.golang.org/protobuf v1.36.7
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/apiserver v0.33.3 // indirect
|
||||
k8s.io/component-base v0.33.3 // indirect
|
||||
|
|
72
go.sum
72
go.sum
|
@ -14,6 +14,12 @@ github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 h1:SjZ2GvvOononHOpK
|
|||
github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cockroachdb/errors v1.12.0 h1:d7oCs6vuIMUQRVbi6jWWWEJZahLCfJpnJSVobd1/sUo=
|
||||
github.com/cockroachdb/errors v1.12.0/go.mod h1:SvzfYNNBshAVbZ8wzNc/UPK3w1vf0dKDUP41ucAIf7g=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
|
||||
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/container-storage-interface/spec v1.11.0 h1:H/YKTOeUZwHtyPOr9raR+HgFmGluGCklulxDYxSdVNM=
|
||||
github.com/container-storage-interface/spec v1.11.0/go.mod h1:DtUvaQszPml1YJfIK7c00mlv6/g4wNMLanLgiUbKFRI=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
|
||||
|
@ -27,8 +33,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
|||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
|
||||
|
@ -41,8 +47,12 @@ github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34
|
|||
github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo=
|
||||
github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q=
|
||||
github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc=
|
||||
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
|
||||
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
|
||||
github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0=
|
||||
github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
|
@ -111,22 +121,22 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn
|
|||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
|
||||
github.com/longhorn/backing-image-manager v1.9.1 h1:amT5BDkBJnnmlJYfPfA2m0o3zdvArf7e/DSsbgOquX0=
|
||||
github.com/longhorn/backing-image-manager v1.9.1/go.mod h1:a9UGK3bsd1Gj0kbN5tKev5/uaSwjOvoHqZzLqMMqnU0=
|
||||
github.com/longhorn/backupstore v0.0.0-20250728024545-3d2abce3e32f h1:ackmMlq7H9YEQGSYKQnKiJkjdYh5SlKcMj3Ex/pswbc=
|
||||
github.com/longhorn/backupstore v0.0.0-20250728024545-3d2abce3e32f/go.mod h1:j5TiUyvRBYaSaPY/p6GIFOk1orfWcngk9hIWxDDJ5mg=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250725014231-2c0bac610814 h1:9qcRV8oIOS2J+QWLIk//BxsTMTaM63LceKCXTPG3f8Y=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250725014231-2c0bac610814/go.mod h1:c7BqzWJP4iuRwl/Di+OPGIfC18SoIXy5+SokJsmmKMg=
|
||||
github.com/longhorn/backupstore v0.0.0-20250804022317-794abf817297 h1:KVnOHFT3wuwgyhV7/Rue8NMt13NkpIkZ3B7eVR0C8yM=
|
||||
github.com/longhorn/backupstore v0.0.0-20250804022317-794abf817297/go.mod h1:j5TiUyvRBYaSaPY/p6GIFOk1orfWcngk9hIWxDDJ5mg=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250812101836-470cb7301942 h1:H9hPMP02ZJSzXa7/0TOG3HQAhieDAGQuqnePSlj+BbQ=
|
||||
github.com/longhorn/go-common-libs v0.0.0-20250812101836-470cb7301942/go.mod h1:fuYzrb6idZgLrh8yePy6fA+LVB+z5fl4zZbBAU09+0g=
|
||||
github.com/longhorn/go-iscsi-helper v0.0.0-20250713130221-69ce6f3960fa h1:J0DyOSate7Vf+zlHYB5WrCTWJfshEsSJDp161GjBmhI=
|
||||
github.com/longhorn/go-iscsi-helper v0.0.0-20250713130221-69ce6f3960fa/go.mod h1:fN9H878mLjAqSbPxEXpOCwvTlt43h+/CZxXrQlX/iMQ=
|
||||
github.com/longhorn/go-spdk-helper v0.0.3-0.20250728015531-59051d40ad88 h1:Rgv8mxCYUwrU6Duki/s07XO4WhbLDIajZFunHrhc0i0=
|
||||
github.com/longhorn/go-spdk-helper v0.0.3-0.20250728015531-59051d40ad88/go.mod h1:xphXU+fnlJeIi/uGlANhHcq7IBEheaRwPQJuGFyhX6w=
|
||||
github.com/longhorn/longhorn-engine v1.9.1 h1:DlkcXhwmR2b6ATwZeaQr8hG4i8Mf4SLcXcIzgnl6jaI=
|
||||
github.com/longhorn/longhorn-engine v1.9.1/go.mod h1:40+Fw+/PV78DDFYWXUfJHmrZ8QGfFaisC9m9YRnw4xg=
|
||||
github.com/longhorn/go-spdk-helper v0.0.3-0.20250809103353-695fd752a98b h1:IzKSLNxFgDA/5ZtVJnt5CkgAfjyendEJsr2+fRMAa18=
|
||||
github.com/longhorn/go-spdk-helper v0.0.3-0.20250809103353-695fd752a98b/go.mod h1:ypwTG96myWDEkea5PxNzzii9CPm/TI8duZwPGxUNsvo=
|
||||
github.com/longhorn/longhorn-engine v1.10.0-dev-20250713.0.20250728071833-3932ded2f139 h1:qeR/Rt/Mmahgzf2Df2m00BLZibYZYs5+iTTvbHFfAXA=
|
||||
github.com/longhorn/longhorn-engine v1.10.0-dev-20250713.0.20250728071833-3932ded2f139/go.mod h1:kl2QVpLZeMoYYSAVOd2IDiP7JeLQFX/fujLA9MdyK6o=
|
||||
github.com/longhorn/longhorn-instance-manager v1.10.0-dev-20250629.0.20250711075830-f3729b840178 h1:JO7uffDjHufJZZxvXLdoLpIWUl1/QszoZlx9dzCRNKY=
|
||||
github.com/longhorn/longhorn-instance-manager v1.10.0-dev-20250629.0.20250711075830-f3729b840178/go.mod h1:dLZTouISlm8sUpSDDb4xbnSEbZOBnKCVFMf46Ybpr44=
|
||||
github.com/longhorn/longhorn-share-manager v1.9.1 h1:ObRP8lnNOncRg9podwrPrqObBXJsQDlPfNwslxkBRhM=
|
||||
github.com/longhorn/longhorn-share-manager v1.9.1/go.mod h1:vYqc2o+6xTlgdlweIeED4Do/n+0/4I3AbD6jQ5OHfcg=
|
||||
github.com/longhorn/types v0.0.0-20250710112743-e3a1e9e2a9c1 h1:Lox/NlebN9jOc9JXokB270iyeMlyUw9gRePBy5LKwz0=
|
||||
github.com/longhorn/types v0.0.0-20250710112743-e3a1e9e2a9c1/go.mod h1:3bhH8iUZGZT3kA/B1DYMGzpdzfacqeexOt4SHo4/C2I=
|
||||
github.com/longhorn/types v0.0.0-20250810143617-8a478c078cb8 h1:NkYbz5Bs+zNW7l3lS9xG9ktUPcNCgmG1tEYzOCk7rdM=
|
||||
github.com/longhorn/types v0.0.0-20250810143617-8a478c078cb8/go.mod h1:jbvGQ66V//M9Jp2DC6k+BR74QxSK0Hp/L2FRJ/SBxFA=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
|
@ -155,6 +165,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
|||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
|
||||
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -253,21 +265,21 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
|
|||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
|
||||
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
|
||||
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
|
||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a h1:Y+7uR/b1Mw2iSXZ3G//1haIiSElDQZ8KWh0h+sZPG90=
|
||||
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
|
||||
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
|
||||
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
|
||||
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -284,22 +296,22 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
|
||||
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
|
||||
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
|
||||
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -308,8 +320,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:
|
|||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4=
|
||||
google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
|
||||
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
|
|
|
@ -684,6 +684,15 @@ spec:
|
|||
spec:
|
||||
description: BackupSpec defines the desired state of the Longhorn backup
|
||||
properties:
|
||||
backupBlockSize:
|
||||
description: The backup block size. 0 means the legacy default size
2MiB, and -1 indicates the block size is invalid.
|
||||
enum:
|
||||
- "-1"
|
||||
- "2097152"
|
||||
- "16777216"
|
||||
format: int64
|
||||
type: string
|
||||
backupMode:
|
||||
description: |-
|
||||
The backup mode of this backup.
|
||||
|
@ -1551,6 +1560,8 @@ spec:
|
|||
type: string
|
||||
started:
|
||||
type: boolean
|
||||
starting:
|
||||
type: boolean
|
||||
storageIP:
|
||||
type: string
|
||||
ublkID:
|
||||
|
@ -2555,6 +2566,8 @@ spec:
|
|||
type: boolean
|
||||
started:
|
||||
type: boolean
|
||||
starting:
|
||||
type: boolean
|
||||
storageIP:
|
||||
type: string
|
||||
ublkID:
|
||||
|
@ -2632,7 +2645,10 @@ spec:
|
|||
- applied
|
||||
type: object
|
||||
value:
|
||||
description: The value of the setting.
|
||||
description: |-
|
||||
The value of the setting.
|
||||
- It can be a non-JSON formatted string that is applied to all the applicable data engines listed in the setting definition.
|
||||
- It can be a JSON formatted string that contains values for applicable data engines listed in the setting definition's Default.
|
||||
type: string
|
||||
required:
|
||||
- value
|
||||
|
@ -3457,6 +3473,14 @@ spec:
|
|||
type: string
|
||||
backingImage:
|
||||
type: string
|
||||
backupBlockSize:
|
||||
description: BackupBlockSize indicates the block size to create backups.
The block size is immutable.
|
||||
enum:
|
||||
- "2097152"
|
||||
- "16777216"
|
||||
format: int64
|
||||
type: string
|
||||
backupCompressionMethod:
|
||||
enum:
|
||||
- none
|
||||
|
@ -3549,9 +3573,10 @@ spec:
|
|||
- disabled
|
||||
type: string
|
||||
replicaRebuildingBandwidthLimit:
|
||||
description: ReplicaRebuildingBandwidthLimit limits the write bandwidth
|
||||
(in megabytes per second) on the destination replica during rebuilding.
|
||||
Set to 0 to disable bandwidth limiting.
|
||||
description: ReplicaRebuildingBandwidthLimit controls the maximum
|
||||
write bandwidth (in megabytes per second) allowed on the destination
|
||||
replica during the rebuilding process. Set this value to 0 to disable
|
||||
bandwidth limiting.
|
||||
format: int64
|
||||
minimum: 0
|
||||
type: integer
|
||||
|
|
|
@ -51,6 +51,11 @@ type BackupSpec struct {
|
|||
// Can be "full" or "incremental"
|
||||
// +optional
|
||||
BackupMode BackupMode `json:"backupMode"`
|
||||
// The backup block size. 0 means the legacy default size 2MiB, and -1 indicates the block size is invalid.
|
||||
// +kubebuilder:validation:Type=string
|
||||
// +kubebuilder:validation:Enum="-1";"2097152";"16777216"
|
||||
// +optional
|
||||
BackupBlockSize int64 `json:"backupBlockSize,string"`
|
||||
}
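The json:"backupBlockSize,string" tag makes the int64 marshal and unmarshal as a quoted string, which is what lets the CRD declare the field as a string-typed enum ("-1", "2097152", "16777216"). A quick stand-alone illustration with a stand-in struct:

package main

import (
    "encoding/json"
    "fmt"
)

// spec is a stand-in showing the `,string` tag used for BackupBlockSize above.
type spec struct {
    BackupBlockSize int64 `json:"backupBlockSize,string"`
}

func main() {
    out, _ := json.Marshal(spec{BackupBlockSize: 2097152})
    fmt.Println(string(out)) // {"backupBlockSize":"2097152"}

    var s spec
    _ = json.Unmarshal([]byte(`{"backupBlockSize":"16777216"}`), &s)
    fmt.Println(s.BackupBlockSize) // 16777216
}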
|
||||
|
||||
// BackupStatus defines the observed state of the Longhorn backup
|
||||
|
|
|
@ -102,6 +102,8 @@ type InstanceStatus struct {
|
|||
// +optional
|
||||
Port int `json:"port"`
|
||||
// +optional
|
||||
Starting bool `json:"starting"`
|
||||
// +optional
|
||||
Started bool `json:"started"`
|
||||
// +optional
|
||||
LogFetched bool `json:"logFetched"`
|
||||
|
|
|
@ -17,6 +17,8 @@ type Setting struct {
|
|||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// The value of the setting.
|
||||
// - It can be a non-JSON formatted string that is applied to all the applicable data engines listed in the setting definition.
|
||||
// - It can be a JSON formatted string that contains values for applicable data engines listed in the setting definition's Default.
|
||||
Value string `json:"value"`
|
||||
|
||||
// The status of the setting.
|
||||
|
|
|
@ -285,6 +285,11 @@ type VolumeSpec struct {
|
|||
// +kubebuilder:validation:Enum=none;lz4;gzip
|
||||
// +optional
|
||||
BackupCompressionMethod BackupCompressionMethod `json:"backupCompressionMethod"`
|
||||
// BackupBlockSize indicates the block size to create backups. The block size is immutable.
|
||||
// +kubebuilder:validation:Type=string
|
||||
// +kubebuilder:validation:Enum="2097152";"16777216"
|
||||
// +optional
|
||||
BackupBlockSize int64 `json:"backupBlockSize,string"`
|
||||
// +kubebuilder:validation:Enum=v1;v2
|
||||
// +optional
|
||||
DataEngine DataEngineType `json:"dataEngine"`
|
||||
|
@ -306,10 +311,10 @@ type VolumeSpec struct {
|
|||
// - disabled: Disable offline rebuilding for this volume, regardless of the global setting
|
||||
// +optional
|
||||
OfflineRebuilding VolumeOfflineRebuilding `json:"offlineRebuilding"`
|
||||
// ReplicaRebuildingBandwidthLimit limits the write bandwidth (in megabytes per second) on the destination replica during rebuilding. Set to 0 to disable bandwidth limiting.
|
||||
// ReplicaRebuildingBandwidthLimit controls the maximum write bandwidth (in megabytes per second) allowed on the destination replica during the rebuilding process. Set this value to 0 to disable bandwidth limiting.
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
// +optional
|
||||
ReplicaRebuildingBandwidthLimit int64 `json:"replicaRebuildingBandwidthLimit,omitempty"`
|
||||
ReplicaRebuildingBandwidthLimit int64 `json:"replicaRebuildingBandwidthLimit"`
|
||||
}
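A small, self-contained sketch (stand-in types, not the real VolumeSpec) of why the tag change from `replicaRebuildingBandwidthLimit,omitempty` to `replicaRebuildingBandwidthLimit` matters: with `omitempty` a zero value ("no limit") is dropped from the serialized spec, whereas the new tag always writes it out.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// withOmitEmpty and withoutOmitEmpty only illustrate the JSON tag behavior.
type withOmitEmpty struct {
	ReplicaRebuildingBandwidthLimit int64 `json:"replicaRebuildingBandwidthLimit,omitempty"`
}

type withoutOmitEmpty struct {
	ReplicaRebuildingBandwidthLimit int64 `json:"replicaRebuildingBandwidthLimit"`
}

func main() {
	a, _ := json.Marshal(withOmitEmpty{})
	b, _ := json.Marshal(withoutOmitEmpty{})
	fmt.Println(string(a)) // {}
	fmt.Println(string(b)) // {"replicaRebuildingBandwidthLimit":0}
}
```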
|
||||
|
||||
// VolumeStatus defines the observed state of the Longhorn volume
|
||||
|
|
|
@ -30,6 +30,7 @@ type BackupSpecApplyConfiguration struct {
|
|||
SnapshotName *string `json:"snapshotName,omitempty"`
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
BackupMode *longhornv1beta2.BackupMode `json:"backupMode,omitempty"`
|
||||
BackupBlockSize *int64 `json:"backupBlockSize,omitempty"`
|
||||
}
|
||||
|
||||
// BackupSpecApplyConfiguration constructs a declarative configuration of the BackupSpec type for use with
|
||||
|
@ -75,3 +76,11 @@ func (b *BackupSpecApplyConfiguration) WithBackupMode(value longhornv1beta2.Back
|
|||
b.BackupMode = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithBackupBlockSize sets the BackupBlockSize field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the BackupBlockSize field is set to the value of the last call.
|
||||
func (b *BackupSpecApplyConfiguration) WithBackupBlockSize(value int64) *BackupSpecApplyConfiguration {
|
||||
b.BackupBlockSize = &value
|
||||
return b
|
||||
}
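The generated apply configurations follow a plain builder pattern. The sketch below uses stand-in types rather than the generated Longhorn package, purely to show how chained `With...` calls accumulate into a sparse patch in which only explicitly set fields are serialized.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// backupSpecApply stands in for a generated apply configuration: every field
// is a pointer so that only explicitly set fields appear in the patch.
type backupSpecApply struct {
	BackupMode      *string `json:"backupMode,omitempty"`
	BackupBlockSize *int64  `json:"backupBlockSize,omitempty"`
}

func (b *backupSpecApply) WithBackupMode(v string) *backupSpecApply {
	b.BackupMode = &v
	return b
}

func (b *backupSpecApply) WithBackupBlockSize(v int64) *backupSpecApply {
	b.BackupBlockSize = &v
	return b
}

func main() {
	patch := (&backupSpecApply{}).WithBackupMode("incremental").WithBackupBlockSize(16777216)
	out, _ := json.Marshal(patch)
	fmt.Println(string(out)) // {"backupMode":"incremental","backupBlockSize":16777216}
}
```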
|
||||
|
|
|
@ -32,6 +32,7 @@ type InstanceStatusApplyConfiguration struct {
|
|||
IP *string `json:"ip,omitempty"`
|
||||
StorageIP *string `json:"storageIP,omitempty"`
|
||||
Port *int `json:"port,omitempty"`
|
||||
Starting *bool `json:"starting,omitempty"`
|
||||
Started *bool `json:"started,omitempty"`
|
||||
LogFetched *bool `json:"logFetched,omitempty"`
|
||||
SalvageExecuted *bool `json:"salvageExecuted,omitempty"`
|
||||
|
@ -102,6 +103,14 @@ func (b *InstanceStatusApplyConfiguration) WithPort(value int) *InstanceStatusAp
|
|||
return b
|
||||
}
|
||||
|
||||
// WithStarting sets the Starting field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Starting field is set to the value of the last call.
|
||||
func (b *InstanceStatusApplyConfiguration) WithStarting(value bool) *InstanceStatusApplyConfiguration {
|
||||
b.Starting = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithStarted sets the Started field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Started field is set to the value of the last call.
|
||||
|
|
|
@ -53,6 +53,7 @@ type VolumeSpecApplyConfiguration struct {
|
|||
ReplicaAutoBalance *longhornv1beta2.ReplicaAutoBalance `json:"replicaAutoBalance,omitempty"`
|
||||
SnapshotDataIntegrity *longhornv1beta2.SnapshotDataIntegrity `json:"snapshotDataIntegrity,omitempty"`
|
||||
BackupCompressionMethod *longhornv1beta2.BackupCompressionMethod `json:"backupCompressionMethod,omitempty"`
|
||||
BackupBlockSize *int64 `json:"backupBlockSize,omitempty"`
|
||||
DataEngine *longhornv1beta2.DataEngineType `json:"dataEngine,omitempty"`
|
||||
SnapshotMaxCount *int `json:"snapshotMaxCount,omitempty"`
|
||||
SnapshotMaxSize *int64 `json:"snapshotMaxSize,omitempty"`
|
||||
|
@ -296,6 +297,14 @@ func (b *VolumeSpecApplyConfiguration) WithBackupCompressionMethod(value longhor
|
|||
return b
|
||||
}
|
||||
|
||||
// WithBackupBlockSize sets the BackupBlockSize field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the BackupBlockSize field is set to the value of the last call.
|
||||
func (b *VolumeSpecApplyConfiguration) WithBackupBlockSize(value int64) *VolumeSpecApplyConfiguration {
|
||||
b.BackupBlockSize = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithDataEngine sets the DataEngine field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the DataEngine field is set to the value of the last call.
|
||||
|
|
|
@ -191,6 +191,7 @@ func (m *VolumeManager) Create(name string, spec *longhorn.VolumeSpec, recurring
|
|||
SnapshotMaxCount: spec.SnapshotMaxCount,
|
||||
SnapshotMaxSize: spec.SnapshotMaxSize,
|
||||
BackupCompressionMethod: spec.BackupCompressionMethod,
|
||||
BackupBlockSize: spec.BackupBlockSize,
|
||||
UnmapMarkSnapChainRemoved: spec.UnmapMarkSnapChainRemoved,
|
||||
ReplicaSoftAntiAffinity: spec.ReplicaSoftAntiAffinity,
|
||||
ReplicaZoneSoftAntiAffinity: spec.ReplicaZoneSoftAntiAffinity,
|
||||
|
@ -1195,6 +1196,32 @@ func (m *VolumeManager) UpdateSnapshotMaxSize(name string, snapshotMaxSize int64
|
|||
return v, nil
|
||||
}
|
||||
|
||||
func (m *VolumeManager) UpdateReplicaRebuildingBandwidthLimit(name string, replicaRebuildingBandwidthLimit int64) (v *longhorn.Volume, err error) {
|
||||
defer func() {
|
||||
err = errors.Wrapf(err, "unable to update field ReplicaRebuildingBandwidthLimit for volume %s", name)
|
||||
}()
|
||||
|
||||
v, err = m.ds.GetVolume(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if v.Spec.ReplicaRebuildingBandwidthLimit == replicaRebuildingBandwidthLimit {
|
||||
logrus.Debugf("Volume %s already set field ReplicaRebuildingBandwidthLimit to %d", v.Name, replicaRebuildingBandwidthLimit)
|
||||
return v, nil
|
||||
}
|
||||
|
||||
oldReplicaRebuildingBandwidthLimit := v.Spec.ReplicaRebuildingBandwidthLimit
|
||||
v.Spec.ReplicaRebuildingBandwidthLimit = replicaRebuildingBandwidthLimit
|
||||
v, err = m.ds.UpdateVolume(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Infof("Updated volume %s field ReplicaRebuildingBandwidthLimit from %d to %d", v.Name, oldReplicaRebuildingBandwidthLimit, replicaRebuildingBandwidthLimit)
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (m *VolumeManager) restoreBackingImage(backupTargetName, biName, secret, secretNamespace, dataEngine string) error {
|
||||
if secret != "" || secretNamespace != "" {
|
||||
_, err := m.ds.GetSecretRO(secretNamespace, secret)
|
||||
|
|
|
@ -7,3 +7,7 @@ import (
|
|||
func (m *VolumeManager) GetVolumeAttachment(volumeName string) (*longhorn.VolumeAttachment, error) {
|
||||
return m.ds.GetLHVolumeAttachmentByVolumeName(volumeName)
|
||||
}
|
||||
|
||||
func (m *VolumeManager) ListVolumeAttachment() ([]*longhorn.VolumeAttachment, error) {
|
||||
return m.ds.ListLHVolumeAttachments()
|
||||
}
|
||||
|
|
|
@ -457,7 +457,7 @@ func (rcs *ReplicaScheduler) filterNodeDisksForReplica(node *longhorn.Node, disk
|
|||
if requireSchedulingCheck {
|
||||
info, err := rcs.GetDiskSchedulingInfo(diskSpec, diskStatus)
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to get settings when scheduling replica: %v", err)
|
||||
logrus.WithError(err).Error("Failed to get settings when scheduling replica")
|
||||
multiError.Append(util.NewMultiError(longhorn.ErrorReplicaScheduleSchedulingSettingsRetrieveFailed))
|
||||
return preferredDisks, multiError
|
||||
}
|
||||
|
@ -614,14 +614,18 @@ func filterActiveReplicas(replicas map[string]*longhorn.Replica) map[string]*lon
|
|||
}
|
||||
|
||||
func (rcs *ReplicaScheduler) CheckAndReuseFailedReplica(replicas map[string]*longhorn.Replica, volume *longhorn.Volume, hardNodeAffinity string) (*longhorn.Replica, error) {
|
||||
// No need to check for the v1 data engine since it can reuse failed replicas
|
||||
// for delta rebuilding even when fast replica rebuilding is not enabled.
|
||||
if types.IsDataEngineV2(volume.Spec.DataEngine) {
|
||||
V2DataEngineFastReplicaRebuilding, err := rcs.ds.GetSettingAsBool(types.SettingNameV2DataEngineFastReplicaRebuilding)
|
||||
fastReplicaRebuilding, err := rcs.ds.GetSettingAsBoolByDataEngine(types.SettingNameFastReplicaRebuildEnabled, volume.Spec.DataEngine)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warnf("Failed to get the setting %v, will consider it as false", types.SettingDefinitionV2DataEngineFastReplicaRebuilding)
|
||||
V2DataEngineFastReplicaRebuilding = false
|
||||
logrus.WithError(err).Warnf("Failed to get %v setting for data engine %v, will consider it as false",
|
||||
types.SettingNameFastReplicaRebuildEnabled, volume.Spec.DataEngine)
|
||||
fastReplicaRebuilding = false
|
||||
}
|
||||
if !V2DataEngineFastReplicaRebuilding {
|
||||
logrus.Infof("Skip checking and reusing replicas for volume %v since setting %v is not enabled", volume.Name, types.SettingNameV2DataEngineFastReplicaRebuilding)
|
||||
if !fastReplicaRebuilding {
|
||||
logrus.Infof("Skip checking and reusing replicas for volume %v since setting %v for data engine %v is not enabled",
|
||||
volume.Name, types.SettingNameFastReplicaRebuildEnabled, volume.Spec.DataEngine)
|
||||
return nil, nil
|
||||
}
|
||||
}
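The lookup pattern introduced here — read a data-engine-scoped boolean setting and treat a failed read as `false` — can be sketched in isolation as follows; the getter and setting name below are stand-ins, not the datastore's real `GetSettingAsBoolByDataEngine`.

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// settingGetter is a stand-in for the datastore lookup used above.
type settingGetter func(name, dataEngine string) (bool, error)

// fastRebuildEnabled mirrors the fallback behavior in the scheduler: an
// error while reading the setting is logged and treated as "disabled".
func fastRebuildEnabled(get settingGetter, dataEngine string) bool {
	enabled, err := get("fast-replica-rebuild-enabled", dataEngine)
	if err != nil {
		logrus.WithError(err).Warnf("Failed to get fast-replica-rebuild-enabled for data engine %v, will consider it as false", dataEngine)
		return false
	}
	return enabled
}

func main() {
	get := func(name, dataEngine string) (bool, error) { return dataEngine == "v2", nil }
	fmt.Println(fastRebuildEnabled(get, "v2")) // true
	fmt.Println(fastRebuildEnabled(get, "v1")) // false
}
```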
|
||||
|
@ -714,14 +718,18 @@ func (rcs *ReplicaScheduler) RequireNewReplica(replicas map[string]*longhorn.Rep
|
|||
return 0
|
||||
}
|
||||
|
||||
// No need to check for the v1 data engine since it can reuse failed replicas
|
||||
// for delta rebuilding even when fast replica rebuilding is not enabled.
|
||||
if types.IsDataEngineV2(volume.Spec.DataEngine) {
|
||||
V2DataEngineFastReplicaRebuilding, err := rcs.ds.GetSettingAsBool(types.SettingNameV2DataEngineFastReplicaRebuilding)
|
||||
fastReplicaRebuilding, err := rcs.ds.GetSettingAsBoolByDataEngine(types.SettingNameFastReplicaRebuildEnabled, volume.Spec.DataEngine)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warnf("Failed to get the setting %v, will consider it as false", types.SettingDefinitionV2DataEngineFastReplicaRebuilding)
|
||||
V2DataEngineFastReplicaRebuilding = false
|
||||
logrus.WithError(err).Warnf("Failed to get %v setting for data engine %v, will consider it as false",
|
||||
types.SettingNameFastReplicaRebuildEnabled, volume.Spec.DataEngine)
|
||||
fastReplicaRebuilding = false
|
||||
}
|
||||
if !V2DataEngineFastReplicaRebuilding {
|
||||
logrus.Infof("Skip checking potentially reusable replicas for volume %v since setting %v is not enabled", volume.Name, types.SettingNameV2DataEngineFastReplicaRebuilding)
|
||||
if !fastReplicaRebuilding {
|
||||
logrus.Infof("Skip checking potentially reusable replicas for volume %v since setting %v for data engine %v is not enabled",
|
||||
volume.Name, types.SettingNameFastReplicaRebuildEnabled, volume.Spec.DataEngine)
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
|
types/setting.go (1935 lines changed)
File diff suppressed because it is too large
|
@ -885,7 +885,7 @@ func ValidateMinNumberOfBackingIamgeCopies(number int) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func ValidateV2DataEngineLogFlags(flags string) error {
|
||||
func ValidateDataEngineLogFlags(flags string) error {
|
||||
if flags == "" {
|
||||
return nil
|
||||
}
|
||||
|
@ -1031,6 +1031,29 @@ func ValidateOfflineRebuild(value longhorn.VolumeOfflineRebuilding) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// ValidateBackupBlockSize skips the volume size check if volSize is set to a negative value.
|
||||
func ValidateBackupBlockSize(volSize int64, backupBlockSize int64) error {
|
||||
if backupBlockSize != BackupBlockSize2Mi && backupBlockSize != BackupBlockSize16Mi {
|
||||
return fmt.Errorf("unsupported BackupBlockSize: %v", backupBlockSize)
|
||||
}
|
||||
if volSize >= 0 && volSize%backupBlockSize != 0 {
|
||||
return fmt.Errorf("volume size %v must be an integer multiple of the backup block size %v", volSize, backupBlockSize)
|
||||
}
|
||||
return nil
|
||||
}
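A possible usage sketch for the validator above, assuming the `BackupBlockSize2Mi`/`BackupBlockSize16Mi` constants referenced in the function are exported from the `types` package:

```go
package main

import (
	"fmt"

	"github.com/longhorn/longhorn-manager/types"
)

func main() {
	// 10 GiB is an integer multiple of both supported block sizes, so both pass.
	volSize := int64(10 * 1024 * 1024 * 1024)
	fmt.Println(types.ValidateBackupBlockSize(volSize, types.BackupBlockSize2Mi))  // <nil>
	fmt.Println(types.ValidateBackupBlockSize(volSize, types.BackupBlockSize16Mi)) // <nil>

	// 3 MiB is not a multiple of 2 MiB, so this is rejected.
	fmt.Println(types.ValidateBackupBlockSize(3*1024*1024, types.BackupBlockSize2Mi))
}
```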
|
||||
|
||||
func ValidateReplicaRebuildingBandwidthLimit(dataEngine longhorn.DataEngineType, replicaRebuildingBandwidthLimit int64) error {
|
||||
if replicaRebuildingBandwidthLimit == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if IsDataEngineV2(dataEngine) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("replicaRebuildingBandwidthLimit is not supported for data engine %v", dataEengine)
|
||||
}
|
||||
|
||||
func GetDaemonSetNameFromEngineImageName(engineImageName string) string {
|
||||
return "engine-image-" + engineImageName
|
||||
}
|
||||
|
@ -1242,39 +1265,6 @@ func CreateDefaultDisk(dataPath string, storageReservedPercentage int64) (map[st
|
|||
}, nil
|
||||
}
|
||||
|
||||
func ValidateCPUReservationValues(settingName SettingName, instanceManagerCPUStr string) error {
|
||||
|
||||
definition, _ := GetSettingDefinition(settingName)
|
||||
valueIntRange := definition.ValueIntRange
|
||||
valueFloatRange := definition.ValueFloatRange
|
||||
|
||||
switch settingName {
|
||||
case SettingNameGuaranteedInstanceManagerCPU:
|
||||
instanceManagerCPU, err := strconv.ParseFloat(instanceManagerCPUStr, 64)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "invalid requested instance manager CPU percentage value (%v)", instanceManagerCPUStr)
|
||||
}
|
||||
|
||||
isUnderLimit := instanceManagerCPU < valueFloatRange[ValueFloatRangeMinimum]
|
||||
isOverLimit := instanceManagerCPU > valueFloatRange[ValueFloatRangeMaximum]
|
||||
if isUnderLimit || isOverLimit {
|
||||
return fmt.Errorf("invalid requested instance manager CPU percentage. Valid range is %v to %v", valueFloatRange[ValueFloatRangeMinimum], valueFloatRange[ValueFloatRangeMaximum])
|
||||
}
|
||||
|
||||
case SettingNameV2DataEngineGuaranteedInstanceManagerCPU:
|
||||
instanceManagerCPU, err := strconv.Atoi(instanceManagerCPUStr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "invalid requested instance manager CPU millicpu value (%v)", instanceManagerCPUStr)
|
||||
}
|
||||
|
||||
isUnderLimit := instanceManagerCPU < valueIntRange[ValueIntRangeMinimum]
|
||||
if isUnderLimit {
|
||||
return fmt.Errorf("invalid requested instance manager CPUs. Valid instance manager CPU range is larger than %v millicpu", valueIntRange[ValueIntRangeMinimum])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CniNetwork struct {
|
||||
Name string `json:"name"`
|
||||
IPs []string `json:"ips,omitempty"`
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"github.com/longhorn/longhorn-manager/upgrade/v170to171"
|
||||
"github.com/longhorn/longhorn-manager/upgrade/v17xto180"
|
||||
"github.com/longhorn/longhorn-manager/upgrade/v18xto190"
|
||||
"github.com/longhorn/longhorn-manager/upgrade/v19xto1100"
|
||||
|
||||
longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"
|
||||
lhclientset "github.com/longhorn/longhorn-manager/k8s/pkg/client/clientset/versioned"
|
||||
|
@ -247,8 +248,16 @@ func doResourceUpgrade(namespace string, lhClient *lhclientset.Clientset, kubeCl
|
|||
if err := v18xto190.UpgradeResources(namespace, lhClient, kubeClient, resourceMaps); err != nil {
|
||||
return err
|
||||
}
|
||||
// All v1beta1 resources are upgraded to v1beta2, so force-update every resource to make sure it is stored as v1beta2.
|
||||
forceResourceUpdate = true
|
||||
}
|
||||
// When lhVersionBeforeUpgrade < v1.10.0, it is v1.9.x. The `CheckUpgradePath` method would have failed us out earlier if it was not v1.9.x.
|
||||
if semver.Compare(lhVersionBeforeUpgrade, "v1.10.0") < 0 {
|
||||
logrus.Info("Walking through the resource upgrade path v1.9.x to v1.10.0")
|
||||
if err := v19xto1100.UpgradeResources(namespace, lhClient, kubeClient, resourceMaps); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := upgradeutil.UpdateResources(namespace, lhClient, resourceMaps, forceResourceUpdate); err != nil {
|
||||
return err
|
||||
}
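The version gate relies on semver string comparison. A small illustration, assuming `semver` here refers to `golang.org/x/mod/semver` (which matches the string-based `Compare` signature used above):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// Anything below v1.10.0 (i.e. a v1.9.x release, given the earlier
	// CheckUpgradePath gate) takes the v1.9.x -> v1.10.0 resource upgrade path.
	for _, v := range []string{"v1.9.2", "v1.10.0", "v1.10.1"} {
		fmt.Println(v, semver.Compare(v, "v1.10.0") < 0)
	}
	// Output:
	// v1.9.2 true
	// v1.10.0 false
	// v1.10.1 false
}
```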
|
||||
|
|
|
@ -0,0 +1,130 @@
|
|||
package v19xto1100
|
||||
|
||||
import (
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/longhorn/longhorn-manager/datastore"
|
||||
"github.com/longhorn/longhorn-manager/types"
|
||||
|
||||
lhclientset "github.com/longhorn/longhorn-manager/k8s/pkg/client/clientset/versioned"
|
||||
upgradeutil "github.com/longhorn/longhorn-manager/upgrade/util"
|
||||
)
|
||||
|
||||
const (
|
||||
upgradeLogPrefix = "upgrade from v1.9.x to v1.10.0: "
|
||||
)
|
||||
|
||||
func UpgradeResources(namespace string, lhClient *lhclientset.Clientset, kubeClient *clientset.Clientset, resourceMaps map[string]interface{}) error {
|
||||
if resourceMaps == nil {
|
||||
return errors.New("resourceMaps cannot be nil")
|
||||
}
|
||||
|
||||
if err := updateCRs(namespace, lhClient, kubeClient, resourceMaps); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateCRs(namespace string, lhClient *lhclientset.Clientset, kubeClient *clientset.Clientset, resourceMaps map[string]interface{}) (err error) {
|
||||
if resourceMaps == nil {
|
||||
return errors.New("resourceMaps cannot be nil")
|
||||
}
|
||||
|
||||
if err := upgradeSettings(namespace, lhClient, resourceMaps); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := upgradeVolumes(namespace, lhClient, resourceMaps); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := upgradeBackups(namespace, lhClient, resourceMaps); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func upgradeSettings(namespace string, lhClient *lhclientset.Clientset, resourceMaps map[string]interface{}) (err error) {
|
||||
defer func() {
|
||||
err = errors.Wrapf(err, upgradeLogPrefix+"upgrade setting failed")
|
||||
}()
|
||||
|
||||
settingMap, err := upgradeutil.ListAndUpdateSettingsInProvidedCache(namespace, lhClient, resourceMaps)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "failed to list all existing Longhorn settings during the setting upgrade")
|
||||
}
|
||||
|
||||
// Update Setting CRs
|
||||
var errs []error
|
||||
for _, s := range settingMap {
|
||||
definition, ok := types.GetSettingDefinition(types.SettingName(s.Name))
|
||||
if !ok {
|
||||
logrus.Warnf("Unknown setting %v found during upgrade, skipping", s.Name)
|
||||
continue
|
||||
}
|
||||
if !definition.DataEngineSpecific {
|
||||
continue
|
||||
}
|
||||
|
||||
value, err := datastore.GetSettingValidValue(definition, s.Value)
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrapf(err, "failed to get valid value for setting %s", s.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
s.Value = value
|
||||
}
|
||||
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
func upgradeVolumes(namespace string, lhClient *lhclientset.Clientset, resourceMaps map[string]interface{}) (err error) {
|
||||
defer func() {
|
||||
err = errors.Wrapf(err, upgradeLogPrefix+"upgrade volume failed")
|
||||
}()
|
||||
|
||||
volumesMap, err := upgradeutil.ListAndUpdateVolumesInProvidedCache(namespace, lhClient, resourceMaps)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "failed to list all existing Longhorn volumes during the volume upgrade")
|
||||
}
|
||||
|
||||
for _, v := range volumesMap {
|
||||
if v.Spec.BackupBlockSize == 0 {
|
||||
v.Spec.BackupBlockSize = types.BackupBlockSize2Mi
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func upgradeBackups(namespace string, lhClient *lhclientset.Clientset, resourceMaps map[string]interface{}) (err error) {
|
||||
defer func() {
|
||||
err = errors.Wrapf(err, upgradeLogPrefix+"upgrade backup failed")
|
||||
}()
|
||||
|
||||
backupsMap, err := upgradeutil.ListAndUpdateBackupsInProvidedCache(namespace, lhClient, resourceMaps)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "failed to list all existing Longhorn backups during the backup upgrade")
|
||||
}
|
||||
|
||||
for _, b := range backupsMap {
|
||||
if b.Spec.BackupBlockSize == 0 {
|
||||
b.Spec.BackupBlockSize = types.BackupBlockSize2Mi
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
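The defaulting above leans on the Go zero value of the new field: resources created before v1.10.0 deserialize with `BackupBlockSize == 0` and are pinned to the legacy 2 MiB size. A stand-in illustration (not the real CRD types):

```go
package main

import "fmt"

const backupBlockSize2Mi = 2 * 1024 * 1024 // legacy default, mirrors types.BackupBlockSize2Mi

// volumeSpecSketch stands in for the v1beta2 VolumeSpec: pre-v1.10.0 objects
// have no backupBlockSize field, so it decodes to the zero value.
type volumeSpecSketch struct {
	BackupBlockSize int64
}

func defaultBackupBlockSize(v *volumeSpecSketch) {
	if v.BackupBlockSize == 0 {
		v.BackupBlockSize = backupBlockSize2Mi
	}
}

func main() {
	old := volumeSpecSketch{}                   // migrated from v1.9.x
	newer := volumeSpecSketch{16 * 1024 * 1024} // already set explicitly
	defaultBackupBlockSize(&old)
	defaultBackupBlockSize(&newer)
	fmt.Println(old.BackupBlockSize, newer.BackupBlockSize) // 2097152 16777216
}
```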
|
|
@ -0,0 +1,12 @@
|
|||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,62 @@
|
|||
# This makefile can be used to re-generate the protobuf files.
|
||||
#
|
||||
# Prerequisites:
|
||||
# "protoc" from https://github.com/protocolbuffers/protobuf
|
||||
# go get github.com/cockroachdb/protoc-gen-gogoroach
|
||||
# go get github.com/gogo/protobuf/types
|
||||
# go get github.com/gogo/protobuf/protoc-gen-gogo
|
||||
#
|
||||
# Note: as of 2021-04-13, we like to use a custom protoc-gen-gogo
|
||||
# with additional options, to stabilize the marshalled
|
||||
# encoding of objects (so that they are deterministic
|
||||
# across marshal/unmarshal cycles) and reduce the memory footprint
|
||||
# of objects:
|
||||
#
|
||||
# vanity.TurnOnStable_MarshalerAll,
|
||||
# vanity.TurnOffGoUnrecognizedAll,
|
||||
# vanity.TurnOffGoUnkeyedAll,
|
||||
# vanity.TurnOffGoSizecacheAll,
|
||||
#
|
||||
# Until this is resolved, the "go get" commands above are not
|
||||
# adequate; instead:
|
||||
#
|
||||
# 1. set the PATH env var to point to CockroachDB's `bin`
|
||||
# sub-directory (after a successful CockroachDB build), where a
|
||||
# suitable version of protoc-gen-gogoroach is provided.
|
||||
#
|
||||
# 2. run `make -f Makefile.update-protos` with this PATH active.
|
||||
|
||||
export SHELL := env PWD=$(CURDIR) bash
|
||||
|
||||
PROTOS := $(wildcard \
|
||||
errbase/internal/*.proto \
|
||||
errorspb/*.proto \
|
||||
extgrpc/*.proto \
|
||||
exthttp/*.proto \
|
||||
grpc/*.proto \
|
||||
markers/internal/*.proto \
|
||||
)
|
||||
GO_SOURCES = $(PROTOS:.proto=.pb.go)
|
||||
|
||||
SED = sed
|
||||
SED_INPLACE := $(shell $(SED) --version 2>&1 | grep -q GNU && echo -i || echo "-i ''")
|
||||
|
||||
all: $(PROTOS)
|
||||
set -e; for dir in $(sort $(dir $(PROTOS))); do \
|
||||
protoc \
|
||||
-I. \
|
||||
-I$$GOPATH/src/ \
|
||||
-I$$GOPATH/src/github.com \
|
||||
-I$$GOPATH/src/github.com/cockroachdb/errors \
|
||||
-I$$GOPATH/src/github.com/gogo/protobuf \
|
||||
-I$$GOPATH/src/github.com/gogo/protobuf/protobuf \
|
||||
--gogoroach_out=Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,plugins=grpc,import_prefix=:. \
|
||||
$$dir/*.proto; \
|
||||
done
|
||||
$(SED) $(SED_INPLACE) -E \
|
||||
-e '/import _ /d' \
|
||||
-e 's!import (fmt|math) "github.com/(fmt|math)"! !g' \
|
||||
-e 's!github.com/((bytes|encoding/binary|errors|fmt|io|math|github\.com|(google\.)?golang\.org)([^a-z]|$$))!\1!g' \
|
||||
-e 's!golang.org/x/net/context!context!g' \
|
||||
$(GO_SOURCES)
|
||||
gofmt -s -w $(GO_SOURCES)
|
|
@ -0,0 +1,646 @@
|
|||
# cockroachdb/errors: Go errors with network portability
|
||||
|
||||
This library aims to be used as a drop-in replacement to
|
||||
`github.com/pkg/errors` and Go's standard `errors` package. It also
|
||||
provides *network portability* of error objects, in ways suitable for
|
||||
distributed systems with mixed-version software compatibility.
|
||||
|
||||
It also provides native and comprehensive support for [PII](https://en.wikipedia.org/wiki/Personal_data)-free details
|
||||
and an opt-in [Sentry.io](https://sentry.io/) reporting mechanism that
|
||||
automatically formats error details and strips them of PII.
|
||||
|
||||
See also [the design RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20190318_error_handling.md).
|
||||
|
||||

|
||||
[](https://pkg.go.dev/github.com/cockroachdb/errors)
|
||||
|
||||
Table of contents:
|
||||
|
||||
- [Features](#Features)
|
||||
- [How to use](#How-to-use)
|
||||
- [What comes out of an error?](#What-comes-out-of-an-error)
|
||||
- [Available error leaves](#Available-error-leaves)
|
||||
- [Available wrapper constructors](#Available-wrapper-constructors)
|
||||
- [Providing PII-free details](#Providing-PII-free-details)
|
||||
- [Building your own error types](#Building-your-own-error-types)
|
||||
- [Error composition (summary)](#Error-composition-summary)
|
||||
- [API (not constructing error objects)](#API-not-constructing-error-objects)
|
||||
|
||||
## Features
|
||||
|
||||
| Feature | Go's <1.13 `errors` | `github.com/pkg/errors` | Go 1.13 `errors`/`xerrors` | `cockroachdb/errors` |
|
||||
|-------------------------------------------------------------------------------------------------------|---------------------|-------------------------|----------------------------|----------------------|
|
||||
| error constructors (`New`, `Errorf` etc) | ✔ | ✔ | ✔ | ✔ |
|
||||
| error causes (`Cause` / `Unwrap`) | | ✔ | ✔ | ✔ |
|
||||
| cause barriers (`Opaque` / `Handled`) | | | ✔ | ✔ |
|
||||
| `errors.As()`, `errors.Is()` | | | ✔ | ✔ |
|
||||
| automatic error wrap when format ends with `: %w` | | | ✔ | ✔ |
|
||||
| standard wrappers with efficient stack trace capture | | ✔ | | ✔ |
|
||||
| **transparent protobuf encode/decode with forward compatibility** | | | | ✔ |
|
||||
| **`errors.Is()` recognizes errors across the network** | | | | ✔ |
|
||||
| **comprehensive support for PII-free reportable strings** | | | | ✔ |
|
||||
| support for both `Cause()` and `Unwrap()` [go#31778](https://github.com/golang/go/issues/31778) | | | | ✔ |
|
||||
| standard error reports to Sentry.io | | | | ✔ |
|
||||
| wrappers to denote assertion failures | | | | ✔ |
|
||||
| wrappers with issue tracker references | | | | ✔ |
|
||||
| wrappers for user-facing hints and details | | | | ✔ |
|
||||
| wrappers to attach secondary causes | | | | ✔ |
|
||||
| wrappers to attach [`logtags`](https://github.com/cockroachdb/logtags) details from `context.Context` | | | | ✔ |
|
||||
| `errors.FormatError()`, `Formatter`, `Printer` | | | (under construction) | ✔ |
|
||||
| `errors.SafeFormatError()`, `SafeFormatter` | | | | ✔ |
|
||||
| wrapper-aware `IsPermission()`, `IsTimeout()`, `IsExist()`, `IsNotExist()` | | | | ✔ |
|
||||
|
||||
"Forward compatibility" above refers to the ability of this library to
|
||||
recognize and properly handle network communication of error types it
|
||||
does not know about, for example when a more recent version of a
|
||||
software package sends a new error object to another system running an
|
||||
older version of the package.
|
||||
|
||||
## How to use
|
||||
|
||||
- construct errors with `errors.New()`, etc as usual, but also see the other [error leaf constructors](#Available-error-leaves) below.
|
||||
- wrap errors with `errors.Wrap()` as usual, but also see the [other wrappers](#Available-wrapper-constructors) below.
|
||||
- test error identity with `errors.Is()` as usual.
|
||||
**Unique in this library**: this works even if the error has traversed the network!
|
||||
Also, `errors.IsAny()` to recognize two or more reference errors.
|
||||
- replace uses of `os.IsPermission()`, `os.IsTimeout()`, `os.IsExist()` and `os.IsNotExist()` by their analog in sub-package `oserror` so
|
||||
that they can peek through layers of wrapping.
|
||||
- access error causes with `errors.UnwrapOnce()` / `errors.UnwrapAll()` (note: `errors.Cause()` and `errors.Unwrap()` also provided for compatibility with other error packages).
|
||||
- encode/decode errors to protobuf with `errors.EncodeError()` / `errors.DecodeError()`.
|
||||
- extract **PII-free safe details** with `errors.GetSafeDetails()`.
|
||||
- extract human-facing hints and details with `errors.GetAllHints()`/`errors.GetAllDetails()` or `errors.FlattenHints()`/`errors.FlattenDetails()`.
|
||||
- produce detailed Sentry.io reports with `errors.BuildSentryReport()` / `errors.ReportError()`.
|
||||
- implement your own error leaf types and wrapper types:
|
||||
- implement the `error` and `errors.Wrapper` interfaces as usual.
|
||||
- register encode/decode functions: call `errors.Register{Leaf,Wrapper}{Encoder,Decoder}()` in a `init()` function in your package.
|
||||
- implement `Format()` that redirects to `errors.FormatError()`.
|
||||
- see the section [Building your own error types](#Building-your-own-error-types) below.
|
||||
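A compact, runnable example tying the points above together (construction, wrapping, identity tests, and the protobuf encode/decode round-trip); the sentinel and messages are arbitrary:

```go
package main

import (
	"context"
	"fmt"

	"github.com/cockroachdb/errors"
)

var errSentinel = errors.New("resource busy")

func doWork() error {
	// Wrap adds a message prefix, a stack trace and safe details in one call.
	return errors.Wrap(errSentinel, "doing work")
}

func main() {
	err := doWork()

	// Identity is preserved through wrapping...
	fmt.Println(errors.Is(err, errSentinel)) // true

	// ...and even through a protobuf encode/decode round-trip, which is what
	// makes the sentinel recognizable after crossing the network.
	ctx := context.Background()
	enc := errors.EncodeError(ctx, err)
	dec := errors.DecodeError(ctx, enc)
	fmt.Println(errors.Is(dec, errSentinel)) // true
	fmt.Println(dec)                         // doing work: resource busy
}
```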
|
||||
## What comes out of an error?
|
||||
|
||||
| Error detail | `Error()` and format `%s`/`%q`/`%v` | format `%+v` | `GetSafeDetails()` | Sentry report via `ReportError()` |
|
||||
|-----------------------------------------------------------------|-------------------------------------|--------------|-------------------------------|-----------------------------------|
|
||||
| main message, eg `New()` | visible | visible | yes (CHANGED IN v1.6) | full (CHANGED IN v1.6) |
|
||||
| wrap prefix, eg `WithMessage()` | visible (as prefix) | visible | yes (CHANGED IN v1.6) | full (CHANGED IN v1.6) |
|
||||
| stack trace, eg `WithStack()` | not visible | simplified | yes | full |
|
||||
| hint , eg `WithHint()` | not visible | visible | no | type only |
|
||||
| detail, eg `WithDetail()` | not visible | visible | no | type only |
|
||||
| assertion failure annotation, eg `WithAssertionFailure()` | not visible | visible | no | type only |
|
||||
| issue links, eg `WithIssueLink()`, `UnimplementedError()` | not visible | visible | yes | full |
|
||||
| safe details, eg `WithSafeDetails()` | not visible | visible | yes | full |
|
||||
| telemetry keys, eg. `WithTelemetryKey()` | not visible | visible | yes | full |
|
||||
| secondary errors, eg. `WithSecondaryError()`, `CombineErrors()` | not visible | visible | redacted, recursively | redacted, recursively |
|
||||
| barrier origins, eg. `Handled()` | not visible | visible | redacted, recursively | redacted, recursively |
|
||||
| error domain, eg. `WithDomain()` | not visible | visible | yes | full |
|
||||
| context tags, eg. `WithContextTags()` | not visible | visible | keys visible, values redacted | keys visible, values redacted |
|
||||
|
||||
## Available error leaves
|
||||
|
||||
An error *leaf* is an object that implements the `error` interface,
|
||||
but does not refer to another error via a `Unwrap()` or `Cause()`
|
||||
method.
|
||||
|
||||
- `New(string) error`, `Newf(string, ...interface{}) error`, `Errorf(string, ...interface{}) error`: leaf errors with message
|
||||
- **when to use: common error cases.**
|
||||
- what it does: also captures the stack trace at point of call and redacts the provided message for safe reporting.
|
||||
- how to access the detail: `Error()`, regular Go formatting. **Details in Sentry report.**
|
||||
- see also: Section [Error composition](#Error-composition-summary) below. `errors.NewWithDepth()` variants to customize at which call depth the stack trace is captured.
|
||||
|
||||
- `AssertionFailedf(string, ...interface{}) error`, `NewAssertionFailureWithWrappedErrf(error, string, ...interface{}) error`: signals an assertion failure / programming error.
|
||||
- **when to use: when an invariant is violated; when an unreachable code path is reached.**
|
||||
- what it does: also captures the stack trace at point of call, redacts the provided strings for safe reporting, prepares a hint to inform a human user.
|
||||
- how to access the detail: `IsAssertionFailure()`/`HasAssertionFailure()`, format with `%+v`, Safe details included in Sentry reports.
|
||||
- see also: Section [Error composition](#Error-composition-summary) below. `errors.AssertionFailedWithDepthf()` variant to customize at which call depth the stack trace is captured.
|
||||
|
||||
- `Handled(error) error`, `Opaque(error) error`, `HandledWithMessage(error, string) error`: captures an error cause but make it invisible to `Unwrap()` / `Is()`.
|
||||
- **when to use: when a new error occurs while handling an error, and the original error must be "hidden".**
|
||||
- what it does: captures the cause in a hidden field. The error message is preserved unless the `...WithMessage()` variant is used.
|
||||
- how to access the detail: format with `%+v`, redacted details reported in Sentry reports.
|
||||
|
||||
- `UnimplementedError(IssueLink, string) error`: captures a message string and a URL reference to an external resource to denote a feature that was not yet implemented.
|
||||
- **when to use: to inform (human) users that some feature is not implemented yet and refer them to some external resource.**
|
||||
- what it does: captures the message, URL and detail in a wrapper. The URL and detail are considered safe for reporting.
|
||||
- how to access the detail: `errors.GetAllHints()`, `errors.FlattenHints()`, format with `%+v`, URL and detail included in Sentry report (not the message).
|
||||
- see also: `errors.WithIssueLink()` below for errors that are not specifically about unimplemented features.
|
||||
|
||||
## Available wrapper constructors
|
||||
|
||||
An error *wrapper* is an object that implements the `error` interface,
|
||||
and also refers to another error via an `Unwrap()` (preferred) and/or
|
||||
`Cause()` method.
|
||||
|
||||
All wrapper constructors can be applied safely to a `nil` `error`:
|
||||
they behave as no-ops in this case:
|
||||
|
||||
```go
|
||||
// The following:
|
||||
// if err := foo(); err != nil {
|
||||
// return errors.Wrap(err, "foo")
|
||||
// }
|
||||
// return nil
|
||||
//
|
||||
// is not needed. Instead, you can use this:
|
||||
return errors.Wrap(foo(), "foo")
|
||||
```
|
||||
|
||||
- `Wrap(error, string) error`, `Wrapf(error, string, ...interface{}) error`:
|
||||
- **when to use: on error return paths.**
|
||||
- what it does: combines `WithMessage()`, `WithStack()`, `WithSafeDetails()`.
|
||||
- how to access the details: `Error()`, regular Go formatting. **Details in Sentry report.**
|
||||
- see also: Section [Error composition](#Error-composition-summary) below. `WrapWithDepth()` variants to customize at which depth the stack trace is captured.
|
||||
|
||||
- `WithSecondaryError(error, error) error`: annotate an error with a secondary error.
|
||||
- **when to use: when an additional error occurs in the code that is handling a primary error.** Consider using `errors.CombineErrors()` instead (see below).
|
||||
- what it does: it captures the secondary error but hides it from `errors.Is()`.
|
||||
- how to access the detail: format with `%+v`, redacted recursively in Sentry reports.
|
||||
- see also: `errors.CombineErrors()`
|
||||
|
||||
- `CombineErrors(error, error) error`: combines two errors into one.
|
||||
- **when to use: when two operations occur concurrently and either can return an error, and only one final error must be returned.**
|
||||
- what it does: returns either of its arguments if the other is `nil`, otherwise calls `WithSecondaryError()`.
|
||||
- how to access the detail: see `WithSecondaryError()` above.
|
||||
|
||||
- `Mark(error, error) error`: gives the identity of one error to another error.
|
||||
- **when to use: when a caller expects to recognize a sentinel error with `errors.Is()` but the callee provides a diversity of error messages.**
|
||||
- what it does: it overrides the "error mark" used internally by `errors.Is()`.
|
||||
- how to access the detail: format with `%+v`, Sentry reports.
|
||||
|
||||
- `WithStack(error) error`: annotate with stack trace
|
||||
- **when to use:** usually not needed, use `errors.Wrap()`/`errors.Wrapf()` instead.
|
||||
|
||||
**Special cases:**
|
||||
|
||||
- when returning a sentinel, for example:
|
||||
|
||||
```go
|
||||
var myErr = errors.New("foo")
|
||||
|
||||
func myFunc() error {
|
||||
if ... {
|
||||
return errors.WithStack(myErr)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- on error return paths, when not trivial but also not warranting a wrap. For example:
|
||||
|
||||
```go
|
||||
err := foo()
|
||||
if err != nil {
|
||||
doSomething()
|
||||
if !somecond {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- what it does: captures (efficiently) a stack trace.
|
||||
- how to access the details: format with `%+v`, `errors.GetSafeDetails()`, Sentry reports. The stack trace is considered safe for reporting.
|
||||
- see also: `WithStackDepth()` to customize the call depth at which the stack trace is captured.
|
||||
|
||||
- `WithSafeDetails(error, string, ...interface{}) error`: safe details for reporting.
|
||||
- when to use: probably never. Use `errors.Wrap()`/`errors.Wrapf()` instead.
|
||||
- what it does: saves some strings for safe reporting.
|
||||
- how to access the detail: format with `%+v`, `errors.GetSafeDetails()`, Sentry report.
|
||||
|
||||
- `WithMessage(error, string) error`, `WithMessagef(error, string, ...interface{}) error`: message prefix.
|
||||
- when to use: probably never. Use `errors.Wrap()`/`errors.Wrapf()` instead.
|
||||
- what it does: adds a message prefix.
|
||||
- how to access the detail: `Error()`, regular Go formatting, Sentry Report.
|
||||
|
||||
- `WithDetail(error, string) error`, `WithDetailf(error, string, ...interface{}) error`, user-facing detail with contextual information.
|
||||
- **when to use: when you need to attach a message string that is shown when the error is presented to a developer.**
|
||||
- what it does: captures detail strings.
|
||||
- how to access the detail: `errors.GetAllDetails()`, `errors.FlattenDetails()` (all details are preserved), format with `%+v`. Not included in Sentry reports.
|
||||
|
||||
- `WithHint(error, string) error`, `WithHintf(error, string, ...interface{}) error`: user-facing detail with suggestion for action to take.
|
||||
- **when to use: when you need to attach a message string that is shown when the error is presented to an end user.**
|
||||
- what it does: captures hint strings.
|
||||
- how to access the detail: `errors.GetAllHints()`, `errors.FlattenHints()` (hints are de-duplicated), format with `%+v`. Not included in Sentry reports.
|
||||
|
||||
- `WithIssueLink(error, IssueLink) error`: annotate an error with an URL and arbitrary string.
|
||||
- **when to use: to refer (human) users to some external resources.**
|
||||
- what it does: captures the URL and detail in a wrapper. Both are considered safe for reporting.
|
||||
- how to access the detail: `errors.GetAllHints()`, `errors.FlattenHints()`, `errors.GetSafeDetails()`, format with `%+v`, Sentry report.
|
||||
- see also: `errors.UnimplementedError()` to construct leaves (see previous section).
|
||||
|
||||
- `WithTelemetry(error, string) error`: annotate an error with a key suitable for telemetry.
|
||||
- **when to use: to gather strings during error handling, for capture in the telemetry sub-system of a server package.**
|
||||
- what it does: captures the string. The telemetry key is considered safe for reporting.
|
||||
- how to access the detail: `errors.GetTelemetryKeys()`, `errors.GetSafeDetails()`, format with `%+v`, Sentry report.
|
||||
|
||||
- `WithDomain(error, Domain) error`, `HandledInDomain(error, Domain) error`, `HandledInDomainWithMessage(error, Domain, string) error` **(experimental)**: annotate an error with an origin package.
|
||||
- **when to use: at package boundaries.**
|
||||
- what it does: captures the identity of the error domain. Can be asserted with `errors.EnsureNotInDomain()`, `errors.NotInDomain()`.
|
||||
- how to access the detail: format with `%+v`, Sentry report.
|
||||
|
||||
- `WithAssertionFailure(error) error`: annotate an error as being an assertion failure.
|
||||
- when to use: probably never. Use `errors.AssertionFailedf()` and variants.
|
||||
- what it does: wraps the error with a special type. Triggers an auto-generated hint.
|
||||
- how to access the detail: `IsAssertionFailure()`/`HasAssertionFailure()`, `errors.GetAllHints()`, `errors.FlattenHints()`, format with `%+v`, Sentry report.
|
||||
|
||||
- `WithContextTags(error, context.Context) error`: annotate an error with the k/v pairs attached to a `context.Context` instance with the [`logtags`](https://github.com/cockroachdb/logtags) package.
|
||||
- **when to use: when capturing/producing an error and a `context.Context` is available.**
|
||||
- what it does: it captures the `logtags.Buffer` object in the wrapper.
|
||||
- how to access the detail: `errors.GetContextTags()`, format with `%+v`, Sentry reports.
|
||||
|
||||
## Providing PII-free details
|
||||
|
||||
The library supports PII-free strings essentially as follows:
|
||||
|
||||
- by default, many strings included in an error object are considered
|
||||
to be PII-unsafe, and are stripped out when building a Sentry
|
||||
report.
|
||||
- some fields in the library are assumed to be PII-safe by default.
|
||||
- you can opt additional strings in to Sentry reports.
|
||||
|
||||
The following strings from this library are considered to be PII-free,
|
||||
and thus included in Sentry reports automatically:
|
||||
|
||||
- the *type* of error objects,
|
||||
- stack traces (containing only file paths, line numbers, function names - arguments are not included),
|
||||
- issue tracker links (including URL and detail field),
|
||||
- telemetry keys,
|
||||
- error domains,
|
||||
- context tag keys,
|
||||
- the `format string` argument of `Newf`, `AssertionFailedf`, etc (the constructors ending with `...f()`),
|
||||
- the *type* of the additional arguments passed to the `...f()` constructors,
|
||||
- the *value of specific argument types* passed to the `...f()` constructors, when known to be PII-safe.
|
||||
For details of which arguments are considered PII-free, see the [`redact` package](https://github.com/cockroachdb/redact).
|
||||
|
||||
It is possible to opt additional strings in to Sentry reporting, using either of the following methods:
|
||||
|
||||
- implement the `errors.SafeDetailer` interface, providing the
|
||||
`SafeDetails() []string` method on your error type.
|
||||
|
||||
- enclose additional arguments passed to the `...f()` constructors with `errors.Safe()`. For example:
|
||||
`err := errors.Newf("my code: %d", errors.Safe(123))`
|
||||
— in this example, the value 123 will be included when a Sentry report is constructed.
|
||||
- it also makes it available via `errors.GetSafeDetails()`/`GetAllSafeDetails()`.
|
||||
- the value 123 is also part of the main error message returned by `Error()`.
|
||||
|
||||
- attach additional arbitrary strings with `errors.WithSafeDetails(error, string, ...interface{}) error` and
|
||||
also use `errors.Safe()`.
|
||||
For example: `err = errors.WithSafeDetails(err, "additional data: %s", errors.Safe("hello"))`.
|
||||
- in this example, the string "hello" will be included in Sentry reports.
|
||||
- however, it is not part of the main error message returned by `Error()`.
|
||||
|
||||
For more details on how Sentry reports are built, see the [`report`](report) sub-package.
|
||||
|
||||
## Building your own error types
|
||||
|
||||
You can create an error type as usual in Go: implement the `error`
|
||||
interface, and, if your type is also a wrapper, the `errors.Wrapper`
|
||||
interface (an `Unwrap()` method). You may also want to implement the
|
||||
`Cause()` method for backward compatibility with
|
||||
`github.com/pkg/errors`, if your project also uses that.
|
||||
|
||||
If your error type is a wrapper, you should implement a `Format()`
|
||||
method that redirects to `errors.FormatError()`, otherwise `%+v` will
|
||||
not work. Additionally, if your type has a payload not otherwise
|
||||
visible via `Error()`, you may want to implement
|
||||
`errors.SafeFormatter`. See [making `%+v` work with your
|
||||
type](#Making-v-work-with-your-type) below for details.
|
||||
|
||||
Finally, you may want your new error type to be portable across
|
||||
the network.
|
||||
|
||||
If your error type is a leaf, and already implements `proto.Message`
|
||||
(from [gogoproto](https://github.com/gogo/protobuf)), you are all set
|
||||
and the errors library will use that automatically. If you do not or
|
||||
cannot implement `proto.Message`, or your error type is a wrapper,
|
||||
read on.
|
||||
|
||||
At a minimum, you will need a *decoder function*: while
|
||||
`cockroachdb/errors` already does a bunch of encoding/decoding work on
|
||||
new types automatically, the one thing it really cannot do on its own
|
||||
is instantiate a Go object using your new type.
|
||||
|
||||
Here is the simplest decode function for a new leaf error type and a
|
||||
new wrapper type:
|
||||
|
||||
```go
|
||||
// note: we use the gogoproto `proto` sub-package.
|
||||
func yourDecode(_ string, _ []string, _ proto.Message) error {
|
||||
return &yourType{}
|
||||
}
|
||||
|
||||
func init() {
|
||||
errors.RegisterLeafDecoder((*yourType)(nil), yourDecode)
|
||||
}
|
||||
|
||||
func yourDecodeWrapper(_ context.Context, cause error, _ string, _ []string, _ proto.Message) error {
|
||||
// Note: the library already takes care of encoding/decoding the cause.
|
||||
return &yourWrapperType{cause: cause}
|
||||
}
|
||||
|
||||
func init() {
|
||||
errors.RegisterWrapperDecoder(errors.GetTypeKey((*yourWrapperType)(nil)), yourDecodeWrapper)
|
||||
}
|
||||
```
|
||||
|
||||
In the case where your type does not have any other field (empty
|
||||
struct for leafs, just a cause for wrappers), this is all you have to
|
||||
do.
|
||||
|
||||
(See the type `withAssertionFailure` in
|
||||
[`assert/assert.go`](assert/assert.go) for an example of this simple
|
||||
case.)
|
||||
|
||||
If your type does have additional fields, you *may* still not need a
|
||||
custom encoder. This is because the library automatically
|
||||
encodes/decodes the main error message and any safe strings that your
|
||||
error type makes available via the `errors.SafeDetailer` interface
|
||||
(the `SafeDetails()` method).
|
||||
|
||||
Say, for example, you have the following leaf type:
|
||||
|
||||
```go
|
||||
type myLeaf struct {
|
||||
code int
|
||||
}
|
||||
|
||||
func (m *myLeaf) Error() string { return fmt.Sprintf("my error: %d", m.code) }
|
||||
```
|
||||
|
||||
In that case, the library will automatically encode the result of
|
||||
calling `Error()`. This string will then be passed back to your
|
||||
decoder function as its `msg` argument. This makes it possible
|
||||
to decode the `code` field exactly:
|
||||
|
||||
```go
|
||||
func myLeafDecoder(_ context.Context, msg string, _ []string, _ proto.Message) error {
|
||||
codeS := strings.TrimPrefix(msg, "my error: ")
|
||||
code, _ := strconv.Atoi(codeS)
|
||||
// Note: error handling for strconv is omitted here to simplify
|
||||
// the explanation. If your decoder function should fail, simply
|
||||
// return a `nil` error object (not another unrelated error!).
|
||||
return &myLeaf{code: code}
|
||||
}
|
||||
```
|
||||
|
||||
Likewise, if your fields are PII-free, they are safe to expose via the
|
||||
`errors.SafeDetailer` interface. Those strings also get encoded
|
||||
automatically, and get passed to the decoder function as the `safeDetails`
|
||||
argument.
|
||||
|
||||
For example, say you have the following leaf type:
|
||||
|
||||
```go
|
||||
type myLeaf struct {
|
||||
// both fields are PII-free.
|
||||
code int
|
||||
tag string
|
||||
}
|
||||
|
||||
func (m *myLeaf) Error() string { ... }
|
||||
```
|
||||
|
||||
Then you can expose the fields as safe details as follows:
|
||||
|
||||
```go
|
||||
func (m *myLeaf) SafeDetails() []string {
|
||||
return []string{fmt.Sprintf("%d", m.code), m.tag}
|
||||
}
|
||||
```
|
||||
|
||||
(If the data is PII-free, then it is good to do this in any case: it
|
||||
enables any network system that receives an error of your type, but
|
||||
does not know about it, to still produce useful Sentry reports.)
|
||||
|
||||
Once you have this, the decode function receives the strings and you
|
||||
can use them to re-construct the error:
|
||||
|
||||
```go
|
||||
func myLeafDecoder(_ context.Context, _ string, details []string, _ proto.Message) error {
|
||||
// Note: you may want to test the length of the details slice
|
||||
// is correct.
|
||||
code, _ := strconv.Atoi(details[0])
|
||||
tag := details[1]
|
||||
return &myLeaf{code: code, tag: tag}
|
||||
}
|
||||
```
|
||||
|
||||
(For an example, see the `withTelemetry` type in [`telemetry/with_telemetry.go`](telemetry/with_telemetry.go).)
|
||||
|
||||
__The only case where you need a custom encoder is when your error
|
||||
type contains some fields that are not reflected in the error message
|
||||
(so you can't extract them back from there), and are not PII-free and
|
||||
thus cannot be reported as "safe details".__
|
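For illustration, here is a hedged sketch of such a type together with its custom encoder/decoder pair; the `withUser` type, its `user` field, and the reuse of `errorspb.StringsPayload` as the payload are hypothetical, not part of the library:

```go
import (
	"context"

	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/errors/errorspb"
	"github.com/gogo/protobuf/proto"
)

// withUser is a hypothetical wrapper whose extra field is neither
// part of the message nor PII-free.
type withUser struct {
	cause error
	user  string
}

func (w *withUser) Error() string { return w.cause.Error() }
func (w *withUser) Unwrap() error { return w.cause }

func encodeWithUser(
	_ context.Context, err error,
) (msgPrefix string, safeDetails []string, payload proto.Message) {
	w := err.(*withUser)
	// The full value travels in the protobuf payload; it is not
	// declared as a PII-free safe detail.
	return "", nil, &errorspb.StringsPayload{Details: []string{w.user}}
}

func decodeWithUser(
	_ context.Context, cause error, _ string, _ []string, payload proto.Message,
) error {
	m, ok := payload.(*errorspb.StringsPayload)
	if !ok || len(m.Details) < 1 {
		// Unknown payload: return nil so the library falls back to
		// its opaque representation.
		return nil
	}
	return &withUser{cause: cause, user: m.Details[0]}
}

func init() {
	tn := errors.GetTypeKey((*withUser)(nil))
	errors.RegisterWrapperEncoder(tn, encodeWithUser)
	errors.RegisterWrapperDecoder(tn, decodeWithUser)
}
```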
||||
|
||||
To take inspiration from examples, see the following types in the
|
||||
library that need a custom encoder:
|
||||
|
||||
- Hints/details in [`hintdetail/with_hint.go`](hintdetail/with_hint.go) and [`hintdetail/with_detail.go`](hintdetail/with_detail.go).
|
||||
- Secondary error wrappers in [`secondary/with_secondary.go`](secondary/with_secondary.go).
|
||||
- Marker error wrappers at the end of [`markers/markers.go`](markers/markers.go).
|
||||
|
||||
### Making `%+v` work with your type
|
||||
|
||||
In short:
|
||||
|
||||
- When in doubt, you should always implement the `fmt.Formatter`
|
||||
interface (`Format(fmt.State, rune)`) on your custom error types,
|
||||
exactly as follows:
|
||||
|
||||
```go
|
||||
func (e *yourType) Format(s fmt.State, verb rune) { errors.FormatError(e, s, verb) }
|
||||
```
|
||||
|
||||
(If you do not provide this redirection for your own custom wrapper
|
||||
type, this will disable the recursive application of the `%+v` flag
|
||||
to the causes chained from your wrapper.)
|
||||
|
||||
- You may optionally implement the `errors.SafeFormatter` interface:
|
||||
`SafeFormatError(p errors.Printer) (next error)`. This is optional, but
|
||||
should be done when some details are not included by `Error()` and
|
||||
should be emitted upon `%+v`.
|
||||
|
||||
The example `withHTTPCode` wrapper [included in the source tree](exthttp/ext_http.go)
|
||||
achieves this as follows:
|
||||
|
||||
```go
|
||||
// Format() implements fmt.Formatter; it is required until Go knows about FormatError.
|
||||
func (w *withHTTPCode) Format(s fmt.State, verb rune) { errors.FormatError(w, s, verb) }
|
||||
|
||||
// SafeFormatError() formats the error.
|
||||
func (w *withHTTPCode) SafeFormatError(p errors.Printer) (next error) {
|
||||
// Note: no need to print out the cause here!
|
||||
// FormatError() knows how to do this automatically.
|
||||
if p.Detail() {
|
||||
p.Printf("http code: %d", errors.Safe(w.code))
|
||||
}
|
||||
return w.cause
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Technical details follow:
|
||||
|
||||
- The errors library follows [the Go 2
|
||||
proposal](https://go.googlesource.com/proposal/+/master/design/29934-error-values.md).
|
||||
|
||||
- At some point in the future, Go's standard `fmt` library will learn
|
||||
[how to recognize error wrappers, and how to use the `errors.Formatter`
|
||||
interface automatically](https://github.com/golang/go/issues/29934). Until
|
||||
then, you must ensure that you also implement a `Format()` method
|
||||
(from `fmt.Formatter`) that redirects to `errors.FormatError`.
|
||||
|
||||
Note: you may implement `fmt.Formatter` (`Format()` method) in this
|
||||
way without implementing `errors.Formatter` (a `FormatError()`
|
||||
method). In that case, `errors.FormatError` will use a separate code
|
||||
path that does "the right thing", even for wrappers.
|
||||
|
||||
- The library provides an implementation of `errors.FormatError()`,
|
||||
modeled after the same function in Go 2. This is responsible for
|
||||
printing out error details, and knows how to present a chain of
|
||||
causes in a semi-structured format upon formatting with `%+v`.
|
||||
|
||||
### Ensuring `errors.Is` works when errors/packages are renamed
|
||||
|
||||
If a Go package containing a custom error type is renamed, or the
|
||||
error type itself is renamed, and errors of this type are transported
|
||||
over the network, then another system with a different code layout
|
||||
(e.g. running a different version of the software) may not be able to
|
||||
recognize the error any more via `errors.Is`.
|
||||
|
||||
To ensure that network portability continues to work across multiple
|
||||
software versions, in the case error types get renamed or Go packages
|
||||
get moved / renamed / etc, the server code must call
|
||||
`errors.RegisterTypeMigration()` from e.g. an `init()` function.
|
||||
|
||||
Example use:
|
||||
|
||||
```go
|
||||
previousPath := "github.com/old/path/to/error/package"
|
||||
previousTypeName := "oldpackage.oldErrorName"
|
||||
newErrorInstance := &newTypeName{...}
|
||||
errors.RegisterTypeMigration(previousPath, previousTypeName, newErrorInstance)
|
||||
```
|
||||
|
||||
## Error composition (summary)
|
||||
|
||||
| Constructor | Composes |
|
||||
|------------------------------------|-----------------------------------------------------------------------------------|
|
||||
| `New` | `NewWithDepth` (see below) |
|
||||
| `Errorf` | = `Newf` |
|
||||
| `Newf` | `NewWithDepthf` (see below) |
|
||||
| `WithMessage` | custom wrapper with message prefix and knowledge of safe strings |
|
||||
| `Wrap` | `WrapWithDepth` (see below) |
|
||||
| `Wrapf` | `WrapWithDepthf` (see below) |
|
||||
| `AssertionFailedf`                 | `AssertionFailedWithDepthf` (see below)                                             |
|
||||
| `NewWithDepth` | custom leaf with knowledge of safe strings + `WithStackDepth` (see below) |
|
||||
| `NewWithDepthf` | custom leaf with knowledge of safe strings + `WithSafeDetails` + `WithStackDepth` |
|
||||
| `WithMessagef` | custom wrapper with message prefix and knowledge of safe strings |
|
||||
| `WrapWithDepth` | `WithMessage` + `WithStackDepth` |
|
||||
| `WrapWithDepthf` | `WithMessagef` + `WithStackDepth` |
|
||||
| `AssertionFailedWithDepthf` | `NewWithDepthf` + `WithAssertionFailure` |
|
||||
| `NewAssertionErrorWithWrappedErrf` | `HandledWithMessagef` (barrier) + `WrapWithDepthf` + `WithAssertionFailure` |
|
||||
| `Join` | `JoinWithDepth` (see below) |
|
||||
| `JoinWithDepth` | multi-cause wrapper + `WithStackDepth` |
|
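As a quick illustration of how the composed behaviors surface in practice (a sketch; the message text is arbitrary, and the usual `fmt`/`errors` imports are assumed):

```go
base := errors.New("boom")
err := errors.Wrapf(base, "while processing %d", 42)

fmt.Println(err)                  // while processing 42: boom
fmt.Println(errors.Is(err, base)) // true: the cause is still reachable
// Formatting with %+v additionally shows the stack trace and safe
// details attached by Wrapf.
fmt.Printf("%+v\n", err)
```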
||||
## API (not constructing error objects)
|
||||
|
||||
The following is a summary of the non-constructor API functions, grouped by category.
|
||||
Detailed documentation can be found at: https://pkg.go.dev/github.com/cockroachdb/errors
|
||||
|
||||
```go
|
||||
// Access causes.
|
||||
func UnwrapAll(err error) error
|
||||
func UnwrapOnce(err error) error
|
||||
func Cause(err error) error // compatibility
|
||||
func Unwrap(err error) error // compatibility
|
||||
type Wrapper interface { ... } // compatibility
|
||||
|
||||
// Error formatting.
|
||||
type Formatter interface { ... } // compatibility, not recommended
|
||||
type SafeFormatter interface { ... }
|
||||
type Printer interface { ... }
|
||||
func FormatError(err error, s fmt.State, verb rune)
|
||||
func Formattable(err error) fmt.Formatter
|
||||
|
||||
// Identify errors.
|
||||
func Is(err, reference error) bool
|
||||
func IsAny(err error, references ...error) bool
|
||||
func If(err error, pred func(err error) (interface{}, bool)) (interface{}, bool)
|
||||
func As(err error, target interface{}) bool
|
||||
|
||||
// Encode/decode errors.
|
||||
type EncodedError // this is protobuf-encodable
|
||||
func EncodeError(ctx context.Context, err error) EncodedError
|
||||
func DecodeError(ctx context.Context, enc EncodedError) error
|
||||
|
||||
// Register encode/decode functions for custom/new error types.
|
||||
func RegisterLeafDecoder(typeName TypeKey, decoder LeafDecoder)
|
||||
func RegisterLeafEncoder(typeName TypeKey, encoder LeafEncoder)
|
||||
func RegisterWrapperDecoder(typeName TypeKey, decoder WrapperDecoder)
|
||||
func RegisterWrapperEncoder(typeName TypeKey, encoder WrapperEncoder)
|
||||
func RegisterWrapperEncoderWithMessageOverride (typeName TypeKey, encoder WrapperEncoderWithMessageOverride)
|
||||
func RegisterMultiCauseEncoder(theType TypeKey, encoder MultiCauseEncoder)
|
||||
func RegisterMultiCauseDecoder(theType TypeKey, decoder MultiCauseDecoder)
|
||||
type LeafEncoder = func(ctx context.Context, err error) (msg string, safeDetails []string, payload proto.Message)
|
||||
type LeafDecoder = func(ctx context.Context, msg string, safeDetails []string, payload proto.Message) error
|
||||
type WrapperEncoder = func(ctx context.Context, err error) (msgPrefix string, safeDetails []string, payload proto.Message)
|
||||
type WrapperEncoderWithMessageOverride = func(ctx context.Context, err error) (msgPrefix string, safeDetails []string, payload proto.Message, overrideError bool)
|
||||
type WrapperDecoder = func(ctx context.Context, cause error, msgPrefix string, safeDetails []string, payload proto.Message) error
|
||||
type MultiCauseEncoder = func(ctx context.Context, err error) (msg string, safeDetails []string, payload proto.Message)
|
||||
type MultiCauseDecoder = func(ctx context.Context, causes []error, msgPrefix string, safeDetails []string, payload proto.Message) error
|
||||
|
||||
// Registering package renames for custom error types.
|
||||
func RegisterTypeMigration(previousPkgPath, previousTypeName string, newType error)
|
||||
|
||||
// Sentry reports.
|
||||
func BuildSentryReport(err error) (*sentry.Event, map[string]interface{})
|
||||
func ReportError(err error) (string)
|
||||
|
||||
// Stack trace captures.
|
||||
func GetOneLineSource(err error) (file string, line int, fn string, ok bool)
|
||||
type ReportableStackTrace = sentry.StackTrace
|
||||
func GetReportableStackTrace(err error) *ReportableStackTrace
|
||||
|
||||
// Safe (PII-free) details.
|
||||
type SafeDetailPayload struct { ... }
|
||||
func GetAllSafeDetails(err error) []SafeDetailPayload
|
||||
func GetSafeDetails(err error) (payload SafeDetailPayload)
|
||||
|
||||
// Obsolete APIs.
|
||||
type SafeMessager interface { ... }
|
||||
func Redact(r interface{}) string
|
||||
|
||||
// Aliases redact.Safe.
|
||||
func Safe(v interface{}) SafeMessager
|
||||
|
||||
// Assertion failures.
|
||||
func HasAssertionFailure(err error) bool
|
||||
func IsAssertionFailure(err error) bool
|
||||
|
||||
// User-facing details and hints.
|
||||
func GetAllDetails(err error) []string
|
||||
func FlattenDetails(err error) string
|
||||
func GetAllHints(err error) []string
|
||||
func FlattenHints(err error) string
|
||||
|
||||
// Issue links / URL wrappers.
|
||||
func HasIssueLink(err error) bool
|
||||
func IsIssueLink(err error) bool
|
||||
func GetAllIssueLinks(err error) (issues []IssueLink)
|
||||
|
||||
// Unimplemented errors.
|
||||
func HasUnimplementedError(err error) bool
|
||||
func IsUnimplementedError(err error) bool
|
||||
|
||||
// Telemetry keys.
|
||||
func GetTelemetryKeys(err error) []string
|
||||
|
||||
// Domain errors.
|
||||
type Domain
|
||||
const NoDomain Domain
|
||||
func GetDomain(err error) Domain
|
||||
func NamedDomain(domainName string) Domain
|
||||
func PackageDomain() Domain
|
||||
func PackageDomainAtDepth(depth int) Domain
|
||||
func EnsureNotInDomain(err error, constructor DomainOverrideFn, forbiddenDomains ...Domain) error
|
||||
func NotInDomain(err error, doms ...Domain) bool
|
||||
|
||||
// Context tags.
|
||||
func GetContextTags(err error) []*logtags.Buffer
|
||||
```
|
|
@ -0,0 +1,95 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package assert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/cockroachdb/errors/errbase"
|
||||
"github.com/cockroachdb/errors/markers"
|
||||
"github.com/cockroachdb/errors/stdstrings"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
// WithAssertionFailure decorates the error with an assertion failure marker.
|
||||
// This is not intended to be used directly (see AssertionFailedf() for
|
||||
// further decoration).
|
||||
//
|
||||
// Detail is shown:
|
||||
// - when formatting with `%+v`.
|
||||
// - in Sentry reports.
|
||||
func WithAssertionFailure(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &withAssertionFailure{cause: err}
|
||||
}
|
||||
|
||||
// HasAssertionFailure returns true if the error or any of its causes
|
||||
// is an assertion failure annotation.
|
||||
func HasAssertionFailure(err error) bool {
|
||||
_, ok := markers.If(err, func(err error) (v interface{}, ok bool) {
|
||||
v, ok = err.(*withAssertionFailure)
|
||||
return
|
||||
})
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsAssertionFailure returns true if the error (not its causes) is an
|
||||
// assertion failure annotation. Consider using markers.If or
|
||||
// HasAssertionFailure to test both the error and its causes.
|
||||
func IsAssertionFailure(err error) bool {
|
||||
_, ok := err.(*withAssertionFailure)
|
||||
return ok
|
||||
}
|
||||
|
||||
type withAssertionFailure struct {
|
||||
cause error
|
||||
}
|
||||
|
||||
var _ error = (*withAssertionFailure)(nil)
|
||||
var _ fmt.Formatter = (*withAssertionFailure)(nil)
|
||||
var _ errbase.SafeFormatter = (*withAssertionFailure)(nil)
|
||||
|
||||
// ErrorHint implements the hintdetail.ErrorHinter interface.
|
||||
func (w *withAssertionFailure) ErrorHint() string {
|
||||
return AssertionErrorHint + stdstrings.IssueReferral
|
||||
}
|
||||
|
||||
// AssertionErrorHint is the hint emitted upon assertion failures.
|
||||
const AssertionErrorHint = `You have encountered an unexpected error.`
|
||||
|
||||
func (w *withAssertionFailure) Error() string { return w.cause.Error() }
|
||||
func (w *withAssertionFailure) Cause() error { return w.cause }
|
||||
func (w *withAssertionFailure) Unwrap() error { return w.cause }
|
||||
|
||||
func (w *withAssertionFailure) Format(s fmt.State, verb rune) { errbase.FormatError(w, s, verb) }
|
||||
func (w *withAssertionFailure) SafeFormatError(p errbase.Printer) error {
|
||||
if p.Detail() {
|
||||
p.Printf("assertion failure")
|
||||
}
|
||||
return w.cause
|
||||
}
|
||||
|
||||
func decodeAssertFailure(
|
||||
_ context.Context, cause error, _ string, _ []string, _ proto.Message,
|
||||
) error {
|
||||
return &withAssertionFailure{cause: cause}
|
||||
}
|
||||
|
||||
func init() {
|
||||
errbase.RegisterWrapperDecoder(errbase.GetTypeKey((*withAssertionFailure)(nil)), decodeAssertFailure)
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
import "github.com/cockroachdb/errors/assert"
|
||||
|
||||
// WithAssertionFailure decorates the error with an assertion failure marker.
|
||||
// This is not intended to be used directly (see AssertionFailedf() for
|
||||
// further decoration).
|
||||
//
|
||||
// Detail is shown:
|
||||
// - when formatting with `%+v`.
|
||||
// - in Sentry reports.
|
||||
func WithAssertionFailure(err error) error { return assert.WithAssertionFailure(err) }
|
||||
|
||||
// HasAssertionFailure returns true if the error or any of its causes
|
||||
// is an assertion failure annotation.
|
||||
func HasAssertionFailure(err error) bool { return assert.HasAssertionFailure(err) }
|
||||
|
||||
// IsAssertionFailure returns true if the error (not its causes) is an
|
||||
// assertion failure annotation. Consider using markers.If or
|
||||
// HasAssertionFailure to test both the error and its causes.
|
||||
func IsAssertionFailure(err error) bool { return assert.IsAssertionFailure(err) }
|
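Illustrative usage sketch for the assertion API above (not part of the diff; the messages are arbitrary and the usual `fmt`/`errors` imports are assumed):

```go
base := errors.New("unexpected state")
err := errors.WithAssertionFailure(base)

fmt.Println(errors.IsAssertionFailure(err))  // true: the marker is the outermost layer
fmt.Println(errors.HasAssertionFailure(err)) // true

wrapped := errors.Wrap(err, "while reconciling")
fmt.Println(errors.IsAssertionFailure(wrapped))  // false: the marker is no longer outermost
fmt.Println(errors.HasAssertionFailure(wrapped)) // true: it is still in the cause chain
```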
|
@ -0,0 +1,151 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package barriers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/cockroachdb/errors/errbase"
|
||||
"github.com/cockroachdb/redact"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
// Handled swallows the provided error and hides it from the
|
||||
// Cause()/Unwrap() interface, and thus the Is() facility that
|
||||
// identifies causes. However, it retains it for the purpose of
|
||||
// printing the error out (e.g. for troubleshooting). The error
|
||||
// message is preserved in full.
|
||||
//
|
||||
// Detail is shown:
|
||||
// - via `errors.GetSafeDetails()`, shows details from hidden error.
|
||||
// - when formatting with `%+v`.
|
||||
// - in Sentry reports.
|
||||
func Handled(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return HandledWithSafeMessage(err, redact.Sprint(err))
|
||||
}
|
||||
|
||||
// HandledWithMessage is like Handled except the message is overridden.
|
||||
// This can be used e.g. to hide message details or to prevent
|
||||
// downstream code from making assertions on the message's contents.
|
||||
func HandledWithMessage(err error, msg string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return HandledWithSafeMessage(err, redact.Sprint(msg))
|
||||
}
|
||||
|
||||
// HandledWithSafeMessage is like Handled except the message is overridden.
|
||||
// This can be used e.g. to hide message details or to prevent
|
||||
// downstream code from making assertions on the message's contents.
|
||||
func HandledWithSafeMessage(err error, msg redact.RedactableString) error {
|
||||
return &barrierErr{maskedErr: err, smsg: msg}
|
||||
}
|
||||
|
||||
// HandledWithMessagef is like HandledWithMessage except the message
|
||||
// is formatted.
|
||||
func HandledWithMessagef(err error, format string, args ...interface{}) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &barrierErr{maskedErr: err, smsg: redact.Sprintf(format, args...)}
|
||||
}
|
||||
|
||||
// barrierErr is a leaf error type. It encapsulates a chain of
|
||||
// original causes, but these causes are hidden so that they inhibit
|
||||
// matching via Is() and the Cause()/Unwrap() recursions.
|
||||
type barrierErr struct {
|
||||
// Message for the barrier itself.
|
||||
// In the common case, the message from the masked error
|
||||
// is used as-is (see Handled() above) however it is
|
||||
// useful to cache it here since the masked error may
|
||||
// have a long chain of wrappers and its Error() call
|
||||
// may be expensive.
|
||||
smsg redact.RedactableString
|
||||
// Masked error chain.
|
||||
maskedErr error
|
||||
}
|
||||
|
||||
var _ error = (*barrierErr)(nil)
|
||||
var _ errbase.SafeDetailer = (*barrierErr)(nil)
|
||||
var _ errbase.SafeFormatter = (*barrierErr)(nil)
|
||||
var _ fmt.Formatter = (*barrierErr)(nil)
|
||||
|
||||
// barrierErr is an error.
|
||||
func (e *barrierErr) Error() string { return e.smsg.StripMarkers() }
|
||||
|
||||
// SafeDetails reports the PII-free details from the masked error.
|
||||
func (e *barrierErr) SafeDetails() []string {
|
||||
var details []string
|
||||
for err := e.maskedErr; err != nil; err = errbase.UnwrapOnce(err) {
|
||||
sd := errbase.GetSafeDetails(err)
|
||||
details = sd.Fill(details)
|
||||
}
|
||||
details = append(details, redact.Sprintf("masked error: %+v", e.maskedErr).Redact().StripMarkers())
|
||||
return details
|
||||
}
|
||||
|
||||
// Printing a barrier reveals the details.
|
||||
func (e *barrierErr) Format(s fmt.State, verb rune) { errbase.FormatError(e, s, verb) }
|
||||
|
||||
func (e *barrierErr) SafeFormatError(p errbase.Printer) (next error) {
|
||||
p.Print(e.smsg)
|
||||
if p.Detail() {
|
||||
p.Printf("-- cause hidden behind barrier\n%+v", e.maskedErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A barrier error is encoded exactly.
|
||||
func encodeBarrier(
|
||||
ctx context.Context, err error,
|
||||
) (msg string, details []string, payload proto.Message) {
|
||||
e := err.(*barrierErr)
|
||||
enc := errbase.EncodeError(ctx, e.maskedErr)
|
||||
return string(e.smsg), e.SafeDetails(), &enc
|
||||
}
|
||||
|
||||
// A barrier error is decoded exactly.
|
||||
func decodeBarrier(ctx context.Context, msg string, _ []string, payload proto.Message) error {
|
||||
enc := payload.(*errbase.EncodedError)
|
||||
return &barrierErr{smsg: redact.RedactableString(msg), maskedErr: errbase.DecodeError(ctx, *enc)}
|
||||
}
|
||||
|
||||
// Previous versions of barrier errors.
|
||||
func decodeBarrierPrev(ctx context.Context, msg string, _ []string, payload proto.Message) error {
|
||||
enc := payload.(*errbase.EncodedError)
|
||||
return &barrierErr{smsg: redact.Sprint(msg), maskedErr: errbase.DecodeError(ctx, *enc)}
|
||||
}
|
||||
|
||||
// barrierError is the "old" type name of barrierErr. We use a new
|
||||
// name now to ensure a different decode function is used when
|
||||
// importing barriers from the previous structure, where the
|
||||
// message is not redactable.
|
||||
type barrierError struct {
|
||||
msg string
|
||||
maskedErr error
|
||||
}
|
||||
|
||||
func (b *barrierError) Error() string { return "" }
|
||||
|
||||
func init() {
|
||||
errbase.RegisterLeafDecoder(errbase.GetTypeKey((*barrierError)(nil)), decodeBarrierPrev)
|
||||
tn := errbase.GetTypeKey((*barrierErr)(nil))
|
||||
errbase.RegisterLeafDecoder(tn, decodeBarrier)
|
||||
errbase.RegisterLeafEncoder(tn, encodeBarrier)
|
||||
}
|
|
@ -0,0 +1,34 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
import "github.com/cockroachdb/errors/barriers"
|
||||
|
||||
// Handled swallows the provided error and hides it from the
|
||||
// Cause()/Unwrap() interface, and thus the Is() facility that
|
||||
// identifies causes. However, it retains it for the purpose of
|
||||
// printing the error out (e.g. for troubleshooting). The error
|
||||
// message is preserved in full.
|
||||
//
|
||||
// Detail is shown:
|
||||
// - via `errors.GetSafeDetails()`, shows details from hidden error.
|
||||
// - when formatting with `%+v`.
|
||||
// - in Sentry reports.
|
||||
func Handled(err error) error { return barriers.Handled(err) }
|
||||
|
||||
// HandledWithMessage is like Handled except the message is overridden.
|
||||
// This can be used e.g. to hide message details or to prevent
|
||||
// downstream code from making assertions on the message's contents.
|
||||
func HandledWithMessage(err error, msg string) error { return barriers.HandledWithMessage(err, msg) }
|
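Illustrative usage sketch for the barrier API above (not part of the diff; the messages are arbitrary and the usual `fmt`/`errors` imports are assumed):

```go
base := errors.New("disk full")

masked := errors.Handled(base)
fmt.Println(masked)                  // "disk full": the message is preserved
fmt.Println(errors.Is(masked, base)) // false: the cause is hidden behind the barrier

renamed := errors.HandledWithMessage(base, "storage failure")
fmt.Println(renamed) // "storage failure"
// %+v still reveals the masked chain for troubleshooting.
fmt.Printf("%+v\n", renamed)
```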
|
@ -0,0 +1,113 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package contexttags
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors/errbase"
|
||||
"github.com/cockroachdb/logtags"
|
||||
"github.com/cockroachdb/redact"
|
||||
)
|
||||
|
||||
// WithContextTags captures the k/v pairs stored in the context via the
|
||||
// `logtags` package and annotates them on the error.
|
||||
//
|
||||
// Only the string representation of values remains available. This is
|
||||
// because the library cannot guarantee that the underlying value is
|
||||
// preserved across the network. To avoid creating a stateful interface
|
||||
// (where the user code needs to know whether an error has traveled
|
||||
// through the network or not), the library restricts access to the
|
||||
// value part as strings. See GetContextTags() below.
|
||||
//
|
||||
// Detail is shown:
|
||||
// - via `errors.GetSafeDetails()`.
|
||||
// - via `GetContextTags()` below.
|
||||
// - when formatting with `%+v`.
|
||||
// - in Sentry reports.
|
||||
func WithContextTags(err error, ctx context.Context) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
tags := logtags.FromContext(ctx)
|
||||
if tags == nil {
|
||||
return err
|
||||
}
|
||||
return &withContext{cause: err, tags: tags}
|
||||
}
|
||||
|
||||
// GetContextTags retrieves the k/v pairs stored in the error.
|
||||
// The sets are returned from outermost to innermost level of cause.
|
||||
// The returned logtags.Buffers only know about the string
|
||||
// representation of the values originally captured by the error.
|
||||
func GetContextTags(err error) (res []*logtags.Buffer) {
|
||||
for e := err; e != nil; e = errbase.UnwrapOnce(e) {
|
||||
if w, ok := e.(*withContext); ok {
|
||||
b := w.tags
|
||||
// Ensure that the buffer does not contain any non-string.
|
||||
if hasNonStringValue(b) {
|
||||
b = convertToStringsOnly(b)
|
||||
}
|
||||
res = append(res, b)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func hasNonStringValue(b *logtags.Buffer) bool {
|
||||
for _, t := range b.Get() {
|
||||
v := t.Value()
|
||||
if v == nil {
|
||||
return true
|
||||
}
|
||||
if _, ok := v.(string); !ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func convertToStringsOnly(b *logtags.Buffer) (res *logtags.Buffer) {
|
||||
for _, t := range b.Get() {
|
||||
res = res.Add(t.Key(), t.ValueStr())
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func redactTags(b *logtags.Buffer) []string {
|
||||
res := make([]string, len(b.Get()))
|
||||
redactableTagsIterate(b, func(i int, r redact.RedactableString) {
|
||||
res[i] = r.Redact().StripMarkers()
|
||||
})
|
||||
return res
|
||||
}
|
||||
|
||||
func redactableTagsIterate(b *logtags.Buffer, fn func(i int, s redact.RedactableString)) {
|
||||
var empty redact.SafeString
|
||||
for i, t := range b.Get() {
|
||||
k := t.Key()
|
||||
v := t.Value()
|
||||
eq := empty
|
||||
var val interface{} = empty
|
||||
if v != nil {
|
||||
if len(k) > 1 {
|
||||
eq = "="
|
||||
}
|
||||
val = v
|
||||
}
|
||||
res := redact.Sprintf("%s%s%v", redact.Safe(k), eq, val)
|
||||
fn(i, res)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,115 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package contexttags
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/cockroachdb/errors/errbase"
|
||||
"github.com/cockroachdb/errors/errorspb"
|
||||
"github.com/cockroachdb/logtags"
|
||||
"github.com/cockroachdb/redact"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
type withContext struct {
|
||||
cause error
|
||||
// tags stores the context k/v pairs, non-redacted.
|
||||
// The errors library only gives access to the string representation
|
||||
// of the value part. This is because the network encoding of
|
||||
// a withContext instance only stores the string.
|
||||
tags *logtags.Buffer
|
||||
// redactedTags stores the context k/v pairs, redacted.
|
||||
// When this is defined, SafeDetails() uses it. Otherwise, it
|
||||
// re-redacts the tags above.
|
||||
redactedTags []string
|
||||
}
|
||||
|
||||
var _ error = (*withContext)(nil)
|
||||
var _ errbase.SafeDetailer = (*withContext)(nil)
|
||||
var _ errbase.SafeFormatter = (*withContext)(nil)
|
||||
var _ fmt.Formatter = (*withContext)(nil)
|
||||
|
||||
// withContext is an error. The original error message is preserved.
|
||||
func (w *withContext) Error() string { return w.cause.Error() }
|
||||
|
||||
// the cause is reachable.
|
||||
func (w *withContext) Cause() error { return w.cause }
|
||||
func (w *withContext) Unwrap() error { return w.cause }
|
||||
|
||||
// Printing a withContext reveals the tags.
|
||||
func (w *withContext) Format(s fmt.State, verb rune) { errbase.FormatError(w, s, verb) }
|
||||
|
||||
func (w *withContext) SafeFormatError(p errbase.Printer) error {
|
||||
if p.Detail() && w.tags != nil {
|
||||
p.Printf("tags: [")
|
||||
redactableTagsIterate(w.tags, func(i int, r redact.RedactableString) {
|
||||
if i > 0 {
|
||||
p.Printf(",")
|
||||
}
|
||||
p.Print(r)
|
||||
})
|
||||
p.Printf("]")
|
||||
}
|
||||
return w.cause
|
||||
}
|
||||
|
||||
// SafeDetails implements the errbase.SafeDetailer interface.
|
||||
func (w *withContext) SafeDetails() []string {
|
||||
if w.redactedTags != nil {
|
||||
return w.redactedTags
|
||||
}
|
||||
return redactTags(w.tags)
|
||||
}
|
||||
|
||||
func encodeWithContext(_ context.Context, err error) (string, []string, proto.Message) {
|
||||
w := err.(*withContext)
|
||||
p := &errorspb.TagsPayload{}
|
||||
for _, t := range w.tags.Get() {
|
||||
p.Tags = append(p.Tags, errorspb.TagPayload{Tag: t.Key(), Value: t.ValueStr()})
|
||||
}
|
||||
return "", w.SafeDetails(), p
|
||||
}
|
||||
|
||||
func decodeWithContext(
|
||||
_ context.Context, cause error, _ string, redactedTags []string, payload proto.Message,
|
||||
) error {
|
||||
m, ok := payload.(*errorspb.TagsPayload)
|
||||
if !ok {
|
||||
// If this ever happens, this means some version of the library
|
||||
// (presumably future) changed the payload type, and we're
|
||||
// receiving this here. In this case, give up and let
|
||||
// DecodeError use the opaque type.
|
||||
return nil
|
||||
}
|
||||
if len(m.Tags) == 0 && len(redactedTags) == 0 {
|
||||
// There are no tags to restore. Either the original error carried none, or
|
||||
// we received some new version of the protobuf message which does
|
||||
// things differently. Again, use the opaque type.
|
||||
return nil
|
||||
}
|
||||
// Convert the k/v pairs.
|
||||
var b *logtags.Buffer
|
||||
for _, t := range m.Tags {
|
||||
b = b.Add(t.Tag, t.Value)
|
||||
}
|
||||
return &withContext{cause: cause, tags: b, redactedTags: redactedTags}
|
||||
}
|
||||
|
||||
func init() {
|
||||
errbase.RegisterWrapperEncoder(errbase.GetTypeKey((*withContext)(nil)), encodeWithContext)
|
||||
errbase.RegisterWrapperDecoder(errbase.GetTypeKey((*withContext)(nil)), decodeWithContext)
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors/contexttags"
|
||||
"github.com/cockroachdb/logtags"
|
||||
)
|
||||
|
||||
// WithContextTags captures the k/v pairs stored in the context via the
|
||||
// `logtags` package and annotates them on the error.
|
||||
//
|
||||
// Only the string representation of values remains available. This is
|
||||
// because the library cannot guarantee that the underlying value is
|
||||
// preserved across the network. To avoid creating a stateful interface
|
||||
// (where the user code needs to know whether an error has traveled
|
||||
// through the network or not), the library restricts access to the
|
||||
// value part as strings. See GetContextTags() below.
|
||||
//
|
||||
// Detail is shown:
|
||||
// - via `errors.GetSafeDetails()`.
|
||||
// - via `GetContextTags()` below.
|
||||
// - when formatting with `%+v`.
|
||||
// - in Sentry reports.
|
||||
func WithContextTags(err error, ctx context.Context) error {
|
||||
return contexttags.WithContextTags(err, ctx)
|
||||
}
|
||||
|
||||
// GetContextTags retrieves the k/v pairs stored in the error.
|
||||
// The sets are returned from outermost to innermost level of cause.
|
||||
// The returned logtags.Buffers only know about the string
|
||||
// representation of the values originally captured by the error.
|
||||
func GetContextTags(err error) []*logtags.Buffer { return contexttags.GetContextTags(err) }
|
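Illustrative usage sketch for the context tag API above (not part of the diff; the tag names and values are arbitrary, the printed format is approximate, and the usual `context`/`fmt`/`errors`/`logtags` imports are assumed):

```go
ctx := context.Background()
ctx = logtags.AddTag(ctx, "n", 1)
ctx = logtags.AddTag(ctx, "user", "alice")

err := errors.New("boom")
err = errors.WithContextTags(err, ctx)

for _, b := range errors.GetContextTags(err) {
	// Only the string representation of the values is available.
	fmt.Println(b.String()) // something like "n1,user=alice"
}
```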
|
@ -0,0 +1,156 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package domains
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/cockroachdb/errors/barriers"
|
||||
"github.com/cockroachdb/errors/errbase"
|
||||
)
|
||||
|
||||
// Domain is the type of a domain annotation.
|
||||
type Domain string
|
||||
|
||||
// NoDomain is the domain of errors that don't originate
|
||||
// from a barrier.
|
||||
const NoDomain Domain = "error domain: <none>"
|
||||
|
||||
// GetDomain extracts the domain of the given error, or NoDomain if
|
||||
// the error's cause does not have a domain annotation.
|
||||
func GetDomain(err error) Domain {
|
||||
for {
|
||||
if b, ok := err.(*withDomain); ok {
|
||||
return b.domain
|
||||
}
|
||||
// Recurse to the cause.
|
||||
if c := errbase.UnwrapOnce(err); c != nil {
|
||||
err = c
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
return NoDomain
|
||||
}
|
||||
|
||||
// WithDomain wraps an error so that it appears to come from the given domain.
|
||||
//
|
||||
// Domain is shown:
|
||||
// - via `errors.GetSafeDetails()`.
|
||||
// - when formatting with `%+v`.
|
||||
// - in Sentry reports.
|
||||
func WithDomain(err error, domain Domain) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &withDomain{cause: err, domain: domain}
|
||||
}
|
||||
|
||||
// New creates an error in the implicit domain (see PackageDomain() below)
|
||||
// of its caller.
|
||||
//
|
||||
// Domain is shown:
|
||||
// - via `errors.GetSafeDetails()`.
|
||||
// - when formatting with `%+v`.
|
||||
// - in Sentry reports.
|
||||
func New(msg string) error {
|
||||
return WithDomain(errors.New(msg), PackageDomainAtDepth(1))
|
||||
}
|
||||
|
||||
// Newf/Errorf with format and args can be implemented similarly.
|
||||
|
||||
// HandledInDomain creates an error in the given domain and retains
|
||||
// the details of the given original error as context for
|
||||
// debugging. The original error is hidden and does not become a
|
||||
// "cause" for the new error. The original's error _message_
|
||||
// is preserved.
|
||||
//
|
||||
// See the documentation of `WithDomain()` and `errors.Handled()` for details.
|
||||
func HandledInDomain(err error, domain Domain) error {
|
||||
return WithDomain(barriers.Handled(err), domain)
|
||||
}
|
||||
|
||||
// HandledInDomainWithMessage is like HandledWithMessage but with a domain.
|
||||
func HandledInDomainWithMessage(err error, domain Domain, msg string) error {
|
||||
return WithDomain(barriers.HandledWithMessage(err, msg), domain)
|
||||
}
|
||||
|
||||
// Handled creates a handled error in the implicit domain (see
|
||||
// PackageDomain() below) of its caller.
|
||||
//
|
||||
// See the documentation of `barriers.Handled()` for details.
|
||||
func Handled(err error) error {
|
||||
return HandledInDomain(err, PackageDomainAtDepth(1))
|
||||
}
|
||||
|
||||
// Handledf with format and args can be implemented similarly.
|
||||
|
||||
// NotInDomain returns true if and only if the error's
|
||||
// domain is not one of the specified domains.
|
||||
func NotInDomain(err error, domains ...Domain) bool {
|
||||
return notInDomainInternal(GetDomain(err), domains...)
|
||||
}
|
||||
|
||||
func notInDomainInternal(d Domain, domains ...Domain) bool {
|
||||
for _, given := range domains {
|
||||
if d == given {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// EnsureNotInDomain checks whether the error is in the given domain(s).
|
||||
// If it is, the given constructor, if provided, is called to construct
|
||||
// an alternate error. If no error constructor is provided,
|
||||
// a new barrier is constructed automatically using the first
|
||||
// provided domain as new domain. The original error message
|
||||
// is preserved.
|
||||
func EnsureNotInDomain(
|
||||
err error, constructor func(originalDomain Domain, err error) error, forbiddenDomains ...Domain,
|
||||
) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Is the error already in the wanted domains?
|
||||
errDomain := GetDomain(err)
|
||||
if notInDomainInternal(errDomain, forbiddenDomains...) {
|
||||
// No: no-op.
|
||||
return err
|
||||
}
|
||||
return constructor(errDomain, err)
|
||||
}
|
||||
|
||||
// PackageDomain returns an error domain that represents the
|
||||
// package of its caller.
|
||||
func PackageDomain() Domain {
|
||||
return PackageDomainAtDepth(1)
|
||||
}
|
||||
|
||||
// PackageDomainAtDepth returns an error domain that describes the
|
||||
// package at the given call depth.
|
||||
func PackageDomainAtDepth(depth int) Domain {
|
||||
_, f, _, _ := runtime.Caller(1 + depth)
|
||||
return Domain("error domain: pkg " + filepath.Dir(f))
|
||||
}
|
||||
|
||||
// NamedDomain returns an error domain identified by the given string.
|
||||
func NamedDomain(domainName string) Domain {
|
||||
return Domain(fmt.Sprintf("error domain: %q", domainName))
|
||||
}
|
|
@ -0,0 +1,84 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package domains
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/cockroachdb/errors/errbase"
|
||||
"github.com/cockroachdb/redact"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
// withDomain is a wrapper type that adds a domain annotation to an
|
||||
// error.
|
||||
type withDomain struct {
|
||||
// Mandatory: error cause
|
||||
cause error
|
||||
// Mandatory: domain. This also must be free of PII
|
||||
// as it will be reported in "safe details".
|
||||
domain Domain
|
||||
}
|
||||
|
||||
var _ error = (*withDomain)(nil)
|
||||
var _ errbase.SafeDetailer = (*withDomain)(nil)
|
||||
var _ errbase.TypeKeyMarker = (*withDomain)(nil)
|
||||
var _ fmt.Formatter = (*withDomain)(nil)
|
||||
var _ errbase.SafeFormatter = (*withDomain)(nil)
|
||||
|
||||
// withDomain is an error. The original error message is preserved.
|
||||
func (e *withDomain) Error() string { return e.cause.Error() }
|
||||
|
||||
// the cause is reachable.
|
||||
func (e *withDomain) Cause() error { return e.cause }
|
||||
func (e *withDomain) Unwrap() error { return e.cause }
|
||||
|
||||
// ErrorKeyMarker implements the errbase.TypeKeyMarker interface.
|
||||
// The full type name of barriers is extended with the domain as extra marker.
|
||||
// This ensures that domain-annotated errors appear to be of different types
|
||||
// for the purpose of Is().
|
||||
func (e *withDomain) ErrorKeyMarker() string { return string(e.domain) }
|
||||
|
||||
// SafeDetails reports the domain.
|
||||
func (e *withDomain) SafeDetails() []string {
|
||||
return []string{string(e.domain)}
|
||||
}
|
||||
|
||||
func (e *withDomain) Format(s fmt.State, verb rune) { errbase.FormatError(e, s, verb) }
|
||||
|
||||
func (e *withDomain) SafeFormatError(p errbase.Printer) error {
|
||||
if p.Detail() {
|
||||
p.Print(redact.Safe(e.domain))
|
||||
}
|
||||
return e.cause
|
||||
}
|
||||
|
||||
// A domain-annotated error is decoded exactly.
|
||||
func decodeWithDomain(
|
||||
_ context.Context, cause error, _ string, details []string, _ proto.Message,
|
||||
) error {
|
||||
if len(details) == 0 {
|
||||
// decoding failure: expecting at least one detail string
|
||||
// (the one that carries the domain string).
|
||||
return nil
|
||||
}
|
||||
return &withDomain{cause: cause, domain: Domain(details[0])}
|
||||
}
|
||||
|
||||
func init() {
|
||||
tn := errbase.GetTypeKey((*withDomain)(nil))
|
||||
errbase.RegisterWrapperDecoder(tn, decodeWithDomain)
|
||||
}
|
|
@ -0,0 +1,78 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
import "github.com/cockroachdb/errors/domains"
|
||||
|
||||
// Domain is the type of a domain annotation.
|
||||
type Domain = domains.Domain
|
||||
|
||||
// NoDomain is the domain of errors that don't originate
|
||||
// from a barrier.
|
||||
const NoDomain Domain = domains.NoDomain
|
||||
|
||||
// NamedDomain returns an error domain identified by the given string.
|
||||
func NamedDomain(domainName string) Domain { return domains.NamedDomain(domainName) }
|
||||
|
||||
// PackageDomain returns an error domain that represents the
|
||||
// package of its caller.
|
||||
func PackageDomain() Domain { return domains.PackageDomainAtDepth(1) }
|
||||
|
||||
// PackageDomainAtDepth returns an error domain that describes the
|
||||
// package at the given call depth.
|
||||
func PackageDomainAtDepth(depth int) Domain { return domains.PackageDomainAtDepth(depth) }
|
||||
|
||||
// WithDomain wraps an error so that it appears to come from the given domain.
|
||||
//
|
||||
// Domain is shown:
|
||||
// - via `errors.GetSafeDetails()`.
|
||||
// - when formatting with `%+v`.
|
||||
// - in Sentry reports.
|
||||
func WithDomain(err error, domain Domain) error { return domains.WithDomain(err, domain) }
|
||||
|
||||
// NotInDomain returns true if and only if the error's
|
||||
// domain is not one of the specified domains.
|
||||
func NotInDomain(err error, doms ...Domain) bool { return domains.NotInDomain(err, doms...) }
|
||||
|
||||
// EnsureNotInDomain checks whether the error is in the given domain(s).
|
||||
// If it is, the given constructor, if provided, is called to construct
|
||||
// an alternate error. If no error constructor is provided,
|
||||
// a new barrier is constructed automatically using the first
|
||||
// provided domain as new domain. The original error message
|
||||
// is preserved.
|
||||
func EnsureNotInDomain(err error, constructor DomainOverrideFn, forbiddenDomains ...Domain) error {
|
||||
return domains.EnsureNotInDomain(err, constructor, forbiddenDomains...)
|
||||
}
|
||||
|
||||
// DomainOverrideFn is the type of the callback function passed to EnsureNotInDomain().
|
||||
type DomainOverrideFn = func(originalDomain Domain, err error) error
|
||||
|
||||
// HandledInDomain creates an error in the given domain and retains
|
||||
// the details of the given original error as context for
|
||||
// debugging. The original error is hidden and does not become a
|
||||
// "cause" for the new error. The original's error _message_
|
||||
// is preserved.
|
||||
//
|
||||
// See the documentation of `WithDomain()` and `errors.Handled()` for details.
|
||||
func HandledInDomain(err error, domain Domain) error { return domains.HandledInDomain(err, domain) }
|
||||
|
||||
// HandledInDomainWithMessage is like HandledWithMessage but with a domain.
|
||||
func HandledInDomainWithMessage(err error, domain Domain, msg string) error {
|
||||
return domains.HandledInDomainWithMessage(err, domain, msg)
|
||||
}
|
||||
|
||||
// GetDomain extracts the domain of the given error, or NoDomain if
|
||||
// the error's cause does not have a domain annotation.
|
||||
func GetDomain(err error) Domain { return domains.GetDomain(err) }
|
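Illustrative usage sketch for the domain API above (not part of the diff; the domain names are arbitrary and the usual `fmt`/`errors` imports are assumed):

```go
storage := errors.NamedDomain("storage")
err := errors.WithDomain(errors.New("boom"), storage)

fmt.Println(errors.GetDomain(err) == storage) // true
fmt.Println(errors.NotInDomain(err, storage)) // false

// Re-wrap anything leaking out of the storage domain behind a
// barrier annotated with the API domain instead.
api := errors.NamedDomain("api")
err = errors.EnsureNotInDomain(err, func(_ errors.Domain, err error) error {
	return errors.HandledInDomain(err, api)
}, storage)
fmt.Println(errors.GetDomain(err) == api) // true
```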
|
@ -0,0 +1,214 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package errbase
|
||||
|
||||
import (
|
||||
"context"
|
||||
goErr "errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/cockroachdb/errors/errorspb"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
pkgErr "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// This file provides the library the ability to encode/decode
|
||||
// standard error types.
|
||||
|
||||
// errors.errorString from base Go does not need an encoding
|
||||
// function, because the base encoding logic in EncodeLeaf() is
|
||||
// able to extract everything about it.
|
||||
|
||||
// we can then decode it exactly.
|
||||
func decodeErrorString(_ context.Context, msg string, _ []string, _ proto.Message) error {
|
||||
return goErr.New(msg)
|
||||
}
|
||||
|
||||
// context.DeadlineExceeded uses a custom type.
|
||||
func decodeDeadlineExceeded(_ context.Context, _ string, _ []string, _ proto.Message) error {
|
||||
return context.DeadlineExceeded
|
||||
}
|
||||
|
||||
// errors.fundamental from github.com/pkg/errors cannot be encoded
|
||||
// exactly because it includes a non-serializable stack trace
|
||||
// object. In order to work with it, we encode it by dumping
|
||||
// the stack trace in a safe reporting detail field, and decode
|
||||
// it as an opaqueLeaf instance in this package.
|
||||
|
||||
func encodePkgFundamental(
|
||||
_ context.Context, err error,
|
||||
) (msg string, safe []string, _ proto.Message) {
|
||||
msg = err.Error()
|
||||
iErr := err.(interface{ StackTrace() pkgErr.StackTrace })
|
||||
safeDetails := []string{fmt.Sprintf("%+v", iErr.StackTrace())}
|
||||
return msg, safeDetails, nil
|
||||
}
|
||||
|
||||
// errors.withMessage from github.com/pkg/errors can be encoded
|
||||
// exactly because it just has a message prefix. The base encoding
|
||||
// logic in EncodeWrapper() is able to extract everything from it.
|
||||
|
||||
// we can then decode it exactly.
|
||||
func decodeWithMessage(
|
||||
_ context.Context, cause error, msgPrefix string, _ []string, _ proto.Message,
|
||||
) error {
|
||||
return pkgErr.WithMessage(cause, msgPrefix)
|
||||
}
|
||||
|
||||
// errors.withStack from github.com/pkg/errors cannot be encoded
|
||||
// exactly because it includes a non-serializable stack trace
|
||||
// object. In order to work with it, we encode it by dumping
|
||||
// the stack trace in a safe reporting detail field, and decode
|
||||
// it as an opaqueWrapper instance in this package.
|
||||
|
||||
func encodePkgWithStack(
|
||||
_ context.Context, err error,
|
||||
) (msgPrefix string, safe []string, _ proto.Message) {
|
||||
iErr := err.(interface{ StackTrace() pkgErr.StackTrace })
|
||||
safeDetails := []string{fmt.Sprintf("%+v", iErr.StackTrace())}
|
||||
return "" /* withStack does not have a message prefix */, safeDetails, nil
|
||||
}
|
||||
|
||||
func encodePathError(
|
||||
_ context.Context, err error,
|
||||
) (msgPrefix string, safe []string, details proto.Message) {
|
||||
p := err.(*os.PathError)
|
||||
msg := p.Op + " " + p.Path
|
||||
details = &errorspb.StringsPayload{
|
||||
Details: []string{p.Op, p.Path},
|
||||
}
|
||||
return msg, []string{p.Op}, details
|
||||
}
|
||||
|
||||
func decodePathError(
|
||||
_ context.Context, cause error, _ string, _ []string, payload proto.Message,
|
||||
) (result error) {
|
||||
m, ok := payload.(*errorspb.StringsPayload)
|
||||
if !ok || len(m.Details) < 2 {
|
||||
// If this ever happens, this means some version of the library
|
||||
// (presumably future) changed the payload type, and we're
|
||||
// receiving this here. In this case, give up and let
|
||||
// DecodeError use the opaque type.
|
||||
return nil
|
||||
}
|
||||
return &os.PathError{
|
||||
Op: m.Details[0],
|
||||
Path: m.Details[1],
|
||||
Err: cause,
|
||||
}
|
||||
}
|
||||
|
||||
func encodeLinkError(
|
||||
_ context.Context, err error,
|
||||
) (msgPrefix string, safe []string, details proto.Message) {
|
||||
p := err.(*os.LinkError)
|
||||
msg := p.Op + " " + p.Old + " " + p.New
|
||||
details = &errorspb.StringsPayload{
|
||||
Details: []string{p.Op, p.Old, p.New},
|
||||
}
|
||||
return msg, []string{p.Op}, details
|
||||
}
|
||||
|
||||
func decodeLinkError(
|
||||
_ context.Context, cause error, _ string, _ []string, payload proto.Message,
|
||||
) (result error) {
|
||||
m, ok := payload.(*errorspb.StringsPayload)
|
||||
if !ok || len(m.Details) < 3 {
|
||||
// If this ever happens, this means some version of the library
|
||||
// (presumably future) changed the payload type, and we're
|
||||
// receiving this here. In this case, give up and let
|
||||
// DecodeError use the opaque type.
|
||||
return nil
|
||||
}
|
||||
return &os.LinkError{
|
||||
Op: m.Details[0],
|
||||
Old: m.Details[1],
|
||||
New: m.Details[2],
|
||||
Err: cause,
|
||||
}
|
||||
}
|
||||
|
||||
func encodeSyscallError(
|
||||
_ context.Context, err error,
|
||||
) (msgPrefix string, safe []string, details proto.Message) {
|
||||
p := err.(*os.SyscallError)
|
||||
return p.Syscall, nil, nil
|
||||
}
|
||||
|
||||
func decodeSyscallError(
|
||||
_ context.Context, cause error, msg string, _ []string, _ proto.Message,
|
||||
) (result error) {
|
||||
return os.NewSyscallError(msg, cause)
|
||||
}
|
||||
|
||||
// OpaqueErrno represents a syscall.Errno error object that
|
||||
// was constructed on a different OS/platform combination.
|
||||
type OpaqueErrno struct {
|
||||
msg string
|
||||
details *errorspb.ErrnoPayload
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (o *OpaqueErrno) Error() string { return o.msg }
|
||||
|
||||
// Is tests whether this opaque errno object represents a special os error type.
|
||||
func (o *OpaqueErrno) Is(target error) bool {
|
||||
return (target == os.ErrPermission && o.details.IsPermission) ||
|
||||
(target == os.ErrExist && o.details.IsExist) ||
|
||||
(target == os.ErrNotExist && o.details.IsNotExist)
|
||||
}
|
||||
|
||||
// Temporary tests whether this opaque errno object encodes a temporary error.
|
||||
func (o *OpaqueErrno) Temporary() bool { return o.details.IsTemporary }
|
||||
|
||||
// Timeout tests whether this opaque errno object encodes a timeout error.
|
||||
func (o *OpaqueErrno) Timeout() bool { return o.details.IsTimeout }
|
||||
|
||||
func encodeOpaqueErrno(
|
||||
_ context.Context, err error,
|
||||
) (msg string, safe []string, payload proto.Message) {
|
||||
e := err.(*OpaqueErrno)
|
||||
return e.Error(), []string{e.Error()}, e.details
|
||||
}
|
||||
|
||||
func init() {
|
||||
baseErr := goErr.New("")
|
||||
RegisterLeafDecoder(GetTypeKey(baseErr), decodeErrorString)
|
||||
|
||||
RegisterLeafDecoder(GetTypeKey(context.DeadlineExceeded), decodeDeadlineExceeded)
|
||||
|
||||
pkgE := pkgErr.New("")
|
||||
RegisterLeafEncoder(GetTypeKey(pkgE), encodePkgFundamental)
|
||||
|
||||
RegisterWrapperDecoder(GetTypeKey(pkgErr.WithMessage(baseErr, "")), decodeWithMessage)
|
||||
|
||||
ws := pkgErr.WithStack(baseErr)
|
||||
RegisterWrapperEncoder(GetTypeKey(ws), encodePkgWithStack)
|
||||
|
||||
registerOsPathErrorMigration() // Needed for Go 1.16.
|
||||
pKey := GetTypeKey(&os.PathError{})
|
||||
RegisterWrapperEncoder(pKey, encodePathError)
|
||||
RegisterWrapperDecoder(pKey, decodePathError)
|
||||
|
||||
pKey = GetTypeKey(&os.LinkError{})
|
||||
RegisterWrapperEncoder(pKey, encodeLinkError)
|
||||
RegisterWrapperDecoder(pKey, decodeLinkError)
|
||||
pKey = GetTypeKey(&os.SyscallError{})
|
||||
RegisterWrapperEncoder(pKey, encodeSyscallError)
|
||||
RegisterWrapperDecoder(pKey, decodeSyscallError)
|
||||
|
||||
RegisterLeafEncoder(GetTypeKey(&OpaqueErrno{}), encodeOpaqueErrno)
|
||||
}
|
|
@ -0,0 +1,66 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
// +build !plan9
|
||||
|
||||
package errbase
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
|
||||
"github.com/cockroachdb/errors/errorspb"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
const thisArch = runtime.GOOS + ":" + runtime.GOARCH
|
||||
|
||||
func encodeErrno(_ context.Context, err error) (msg string, safe []string, payload proto.Message) {
|
||||
e := err.(syscall.Errno)
|
||||
payload = &errorspb.ErrnoPayload{
|
||||
OrigErrno: int64(e),
|
||||
Arch: thisArch,
|
||||
IsPermission: e.Is(os.ErrPermission),
|
||||
IsExist: e.Is(os.ErrExist),
|
||||
IsNotExist: e.Is(os.ErrNotExist),
|
||||
IsTimeout: e.Timeout(),
|
||||
IsTemporary: e.Temporary(),
|
||||
}
|
||||
return e.Error(), []string{e.Error()}, payload
|
||||
}
|
||||
|
||||
func decodeErrno(_ context.Context, msg string, _ []string, payload proto.Message) error {
|
||||
m, ok := payload.(*errorspb.ErrnoPayload)
|
||||
if !ok {
|
||||
// If this ever happens, this means some version of the library
|
||||
// (presumably future) changed the payload type, and we're
|
||||
// receiving this here. In this case, give up and let
|
||||
// DecodeError use the opaque type.
|
||||
return nil
|
||||
}
|
||||
if m.Arch != thisArch {
|
||||
// The errno object is coming from a different platform. We'll
|
||||
// keep it opaque here.
|
||||
return &OpaqueErrno{msg: msg, details: m}
|
||||
}
|
||||
return syscall.Errno(m.OrigErrno)
|
||||
}
|
||||
|
||||
func init() {
|
||||
pKey := GetTypeKey(syscall.Errno(0))
|
||||
RegisterLeafEncoder(pKey, encodeErrno)
|
||||
RegisterLeafDecoder(pKey, decodeErrno)
|
||||
}
|
|
@ -0,0 +1,206 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package errbase
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors/errorspb"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/gogo/protobuf/types"
|
||||
)
|
||||
|
||||
// DecodeError decodes an error.
|
||||
//
|
||||
// Can only be called if the EncodedError is set (see IsSet()).
|
||||
func DecodeError(ctx context.Context, enc EncodedError) error {
|
||||
if w := enc.GetWrapper(); w != nil {
|
||||
return decodeWrapper(ctx, w)
|
||||
}
|
||||
return decodeLeaf(ctx, enc.GetLeaf())
|
||||
}
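// Illustrative sketch (not part of the upstream file): the intended use of
// EncodeError/DecodeError across a process boundary. EncodedError is a plain
// protobuf message, so it can be marshaled with the gogo proto package that
// is already imported here. The helper name is hypothetical.
func exampleWireRoundTrip(ctx context.Context, err error) (error, error) {
	enc := EncodeError(ctx, err) // flatten into an errorspb.EncodedError
	buf, mErr := proto.Marshal(&enc)
	if mErr != nil {
		return nil, mErr
	}
	// ... send buf over the network ...
	var received EncodedError
	if uErr := proto.Unmarshal(buf, &received); uErr != nil {
		return nil, uErr
	}
	// Types without a registered decoder come back as opaque leaves or
	// wrappers, which re-encode losslessly on a further hop.
	return DecodeError(ctx, received), nil
}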
|
||||
|
||||
func decodeLeaf(ctx context.Context, enc *errorspb.EncodedErrorLeaf) error {
|
||||
// In case there is a detailed payload, decode it.
|
||||
var payload proto.Message
|
||||
if enc.Details.FullDetails != nil {
|
||||
var d types.DynamicAny
|
||||
err := types.UnmarshalAny(enc.Details.FullDetails, &d)
|
||||
if err != nil {
|
||||
// It's OK if we can't decode. We'll use
|
||||
// the opaque type below.
|
||||
warningFn(ctx, "error while unmarshalling error: %+v", err)
|
||||
} else {
|
||||
payload = d.Message
|
||||
}
|
||||
}
|
||||
|
||||
// Do we have a leaf decoder for this type?
|
||||
typeKey := TypeKey(enc.Details.ErrorTypeMark.FamilyName)
|
||||
if decoder, ok := leafDecoders[typeKey]; ok {
|
||||
// Yes, use it.
|
||||
genErr := decoder(ctx, enc.Message, enc.Details.ReportablePayload, payload)
|
||||
if genErr != nil {
|
||||
// Decoding succeeded. Use this.
|
||||
return genErr
|
||||
}
|
||||
// Decoding failed, we'll drop through to opaqueLeaf{} below.
|
||||
} else if decoder, ok := multiCauseDecoders[typeKey]; ok {
|
||||
causes := make([]error, len(enc.MultierrorCauses))
|
||||
for i, e := range enc.MultierrorCauses {
|
||||
causes[i] = DecodeError(ctx, *e)
|
||||
}
|
||||
genErr := decoder(ctx, causes, enc.Message, enc.Details.ReportablePayload, payload)
|
||||
if genErr != nil {
|
||||
return genErr
|
||||
}
|
||||
} else {
|
||||
// Shortcut for non-registered proto-encodable error types:
|
||||
// if it already implements `error`, it's good to go.
|
||||
if e, ok := payload.(error); ok {
|
||||
// yes: we're done!
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
if len(enc.MultierrorCauses) > 0 {
|
||||
causes := make([]error, len(enc.MultierrorCauses))
|
||||
for i, e := range enc.MultierrorCauses {
|
||||
causes[i] = DecodeError(ctx, *e)
|
||||
}
|
||||
leaf := &opaqueLeafCauses{
|
||||
causes: causes,
|
||||
}
|
||||
leaf.msg = enc.Message
|
||||
leaf.details = enc.Details
|
||||
return leaf
|
||||
}
|
||||
|
||||
// No decoder and no error type: we'll keep what we received and
|
||||
// make it ready to re-encode exactly (if the error travels over the
|
||||
// network again).
|
||||
return &opaqueLeaf{
|
||||
msg: enc.Message,
|
||||
details: enc.Details,
|
||||
}
|
||||
}
|
||||
|
||||
func decodeWrapper(ctx context.Context, enc *errorspb.EncodedWrapper) error {
|
||||
// First decode the cause.
|
||||
cause := DecodeError(ctx, enc.Cause)
|
||||
|
||||
// In case there is a detailed payload, decode it.
|
||||
var payload proto.Message
|
||||
if enc.Details.FullDetails != nil {
|
||||
var d types.DynamicAny
|
||||
err := types.UnmarshalAny(enc.Details.FullDetails, &d)
|
||||
if err != nil {
|
||||
// It's OK if we can't decode. We'll use
|
||||
// the opaque type below.
|
||||
warningFn(ctx, "error while unmarshalling wrapper error: %+v", err)
|
||||
} else {
|
||||
payload = d.Message
|
||||
}
|
||||
}
|
||||
|
||||
// Do we have a wrapper decoder for this?
|
||||
typeKey := TypeKey(enc.Details.ErrorTypeMark.FamilyName)
|
||||
if decoder, ok := decoders[typeKey]; ok {
|
||||
// Yes, use it.
|
||||
genErr := decoder(ctx, cause, enc.Message, enc.Details.ReportablePayload, payload)
|
||||
if genErr != nil {
|
||||
// Decoding succeeded. Use this.
|
||||
return genErr
|
||||
}
|
||||
// Decoding failed, we'll drop through to opaqueWrapper{} below.
|
||||
}
|
||||
|
||||
// Otherwise, preserve all details about the original object.
|
||||
return &opaqueWrapper{
|
||||
cause: cause,
|
||||
prefix: enc.Message,
|
||||
details: enc.Details,
|
||||
messageType: MessageType(enc.MessageType),
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterLeafDecoder can be used to register new leaf error types to
|
||||
// the library. Registered types will be decoded using their own
|
||||
// Go type when an error is decoded. Wrappers that have not been
|
||||
// registered will be decoded using the opaqueLeaf type.
|
||||
//
|
||||
// Note: if the error type has been migrated from a previous location
|
||||
// or a different type, ensure that RegisterTypeMigration() was called
|
||||
// prior to RegisterLeafDecoder().
|
||||
func RegisterLeafDecoder(theType TypeKey, decoder LeafDecoder) {
|
||||
if decoder == nil {
|
||||
delete(leafDecoders, theType)
|
||||
} else {
|
||||
leafDecoders[theType] = decoder
|
||||
}
|
||||
}
|
||||
|
||||
// LeafDecoder is to be provided (via RegisterLeafDecoder above)
|
||||
// by additional wrapper types not yet known to this library.
|
||||
// A nil return indicates that decoding was not successful.
|
||||
type LeafDecoder = func(ctx context.Context, msg string, safeDetails []string, payload proto.Message) error
|
||||
|
||||
// registry for RegisterLeafDecoder.
|
||||
var leafDecoders = map[TypeKey]LeafDecoder{}
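// Illustrative sketch (not part of the upstream file): registering a decoder
// for a hypothetical custom leaf error type, so that peers which know the
// type get it back as a concrete value instead of an opaqueLeaf. A matching
// RegisterLeafEncoder call would live next to the error's definition.
type myLeafError struct{ msg string }

func (e *myLeafError) Error() string { return e.msg }

func init() {
	RegisterLeafDecoder(GetTypeKey(&myLeafError{}), func(
		_ context.Context, msg string, _ []string, _ proto.Message,
	) error {
		return &myLeafError{msg: msg}
	})
}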
|
||||
|
||||
// RegisterWrapperDecoder can be used to register new wrapper types to
|
||||
// the library. Registered wrappers will be decoded using their own
|
||||
// Go type when an error is decoded. Wrappers that have not been
|
||||
// registered will be decoded using the opaqueWrapper type.
|
||||
//
|
||||
// Note: if the error type has been migrated from a previous location
|
||||
// or a different type, ensure that RegisterTypeMigration() was called
|
||||
// prior to RegisterWrapperDecoder().
|
||||
func RegisterWrapperDecoder(theType TypeKey, decoder WrapperDecoder) {
|
||||
if decoder == nil {
|
||||
delete(decoders, theType)
|
||||
} else {
|
||||
decoders[theType] = decoder
|
||||
}
|
||||
}
|
||||
|
||||
// WrapperDecoder is to be provided (via RegisterWrapperDecoder above)
|
||||
// by additional wrapper types not yet known to this library.
|
||||
// A nil return indicates that decoding was not successful.
|
||||
type WrapperDecoder = func(ctx context.Context, cause error, msgPrefix string, safeDetails []string, payload proto.Message) error
|
||||
|
||||
// registry for RegisterWrapperType.
|
||||
var decoders = map[TypeKey]WrapperDecoder{}
|
||||
|
||||
// MultiCauseDecoder is to be provided (via RegisterMultiCauseDecoder
|
||||
// above) by additional multi-cause wrapper types not yet known by the
|
||||
// library. A nil return indicates that decoding was not successful.
|
||||
type MultiCauseDecoder = func(ctx context.Context, causes []error, msgPrefix string, safeDetails []string, payload proto.Message) error
|
||||
|
||||
// registry for RegisterMultiCauseDecoder.
|
||||
var multiCauseDecoders = map[TypeKey]MultiCauseDecoder{}
|
||||
|
||||
// RegisterMultiCauseDecoder can be used to register new multi-cause
|
||||
// wrapper types to the library. Registered wrappers will be decoded
|
||||
// using their own Go type when an error is decoded. Multi-cause
|
||||
// wrappers that have not been registered will be decoded using the
|
||||
// opaqueWrapper type.
|
||||
func RegisterMultiCauseDecoder(theType TypeKey, decoder MultiCauseDecoder) {
|
||||
if decoder == nil {
|
||||
delete(multiCauseDecoders, theType)
|
||||
} else {
|
||||
multiCauseDecoders[theType] = decoder
|
||||
}
|
||||
}
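// Illustrative sketch (not part of the upstream file): a hypothetical
// multi-cause wrapper (Unwrap() []error) and its decoder registration. The
// encoder side would be registered with RegisterMultiCauseEncoder; the causes
// themselves are encoded and decoded by the library, as shown in decodeLeaf
// above.
type myJoinError struct {
	msg    string
	causes []error
}

func (e *myJoinError) Error() string   { return e.msg }
func (e *myJoinError) Unwrap() []error { return e.causes }

func init() {
	RegisterMultiCauseDecoder(GetTypeKey(&myJoinError{}), func(
		_ context.Context, causes []error, msg string, _ []string, _ proto.Message,
	) error {
		return &myJoinError{msg: msg, causes: causes}
	})
}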
|
|
@ -0,0 +1,438 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package errbase
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/cockroachdb/errors/errorspb"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/gogo/protobuf/types"
|
||||
)
|
||||
|
||||
// EncodedError is the type of an encoded (and protobuf-encodable) error.
|
||||
type EncodedError = errorspb.EncodedError
|
||||
|
||||
// EncodeError encodes an error.
|
||||
func EncodeError(ctx context.Context, err error) EncodedError {
|
||||
if cause := UnwrapOnce(err); cause != nil {
|
||||
return encodeWrapper(ctx, err, cause)
|
||||
}
|
||||
return encodeLeaf(ctx, err, UnwrapMulti(err))
|
||||
}
|
||||
|
||||
// encodeLeaf encodes a leaf error. This function accepts a `causes`
|
||||
// argument because we encode multi-cause errors using the Leaf
|
||||
// protobuf. This was done to enable backwards compatibility when
|
||||
// introducing this functionality since the Wrapper type already has a
|
||||
// required single `cause` field.
|
||||
func encodeLeaf(ctx context.Context, err error, causes []error) EncodedError {
|
||||
var msg string
|
||||
var details errorspb.EncodedErrorDetails
|
||||
|
||||
if e, ok := err.(*opaqueLeaf); ok {
|
||||
msg = e.msg
|
||||
details = e.details
|
||||
} else if e, ok := err.(*opaqueLeafCauses); ok {
|
||||
msg = e.msg
|
||||
details = e.details
|
||||
} else {
|
||||
details.OriginalTypeName, details.ErrorTypeMark.FamilyName, details.ErrorTypeMark.Extension = getTypeDetails(err, false /*onlyFamily*/)
|
||||
|
||||
var payload proto.Message
|
||||
|
||||
// If we have a manually registered encoder, use that.
|
||||
typeKey := TypeKey(details.ErrorTypeMark.FamilyName)
|
||||
if enc, ok := leafEncoders[typeKey]; ok {
|
||||
msg, details.ReportablePayload, payload = enc(ctx, err)
|
||||
} else {
|
||||
// No encoder. Let's try to manually extract fields.
|
||||
|
||||
// The message comes from Error(). Simple.
|
||||
msg = err.Error()
|
||||
|
||||
// If there are known safe details, use them.
|
||||
if s, ok := err.(SafeDetailer); ok {
|
||||
details.ReportablePayload = s.SafeDetails()
|
||||
}
|
||||
|
||||
// If it's also a protobuf message, we'll use that as
|
||||
// payload. DecodeLeaf() will know how to turn that back into a
|
||||
// full error if there is no decoder.
|
||||
payload, _ = err.(proto.Message)
|
||||
}
|
||||
// If there is a detail payload, encode it.
|
||||
details.FullDetails = encodeAsAny(ctx, err, payload)
|
||||
}
|
||||
|
||||
var cs []*EncodedError
|
||||
if len(causes) > 0 {
|
||||
cs = make([]*EncodedError, len(causes))
|
||||
for i, ee := range causes {
|
||||
ee := EncodeError(ctx, ee)
|
||||
cs[i] = &ee
|
||||
}
|
||||
}
|
||||
|
||||
return EncodedError{
|
||||
Error: &errorspb.EncodedError_Leaf{
|
||||
Leaf: &errorspb.EncodedErrorLeaf{
|
||||
Message: msg,
|
||||
Details: details,
|
||||
MultierrorCauses: cs,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// warningFn can be overridden with a suitable logging function using
|
||||
// SetWarningFn() below.
|
||||
var warningFn = func(_ context.Context, format string, args ...interface{}) {
|
||||
log.Printf(format, args...)
|
||||
}
|
||||
|
||||
// SetWarningFn enables configuration of the warning function.
|
||||
func SetWarningFn(fn func(context.Context, string, ...interface{})) {
|
||||
warningFn = fn
|
||||
}
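// Illustrative sketch (not part of the upstream file): redirecting the
// warnings above into a caller-provided logger. Any printf-style function
// works; the parameter here is hypothetical.
func exampleRedirectWarnings(logf func(format string, args ...interface{})) {
	SetWarningFn(func(_ context.Context, format string, args ...interface{}) {
		logf("errbase: "+format, args...)
	})
}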
|
||||
|
||||
func encodeAsAny(ctx context.Context, err error, payload proto.Message) *types.Any {
|
||||
if payload == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
any, marshalErr := types.MarshalAny(payload)
|
||||
if marshalErr != nil {
|
||||
warningFn(ctx,
|
||||
"error %+v (%T) announces proto message, but marshaling fails: %+v",
|
||||
err, err, marshalErr)
|
||||
return nil
|
||||
}
|
||||
|
||||
return any
|
||||
}
|
||||
|
||||
// encodeWrapper encodes an error wrapper.
|
||||
func encodeWrapper(ctx context.Context, err, cause error) EncodedError {
|
||||
var msg string
|
||||
var details errorspb.EncodedErrorDetails
|
||||
messageType := Prefix
|
||||
|
||||
if e, ok := err.(*opaqueWrapper); ok {
|
||||
// We delegate all knowledge of the error string
|
||||
// to the original encoder and do not try to re-engineer
|
||||
// the prefix out of the error. This helps maintain
|
||||
// backward compatibility with earlier versions of the
|
||||
// encoder which don't have any understanding of
|
||||
// error string ownership by the wrapper.
|
||||
msg = e.prefix
|
||||
details = e.details
|
||||
messageType = e.messageType
|
||||
} else {
|
||||
details.OriginalTypeName, details.ErrorTypeMark.FamilyName, details.ErrorTypeMark.Extension = getTypeDetails(err, false /*onlyFamily*/)
|
||||
|
||||
var payload proto.Message
|
||||
|
||||
// If we have a manually registered encoder, use that.
|
||||
typeKey := TypeKey(details.ErrorTypeMark.FamilyName)
|
||||
if enc, ok := encoders[typeKey]; ok {
|
||||
msg, details.ReportablePayload, payload, messageType = enc(ctx, err)
|
||||
} else {
|
||||
// No encoder.
|
||||
// In that case, we'll try to compute a message prefix
|
||||
// manually.
|
||||
msg, messageType = extractPrefix(err, cause)
|
||||
|
||||
// If there are known safe details, use them.
|
||||
if s, ok := err.(SafeDetailer); ok {
|
||||
details.ReportablePayload = s.SafeDetails()
|
||||
}
|
||||
|
||||
// That's all we can get.
|
||||
}
|
||||
// If there is a detail payload, encode it.
|
||||
details.FullDetails = encodeAsAny(ctx, err, payload)
|
||||
}
|
||||
|
||||
return EncodedError{
|
||||
Error: &errorspb.EncodedError_Wrapper{
|
||||
Wrapper: &errorspb.EncodedWrapper{
|
||||
Cause: EncodeError(ctx, cause),
|
||||
Message: msg,
|
||||
Details: details,
|
||||
MessageType: errorspb.MessageType(messageType),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// extractPrefix extracts the prefix from a wrapper's error message.
|
||||
// For example,
|
||||
//
|
||||
// err := errors.New("bar")
|
||||
// err = errors.Wrap(err, "foo")
|
||||
// extractPrefix(err)
|
||||
//
|
||||
// returns "foo".
|
||||
//
|
||||
// If a presumed wrapper does not have a message prefix, it is assumed
|
||||
// to override the entire error message and `extractPrefix` returns
|
||||
// the entire message and `FullMessage` to signify that the causes
|
||||
// should not be appended to it.
|
||||
func extractPrefix(err, cause error) (string, MessageType) {
|
||||
causeSuffix := cause.Error()
|
||||
errMsg := err.Error()
|
||||
|
||||
if strings.HasSuffix(errMsg, causeSuffix) {
|
||||
prefix := errMsg[:len(errMsg)-len(causeSuffix)]
|
||||
// If error msg matches exactly then this is a wrapper
|
||||
// with no message of its own.
|
||||
if len(prefix) == 0 {
|
||||
return "", Prefix
|
||||
}
|
||||
if strings.HasSuffix(prefix, ": ") {
|
||||
return prefix[:len(prefix)-2], Prefix
|
||||
}
|
||||
}
|
||||
// If we don't have the cause as a suffix, then we have
|
||||
// some other string as our error msg, preserve that and
|
||||
// mark as override
|
||||
return errMsg, FullMessage
|
||||
}
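// Illustrative sketch (not part of the upstream file, and assumes the
// standard "errors" and "fmt" packages in addition to the imports above):
// how extractPrefix behaves on a conventional wrapper versus an error whose
// message does not end in its cause's message.
func exampleExtractPrefix() {
	cause := errors.New("bar")
	wrapped := fmt.Errorf("foo: %w", cause)
	prefix, mt := extractPrefix(wrapped, cause)
	// prefix == "foo", mt == Prefix: the cause's message is appended on display.

	override := fmt.Errorf("entirely rewritten message")
	prefix2, mt2 := extractPrefix(override, cause)
	// prefix2 == "entirely rewritten message", mt2 == FullMessage: the causes
	// are not appended to it.
	_, _, _, _ = prefix, mt, prefix2, mt2
}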
|
||||
|
||||
func getTypeDetails(
|
||||
err error, onlyFamily bool,
|
||||
) (origTypeName string, typeKeyFamily string, typeKeyExtension string) {
|
||||
// If we have received an error of type not known locally,
|
||||
// we still know its type name. Return that.
|
||||
switch t := err.(type) {
|
||||
case *opaqueLeaf:
|
||||
return t.details.OriginalTypeName, t.details.ErrorTypeMark.FamilyName, t.details.ErrorTypeMark.Extension
|
||||
case *opaqueLeafCauses:
|
||||
return t.details.OriginalTypeName, t.details.ErrorTypeMark.FamilyName, t.details.ErrorTypeMark.Extension
|
||||
case *opaqueWrapper:
|
||||
return t.details.OriginalTypeName, t.details.ErrorTypeMark.FamilyName, t.details.ErrorTypeMark.Extension
|
||||
}
|
||||
|
||||
// Compute the full error name, for reporting and printing details.
|
||||
tn := getFullTypeName(err)
|
||||
// Compute a family name, used to find decoders and to compare error identities.
|
||||
fm := tn
|
||||
if prevKey, ok := backwardRegistry[TypeKey(tn)]; ok {
|
||||
fm = string(prevKey)
|
||||
}
|
||||
|
||||
if onlyFamily {
|
||||
return tn, fm, ""
|
||||
}
|
||||
|
||||
// If the error has an extra type marker, add it.
|
||||
// This is not used by the base functionality but
|
||||
// is hooked into by the barrier subsystem.
|
||||
var em string
|
||||
if tm, ok := err.(TypeKeyMarker); ok {
|
||||
em = tm.ErrorKeyMarker()
|
||||
}
|
||||
return tn, fm, em
|
||||
}
|
||||
|
||||
// TypeKeyMarker can be implemented by errors that wish to extend
|
||||
// their type name as seen by GetTypeKey().
|
||||
//
|
||||
// Note: the key marker is considered safe for reporting and
|
||||
// is included in sentry reports.
|
||||
type TypeKeyMarker interface {
|
||||
ErrorKeyMarker() string
|
||||
}
|
||||
|
||||
func getFullTypeName(err error) string {
|
||||
t := reflect.TypeOf(err)
|
||||
pkgPath := getPkgPath(t)
|
||||
return makeTypeKey(pkgPath, t.String())
|
||||
}
|
||||
|
||||
func makeTypeKey(pkgPath, typeNameString string) string {
|
||||
return pkgPath + "/" + typeNameString
|
||||
}
|
||||
|
||||
// getPkgPath extracts the package path for a Go type. We'll do some
|
||||
// extra work for typical types that did not get a name, for example
|
||||
// *E has the package path of E.
|
||||
func getPkgPath(t reflect.Type) string {
|
||||
pkgPath := t.PkgPath()
|
||||
if pkgPath != "" {
|
||||
return pkgPath
|
||||
}
|
||||
// Try harder.
|
||||
switch t.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return getPkgPath(t.Elem())
|
||||
}
|
||||
// Nothing to report.
|
||||
return ""
|
||||
}
|
||||
|
||||
// TypeKey identifies an error for the purpose of looking up decoders.
|
||||
// It is equivalent to the "family name" in ErrorTypeMarker.
|
||||
type TypeKey string
|
||||
|
||||
// GetTypeKey retrieves the type key for a given error object. This
|
||||
// is meant for use in combination with the Register functions.
|
||||
func GetTypeKey(err error) TypeKey {
|
||||
_, familyName, _ := getTypeDetails(err, true /*onlyFamily*/)
|
||||
return TypeKey(familyName)
|
||||
}
|
||||
|
||||
// GetTypeMark retrieves the ErrorTypeMark for a given error object.
|
||||
// This is meant for use in the markers sub-package.
|
||||
func GetTypeMark(err error) errorspb.ErrorTypeMark {
|
||||
_, familyName, extension := getTypeDetails(err, false /*onlyFamily*/)
|
||||
return errorspb.ErrorTypeMark{FamilyName: familyName, Extension: extension}
|
||||
}
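// Illustrative sketch (not part of the upstream file): what a computed key
// looks like. The family name joins the package path and the Go type string
// (see makeTypeKey below); the type here is hypothetical.
type myMarkedError struct{}

func (myMarkedError) Error() string { return "boom" }

func exampleTypeKey() TypeKey {
	// For a type defined in this package, this yields
	// "github.com/cockroachdb/errors/errbase/errbase.myMarkedError".
	return GetTypeKey(myMarkedError{})
}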
|
||||
|
||||
// RegisterLeafEncoder can be used to register new leaf error types to
|
||||
// the library. Registered types will be encoded using their own
|
||||
// Go type when an error is encoded. Wrappers that have not been
|
||||
// registered will be encoded using the opaqueLeaf type.
|
||||
//
|
||||
// Note: if the error type has been migrated from a previous location
|
||||
// or a different type, ensure that RegisterTypeMigration() was called
|
||||
// prior to RegisterLeafEncoder().
|
||||
func RegisterLeafEncoder(theType TypeKey, encoder LeafEncoder) {
|
||||
if encoder == nil {
|
||||
delete(leafEncoders, theType)
|
||||
} else {
|
||||
leafEncoders[theType] = encoder
|
||||
}
|
||||
}
|
||||
|
||||
// LeafEncoder is to be provided (via RegisterLeafEncoder above)
|
||||
// by additional wrapper types not yet known to this library.
|
||||
type LeafEncoder = func(ctx context.Context, err error) (msg string, safeDetails []string, payload proto.Message)
|
||||
|
||||
// registry for RegisterLeafEncoder.
|
||||
var leafEncoders = map[TypeKey]LeafEncoder{}
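// Illustrative sketch (not part of the upstream file): registering an encoder
// for a hypothetical leaf error that carries a machine-readable code. The code
// is reported as a safe detail; the message may contain PII and is therefore
// not repeated there. A matching RegisterLeafDecoder call would rebuild the
// value from msg and safeDetails on the receiving side.
type codedError struct {
	code string
	msg  string
}

func (e *codedError) Error() string { return e.msg }

func init() {
	RegisterLeafEncoder(GetTypeKey(&codedError{}), func(
		_ context.Context, err error,
	) (msg string, safeDetails []string, payload proto.Message) {
		e := err.(*codedError)
		return e.msg, []string{e.code}, nil
	})
}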
|
||||
|
||||
// RegisterMultiCauseEncoder can be used to register new multi-cause
|
||||
// error types to the library. Registered types will be encoded using
|
||||
// their own Go type when an error is encoded. Multi-cause wrappers
|
||||
// that have not been registered will be encoded using the
|
||||
// opaqueWrapper type.
|
||||
func RegisterMultiCauseEncoder(theType TypeKey, encoder MultiCauseEncoder) {
|
||||
// This implementation is a simple wrapper around `LeafEncoder`
|
||||
// because we implemented multi-cause error wrapper encoding into a
|
||||
// `Leaf` instead of a `Wrapper` for smoother backwards
|
||||
// compatibility support. Exposing this detail to consumers of the
|
||||
// API is confusing and hence avoided. The causes of the error are
|
||||
// encoded separately regardless of this encoder's implementation.
|
||||
RegisterLeafEncoder(theType, encoder)
|
||||
}
|
||||
|
||||
// MultiCauseEncoder is to be provided (via RegisterMultiCauseEncoder
|
||||
// above) by additional multi-cause wrapper types not yet known to this
|
||||
// library. The encoder will automatically extract and encode the
|
||||
// causes of this error by calling `Unwrap()` and expecting a slice of
|
||||
// errors.
|
||||
type MultiCauseEncoder = func(ctx context.Context, err error) (msg string, safeDetails []string, payload proto.Message)
|
||||
|
||||
// RegisterWrapperEncoder can be used to register new wrapper types to
|
||||
// the library. Registered wrappers will be encoded using their own
|
||||
// Go type when an error is encoded. Wrappers that have not been
|
||||
// registered will be encoded using the opaqueWrapper type.
|
||||
//
|
||||
// Note: if the error type has been migrated from a previous location
|
||||
// or a different type, ensure that RegisterTypeMigration() was called
|
||||
// prior to RegisterWrapperEncoder().
|
||||
func RegisterWrapperEncoder(theType TypeKey, encoder WrapperEncoder) {
|
||||
RegisterWrapperEncoderWithMessageType(
|
||||
theType,
|
||||
func(ctx context.Context, err error) (
|
||||
msgPrefix string,
|
||||
safeDetails []string,
|
||||
payload proto.Message,
|
||||
messageType MessageType,
|
||||
) {
|
||||
prefix, details, payload := encoder(ctx, err)
|
||||
return prefix, details, payload, messageType
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterWrapperEncoderWithMessageType can be used to register
|
||||
// new wrapper types to the library. Registered wrappers will be
|
||||
// encoded using their own Go type when an error is encoded. Wrappers
|
||||
// that have not been registered will be encoded using the
|
||||
// opaqueWrapper type.
|
||||
//
|
||||
// This function differs from RegisterWrapperEncoder by allowing the
|
||||
// caller to explicitly decide whether the wrapper owns the entire
|
||||
// error message or not. Otherwise, the relationship is inferred.
|
||||
//
|
||||
// Note: if the error type has been migrated from a previous location
|
||||
// or a different type, ensure that RegisterTypeMigration() was called
|
||||
// prior to RegisterWrapperEncoder().
|
||||
func RegisterWrapperEncoderWithMessageType(
|
||||
theType TypeKey, encoder WrapperEncoderWithMessageType,
|
||||
) {
|
||||
if encoder == nil {
|
||||
delete(encoders, theType)
|
||||
} else {
|
||||
encoders[theType] = encoder
|
||||
}
|
||||
}
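// Illustrative sketch (not part of the upstream file): a hypothetical wrapper
// that owns its entire message (it does not merely prefix its cause), so it
// is registered with an explicit FullMessage marker and display code will not
// append the cause's message a second time.
type overridingWrapper struct {
	msg   string
	cause error
}

func (w *overridingWrapper) Error() string { return w.msg }
func (w *overridingWrapper) Unwrap() error { return w.cause }

func init() {
	RegisterWrapperEncoderWithMessageType(
		GetTypeKey(&overridingWrapper{}),
		func(_ context.Context, err error) (
			msgPrefix string,
			safeDetails []string,
			payload proto.Message,
			messageType MessageType,
		) {
			w := err.(*overridingWrapper)
			return w.msg, nil, nil, FullMessage
		})
}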
|
||||
|
||||
// WrapperEncoder is to be provided (via RegisterWrapperEncoder above)
|
||||
// by additional wrapper types not yet known to this library.
|
||||
type WrapperEncoder func(ctx context.Context, err error) (
|
||||
msgPrefix string,
|
||||
safeDetails []string,
|
||||
payload proto.Message,
|
||||
)
|
||||
|
||||
// MessageType is used to encode information about an error message
|
||||
// within a wrapper error type. This information is used to affect
|
||||
// display logic.
|
||||
type MessageType errorspb.MessageType
|
||||
|
||||
// Values below should match the ones in errorspb.MessageType for
|
||||
// direct conversion.
|
||||
const (
|
||||
// Prefix denotes an error message that should be prepended to the
|
||||
// message of its cause.
|
||||
Prefix MessageType = MessageType(errorspb.MessageType_PREFIX)
|
||||
// FullMessage denotes an error message that contains the text of its
|
||||
// causes and can be displayed standalone.
|
||||
FullMessage = MessageType(errorspb.MessageType_FULL_MESSAGE)
|
||||
)
|
||||
|
||||
// WrapperEncoderWithMessageType is to be provided (via
|
||||
// RegisterWrapperEncoderWithMessageType above) by additional wrapper
|
||||
// types not yet known to this library. This encoder returns an
|
||||
// additional enum which indicates whether the wrapper owns the error
|
||||
// message completely instead of simply being a prefix with the error
|
||||
// message of its causes appended to it. This information is encoded
|
||||
// along with the prefix in order to provide context during error
|
||||
// display.
|
||||
type WrapperEncoderWithMessageType func(ctx context.Context, err error) (
|
||||
msgPrefix string,
|
||||
safeDetails []string,
|
||||
payload proto.Message,
|
||||
messageType MessageType,
|
||||
)
|
||||
|
||||
// registry for RegisterWrapperType.
|
||||
var encoders = map[TypeKey]WrapperEncoderWithMessageType{}
|
|
@ -0,0 +1,990 @@
|
|||
// Copyright 2019 The Cockroach Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
// This file is forked and modified from golang.org/x/xerrors,
|
||||
// at commit 3ee3066db522c6628d440a3a91c4abdd7f5ef22f (2019-05-10).
|
||||
// From the original code:
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
//
|
||||
// Changes specific to this fork marked as inline comments.
|
||||
|
||||
package errbase
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/cockroachdb/redact"
|
||||
"github.com/kr/pretty"
|
||||
pkgErr "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// FormatError formats an error according to s and verb.
|
||||
// This is a helper meant for use when implementing the fmt.Formatter
|
||||
// interface on custom error objects.
|
||||
//
|
||||
// If the error implements errors.Formatter, FormatError calls its
|
||||
// FormatError method with an errors.Printer configured according
|
||||
// to s and verb, and writes the result to s.
|
||||
//
|
||||
// Otherwise, if it is a wrapper, FormatError prints out its error prefix,
|
||||
// then recurses on its cause.
|
||||
//
|
||||
// Otherwise, its Error() text is printed.
|
||||
func FormatError(err error, s fmt.State, verb rune) {
|
||||
formatErrorInternal(err, s, verb, false /* redactableOutput */)
|
||||
}
|
||||
|
||||
// FormatRedactableError formats an error as a safe object.
|
||||
//
|
||||
// Note that certain verb/flags combinations are currently not
|
||||
// supported, and result in a rendering that considers the entire
|
||||
// object as unsafe. For example, %q, %#v are not yet supported.
|
||||
func FormatRedactableError(err error, s redact.SafePrinter, verb rune) {
|
||||
formatErrorInternal(err, s, verb, true /* redactable */)
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Also inform the redact package of how to print an error
|
||||
// safely. This is used when an error is passed as argument
|
||||
// to one of the redact print functions.
|
||||
redact.RegisterRedactErrorFn(FormatRedactableError)
|
||||
}
|
||||
|
||||
// Formattable wraps an error into a fmt.Formatter which
|
||||
// will provide "smart" formatting even if the outer layer
|
||||
// of the error does not implement the Formatter interface.
|
||||
func Formattable(err error) fmt.Formatter {
|
||||
return &errorFormatter{err}
|
||||
}
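// Illustrative sketch (not part of the upstream file): the intended hook-up
// for a custom error type. Format delegates to FormatError above, Error
// reuses the same logic through Formattable, and the FormatError method
// (this package's Formatter interface) is what keeps that indirection from
// recursing. The type is hypothetical.
type exampleWrapper struct{ cause error }

func (e *exampleWrapper) Error() string                 { return fmt.Sprint(Formattable(e)) }
func (e *exampleWrapper) Unwrap() error                 { return e.cause }
func (e *exampleWrapper) Format(s fmt.State, verb rune) { FormatError(e, s, verb) }

func (e *exampleWrapper) FormatError(p Printer) (next error) {
	p.Printf("example wrapper")
	if p.Detail() {
		p.Printf("extra detail rendered only under %%+v")
	}
	// Returning the cause (rather than nil) keeps the cause's own short
	// message in the single-line output.
	return e.cause
}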
|
||||
|
||||
// formatErrorInternal is the shared logic between FormatError
|
||||
// and FormatErrorRedactable.
|
||||
//
|
||||
// When the redactableOutput argument is true, the fmt.State argument
|
||||
// is really a redact.SafePrinter and casted down as necessary.
|
||||
//
|
||||
// If verb and flags are not one of the supported error formatting
|
||||
// combinations (in particular, %q, %#v etc), then the redactableOutput
|
||||
// argument is ignored. This limitation may be lifted in a later
|
||||
// version.
|
||||
func formatErrorInternal(err error, s fmt.State, verb rune, redactableOutput bool) {
|
||||
// Assuming this function is only called from the Format method, and given
|
||||
// that FormatError takes precedence over Format, it cannot be called from
|
||||
// any package that supports errors.Formatter. It is therefore safe to
|
||||
// disregard that State may be a specific printer implementation and use one
|
||||
// of our choice instead.
|
||||
|
||||
p := state{State: s, redactableOutput: redactableOutput}
|
||||
|
||||
switch {
|
||||
case verb == 'v' && s.Flag('+') && !s.Flag('#'):
|
||||
// Here we are going to format as per %+v, into p.buf.
|
||||
//
|
||||
// We need to start with the innermost (root cause) error first,
|
||||
// then the layers of wrapping from innermost to outermost, so as
|
||||
// to enable stack trace de-duplication. This requires a
|
||||
// post-order traversal. Since we have a linked list, the best we
|
||||
// can do is a recursion.
|
||||
p.formatRecursive(
|
||||
err,
|
||||
true, /* isOutermost */
|
||||
true, /* withDetail */
|
||||
false, /* withDepth */
|
||||
0, /* depth */
|
||||
)
|
||||
|
||||
// We now have all the data, we can render the result.
|
||||
p.formatEntries(err)
|
||||
|
||||
// We're done formatting. Apply width/precision parameters.
|
||||
p.finishDisplay(verb)
|
||||
|
||||
case !redactableOutput && verb == 'v' && s.Flag('#'):
|
||||
// We only know how to process %#v if redactable output is not
|
||||
// requested. This is because the structured output may emit
|
||||
// arbitrary unsafe strings without redaction markers,
|
||||
// or improperly balanced/escaped redaction markers.
|
||||
if stringer, ok := err.(fmt.GoStringer); ok {
|
||||
io.WriteString(&p.finalBuf, stringer.GoString())
|
||||
} else {
|
||||
// Not a GoStringer: delegate to the pretty library.
|
||||
fmt.Fprintf(&p.finalBuf, "%# v", pretty.Formatter(err))
|
||||
}
|
||||
p.finishDisplay(verb)
|
||||
|
||||
case verb == 's' ||
|
||||
// We only handle %v/%+v or other combinations here; %#v is unsupported.
|
||||
(verb == 'v' && !s.Flag('#')) ||
|
||||
// If redactable output is not requested, then we also
|
||||
// know how to format %x/%X (print bytes of error message in hex)
|
||||
// and %q (quote the result).
|
||||
// If redactable output is requested, then we don't know
|
||||
// how to perform these exotic verbs, because they
|
||||
// may muck with the redaction markers. In this case,
|
||||
// we simply refuse the format as per the default clause below.
|
||||
(!redactableOutput && (verb == 'x' || verb == 'X' || verb == 'q')):
|
||||
// Only the error message.
|
||||
//
|
||||
// Use an intermediate buffer because there may be alignment
|
||||
// instructions to obey in the final rendering or
|
||||
// quotes to add (for %q).
|
||||
//
|
||||
// Conceptually, we could just do
|
||||
// p.buf.WriteString(err.Error())
|
||||
// However we also advertise that Error() can be implemented
|
||||
// by calling FormatError(), in which case we'd get an infinite
|
||||
// recursion. So we have no choice but to peel the data
|
||||
// and then assemble the pieces ourselves.
|
||||
p.formatRecursive(
|
||||
err,
|
||||
true, /* isOutermost */
|
||||
false, /* withDetail */
|
||||
false, /* withDepth */
|
||||
0, /* depth */
|
||||
)
|
||||
p.formatSingleLineOutput()
|
||||
p.finishDisplay(verb)
|
||||
|
||||
default:
|
||||
// Unknown verb. Do like fmt.Printf and tell the user we're
|
||||
// confused.
|
||||
//
|
||||
// Note that the following logic is correct regardless of the
|
||||
// value of 'redactableOutput', because the display of the verb and type
|
||||
// are always safe for redaction. If/when this code is changed to
|
||||
// print more details, care is to be taken to add redaction
|
||||
// markers if s.redactableOutput is set.
|
||||
p.finalBuf.WriteString("%!")
|
||||
p.finalBuf.WriteRune(verb)
|
||||
p.finalBuf.WriteByte('(')
|
||||
switch {
|
||||
case err != nil:
|
||||
p.finalBuf.WriteString(reflect.TypeOf(err).String())
|
||||
default:
|
||||
p.finalBuf.WriteString("<nil>")
|
||||
}
|
||||
p.finalBuf.WriteByte(')')
|
||||
io.Copy(s, &p.finalBuf)
|
||||
}
|
||||
}
|
||||
|
||||
// formatEntries reads the entries from s.entries and produces a
|
||||
// detailed rendering in s.finalBuf.
|
||||
//
|
||||
// Note that if s.redactableOutput is true, s.finalBuf is to contain a
|
||||
// RedactableBytes. However, we are not using the helper facilities
|
||||
// from redact.SafePrinter to do this, so care should be taken below
|
||||
// to properly escape markers, etc.
|
||||
func (s *state) formatEntries(err error) {
|
||||
// The first entry at the top is special. We format it as follows:
|
||||
//
|
||||
// <complete error message>
|
||||
// (1) <details>
|
||||
s.formatSingleLineOutput()
|
||||
s.finalBuf.WriteString("\n(1)")
|
||||
|
||||
s.printEntry(s.entries[len(s.entries)-1])
|
||||
|
||||
// All the entries that follow are printed as follows:
|
||||
//
|
||||
// Wraps: (N) <details>
|
||||
//
|
||||
for i, j := len(s.entries)-2, 2; i >= 0; i, j = i-1, j+1 {
|
||||
s.finalBuf.WriteByte('\n')
|
||||
// Extra indentation starts at depth==2 because the direct
|
||||
// children of the root error are already printed on separate
|
||||
// newlines.
|
||||
for m := 0; m < s.entries[i].depth-1; m += 1 {
|
||||
if m == s.entries[i].depth-2 {
|
||||
s.finalBuf.WriteString("└─ ")
|
||||
} else {
|
||||
s.finalBuf.WriteByte(' ')
|
||||
s.finalBuf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(&s.finalBuf, "Wraps: (%d)", j)
|
||||
entry := s.entries[i]
|
||||
s.printEntry(entry)
|
||||
}
|
||||
|
||||
// At the end, we link all the (N) references to the Go type of the
|
||||
// error.
|
||||
s.finalBuf.WriteString("\nError types:")
|
||||
for i, j := len(s.entries)-1, 1; i >= 0; i, j = i-1, j+1 {
|
||||
fmt.Fprintf(&s.finalBuf, " (%d) %T", j, s.entries[i].err)
|
||||
}
|
||||
}
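// Illustrative sketch (not part of the upstream file): for an error with two
// layers of wrapping, the rendering assembled above has this overall shape
// (prefixes, details and the stack trace are placeholders):
//
//	outer prefix: inner prefix: root cause
//	(1) outer prefix
//	Wraps: (2) inner prefix
//	Wraps: (3) root cause
//	 -- stack trace:
//	 ...
//	Error types: (1) *mypkg.outerErr (2) *mypkg.innerErr (3) *mypkg.rootErr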
|
||||
|
||||
// printEntry renders the entry given as argument
|
||||
// into s.finalBuf.
|
||||
//
|
||||
// If s.redactableOutput is set, then s.finalBuf is to contain
|
||||
// a RedactableBytes, with redaction markers. In that
|
||||
// case, we must be careful to escape (or not) the entry
|
||||
// depending on entry.redactable.
|
||||
//
|
||||
// If s.redactableOutput is unset, then we are not caring about
|
||||
// redactability. In that case entry.redactable is not set
|
||||
// anyway and we can pass contents through.
|
||||
func (s *state) printEntry(entry formatEntry) {
|
||||
if len(entry.head) > 0 {
|
||||
if entry.head[0] != '\n' {
|
||||
s.finalBuf.WriteByte(' ')
|
||||
}
|
||||
if len(entry.head) > 0 {
|
||||
if !s.redactableOutput || entry.redactable {
|
||||
// If we don't care about redaction, then we can pass the string
|
||||
// through.
|
||||
//
|
||||
// If we do care about redaction, and entry.redactable is true,
|
||||
// then entry.head is already a RedactableBytes. Then we can
|
||||
// also pass it through.
|
||||
s.finalBuf.Write(entry.head)
|
||||
} else {
|
||||
// We care about redaction, and the head is unsafe. Escape it
|
||||
// and enclose the result within redaction markers.
|
||||
s.finalBuf.Write([]byte(redact.EscapeBytes(entry.head)))
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(entry.details) > 0 {
|
||||
if len(entry.head) == 0 {
|
||||
if entry.details[0] != '\n' {
|
||||
s.finalBuf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
if !s.redactableOutput || entry.redactable {
|
||||
// If we don't care about redaction, then we can pass the string
|
||||
// through.
|
||||
//
|
||||
// If we do care about redaction, and entry.redactable is true,
|
||||
// then entry.details is already a RedactableBytes. Then we can
|
||||
// also pass it through.
|
||||
s.finalBuf.Write(entry.details)
|
||||
} else {
|
||||
// We care about redaction, and the details are unsafe. Escape
|
||||
// them and enclose the result within redaction markers.
|
||||
s.finalBuf.Write([]byte(redact.EscapeBytes(entry.details)))
|
||||
}
|
||||
}
|
||||
if entry.stackTrace != nil {
|
||||
s.finalBuf.WriteString("\n -- stack trace:")
|
||||
s.finalBuf.WriteString(strings.ReplaceAll(
|
||||
fmt.Sprintf("%+v", entry.stackTrace),
|
||||
"\n", string(detailSep)))
|
||||
if entry.elidedStackTrace {
|
||||
fmt.Fprintf(&s.finalBuf, "%s[...repeated from below...]", detailSep)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// formatSingleLineOutput prints the details extracted via
|
||||
// formatRecursive() through the chain of errors as if .Error() has
|
||||
// been called: it only prints the non-detail parts and prints them on
|
||||
// one line with ": " separators.
|
||||
//
|
||||
// This function is used both when FormatError() is called indirectly
|
||||
// from .Error(), e.g. in:
|
||||
//
|
||||
// func (e *myType) Error() string { return fmt.Sprintf("%v", e) }
|
||||
// func (e *myType) Format(s fmt.State, verb rune) { errors.FormatError(e, s, verb) }
|
||||
//
|
||||
// and also to print the first line in the output of a %+v format.
|
||||
//
|
||||
// It reads from s.entries and writes to s.finalBuf.
|
||||
// s.buf is left untouched.
|
||||
//
|
||||
// Note that if s.redactableOutput is true, s.finalBuf is to contain a
|
||||
// RedactableBytes. However, we are not using the helper facilities
|
||||
// from redact.SafePrinter to do this, so care should be taken below
|
||||
// to properly escape markers, etc.
|
||||
func (s *state) formatSingleLineOutput() {
|
||||
for i := len(s.entries) - 1; i >= 0; i-- {
|
||||
entry := &s.entries[i]
|
||||
if entry.elideShort {
|
||||
continue
|
||||
}
|
||||
if s.finalBuf.Len() > 0 && len(entry.head) > 0 {
|
||||
s.finalBuf.WriteString(": ")
|
||||
}
|
||||
if len(entry.head) == 0 {
|
||||
// shortcut, to avoid the copy below.
|
||||
continue
|
||||
}
|
||||
if !s.redactableOutput || entry.redactable {
|
||||
// If we don't care about redaction, then we can pass the string
|
||||
// through.
|
||||
//
|
||||
// If we do care about redaction, and entry.redactable is true,
|
||||
// then entry.head is already a RedactableBytes. Then we can
|
||||
// also pass it through.
|
||||
s.finalBuf.Write(entry.head)
|
||||
} else {
|
||||
// We do care about redaction, but entry.redactable is unset.
|
||||
// This means entry.head is unsafe. We need to escape it.
|
||||
s.finalBuf.Write([]byte(redact.EscapeBytes(entry.head)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// formatRecursive performs a post-order traversal on the chain of
|
||||
// errors to collect error details from innermost to outermost.
|
||||
//
|
||||
// It uses s.buf as an intermediate buffer to collect strings.
|
||||
// It populates s.entries as a result.
|
||||
// Between each layer of error, s.buf is reset.
|
||||
//
|
||||
// s.finalBuf is untouched. The conversion of s.entries
|
||||
// to s.finalBuf is done by formatSingleLineOutput() and/or
|
||||
// formatEntries().
|
||||
//
|
||||
// `withDepth` and `depth` are used to tag subtrees of multi-cause
|
||||
// errors for added indentation during printing. Once a multi-cause
|
||||
// error is encountered, all subsequent calls will set `withDepth` to
|
||||
// true, and increment `depth` during recursion. This information is
|
||||
// persisted into the generated entries and used later to display the
|
||||
// error with increased indentation based on the depth.
|
||||
func (s *state) formatRecursive(err error, isOutermost, withDetail, withDepth bool, depth int) int {
|
||||
cause := UnwrapOnce(err)
|
||||
numChildren := 0
|
||||
if cause != nil {
|
||||
// Recurse first, which populates entries list starting from innermost
|
||||
// entry. If we've previously seen a multi-cause wrapper, `withDepth`
|
||||
// will be true, and we'll record the depth below ensuring that extra
|
||||
// indentation is applied to this inner cause during printing.
|
||||
// Otherwise, we maintain "straight" vertical formatting by keeping the
|
||||
// parent callers `withDepth` value of `false` by default.
|
||||
numChildren += s.formatRecursive(cause, false, withDetail, withDepth, depth+1)
|
||||
}
|
||||
|
||||
causes := UnwrapMulti(err)
|
||||
for _, c := range causes {
|
||||
// Override `withDepth` to true for all child entries ensuring they have
|
||||
// indentation applied during formatting to distinguish them from
|
||||
// parents.
|
||||
numChildren += s.formatRecursive(c, false, withDetail, true, depth+1)
|
||||
}
|
||||
// inserted := len(s.entries) - 1 - startChildren
|
||||
|
||||
// Reinitialize the state for this stage of wrapping.
|
||||
s.wantDetail = withDetail
|
||||
s.needSpace = false
|
||||
s.needNewline = 0
|
||||
s.multiLine = false
|
||||
s.notEmpty = false
|
||||
s.hasDetail = false
|
||||
s.headBuf = nil
|
||||
|
||||
seenTrace := false
|
||||
|
||||
bufIsRedactable := false
|
||||
|
||||
switch v := err.(type) {
|
||||
case SafeFormatter:
|
||||
bufIsRedactable = true
|
||||
desiredShortening := v.SafeFormatError((*safePrinter)(s))
|
||||
if desiredShortening == nil {
|
||||
// The error wants to elide the short messages from inner causes.
|
||||
// Read backwards through the list of entries, up to the number of new
|
||||
// entries created "under" this one, and mark `elideShort`
|
||||
// true.
|
||||
s.elideShortChildren(numChildren)
|
||||
}
|
||||
|
||||
case Formatter:
|
||||
desiredShortening := v.FormatError((*printer)(s))
|
||||
if desiredShortening == nil {
|
||||
// The error wants to elide the short messages from inner
|
||||
// causes. Do it.
|
||||
s.elideShortChildren(numChildren)
|
||||
}
|
||||
|
||||
case fmt.Formatter:
|
||||
// We can only use a fmt.Formatter when both the following
|
||||
// conditions are true:
|
||||
// - when it is the leaf error, because a fmt.Formatter
|
||||
// on a wrapper also recurses.
|
||||
// - when it is not the outermost wrapper, because
|
||||
// the Format() method is likely to be calling FormatError()
|
||||
// to do its job and we want to avoid an infinite recursion.
|
||||
if !isOutermost && cause == nil {
|
||||
v.Format(s, 'v')
|
||||
if st, ok := err.(StackTraceProvider); ok {
|
||||
// This is likely a leaf error from github/pkg/errors.
|
||||
// The thing probably printed its stack trace on its own.
|
||||
seenTrace = true
|
||||
// We'll subsequently simplify stack traces in wrappers.
|
||||
s.lastStack = st.StackTrace()
|
||||
}
|
||||
} else {
|
||||
if elideCauseMsg := s.formatSimple(err, cause); elideCauseMsg {
|
||||
// The error wants to elide the short messages from inner
|
||||
// causes. Do it.
|
||||
s.elideShortChildren(numChildren)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
// Handle the special case overrides for context.Canceled,
|
||||
// os.PathError, etc for which we know how to extract some safe
|
||||
// strings.
|
||||
//
|
||||
// We need to do this in the `default` branch, instead of doing
|
||||
// this above the switch, because the special handler could call a
|
||||
// .Error() that delegates its implementation to fmt.Formatter,
|
||||
// errors.SafeFormatter or errors.Formattable, which brings us
|
||||
// back to this method in a call cycle. So we need to handle the
|
||||
// various interfaces first.
|
||||
printDone := false
|
||||
for _, fn := range specialCases {
|
||||
if handled, desiredShortening := fn(err, (*safePrinter)(s), cause == nil /* leaf */); handled {
|
||||
printDone = true
|
||||
bufIsRedactable = true
|
||||
if desiredShortening == nil {
|
||||
// The error wants to elide the short messages from inner
|
||||
// causes. Do it.
|
||||
s.elideShortChildren(numChildren)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if !printDone {
|
||||
// If the error did not implement errors.Formatter nor
|
||||
// fmt.Formatter, but it is a wrapper, still attempt best effort:
|
||||
// print what we can at this level.
|
||||
elideChildren := s.formatSimple(err, cause)
|
||||
// always elideChildren when dealing with multi-cause errors.
|
||||
if len(causes) > 0 {
|
||||
elideChildren = true
|
||||
}
|
||||
if elideChildren {
|
||||
// The error wants to elide the short messages from inner
|
||||
// causes. Do it.
|
||||
s.elideShortChildren(numChildren)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect the result.
|
||||
entry := s.collectEntry(err, bufIsRedactable, withDepth, depth)
|
||||
|
||||
// If there's an embedded stack trace, also collect it.
|
||||
// This will get either a stack from pkg/errors, or ours.
|
||||
if !seenTrace {
|
||||
if st, ok := err.(StackTraceProvider); ok {
|
||||
entry.stackTrace, entry.elidedStackTrace = ElideSharedStackTraceSuffix(s.lastStack, st.StackTrace())
|
||||
s.lastStack = entry.stackTrace
|
||||
}
|
||||
}
|
||||
|
||||
// Remember the entry for later rendering.
|
||||
s.entries = append(s.entries, entry)
|
||||
s.buf = bytes.Buffer{}
|
||||
|
||||
return numChildren + 1
|
||||
}
|
||||
|
||||
// elideShortChildren takes a number of entries to set `elideShort` to
|
||||
// true. The reason a number of entries is needed is that we may be
|
||||
// eliding a subtree of causes in the case of a multi-cause error. In
|
||||
// the multi-cause case, we need to know how many of the prior errors
|
||||
// in the list of entries are children of this subtree.
|
||||
func (s *state) elideShortChildren(newEntries int) {
|
||||
for i := 0; i < newEntries; i++ {
|
||||
s.entries[len(s.entries)-1-i].elideShort = true
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) collectEntry(err error, bufIsRedactable bool, withDepth bool, depth int) formatEntry {
|
||||
entry := formatEntry{err: err}
|
||||
if s.wantDetail {
|
||||
// The buffer has been populated as a result of formatting with
|
||||
// %+v. In that case, if the printer has separated detail
|
||||
// from non-detail, we can use the split.
|
||||
if s.hasDetail {
|
||||
entry.head = s.headBuf
|
||||
entry.details = s.buf.Bytes()
|
||||
} else {
|
||||
entry.head = s.buf.Bytes()
|
||||
}
|
||||
} else {
|
||||
entry.head = s.headBuf
|
||||
if len(entry.head) > 0 && entry.head[len(entry.head)-1] != '\n' &&
|
||||
s.buf.Len() > 0 && s.buf.Bytes()[0] != '\n' {
|
||||
entry.head = append(entry.head, '\n')
|
||||
}
|
||||
entry.head = append(entry.head, s.buf.Bytes()...)
|
||||
}
|
||||
|
||||
if bufIsRedactable {
|
||||
// In this case, we've produced entry.head/entry.details using a
|
||||
// SafeFormatError() invocation. The strings in
|
||||
// entry.head/entry.detail contain redaction markers at this
|
||||
// point.
|
||||
if s.redactableOutput {
|
||||
// Redaction markers desired in the final output. Keep the
|
||||
// redaction markers.
|
||||
entry.redactable = true
|
||||
} else {
|
||||
// Markers not desired in the final output: strip the markers.
|
||||
entry.head = redact.RedactableBytes(entry.head).StripMarkers()
|
||||
entry.details = redact.RedactableBytes(entry.details).StripMarkers()
|
||||
}
|
||||
}
|
||||
|
||||
if withDepth {
|
||||
entry.depth = depth
|
||||
}
|
||||
|
||||
	return entry
}

// safeErrorPrinterFn is the type of a function that can take
// over the safe printing of an error. This is used to inject special
// cases into the formatting in errutil. We need this machinery to
// prevent import cycles.
type safeErrorPrinterFn = func(err error, p Printer, isLeaf bool) (handled bool, next error)

// specialCases is a list of functions to apply for special cases.
var specialCases []safeErrorPrinterFn

// RegisterSpecialCasePrinter registers a handler.
func RegisterSpecialCasePrinter(fn safeErrorPrinterFn) {
	specialCases = append(specialCases, fn)
}

// formatSimple performs a best effort at extracting the details at a
// given level of wrapping when the error object does not implement
// the Formatter interface.
// Returns true if we want to elide errors from causal chain.
func (s *state) formatSimple(err, cause error) bool {
	var pref string
	elideCauses := false
	if cause != nil {
		var messageType MessageType
		pref, messageType = extractPrefix(err, cause)
		if messageType == FullMessage {
			elideCauses = true
		}
	} else {
		pref = err.Error()
	}
	if len(pref) > 0 {
		s.Write([]byte(pref))
	}
	return elideCauses
}

// finishDisplay renders s.finalBuf into s.State.
func (p *state) finishDisplay(verb rune) {
	if p.redactableOutput {
		// If we're rendering in redactable form, then s.finalBuf contains
		// a RedactableBytes. We can emit that directly.
		sp := p.State.(redact.SafePrinter)
		sp.Print(redact.RedactableBytes(p.finalBuf.Bytes()))
		return
	}
	// Not redactable: render depending on flags and verb.

	width, okW := p.Width()
	_, okP := p.Precision()

	// If `direct` is set to false, then the buffer is always
	// passed through fmt.Printf regardless of the width and alignment
	// settings. This is important for e.g. %q where quotes must be added
	// in any case.
	// If `direct` is set to true, then the detour via
	// fmt.Printf only occurs if there is a width or alignment
	// specifier.
	direct := verb == 'v' || verb == 's'

	if !direct || (okW && width > 0) || okP {
		_, format := redact.MakeFormat(p, verb)
		fmt.Fprintf(p.State, format, p.finalBuf.String())
	} else {
		io.Copy(p.State, &p.finalBuf)
	}
}

var detailSep = []byte("\n | ")

// state tracks error printing state. It implements fmt.State.
type state struct {
	// state inherits fmt.State.
	//
	// If we are rendering with redactableOutput=true, then fmt.State
	// can be downcasted to redact.SafePrinter.
	fmt.State

	// redactableOutput indicates whether we want the output
	// to use redaction markers. When set to true,
	// the fmt.State above is actually a redact.SafePrinter.
	redactableOutput bool

	// finalBuf contains the final rendered string, prior to being
	// copied to the fmt.State above.
	//
	// If redactableOutput is true, then finalBuf contains a RedactableBytes
	// and safe redaction markers. Otherwise, it can be considered
	// an unsafe string.
	finalBuf bytes.Buffer

	// entries collect the result of formatRecursive(). They are
	// consumed by formatSingleLineOutput() and formatEntries() to
	// produce the contents of finalBuf.
	entries []formatEntry

	// buf collects the details of the current error object at a given
	// stage of recursion in formatRecursive().
	//
	// At each stage of recursion (level of wrapping), buf contains
	// successively:
	//
	// - at the beginning, the "simple" part of the error message --
	//   either the pre-Detail() string if the error implements Formatter,
	//   or the result of Error().
	//
	// - after the first call to Detail(), buf is copied to headBuf,
	//   then reset, then starts collecting the "advanced" part of the
	//   error message.
	//
	// At the end of an error layer, the contents of buf and headBuf
	// are collected into a formatEntry by collectEntry().
	// This collection does not touch finalBuf above.
	//
	// The entries are later consumed by formatSingleLineOutput() or
	// formatEntries() to produce the contents of finalBuf.
	//
	//
	// Notes regarding redaction markers and string safety. Throughout a
	// single "level" of error, there are three cases to consider:
	//
	// - the error level implements SafeErrorFormatter and
	//   s.redactableOutput is set. In that case, the error's
	//   SafeErrorFormat() is used to produce a RedactableBytes in
	//   buf/headBuf via safePrinter{}, and an entry is collected at the
	//   end of that with the redactable bit set on the entry.
	//
	// - the error level implements SafeErrorFormatter
	//   and s.redactableOutput is *not* set. In this case,
	//   for convenience we implement non-redactable output by using
	//   SafeErrorFormat() to generate a RedactableBytes into
	//   buf/headBuf via safePrinter{}, and then stripping the redaction
	//   markers to produce the entry. The entry is not marked as
	//   redactable.
	//
	// - in the remaining case (s.redactableOutput is not set or the
	//   error only implements Formatter), then we use FormatError()
	//   to produce a non-redactable string into buf/headBuf,
	//   and mark the resulting entry as non-redactable.
	buf bytes.Buffer
	// When an error's FormatError() calls Detail(), the current
	// value of buf above is copied to headBuf, and a new
	// buf is initialized.
	headBuf []byte

	// lastStack tracks the last stack trace observed when looking at
	// the errors from innermost to outermost. This is used to elide
	// redundant stack trace entries.
	lastStack StackTrace

	// ---------------------------------------------------------------
	// The following attributes organize the synchronization of writes
	// to buf and headBuf, during the rendering of a single error
	// layer. They get reset between layers.

	// hasDetail becomes true at each level of the formatRecursive()
	// recursion after the first call to .Detail(). It is used to
	// determine how to translate buf/headBuf into a formatEntry.
	hasDetail bool

	// wantDetail is set to true when the error is formatted via %+v.
	// When false, printer.Detail() will always return false and the
	// error's .FormatError() method can perform less work. (This is an
	// optimization for the common case when an error's .Error() method
	// delegates its work to its .FormatError() via fmt.Format and
	// errors.FormatError().)
	wantDetail bool

	// collectingRedactableString is true iff the data being accumulated
	// into buf comes from a redact string. It ensures that newline
	// characters are not included inside redaction markers.
	collectingRedactableString bool

	// notEmpty tracks, at each level of recursion of formatRecursive(),
	// whether there were any details printed by an error's
	// .FormatError() method. It is used to properly determine whether
	// the printout should start with a newline and padding.
	notEmpty bool
	// needSpace tracks whether the next character displayed should pad
	// using a space character.
	needSpace bool
	// needNewline tracks whether the next character displayed should
	// pad using a newline and indentation.
	needNewline int
	// multiLine tracks whether the details so far contain multiple
	// lines. It is used to determine whether an enclosed stack trace,
	// if any, should be introduced with a separator.
	multiLine bool
}

// formatEntry collects the textual details about one level of
// wrapping or the leaf error in an error chain.
type formatEntry struct {
	err error
	// redactable is true iff the data in head and details
	// are RedactableBytes. See the explanatory comments
	// on (state).buf for when this is set.
	redactable bool
	// head is the part of the text that is suitable for printing in the
	// one-liner summary, or when producing the output of .Error().
	head []byte
	// details is the part of the text produced in the advanced output
	// included for `%+v` formats.
	details []byte
	// elideShort, if true, elides the value of 'head' from concatenated
	// "short" messages produced by formatSingleLineOutput().
	elideShort bool

	// stackTrace, if non-nil, reports the stack trace embedded at this
	// level of error.
	stackTrace StackTrace
	// elidedStackTrace, if true, indicates that the stack trace was
	// truncated to avoid duplication of entries. This is used to
	// display a truncation indicator during verbose rendering.
	elidedStackTrace bool

	// depth, if positive, represents a nesting depth of this error as
	// a causer of others. This is used with verbose printing to
	// illustrate the nesting depth for multi-cause error wrappers.
	depth int
}

// String is used for debugging only.
func (e formatEntry) String() string {
	return fmt.Sprintf("formatEntry{%T, %q, %q}", e.err, e.head, e.details)
}

// Write implements io.Writer.
func (s *state) Write(b []byte) (n int, err error) {
	if len(b) == 0 {
		return 0, nil
	}
	k := 0

	sep := detailSep
	if !s.wantDetail {
		sep = []byte("\n")
	}

	for i, c := range b {
		if c == '\n' {
			//if s.needNewline > 0 {
			//	for i := 0; i < s.needNewline-1; i++ {
			//		s.buf.Write(detailSep[:len(sep)-1])
			//	}
			//	s.needNewline = 0
			//}
			// Flush all the bytes seen so far.
			s.buf.Write(b[k:i])
			// Don't print the newline itself; instead, prepare the state so
			// that the _next_ character encountered will pad with a newline.
			// This algorithm avoids terminating error details with excess
			// newline characters.
			k = i + 1
			s.needNewline++
			s.needSpace = false
			s.multiLine = true
			if s.wantDetail {
				s.switchOver()
			}
		} else {
			if s.needNewline > 0 && s.notEmpty {
				// If newline chars were pending, display them now.
				for i := 0; i < s.needNewline-1; i++ {
					s.buf.Write(detailSep[:len(sep)-1])
				}
				s.buf.Write(sep)
				s.needNewline = 0
				s.needSpace = false
			} else if s.needSpace {
				s.buf.WriteByte(' ')
				s.needSpace = false
			}
			s.notEmpty = true
		}
	}
	//if s.needNewline > 0 {
	//	for i := 0; i < s.needNewline-1; i++ {
	//		s.buf.Write(detailSep[:len(sep)-1])
	//	}
	//}
	s.buf.Write(b[k:])
	return len(b), nil
}

// printer wraps a state to implement an xerrors.Printer.
type printer state

func (p *state) detail() bool {
	if !p.wantDetail {
		return false
	}
	if p.notEmpty {
		p.needNewline = 1
	}
	p.switchOver()
	return true
}

func (p *state) switchOver() {
	if p.hasDetail {
		return
	}
	p.headBuf = p.buf.Bytes()
	p.buf = bytes.Buffer{}
	p.notEmpty = false
	p.hasDetail = true

	// One of the newlines is accounted for in the switch over.
	// p.needNewline -= 1
}

func (s *printer) Detail() bool {
	return ((*state)(s)).detail()
}

func (s *printer) Print(args ...interface{}) {
	s.enhanceArgs(args)
	fmt.Fprint((*state)(s), args...)
}

func (s *printer) Printf(format string, args ...interface{}) {
	s.enhanceArgs(args)
	fmt.Fprintf((*state)(s), format, args...)
}

func (s *printer) enhanceArgs(args []interface{}) {
	prevStack := s.lastStack
	lastSeen := prevStack
	for i := range args {
		if st, ok := args[i].(pkgErr.StackTrace); ok {
			args[i], _ = ElideSharedStackTraceSuffix(prevStack, st)
			lastSeen = st
		}
		if err, ok := args[i].(error); ok {
			args[i] = &errorFormatter{err}
		}
	}
	s.lastStack = lastSeen
}

// safePrinter is a variant of printer used when the current error
// level implements SafeFormatter.
//
// In any case, it uses the error's SafeFormatError() method to
// prepare a RedactableBytes into s.buf / s.headBuf.
// See the explanation for `buf` in the state struct.
type safePrinter state

func (s *safePrinter) Detail() bool {
	return ((*state)(s)).detail()
}

func (s *safePrinter) Print(args ...interface{}) {
	s.enhanceArgs(args)
	redact.Fprint((*state)(s), args...)
}

func (s *safePrinter) Printf(format string, args ...interface{}) {
	s.enhanceArgs(args)
	redact.Fprintf((*state)(s), format, args...)
}

func (s *safePrinter) enhanceArgs(args []interface{}) {
	prevStack := s.lastStack
	lastSeen := prevStack
	for i := range args {
		if st, ok := args[i].(pkgErr.StackTrace); ok {
			thisStack, _ := ElideSharedStackTraceSuffix(prevStack, st)
			// Stack traces are safe strings.
			args[i] = redact.Safe(thisStack)
			lastSeen = st
		}
		// In contrast with (*printer).enhanceArgs(), we don't use a
		// special case for `error` here, because the redact package
		// already helps us recursing into a safe print for
		// error objects.
	}
	s.lastStack = lastSeen
}

type errorFormatter struct{ err error }

// Format implements the fmt.Formatter interface.
func (ef *errorFormatter) Format(s fmt.State, verb rune) { FormatError(ef.err, s, verb) }

// Error implements error, so that `redact` knows what to do with it.
func (ef *errorFormatter) Error() string { return ef.err.Error() }

// Unwrap makes it a wrapper.
func (ef *errorFormatter) Unwrap() error { return ef.err }

// Cause makes it a wrapper.
func (ef *errorFormatter) Cause() error { return ef.err }

// ElideSharedStackTraceSuffix removes the suffix of newStack that's already
// present in prevStack. The function returns true if some entries
// were elided.
func ElideSharedStackTraceSuffix(prevStack, newStack StackTrace) (StackTrace, bool) {
	if len(prevStack) == 0 {
		return newStack, false
	}
	if len(newStack) == 0 {
		return newStack, false
	}

	// Skip over the common suffix.
	var i, j int
	for i, j = len(newStack)-1, len(prevStack)-1; i > 0 && j > 0; i, j = i-1, j-1 {
		if newStack[i] != prevStack[j] {
			break
		}
	}
	if i == 0 {
		// Keep at least one entry.
		i = 1
	}
	return newStack[:i], i < len(newStack)-1
}
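
// Illustrative sketch (not part of the upstream file): the elision
// keeps only the frames of the new trace that are not already part of
// the previously printed one. Frames are compared by value, so
// fabricated program counters are enough to show the behavior:
//
//	prev := StackTrace{StackFrame(10), StackFrame(20), StackFrame(30)}
//	next := StackTrace{StackFrame(99), StackFrame(20), StackFrame(30)}
//	trimmed, elided := ElideSharedStackTraceSuffix(prev, next)
//	// trimmed is StackTrace{StackFrame(99)} and elided is true: the two
//	// innermost frames shared with prev are dropped from the output.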

// StackTrace is the type of the data for a call stack.
// This mirrors the type of the same name in github.com/pkg/errors.
type StackTrace = pkgErr.StackTrace

// StackFrame is the type of a single call frame entry.
// This mirrors the type of the same name in github.com/pkg/errors.
type StackFrame = pkgErr.Frame

// StackTraceProvider is a provider of StackTraces.
// This is, intentionally, defined to be implemented by pkg/errors.stack.
type StackTraceProvider interface {
	StackTrace() StackTrace
}
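
// Illustrative sketch (not part of the upstream file): a custom error
// type opts into this formatting machinery by delegating fmt.Formatter
// to FormatError, the same way errorFormatter does above. The type and
// message are made up; `fmt` is assumed to be imported:
//
//	type timeoutError struct{ op string }
//
//	func (e *timeoutError) Error() string                 { return fmt.Sprint(e) }
//	func (e *timeoutError) Format(s fmt.State, verb rune) { FormatError(e, s, verb) }
//
//	func (e *timeoutError) FormatError(p Printer) (next error) {
//		p.Printf("%s: timed out", e.op)
//		if p.Detail() {
//			p.Print("extra detail shown only under %+v")
//		}
//		return nil
//	}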

@ -0,0 +1,99 @@
// Copyright 2019 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

// This file is taken from golang.org/x/xerrors,
// at commit 3ee3066db522c6628d440a3a91c4abdd7f5ef22f (2019-05-10).
// From the original code:
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package errbase

// A Formatter formats error messages.
//
// NB: Consider implementing SafeFormatter instead. This will ensure
// that error displays can distinguish bits that are PII-safe.
type Formatter interface {
	error

	// FormatError prints the receiver's first error.
	// The return value decides what happens in the case
	// FormatError() is used to produce a "short" message,
	// eg. when it is used to implement Error():
	//
	// - if it returns nil, then the short message
	//   contains no more than that produced for this error,
	//   even if the error has a further causal chain.
	//
	// - if it returns non-nil, then the short message
	//   contains the value printed by this error,
	//   followed by that of its causal chain.
	//   (e.g. thiserror: itscause: furthercause)
	//
	// Note that all the causal chain is reported in verbose reports in
	// any case.
	FormatError(p Printer) (next error)
}

// SafeFormatter is implemented by error leaf or wrapper types that want
// to separate safe and non-safe information when printed out.
//
// When multiple errors are chained (e.g. via errors.Wrap), intermediate
// layers in the error that do not implement SafeError are considered
// “unsafe”.
type SafeFormatter interface {
	// SafeFormatError prints the receiver's first error.
	//
	// The provided Printer behaves like a redact.SafePrinter: its
	// Print() and Printf() methods conditionally add redaction markers
	// around unsafe bits.
	//
	// The return value of SafeFormatError() decides what happens in the
	// case the method is used to produce a "short" message, eg. when it
	// is used to implement Error():
	//
	// - if it returns nil, then the short message
	//   contains no more than that produced for this error,
	//   even if the error has a further causal chain.
	//
	// - if it returns non-nil, then the short message
	//   contains the value printed by this error,
	//   followed by that of its causal chain.
	//   (e.g. thiserror: itscause: furthercause)
	//
	// Note that all the causal chain is reported in verbose reports in
	// any case.
	SafeFormatError(p Printer) (next error)
}

// A Printer formats error messages.
//
// The most common implementation of Printer is the one provided by package fmt
// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message
// typically provide their own implementations.
type Printer interface {
	// Print appends args to the message output.
	Print(args ...interface{})

	// Printf writes a formatted string.
	Printf(format string, args ...interface{})

	// Detail reports whether error detail is requested.
	// After the first call to Detail, all text written to the Printer
	// is formatted as additional detail, or ignored when
	// detail has not been requested.
	// If Detail returns false, the caller can avoid printing the detail at all.
	Detail() bool
}
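
// Illustrative sketch (not part of the upstream file): a leaf error
// that separates safe and unsafe information. The literal format text
// is PII-safe; the address operand is not marked safe, so a redactable
// printer encloses it in redaction markers. The type and field are
// made up; `fmt` is assumed to be imported:
//
//	type connError struct{ addr string }
//
//	func (e *connError) Error() string                 { return fmt.Sprint(e) }
//	func (e *connError) Format(s fmt.State, verb rune) { FormatError(e, s, verb) }
//
//	func (e *connError) SafeFormatError(p Printer) (next error) {
//		p.Printf("connection failed to %s", e.addr)
//		return nil
//	}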

@ -0,0 +1,123 @@
// Copyright 2019 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package errbase

import "fmt"

// This file provides the library with the ability to handle cases
// where an error type migrates, i.e. its package changes path or the
// type name is changed.
//
// There are several scenarios to contend with. Assuming the error
// type is initially called "foo", in version v1 of the code.
//
// Scenario 1: simple migration
// - v2 renames foo -> bar
//   v2 calls: RegisterTypeMigration("foo", &bar{})
// - v2 and v1 are connected
// - v1 sends an error to v2:
//   - v2 has the migration registered, recognizes that "foo"
//     refers to bar
// - v2 sends an error to v1
//   - v2 rewrites the error key upon send to the name known to v1
//
// Scenario 2: simultaneous migration
// - vA renames foo -> bar
//   vA calls RegisterTypeMigration("foo", &bar{})
// - vB renames foo -> qux
//   vB calls RegisterTypeMigration("foo", &qux{})
// - vA and vB are connected
// - vA sends an error to vB:
//   - vA translates the error key upon send from bar to foo's key
//   - vB recognizes that "foo" refers to qux
//
// Scenario 3: migrated error passing through
// - v2 renames foo -> bar
//   v2 calls: RegisterTypeMigration("foo", &bar{})
// - v2.a, v2.b and v1 are connected: v2.a -> v1 -> v2.b
// - v2.a sends an error to v2.b via v1:
//   - v2.a encodes using foo's key, v1 receives as foo
//   - v1 encodes using foo's key
//   - v2.b receives foo's key, knows about migration, decodes as bar
//
// Scenario 4: migrated error passing through node that does not know
// about it whatsoever (the key is preserved).
// - v2 renames foo -> bar
//   v2 calls: RegisterTypeMigration("foo", &bar{})
// - v2.a, v2.b and v0 are connected: v2.a -> v0 -> v2.b
//   (v0 does not know about error foo at all)
// - v2.a sends an error to v2.b via v0:
//   - v2.a encodes using foo's key, v0 receives as "unknown foo"
//   - v0 passes through unchanged
//   - v2.b receives foo's key, knows about migration, decodes as bar
//
// Scenario 5: comparison between migrated and non-migrated errors
// on 3rd party node.
// - v2 renames foo -> bar
// - v2 sends error bar to v0
// - v1 sends an equivalent error with type foo to v0
// - v0 (that doesn't know about the type) compares the two errors.
//   Here we're expecting v0 to properly ascertain the errors are equivalent.

// RegisterTypeMigration tells the library that the type of the error
// given as 3rd argument was previously known with type
// previousTypeName, located at previousPkgPath.
//
// The value of previousTypeName must be the result of calling
// reflect.TypeOf(err).String() on the original error object.
// This is usually composed as follows:
//   [*]<shortpackage>.<errortype>
//
// For example, Go's standard error type has name "*errors.errorString".
// The asterisk indicates that `errorString` implements the `error`
// interface via pointer receiver.
//
// Meanwhile, the singleton error type context.DeadlineExceeded
// has name "context.deadlineExceededError", without asterisk
// because the type implements `error` by value.
//
// Remember that the short package name inside the error type name and
// the last component of the package path can be different. This is
// why they must be specified separately.
func RegisterTypeMigration(previousPkgPath, previousTypeName string, newType error) {
	prevKey := TypeKey(makeTypeKey(previousPkgPath, previousTypeName))
	newKey := TypeKey(getFullTypeName(newType))

	// Register the backward migration: make the encode function
	// aware of the old name.
	if f, ok := backwardRegistry[newKey]; ok {
		panic(fmt.Errorf("migration to type %q already registered (from %q)", newKey, f))
	}
	backwardRegistry[newKey] = prevKey
	// If any other key was registered as a migration from newKey,
	// we'll forward those as well.
	// This changes X -> newKey to X -> prevKey for every X.
	for new, prev := range backwardRegistry {
		if prev == newKey {
			backwardRegistry[new] = prevKey
		}
	}
}

// registry used when encoding an error, so that the receiver observes
// the original key. This maps new keys to old keys.
var backwardRegistry = map[TypeKey]TypeKey{}

// TestingWithEmptyMigrationRegistry is intended for use by tests.
func TestingWithEmptyMigrationRegistry() (restore func()) {
	save := backwardRegistry
	backwardRegistry = map[TypeKey]TypeKey{}
	return func() { backwardRegistry = save }
}
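
// Illustrative sketch (not part of the upstream file): suppose a
// hypothetical type previously named `mypkg.oldError` (pointer
// receiver, so its reflect name was "*mypkg.oldError") living at
// "example.com/mypkg" was renamed to newError. Registering the
// migration once at init time lets errors encoded by older versions
// decode into the new type, and keeps the wire key stable:
//
//	type newError struct{ msg string }
//
//	func (e *newError) Error() string { return e.msg }
//
//	func init() {
//		RegisterTypeMigration("example.com/mypkg", "*mypkg.oldError", &newError{})
//	}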

@ -0,0 +1,137 @@
// Copyright 2019 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package errbase

import (
	"fmt"

	"github.com/cockroachdb/errors/errorspb"
	"github.com/cockroachdb/redact"
)

// opaqueLeaf is used when receiving an unknown leaf type.
// Its important property is that if it is communicated
// back to some network system that _does_ know about
// the type, the original object can be restored.
type opaqueLeaf struct {
	msg     string
	details errorspb.EncodedErrorDetails
}

// opaqueLeafCauses is used when receiving an unknown multi-cause
// wrapper type. Its important property is that if it is communicated
// back to some network system that _does_ know about the type, the
// original object can be restored. We encode multi-cause errors as
// leaf nodes over the network, in order to support backwards
// compatibility with existing single-cause wrapper messages.
//
// This struct *must* be initialized with a non-nil causes value in
// order to comply with go stdlib expectations for `Unwrap()`.
type opaqueLeafCauses struct {
	opaqueLeaf
	causes []error
}

var _ error = (*opaqueLeaf)(nil)
var _ SafeDetailer = (*opaqueLeaf)(nil)
var _ fmt.Formatter = (*opaqueLeaf)(nil)
var _ SafeFormatter = (*opaqueLeaf)(nil)

var _ error = (*opaqueLeafCauses)(nil)
var _ SafeDetailer = (*opaqueLeafCauses)(nil)
var _ fmt.Formatter = (*opaqueLeafCauses)(nil)
var _ SafeFormatter = (*opaqueLeafCauses)(nil)

// opaqueWrapper is used when receiving an unknown wrapper type.
// Its important property is that if it is communicated
// back to some network system that _does_ know about
// the type, the original object can be restored.
type opaqueWrapper struct {
	cause       error
	prefix      string
	details     errorspb.EncodedErrorDetails
	messageType MessageType
}

var _ error = (*opaqueWrapper)(nil)
var _ SafeDetailer = (*opaqueWrapper)(nil)
var _ fmt.Formatter = (*opaqueWrapper)(nil)
var _ SafeFormatter = (*opaqueWrapper)(nil)

func (e *opaqueLeaf) Error() string { return e.msg }

func (e *opaqueWrapper) Error() string {
	if e.messageType == FullMessage {
		return e.prefix
	}
	if e.prefix == "" {
		return e.cause.Error()
	}
	return fmt.Sprintf("%s: %s", e.prefix, e.cause)
}

// the opaque wrapper is a wrapper.
func (e *opaqueWrapper) Cause() error  { return e.cause }
func (e *opaqueWrapper) Unwrap() error { return e.cause }

func (e *opaqueLeaf) SafeDetails() []string    { return e.details.ReportablePayload }
func (e *opaqueWrapper) SafeDetails() []string { return e.details.ReportablePayload }

func (e *opaqueLeaf) Format(s fmt.State, verb rune)       { FormatError(e, s, verb) }
func (e *opaqueLeafCauses) Format(s fmt.State, verb rune) { FormatError(e, s, verb) }
func (e *opaqueWrapper) Format(s fmt.State, verb rune)    { FormatError(e, s, verb) }

// opaqueLeafCauses is a multi-cause wrapper
func (e *opaqueLeafCauses) Unwrap() []error { return e.causes }

func (e *opaqueLeaf) SafeFormatError(p Printer) (next error) {
	p.Print(e.msg)
	if p.Detail() {
		p.Printf("\n(opaque error leaf)")
		p.Printf("\ntype name: %s", redact.Safe(e.details.OriginalTypeName))
		for i, d := range e.details.ReportablePayload {
			p.Printf("\nreportable %d:\n%s", redact.Safe(i), redact.Safe(d))
		}
		if e.details.FullDetails != nil {
			p.Printf("\npayload type: %s", redact.Safe(e.details.FullDetails.TypeUrl))
		}
	}
	return nil
}

func (e *opaqueWrapper) SafeFormatError(p Printer) (next error) {
	if len(e.prefix) > 0 {
		// We use the condition if len(msg) > 0 because
		// otherwise an empty string would cause a "redactable
		// empty string" to be emitted (something that looks like "<>")
		// and the error formatting code only cleanly elides
		// the prefix properly if the output string is completely empty.
		p.Print(e.prefix)
	}
	if p.Detail() {
		p.Printf("\n(opaque error wrapper)")
		p.Printf("\ntype name: %s", redact.Safe(e.details.OriginalTypeName))
		for i, d := range e.details.ReportablePayload {
			p.Printf("\nreportable %d:\n%s", redact.Safe(i), redact.Safe(d))
		}
		if e.details.FullDetails != nil {
			p.Printf("\npayload type: %s", redact.Safe(e.details.FullDetails.TypeUrl))
		}
	}
	if e.messageType == FullMessage {
		return nil
	}
	return e.cause
}

@ -0,0 +1,24 @@
// Copyright 2021 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

// +build go1.16

package errbase

import "io/fs"

func registerOsPathErrorMigration() {
	// The os.PathError type was migrated to io.fs.PathError in Go 1.16.
	RegisterTypeMigration("os", "*os.PathError", &fs.PathError{})
}

@ -0,0 +1,19 @@
// Copyright 2021 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

// +build !go1.16

package errbase

func registerOsPathErrorMigration() {}

@ -0,0 +1,93 @@
// Copyright 2019 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package errbase

import (
	"fmt"

	"github.com/cockroachdb/errors/errorspb"
	pkgErr "github.com/pkg/errors"
)

// SafeDetailer is an interface that can be implemented by errors that
// can provide PII-free additional strings suitable for reporting or
// telemetry.
type SafeDetailer interface {
	SafeDetails() []string
}

// GetAllSafeDetails collects the safe details from the given error object
// and all its causes.
// The details are collected from outermost to innermost level of cause.
func GetAllSafeDetails(err error) []SafeDetailPayload {
	var details []SafeDetailPayload
	for ; err != nil; err = UnwrapOnce(err) {
		details = append(details, GetSafeDetails(err))
	}
	return details
}

// GetSafeDetails collects the safe details from the given error
// object. If it is a wrapper, only the details from the wrapper are
// returned.
func GetSafeDetails(err error) (payload SafeDetailPayload) {
	origTypeName, famName, ext := getTypeDetails(err, false /*onlyFamily*/)
	payload.OriginalTypeName = origTypeName
	payload.ErrorTypeMark = errorspb.ErrorTypeMark{
		FamilyName: famName,
		Extension:  ext,
	}
	payload.SafeDetails = getDetails(err)
	return
}

func getDetails(err error) []string {
	if sd, ok := err.(SafeDetailer); ok {
		return sd.SafeDetails()
	}
	// For convenience, we also know how to extract stack traces
	// in the style of github.com/pkg/errors.
	if st, ok := err.(interface{ StackTrace() pkgErr.StackTrace }); ok {
		return []string{fmt.Sprintf("%+v", st.StackTrace())}
	}
	return nil
}

// SafeDetailPayload captures the safe strings for one
// level of wrapping.
type SafeDetailPayload struct {
	// OriginalTypeName is the concrete type of the error that the details
	// are coming from.
	OriginalTypeName string
	// ErrorTypeMark is the mark of the error that the details are
	// coming from. This may contain a different type name than
	// OriginalTypeName in case an error type was migrated.
	ErrorTypeMark errorspb.ErrorTypeMark
	// SafeDetails are the PII-free strings.
	SafeDetails []string
}

// Fill can be used to concatenate multiple SafeDetailPayloads.
func (s *SafeDetailPayload) Fill(slice []string) []string {
	if len(s.SafeDetails) == 0 {
		return slice
	}
	slice = append(slice, fmt.Sprintf("details for %s::%s:",
		s.ErrorTypeMark.FamilyName, s.ErrorTypeMark.Extension))
	for _, sd := range s.SafeDetails {
		slice = append(slice, " "+sd)
	}
	return slice
}
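
// Illustrative sketch (not part of the upstream file): an error type
// can expose PII-free strings for telemetry by implementing
// SafeDetailer; GetAllSafeDetails then collects them level by level.
// The type and detail strings are made up:
//
//	type diskFullError struct{ path string }
//
//	func (e *diskFullError) Error() string         { return "disk full at " + e.path }
//	func (e *diskFullError) SafeDetails() []string { return []string{"disk full"} }
//
//	// Elsewhere:
//	//   payloads := GetAllSafeDetails(err)
//	// yields one SafeDetailPayload per level of wrapping, outermost
//	// first, with SafeDetails equal to ["disk full"] for the leaf level.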

@ -0,0 +1,71 @@
// Copyright 2019 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package errbase

// Sadly the go 2/1.13 design for errors has promoted the name
// `Unwrap()` for the method that accesses the cause, whilst the
// ecosystem has already chosen `Cause()`. In order to unwrap
// reliably, we must thus support both.
//
// See: https://github.com/golang/go/issues/31778

// UnwrapOnce accesses the direct cause of the error if any, otherwise
// returns nil.
//
// It supports both errors implementing causer (`Cause()` method, from
// github.com/pkg/errors) and `Wrapper` (`Unwrap()` method, from the
// Go 2 error proposal).
//
// UnwrapOnce treats multi-errors (those implementing the
// `Unwrap() []error` interface) as leaf-nodes since they cannot
// reasonably be iterated through to a single cause. These errors
// are typically constructed as a result of `fmt.Errorf` which results
// in a `wrapErrors` instance that contains an interpolated error
// string along with a list of causes.
//
// The go stdlib does not define output on `Unwrap()` for a multi-cause
// error, so we default to nil here.
func UnwrapOnce(err error) (cause error) {
	switch e := err.(type) {
	case interface{ Cause() error }:
		return e.Cause()
	case interface{ Unwrap() error }:
		return e.Unwrap()
	}
	return nil
}

// UnwrapAll accesses the root cause object of the error.
// If the error has no cause (leaf error), it is returned directly.
// UnwrapAll treats multi-errors as leaf nodes.
func UnwrapAll(err error) error {
	for {
		if cause := UnwrapOnce(err); cause != nil {
			err = cause
			continue
		}
		break
	}
	return err
}

// UnwrapMulti accesses the slice of causes that an error contains, if it is a
// multi-error.
func UnwrapMulti(err error) []error {
	if me, ok := err.(interface{ Unwrap() []error }); ok {
		return me.Unwrap()
	}
	return nil
}
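
// Illustrative sketch (not part of the upstream file): how the three
// helpers behave on a simple chain built with the standard library
// errors and fmt packages (the multi-%w form needs Go 1.20+):
//
//	base := errors.New("base")                             // leaf
//	wrapped := fmt.Errorf("context: %w", base)             // single-cause wrapper
//	multi := fmt.Errorf("both: %w and %w", base, wrapped)  // multi-cause wrapper
//
//	UnwrapOnce(wrapped) // returns base
//	UnwrapAll(wrapped)  // returns base
//	UnwrapOnce(multi)   // returns nil: multi-cause errors are treated as leaves
//	UnwrapMulti(multi)  // returns []error{base, wrapped}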

@ -0,0 +1,253 @@
// Copyright 2019 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package errors

import (
	"context"
	"fmt"

	"github.com/cockroachdb/errors/errbase"
)

// UnwrapOnce accesses the direct cause of the error if any, otherwise
// returns nil.
//
// It supports both errors implementing causer (`Cause()` method, from
// github.com/pkg/errors) and `Wrapper` (`Unwrap()` method, from the
// Go 2 error proposal).
func UnwrapOnce(err error) error { return errbase.UnwrapOnce(err) }

// UnwrapAll accesses the root cause object of the error.
// If the error has no cause (leaf error), it is returned directly.
func UnwrapAll(err error) error { return errbase.UnwrapAll(err) }

// EncodedError is the type of an encoded (and protobuf-encodable) error.
type EncodedError = errbase.EncodedError

// EncodeError encodes an error.
func EncodeError(ctx context.Context, err error) EncodedError { return errbase.EncodeError(ctx, err) }

// DecodeError decodes an error.
func DecodeError(ctx context.Context, enc EncodedError) error { return errbase.DecodeError(ctx, enc) }

// SafeDetailer is an interface that can be implemented by errors that
// can provide PII-free additional strings suitable for reporting or
// telemetry.
type SafeDetailer = errbase.SafeDetailer

// GetAllSafeDetails collects the safe details from the given error object
// and all its causes.
// The details are collected from outermost to innermost level of cause.
func GetAllSafeDetails(err error) []SafeDetailPayload { return errbase.GetAllSafeDetails(err) }

// GetSafeDetails collects the safe details from the given error
// object. If it is a wrapper, only the details from the wrapper are
// returned.
func GetSafeDetails(err error) (payload SafeDetailPayload) { return errbase.GetSafeDetails(err) }

// SafeDetailPayload captures the safe strings for one
// level of wrapping.
type SafeDetailPayload = errbase.SafeDetailPayload

// RegisterLeafDecoder can be used to register new leaf error types to
// the library. Registered types will be decoded using their own
// Go type when an error is decoded. Wrappers that have not been
// registered will be decoded using the opaqueLeaf type.
//
// Note: if the error type has been migrated from a previous location
// or a different type, ensure that RegisterTypeMigration() was called
// prior to RegisterLeafDecoder().
func RegisterLeafDecoder(typeName TypeKey, decoder LeafDecoder) {
	errbase.RegisterLeafDecoder(typeName, decoder)
}

// TypeKey identifies an error for the purpose of looking up decoders.
// It is equivalent to the "family name" in ErrorTypeMark.
type TypeKey = errbase.TypeKey

// GetTypeKey retrieves the type key for a given error object. This
// is meant for use in combination with the Register functions.
func GetTypeKey(err error) TypeKey { return errbase.GetTypeKey(err) }

// LeafDecoder is to be provided (via RegisterLeafDecoder above)
// by additional wrapper types not yet known to this library.
// A nil return indicates that decoding was not successful.
type LeafDecoder = errbase.LeafDecoder

// MultiCauseDecoder is to be provided (via RegisterMultiCauseDecoder
// above) by additional multi-cause wrapper types not yet known by the
// library. A nil return indicates that decoding was not successful.
type MultiCauseDecoder = errbase.MultiCauseDecoder

// RegisterMultiCauseDecoder can be used to register new multi-cause
// wrapper types to the library. Registered wrappers will be decoded
// using their own Go type when an error is decoded. Multi-cause
// wrappers that have not been registered will be decoded using the
// opaqueWrapper type.
func RegisterMultiCauseDecoder(theType TypeKey, decoder MultiCauseDecoder) {
	errbase.RegisterMultiCauseDecoder(theType, decoder)
}

// RegisterWrapperDecoder can be used to register new wrapper types to
// the library. Registered wrappers will be decoded using their own
// Go type when an error is decoded. Wrappers that have not been
// registered will be decoded using the opaqueWrapper type.
//
// Note: if the error type has been migrated from a previous location
// or a different type, ensure that RegisterTypeMigration() was called
// prior to RegisterWrapperDecoder().
func RegisterWrapperDecoder(typeName TypeKey, decoder WrapperDecoder) {
	errbase.RegisterWrapperDecoder(typeName, decoder)
}

// WrapperDecoder is to be provided (via RegisterWrapperDecoder above)
// by additional wrapper types not yet known to this library.
// A nil return indicates that decoding was not successful.
type WrapperDecoder = errbase.WrapperDecoder

// RegisterLeafEncoder can be used to register new leaf error types to
// the library. Registered types will be encoded using their own
// Go type when an error is encoded. Wrappers that have not been
// registered will be encoded using the opaqueLeaf type.
//
// Note: if the error type has been migrated from a previous location
// or a different type, ensure that RegisterTypeMigration() was called
// prior to RegisterLeafEncoder().
func RegisterLeafEncoder(typeName TypeKey, encoder LeafEncoder) {
	errbase.RegisterLeafEncoder(typeName, encoder)
}

// LeafEncoder is to be provided (via RegisterLeafEncoder above)
// by additional wrapper types not yet known to this library.
type LeafEncoder = errbase.LeafEncoder

// RegisterWrapperEncoder can be used to register new wrapper types to
// the library. Registered wrappers will be encoded using their own
// Go type when an error is encoded. Wrappers that have not been
// registered will be encoded using the opaqueWrapper type.
//
// Note: if the error type has been migrated from a previous location
// or a different type, ensure that RegisterTypeMigration() was called
// prior to RegisterWrapperEncoder().
func RegisterWrapperEncoder(typeName TypeKey, encoder WrapperEncoder) {
	errbase.RegisterWrapperEncoder(typeName, encoder)
}

// WrapperEncoder is to be provided (via RegisterWrapperEncoder above)
// by additional wrapper types not yet known to this library.
type WrapperEncoder = errbase.WrapperEncoder

// RegisterWrapperEncoderWithMessageType can be used to register new wrapper
// types to the library. These wrappers can optionally override the child error
// messages with their own error string instead of relying on iterative
// concatenation. Registered wrappers will be encoded using their own Go type
// when an error is encoded. Wrappers that have not been registered will be
// encoded using the opaqueWrapper type.
//
// Note: if the error type has been migrated from a previous location
// or a different type, ensure that RegisterTypeMigration() was called
// prior to RegisterWrapperEncoder().
func RegisterWrapperEncoderWithMessageType(typeName TypeKey, encoder WrapperEncoderWithMessageType) {
	errbase.RegisterWrapperEncoderWithMessageType(typeName, encoder)
}

// WrapperEncoderWithMessageType is to be provided (via
// RegisterWrapperEncoderWithMessageType) by additional wrapper
// types not yet known to this library.
type WrapperEncoderWithMessageType = errbase.WrapperEncoderWithMessageType

// RegisterMultiCauseEncoder can be used to register new multi-cause
// error types to the library. Registered types will be encoded using
// their own Go type when an error is encoded. Multi-cause wrappers
// that have not been registered will be encoded using the
// opaqueWrapper type.
func RegisterMultiCauseEncoder(typeName TypeKey, encoder MultiCauseEncoder) {
	errbase.RegisterMultiCauseEncoder(typeName, encoder)
}

// MultiCauseEncoder is to be provided (via RegisterMultiCauseEncoder
// above) by additional multi-cause wrapper types not yet known to this
// library. The encoder will automatically extract and encode the
// causes of this error by calling `Unwrap()` and expecting a slice of
// errors.
type MultiCauseEncoder = errbase.MultiCauseEncoder

// SetWarningFn enables configuration of the warning function.
func SetWarningFn(fn func(context.Context, string, ...interface{})) { errbase.SetWarningFn(fn) }

// A Formatter formats error messages.
//
// NB: Consider implementing SafeFormatter instead. This will ensure
// that error displays can distinguish bits that are PII-safe.
type Formatter = errbase.Formatter

// SafeFormatter is implemented by error leaf or wrapper types that want
// to separate safe and non-safe information when printed out.
//
// When multiple errors are chained (e.g. via errors.Wrap), intermediate
// layers in the error that do not implement SafeError are considered
// “unsafe”.
type SafeFormatter = errbase.SafeFormatter

// A Printer formats error messages.
//
// The most common implementation of Printer is the one provided by package fmt
// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message
// typically provide their own implementations.
type Printer = errbase.Printer

// FormatError formats an error according to s and verb.
// This is a helper meant for use when implementing the fmt.Formatter
// interface on custom error objects.
//
// If the error implements errors.Formatter, FormatError calls its
// FormatError method of f with an errors.Printer configured according
// to s and verb, and writes the result to s.
//
// Otherwise, if it is a wrapper, FormatError prints out its error prefix,
// then recurses on its cause.
//
// Otherwise, its Error() text is printed.
func FormatError(err error, s fmt.State, verb rune) { errbase.FormatError(err, s, verb) }

// Formattable wraps an error into a fmt.Formatter which
// will provide "smart" formatting even if the outer layer
// of the error does not implement the Formatter interface.
func Formattable(err error) fmt.Formatter { return errbase.Formattable(err) }

// RegisterTypeMigration tells the library that the type of the error
// given as 3rd argument was previously known with type
// previousTypeName, located at previousPkgPath.
//
// The value of previousTypeName must be the result of calling
// reflect.TypeOf(err).String() on the original error object.
// This is usually composed as follows:
//
//	[*]<shortpackage>.<errortype>
//
// For example, Go's standard error type has name "*errors.errorString".
// The asterisk indicates that `errorString` implements the `error`
// interface via pointer receiver.
//
// Meanwhile, the singleton error type context.DeadlineExceeded
// has name "context.deadlineExceededError", without asterisk
// because the type implements `error` by value.
//
// Remember that the short package name inside the error type name and
// the last component of the package path can be different. This is
// why they must be specified separately.
func RegisterTypeMigration(previousPkgPath, previousTypeName string, newType error) {
	errbase.RegisterTypeMigration(previousPkgPath, previousTypeName, newType)
}
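
// Illustrative sketch (not part of the upstream file): the encode /
// decode pair above is what lets an error cross a process boundary,
// for example inside an RPC payload. Types registered with the
// Register* functions come back as their own Go type; unregistered
// types come back as opaque errors that still re-encode faithfully.
// `ctx` and `err` are assumed to be in scope:
//
//	enc := EncodeError(ctx, err)  // EncodedError is a protobuf message
//	// ... send enc over the wire ...
//	back := DecodeError(ctx, enc) // reconstructed error chain
//	// back.Error() matches err.Error(), and equivalence checks based on
//	// the markers package still recognize the decoded error.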

@ -0,0 +1,21 @@
// Copyright 2021 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package errorspb

// IsSet returns true if the EncodedError contains an error, or false if it is
// empty.
func (m *EncodedError) IsSet() bool {
	return m.Error != nil
}

File diff suppressed because it is too large

@ -0,0 +1,149 @@
syntax = "proto3";
package cockroach.errorspb;
option go_package = "errorspb";

import "google/protobuf/any.proto";
import "gogoproto/gogo.proto";

// EncodedError is the wire-encodable representation
// of an error (or error cause chain).
message EncodedError {
  oneof error {
    // This is used for leaf error objects.
    EncodedErrorLeaf leaf = 1;
    // This is used for wrapper errors.
    EncodedWrapper wrapper = 2;
  }
}

// EncodedErrorLeaf is the wire-encodable representation
// of an error leaf or a multi-cause wrapper from go 1.20+.
message EncodedErrorLeaf {
  // The main error message (mandatory), that can be printed to human
  // users and may contain PII. This contains the value of the leaf
  // error's Error(), or the message produced by a registered encoder.
  string message = 1;

  // The error details.
  EncodedErrorDetails details = 2 [(gogoproto.nullable) = false];

  // multierror_causes is a list of errors that contain the causal tree
  // of this leaf. If this field is not empty, then this leaf encodes
  // an error from go 1.20 or later that encodes multiple causes in its
  // chain.
  repeated EncodedError multierror_causes = 3;
}

message EncodedErrorDetails {
  // The original fully qualified error type name (mandatory).
  // This is primarily used to print out error details
  // in error reports and Format().
  //
  // It is additionally used to populate the error mark
  // below when the family name is not known/set.
  // See the `markers` error package and the
  // RFC on error handling for details.
  string original_type_name = 1;

  // The error mark. This is used to determine error equivalence and
  // identify a decode function.
  // See the `markers` error package and the
  // RFC on error handling for details.
  ErrorTypeMark error_type_mark = 2 [(gogoproto.nullable) = false];

  // The reportable payload (optional), which is as descriptive as
  // possible but may not contain PII.
  //
  // This is extracted automatically using a registered encoder, if
  // any, or the SafeDetailer interface.
  repeated string reportable_payload = 3;

  // An arbitrary payload that (presumably) encodes the
  // native error object. This is also optional.
  //
  // This is extracted automatically using a registered encoder, if
  // any.
  google.protobuf.Any full_details = 4;
}

// MessageType encodes information regarding the interpretation of an
// error's message string.
enum MessageType {
  // Keeping zero value as default for backwards compatibility.
  // PREFIX is a message that should be prepended to its cause.
  PREFIX = 0;
  // FULL_MESSAGE is a complete error message that can be displayed
  // without its cause.
  FULL_MESSAGE = 1;
}

// EncodedWrapper is the wire-encodable representation
// of an error wrapper.
message EncodedWrapper {
  // The cause error. Mandatory.
  EncodedError cause = 1 [(gogoproto.nullable) = false];

  // The wrapper message. This could either be a full error message
  // that can be printed independently, or a (potentially empty) prefix
  // which is printed before the cause's own message to construct the
  // full message. This may contain PII.
  //
  // This is extracted automatically:
  //
  // - for wrappers that have a registered encoder,
  // - otherwise, when the wrapper's Error() has its cause's Error() as suffix.
  string message = 2;

  // The error details.
  EncodedErrorDetails details = 3 [(gogoproto.nullable) = false];

  // message_type encodes the interpretation of `message`. Prior
  // versions will not set this field and it will be left as `PREFIX`.
  // This retains backwards compatibility since the new behavior is
  // only enabled when this enum is set to `FULL_MESSAGE`.
  MessageType message_type = 4;
}

// ErrorTypeMark identifies an error type for the purpose of determining
// error equivalences and looking up decoder functions.
message ErrorTypeMark {
  // The family name identifies the error type.
  // This is equal to original_type_name above in the common case, but
  // can be overridden when e.g. the package that defines the type
  // changes path.
  // This is the field also used for looking up a decode function.
  string family_name = 1;

  // This marker string is used in combination with
  // the family name for the purpose of determining error equivalence.
  // This can be used to separate error instances that have the same type
  // into separate equivalence classes.
  // See the `markers` error package and the
  // RFC on error handling for details.
  string extension = 2;
}

// StringsPayload is used to encode the payload of certain error
// types.
message StringsPayload {
  repeated string details = 1;
}

// ErrnoPayload is used to encode the payload of syscall.Errno
// errors.
message ErrnoPayload {
  // The original errno numeric code.
  int64 orig_errno = 1;
  // The platform where the syscall.Errno was encoded. This
  // is needed because the numeric values of syscall.Errno
  // have different meanings depending on the platform.
  // When decoding, if the arch field does not match we
  // use a substitute data type instead.
  string arch = 2;

  bool is_permission = 3;
  bool is_exist = 4;
  bool is_not_exist = 5;
  bool is_timeout = 6;
  bool is_temporary = 7;
}
|
|
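The messages above define the wire form produced and consumed by the library's encode/decode helpers. A minimal usage sketch, not part of this diff and assuming the public EncodeError/DecodeError helpers of github.com/cockroachdb/errors: an error is converted to an EncodedError, can be marshaled across an RPC boundary, and is reconstructed on the other side while keeping its identity for errors.Is.

package main

import (
	"context"
	"fmt"

	"github.com/cockroachdb/errors"
)

func main() {
	ctx := context.Background()
	origErr := errors.Wrap(errors.New("boom"), "while doing work")

	// EncodeError produces the EncodedError protobuf described above;
	// it can be marshaled and sent across process boundaries.
	enc := errors.EncodeError(ctx, origErr)

	// DecodeError rebuilds an error object from the wire form.
	newErr := errors.DecodeError(ctx, enc)

	fmt.Println(newErr)                     // "while doing work: boom"
	fmt.Println(errors.Is(newErr, origErr)) // true: type marks are preserved
}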
@ -0,0 +1,302 @@
|
|||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: errorspb/hintdetail.proto
|
||||
|
||||
package errorspb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type StringPayload struct {
|
||||
Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"`
|
||||
}
|
||||
|
||||
func (m *StringPayload) Reset() { *m = StringPayload{} }
|
||||
func (m *StringPayload) String() string { return proto.CompactTextString(m) }
|
||||
func (*StringPayload) ProtoMessage() {}
|
||||
func (*StringPayload) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d99b88ec9cc4bc22, []int{0}
|
||||
}
|
||||
func (m *StringPayload) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *StringPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *StringPayload) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StringPayload.Merge(m, src)
|
||||
}
|
||||
func (m *StringPayload) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *StringPayload) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StringPayload.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StringPayload proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*StringPayload)(nil), "cockroach.errorspb.StringPayload")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("errorspb/hintdetail.proto", fileDescriptor_d99b88ec9cc4bc22) }
|
||||
|
||||
var fileDescriptor_d99b88ec9cc4bc22 = []byte{
|
||||
// 143 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0x2d, 0x2a, 0xca,
|
||||
0x2f, 0x2a, 0x2e, 0x48, 0xd2, 0xcf, 0xc8, 0xcc, 0x2b, 0x49, 0x49, 0x2d, 0x49, 0xcc, 0xcc, 0xd1,
|
||||
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xce, 0x4f, 0xce, 0x2e, 0xca, 0x4f, 0x4c, 0xce,
|
||||
0xd0, 0x83, 0x29, 0x52, 0x52, 0xe4, 0xe2, 0x0d, 0x2e, 0x29, 0xca, 0xcc, 0x4b, 0x0f, 0x48, 0xac,
|
||||
0xcc, 0xc9, 0x4f, 0x4c, 0x11, 0x12, 0xe0, 0x62, 0xce, 0x2d, 0x4e, 0x97, 0x60, 0x54, 0x60, 0xd4,
|
||||
0xe0, 0x0c, 0x02, 0x31, 0x9d, 0xb4, 0x4e, 0x3c, 0x94, 0x63, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2,
|
||||
0x23, 0x39, 0xc6, 0x1b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1,
|
||||
0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x38, 0x60, 0xc6, 0x25, 0xb1, 0x81, 0x6d,
|
||||
0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xe2, 0xbd, 0x3a, 0x3e, 0x86, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *StringPayload) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *StringPayload) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *StringPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Msg) > 0 {
|
||||
i -= len(m.Msg)
|
||||
copy(dAtA[i:], m.Msg)
|
||||
i = encodeVarintHintdetail(dAtA, i, uint64(len(m.Msg)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintHintdetail(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovHintdetail(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *StringPayload) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Msg)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovHintdetail(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovHintdetail(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozHintdetail(x uint64) (n int) {
|
||||
return sovHintdetail(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *StringPayload) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowHintdetail
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: StringPayload: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: StringPayload: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowHintdetail
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthHintdetail
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthHintdetail
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Msg = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipHintdetail(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthHintdetail
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipHintdetail(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowHintdetail
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowHintdetail
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowHintdetail
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthHintdetail
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupHintdetail
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthHintdetail
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthHintdetail = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowHintdetail = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupHintdetail = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
|
@@ -0,0 +1,7 @@
syntax = "proto3";
package cockroach.errorspb;
option go_package = "errorspb";

message StringPayload {
  string msg = 1;
}
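StringPayload is the payload carried by the library's hint and detail wrappers. A small sketch of how those wrappers are typically used, assuming the public WithHint/WithDetail/GetAllHints/GetAllDetails helpers of github.com/cockroachdb/errors (not shown in this diff):

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

func main() {
	err := errors.New("connection refused")
	// Each WithHint/WithDetail wrapper carries one string payload.
	err = errors.WithHint(err, "check that the server is running")
	err = errors.WithDetail(err, "dial tcp 127.0.0.1:26257")

	fmt.Println(errors.GetAllHints(err))   // [check that the server is running]
	fmt.Println(errors.GetAllDetails(err)) // [dial tcp 127.0.0.1:26257]
}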
@@ -0,0 +1,19 @@
// Copyright 2019 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package errorspb

func (m ErrorTypeMark) Equals(o ErrorTypeMark) bool {
	return m.FamilyName == o.FamilyName && m.Extension == o.Extension
}
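A minimal illustration of the comparison above, using hypothetical mark values: two marks denote the same error type only when both the family name and the extension agree.

package main

import (
	"fmt"

	"github.com/cockroachdb/errors/errorspb"
)

func main() {
	// Hypothetical marks, for illustration only.
	a := errorspb.ErrorTypeMark{FamilyName: "os/*os.PathError", Extension: ""}
	b := errorspb.ErrorTypeMark{FamilyName: "os/*os.PathError", Extension: "class-2"}

	fmt.Println(a.Equals(a)) // true
	fmt.Println(a.Equals(b)) // false: same type, different equivalence class
}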
@ -0,0 +1,365 @@
|
|||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: errorspb/markers.proto
|
||||
|
||||
package errorspb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// MarkPayload is the error payload for a forced marker.
|
||||
// See errors/markers/markers.go and the RFC on
|
||||
// error handling for details.
|
||||
type MarkPayload struct {
|
||||
Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"`
|
||||
Types []ErrorTypeMark `protobuf:"bytes,2,rep,name=types,proto3" json:"types"`
|
||||
}
|
||||
|
||||
func (m *MarkPayload) Reset() { *m = MarkPayload{} }
|
||||
func (m *MarkPayload) String() string { return proto.CompactTextString(m) }
|
||||
func (*MarkPayload) ProtoMessage() {}
|
||||
func (*MarkPayload) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_84c3fd24ba37816d, []int{0}
|
||||
}
|
||||
func (m *MarkPayload) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *MarkPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *MarkPayload) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MarkPayload.Merge(m, src)
|
||||
}
|
||||
func (m *MarkPayload) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *MarkPayload) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MarkPayload.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MarkPayload proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*MarkPayload)(nil), "cockroach.errorspb.MarkPayload")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("errorspb/markers.proto", fileDescriptor_84c3fd24ba37816d) }
|
||||
|
||||
var fileDescriptor_84c3fd24ba37816d = []byte{
|
||||
// 198 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0x2d, 0x2a, 0xca,
|
||||
0x2f, 0x2a, 0x2e, 0x48, 0xd2, 0xcf, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0x2a, 0xd6, 0x2b, 0x28, 0xca,
|
||||
0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xce, 0x4f, 0xce, 0x2e, 0xca, 0x4f, 0x4c, 0xce, 0xd0, 0x83, 0xa9,
|
||||
0x90, 0x12, 0x85, 0xab, 0x85, 0x30, 0x20, 0x4a, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x4c,
|
||||
0x7d, 0x10, 0x0b, 0x22, 0xaa, 0x14, 0xc7, 0xc5, 0xed, 0x9b, 0x58, 0x94, 0x1d, 0x90, 0x58, 0x99,
|
||||
0x93, 0x9f, 0x98, 0x22, 0x24, 0xc0, 0xc5, 0x9c, 0x5b, 0x9c, 0x2e, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1,
|
||||
0x19, 0x04, 0x62, 0x0a, 0xd9, 0x72, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0x4b, 0x30, 0x29, 0x30,
|
||||
0x6b, 0x70, 0x1b, 0x29, 0xea, 0x61, 0xda, 0xa8, 0xe7, 0x0a, 0x62, 0x84, 0x54, 0x16, 0xa4, 0x82,
|
||||
0x8c, 0x72, 0x62, 0x39, 0x71, 0x4f, 0x9e, 0x21, 0x08, 0xa2, 0xcb, 0x49, 0xeb, 0xc4, 0x43, 0x39,
|
||||
0x86, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0xbc, 0xf1, 0x48, 0x8e, 0xf1, 0xc1, 0x23,
|
||||
0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x8a,
|
||||
0x03, 0x66, 0x4c, 0x12, 0x1b, 0xd8, 0x49, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x18, 0x43,
|
||||
0x45, 0xef, 0xed, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *MarkPayload) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *MarkPayload) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *MarkPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Types) > 0 {
|
||||
for iNdEx := len(m.Types) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Types[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintMarkers(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
if len(m.Msg) > 0 {
|
||||
i -= len(m.Msg)
|
||||
copy(dAtA[i:], m.Msg)
|
||||
i = encodeVarintMarkers(dAtA, i, uint64(len(m.Msg)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintMarkers(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovMarkers(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *MarkPayload) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Msg)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovMarkers(uint64(l))
|
||||
}
|
||||
if len(m.Types) > 0 {
|
||||
for _, e := range m.Types {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovMarkers(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovMarkers(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozMarkers(x uint64) (n int) {
|
||||
return sovMarkers(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *MarkPayload) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMarkers
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: MarkPayload: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: MarkPayload: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMarkers
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthMarkers
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthMarkers
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Msg = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Types", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMarkers
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthMarkers
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthMarkers
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Types = append(m.Types, ErrorTypeMark{})
|
||||
if err := m.Types[len(m.Types)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipMarkers(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthMarkers
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipMarkers(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowMarkers
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowMarkers
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowMarkers
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthMarkers
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupMarkers
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthMarkers
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthMarkers = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowMarkers = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupMarkers = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
|
@@ -0,0 +1,14 @@
syntax = "proto3";
package cockroach.errorspb;
option go_package = "errorspb";

import "errorspb/errors.proto";
import "gogoproto/gogo.proto";

// MarkPayload is the error payload for a forced marker.
// See errors/markers/markers.go and the RFC on
// error handling for details.
message MarkPayload {
  string msg = 1;
  repeated ErrorTypeMark types = 2 [(gogoproto.nullable) = false];
}
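MarkPayload is what a forced marker attaches under the hood. A rough sketch of the user-facing behavior, assuming the public Mark/Is helpers of github.com/cockroachdb/errors: after marking, the error compares equal to the reference via errors.Is even though the underlying types differ.

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

var ErrMaintenance = errors.New("cluster is under maintenance")

func main() {
	// A low-level failure that callers should treat as ErrMaintenance.
	lowLevel := errors.New("lease acquisition timed out")
	err := errors.Mark(lowLevel, ErrMaintenance)

	fmt.Println(errors.Is(err, ErrMaintenance)) // true: forced marker
	fmt.Println(err)                            // "lease acquisition timed out"
}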
@ -0,0 +1,525 @@
|
|||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: errorspb/tags.proto
|
||||
|
||||
package errorspb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// TagsPayload is the error payload for a WithContext
|
||||
// marker.
|
||||
// See errors/contexttags/withcontext.go and the RFC on
|
||||
// error handling for details.
|
||||
type TagsPayload struct {
|
||||
Tags []TagPayload `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags"`
|
||||
}
|
||||
|
||||
func (m *TagsPayload) Reset() { *m = TagsPayload{} }
|
||||
func (m *TagsPayload) String() string { return proto.CompactTextString(m) }
|
||||
func (*TagsPayload) ProtoMessage() {}
|
||||
func (*TagsPayload) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2f0d1dc5c54e9f63, []int{0}
|
||||
}
|
||||
func (m *TagsPayload) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *TagsPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *TagsPayload) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TagsPayload.Merge(m, src)
|
||||
}
|
||||
func (m *TagsPayload) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *TagsPayload) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TagsPayload.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TagsPayload proto.InternalMessageInfo
|
||||
|
||||
type TagPayload struct {
|
||||
Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"`
|
||||
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *TagPayload) Reset() { *m = TagPayload{} }
|
||||
func (m *TagPayload) String() string { return proto.CompactTextString(m) }
|
||||
func (*TagPayload) ProtoMessage() {}
|
||||
func (*TagPayload) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2f0d1dc5c54e9f63, []int{1}
|
||||
}
|
||||
func (m *TagPayload) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *TagPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *TagPayload) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TagPayload.Merge(m, src)
|
||||
}
|
||||
func (m *TagPayload) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *TagPayload) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TagPayload.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TagPayload proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*TagsPayload)(nil), "cockroach.errorspb.TagsPayload")
|
||||
proto.RegisterType((*TagPayload)(nil), "cockroach.errorspb.TagPayload")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("errorspb/tags.proto", fileDescriptor_2f0d1dc5c54e9f63) }
|
||||
|
||||
var fileDescriptor_2f0d1dc5c54e9f63 = []byte{
|
||||
// 201 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0x2d, 0x2a, 0xca,
|
||||
0x2f, 0x2a, 0x2e, 0x48, 0xd2, 0x2f, 0x49, 0x4c, 0x2f, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17,
|
||||
0x12, 0x4a, 0xce, 0x4f, 0xce, 0x2e, 0xca, 0x4f, 0x4c, 0xce, 0xd0, 0x83, 0x49, 0x4b, 0x89, 0xa4,
|
||||
0xe7, 0xa7, 0xe7, 0x83, 0xa5, 0xf5, 0x41, 0x2c, 0x88, 0x4a, 0x25, 0x77, 0x2e, 0xee, 0x90, 0xc4,
|
||||
0xf4, 0xe2, 0x80, 0xc4, 0xca, 0x9c, 0xfc, 0xc4, 0x14, 0x21, 0x0b, 0x2e, 0x16, 0x90, 0x31, 0x12,
|
||||
0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x72, 0x7a, 0x98, 0xe6, 0xe8, 0x85, 0x24, 0xa6, 0x43, 0x55,
|
||||
0x3b, 0xb1, 0x9c, 0xb8, 0x27, 0xcf, 0x10, 0x04, 0xd6, 0xa1, 0x64, 0xc2, 0xc5, 0x85, 0x90, 0x11,
|
||||
0x12, 0xe0, 0x62, 0x2e, 0x49, 0x4c, 0x97, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0x31, 0x85,
|
||||
0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x93, 0xd6,
|
||||
0x89, 0x87, 0x72, 0x0c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0x78, 0xe3, 0x91, 0x1c,
|
||||
0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1,
|
||||
0x1c, 0x43, 0x14, 0x07, 0xcc, 0xe2, 0x24, 0x36, 0xb0, 0x8b, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff,
|
||||
0xff, 0x0a, 0x90, 0xd1, 0xc9, 0xf2, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *TagsPayload) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *TagsPayload) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *TagsPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Tags) > 0 {
|
||||
for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTags(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *TagPayload) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *TagPayload) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *TagPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Value) > 0 {
|
||||
i -= len(m.Value)
|
||||
copy(dAtA[i:], m.Value)
|
||||
i = encodeVarintTags(dAtA, i, uint64(len(m.Value)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if len(m.Tag) > 0 {
|
||||
i -= len(m.Tag)
|
||||
copy(dAtA[i:], m.Tag)
|
||||
i = encodeVarintTags(dAtA, i, uint64(len(m.Tag)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintTags(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovTags(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *TagsPayload) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Tags) > 0 {
|
||||
for _, e := range m.Tags {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovTags(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *TagPayload) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Tag)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTags(uint64(l))
|
||||
}
|
||||
l = len(m.Value)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovTags(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovTags(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozTags(x uint64) (n int) {
|
||||
return sovTags(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *TagsPayload) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTags
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: TagsPayload: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: TagsPayload: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTags
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthTags
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTags
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Tags = append(m.Tags, TagPayload{})
|
||||
if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTags(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthTags
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *TagPayload) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTags
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: TagPayload: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: TagPayload: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTags
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTags
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTags
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Tag = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTags
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTags
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTags
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Value = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTags(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthTags
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipTags(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTags
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTags
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTags
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthTags
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupTags
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthTags
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthTags = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowTags = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupTags = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
|
@@ -0,0 +1,18 @@
syntax = "proto3";
package cockroach.errorspb;
option go_package = "errorspb";

import "gogoproto/gogo.proto";

// TagsPayload is the error payload for a WithContext
// marker.
// See errors/contexttags/withcontext.go and the RFC on
// error handling for details.
message TagsPayload {
  repeated TagPayload tags = 1 [(gogoproto.nullable) = false];
}

message TagPayload {
  string tag = 1;
  string value = 2;
}
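TagsPayload stores a snapshot of logtags key/value pairs attached to an error. A usage sketch, assuming the WithContextTags helper of github.com/cockroachdb/errors and the github.com/cockroachdb/logtags package (neither is shown in this diff):

package main

import (
	"context"
	"fmt"

	"github.com/cockroachdb/errors"
	"github.com/cockroachdb/logtags"
)

func main() {
	ctx := context.Background()
	ctx = logtags.AddTag(ctx, "n", 1)         // node ID
	ctx = logtags.AddTag(ctx, "req", "12345") // request ID

	err := errors.New("something went wrong")
	// Capture the ctx tags into the error; they travel with it
	// (as TagsPayload) and appear in %+v output and Sentry reports.
	err = errors.WithContextTags(err, ctx)

	fmt.Printf("%+v\n", err)
}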
@@ -0,0 +1,18 @@
// Copyright 2019 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package errorspb

// Error implements the error interface.
func (t *TestError) Error() string { return "test error" }
@ -0,0 +1,258 @@
|
|||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: errorspb/testing.proto
|
||||
|
||||
package errorspb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// TestError is meant for use in testing only.
|
||||
type TestError struct {
|
||||
}
|
||||
|
||||
func (m *TestError) Reset() { *m = TestError{} }
|
||||
func (m *TestError) String() string { return proto.CompactTextString(m) }
|
||||
func (*TestError) ProtoMessage() {}
|
||||
func (*TestError) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_0551f0d913d6118f, []int{0}
|
||||
}
|
||||
func (m *TestError) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *TestError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *TestError) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TestError.Merge(m, src)
|
||||
}
|
||||
func (m *TestError) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *TestError) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TestError.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TestError proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*TestError)(nil), "cockroach.errorspb.TestError")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("errorspb/testing.proto", fileDescriptor_0551f0d913d6118f) }
|
||||
|
||||
var fileDescriptor_0551f0d913d6118f = []byte{
|
||||
// 118 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0x2d, 0x2a, 0xca,
|
||||
0x2f, 0x2a, 0x2e, 0x48, 0xd2, 0x2f, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0xd7, 0x2b, 0x28, 0xca,
|
||||
0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xce, 0x4f, 0xce, 0x2e, 0xca, 0x4f, 0x4c, 0xce, 0xd0, 0x83, 0xa9,
|
||||
0x50, 0xe2, 0xe6, 0xe2, 0x0c, 0x49, 0x2d, 0x2e, 0x71, 0x05, 0xf1, 0x9d, 0xb4, 0x4e, 0x3c, 0x94,
|
||||
0x63, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x1b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c,
|
||||
0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2,
|
||||
0x38, 0x60, 0x1a, 0x93, 0xd8, 0xc0, 0x66, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4b, 0xf3,
|
||||
0x3e, 0x92, 0x6d, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *TestError) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *TestError) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *TestError) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintTesting(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovTesting(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *TestError) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
return n
|
||||
}
|
||||
|
||||
func sovTesting(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozTesting(x uint64) (n int) {
|
||||
return sovTesting(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *TestError) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTesting
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: TestError: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: TestError: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTesting(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthTesting
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipTesting(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTesting
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTesting
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTesting
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthTesting
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupTesting
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthTesting
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthTesting = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowTesting = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupTesting = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
|
@@ -0,0 +1,6 @@
syntax = "proto3";
package cockroach.errorspb;
option go_package = "errorspb";

// TestError is meant for use in testing only.
message TestError{}
@@ -0,0 +1,74 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Copyright 2019 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package errutil

import (
	"reflect"

	"github.com/cockroachdb/errors/errbase"
)

// As finds the first error in err's chain that matches the type to which target
// points, and if so, sets the target to its value and returns true. An error
// matches a type if it is assignable to the target type, or if it has a method
// As(interface{}) bool such that As(target) returns true. As will panic if target
// is not a non-nil pointer to a type which implements error or is of interface type.
//
// The As method should set the target to its value and return true if err
// matches the type to which target points.
//
// Note: this implementation differs from that of xerrors as follows:
// - it also supports recursing through causes with Cause().
// - if it detects an API use error, its panic object is a valid error.
func As(err error, target interface{}) bool {
	if target == nil {
		panic(AssertionFailedf("errors.As: target cannot be nil"))
	}

	// We use introspection for now, of course when/if Go gets generics
	// all this can go away.
	val := reflect.ValueOf(target)
	typ := val.Type()
	if typ.Kind() != reflect.Ptr || val.IsNil() {
		panic(AssertionFailedf("errors.As: target must be a non-nil pointer, found %T", target))
	}
	if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) {
		panic(AssertionFailedf("errors.As: *target must be interface or implement error, found %T", target))
	}

	targetType := typ.Elem()
	for c := err; c != nil; c = errbase.UnwrapOnce(c) {
		if reflect.TypeOf(c).AssignableTo(targetType) {
			val.Elem().Set(reflect.ValueOf(c))
			return true
		}
		if x, ok := c.(interface{ As(interface{}) bool }); ok && x.As(target) {
			return true
		}

		// If at any point in the single cause chain including the top,
		// we encounter a multi-cause chain, recursively explore it.
		for _, cause := range errbase.UnwrapMulti(c) {
			if As(cause, target) {
				return true
			}
		}
	}

	return false
}

var errorType = reflect.TypeOf((*error)(nil)).Elem()
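A minimal usage sketch, not part of this diff: the public errors.As entry point of github.com/cockroachdb/errors, which is backed by an implementation like the one above, extracts a concrete error type from anywhere in the cause chain, including through wrappers added with errors.Wrap.

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
)

// MyError is a hypothetical application-defined error type.
type MyError struct{ Code int }

func (e *MyError) Error() string { return fmt.Sprintf("my error, code %d", e.Code) }

func main() {
	err := errors.Wrap(&MyError{Code: 42}, "while serving request")

	var myErr *MyError
	if errors.As(err, &myErr) {
		fmt.Println("code:", myErr.Code) // code: 42
	}
}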
@@ -0,0 +1,80 @@
// Copyright 2019 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package errutil

import (
	"github.com/cockroachdb/errors/assert"
	"github.com/cockroachdb/errors/barriers"
	"github.com/cockroachdb/errors/withstack"
)

// AssertionFailedf creates an internal error.
//
// Detail is shown:
// - via `errors.GetSafeDetails()`, shows redacted strings.
// - when formatting with `%+v`.
// - in Sentry reports.
func AssertionFailedf(format string, args ...interface{}) error {
	return AssertionFailedWithDepthf(1, format, args...)
}

// AssertionFailedWithDepthf creates an internal error
// with a stack trace collected at the specified depth.
// See the doc of `AssertionFailedf()` for more details.
func AssertionFailedWithDepthf(depth int, format string, args ...interface{}) error {
	err := NewWithDepthf(1+depth, format, args...)
	err = assert.WithAssertionFailure(err)
	return err
}

// HandleAsAssertionFailure hides an error and turns it into
// an assertion failure. Both details from the original error and the
// context of the caller are preserved. The original error is not
// visible as cause any more. The original error message is preserved.
// See the doc of `AssertionFailedf()` for more details.
func HandleAsAssertionFailure(origErr error) error {
	return HandleAsAssertionFailureDepth(1, origErr)
}

// HandleAsAssertionFailureDepth is like HandleAsAssertionFailure but
// the depth at which the call stack is captured can be specified.
func HandleAsAssertionFailureDepth(depth int, origErr error) error {
	err := barriers.Handled(origErr)
	err = withstack.WithStackDepth(err, 1+depth)
	err = assert.WithAssertionFailure(err)
	return err
}

// NewAssertionErrorWithWrappedErrf wraps an error and turns it into
// an assertion error. Both details from the original error and the
// context of the caller are preserved. The original error is not
// visible as cause any more. The original error message is preserved.
// See the doc of `AssertionFailedf()` for more details.
func NewAssertionErrorWithWrappedErrf(origErr error, format string, args ...interface{}) error {
	return NewAssertionErrorWithWrappedErrDepthf(1, origErr, format, args...)
}

// NewAssertionErrorWithWrappedErrDepthf is like
// NewAssertionErrorWithWrappedErrf but the depth at which the call
// stack is captured can be specified.
// See the doc of `AssertionFailedf()` for more details.
func NewAssertionErrorWithWrappedErrDepthf(
	depth int, origErr error, format string, args ...interface{},
) error {
	err := barriers.Handled(origErr)
	err = WrapWithDepthf(depth+1, err, format, args...)
	err = assert.WithAssertionFailure(err)
	return err
}