(analytics): Backend enhancements for editing the event query, patching verdicts into event data, and a subscription for viewing application dashboards (#2985)
* chaos event and verdict query update for stored dashboards Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* subscription for viewing dashboards Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* minor fix Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* fixes for prometheus schema usage and verdict patch Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* fixes Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* updated caching strategy Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* fixes for agent based dashboard listing and update dashboard vars Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* updates to dashboards with data source healthcheck Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* minor fix Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* fixing concurrency issue Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* minor fix Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* minor fix Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* minor fix Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* minor fix Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* updates for race condition Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* made the code modular Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* fixes for duplicate events and locks Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* codacy fixes Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
* changes after review Signed-off-by: ishangupta-ds <ishan@chaosnative.com>
This commit is contained in:
parent
dd6f81c288
commit
e328553f59
@@ -643,6 +643,8 @@ github.com/litmuschaos/chaos-operator v0.0.0-20210224131102-ca6a465ed348/go.mod
+github.com/litmuschaos/chaos-scheduler v0.0.0-20210607090343-9952190ad032 h1:Nza94oOqOsao8eFWC19iFviS8XsxS2eVk7Q0a9WDKBE=
+github.com/litmuschaos/chaos-scheduler v0.0.0-20210607090343-9952190ad032/go.mod h1:7EO6kbZKeJGKzkchgQepCxywvqNFNvNHW0G+u9923AY=
 github.com/litmuschaos/elves v0.0.0-20201107015738-552d74669e3c/go.mod h1:DsbHGNUq/78NZozWVVI9Q6eBei4I+JjlkkD5aibJ3MQ=
-github.com/litmuschaos/litmus v0.0.0-20210702160150-411ccb5d572a h1:1o6+JzP0K2o68iH748VPQBhHr9VC1/IK2fHCoHz3df0=
+github.com/litmuschaos/litmus v0.0.0-20210712105501-6cb1456b1ad5 h1:rfHzsGxUlgnLViHyUpa2AdZ4XZRAjBdGL+Wc8XEwyT0=
 github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
 github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA=
 github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04=
@@ -60,19 +60,19 @@ input resource {
 input updateDBInput {
   db_id: String!
-  ds_id: String!
-  db_name: String!
-  db_type_name: String!
-  db_type_id: String!
+  ds_id: String
+  db_name: String
+  db_type_name: String
+  db_type_id: String
   db_information: String
-  chaos_event_query_template: String!
-  chaos_verdict_query_template: String!
+  chaos_event_query_template: String
+  chaos_verdict_query_template: String
   application_metadata_map: [applicationMetadata]
-  panel_groups: [updatePanelGroupInput]!
-  end_time: String!
-  start_time: String!
-  cluster_id: ID!
-  refresh_rate: String!
+  panel_groups: [updatePanelGroupInput]
+  end_time: String
+  start_time: String
+  cluster_id: ID
+  refresh_rate: String
 }

 input updatePanelGroupInput {
@@ -140,6 +140,24 @@ input promQueryInput {
   minstep: Int!
 }
+
+input queryMapForPanel {
+  panelID: String!
+  queryIDs: [String!]!
+}
+
+input queryMapForPanelGroup {
+  panelGroupID: String!
+  panelQueryMap: [queryMapForPanel!]!
+}
+
+input dataVars {
+  url: String!
+  start: String!
+  end: String!
+  relative_time: Int!
+  refresh_interval: Int!
+}

 type metricsPromResponse {
   queryid: String!
   legends: [String]
@@ -151,10 +169,17 @@ type metricsTimeStampValue {
   value: Float
 }
+
+type subData {
+  date: Float
+  value: String!
+  subDataName: String!
+}

 type annotationsPromResponse {
   queryid: String!
   legends: [String]
   tsvs: [[annotationsTimeStampValue]]
+  subDataArray: [[subData]]
 }

 type annotationsTimeStampValue {
@@ -167,6 +192,21 @@ type promResponse {
   annotationsResponse: [annotationsPromResponse]
 }
+
+type metricDataForPanel {
+  panelID: String!
+  PanelMetricsResponse: [metricsPromResponse]
+}
+
+type metricDataForPanelGroup {
+  panelGroupID: String!
+  panelGroupMetricsResponse: [metricDataForPanel]
+}
+
+type dashboardPromResponse {
+  dashboardMetricsResponse: [metricDataForPanelGroup]
+  annotationsResponse: [annotationsPromResponse]
+}

 type promSeriesResponse {
   series: String!
   labelValues: [labelValue]
@@ -198,6 +238,8 @@ type listDashboardResponse {
   cluster_name: String
   ds_name: String
   ds_type: String
+  ds_url: String
+  ds_health_status: String
   panel_groups: [panelGroupResponse]!
   end_time: String!
   start_time: String!
File diff suppressed because it is too large
@@ -699,9 +699,10 @@ type WorkflowStats struct {
 }

 type AnnotationsPromResponse struct {
-    Queryid string                         `json:"queryid"`
-    Legends []*string                      `json:"legends"`
-    Tsvs    [][]*AnnotationsTimeStampValue `json:"tsvs"`
+    Queryid      string                         `json:"queryid"`
+    Legends      []*string                      `json:"legends"`
+    Tsvs         [][]*AnnotationsTimeStampValue `json:"tsvs"`
+    SubDataArray [][]*SubData                   `json:"subDataArray"`
 }

 type AnnotationsTimeStampValue struct {
@@ -742,6 +743,19 @@ type CreateDBInput struct {
     RefreshRate string `json:"refresh_rate"`
 }
+
+type DashboardPromResponse struct {
+    DashboardMetricsResponse []*MetricDataForPanelGroup `json:"dashboardMetricsResponse"`
+    AnnotationsResponse      []*AnnotationsPromResponse `json:"annotationsResponse"`
+}
+
+type DataVars struct {
+    URL             string `json:"url"`
+    Start           string `json:"start"`
+    End             string `json:"end"`
+    RelativeTime    int    `json:"relative_time"`
+    RefreshInterval int    `json:"refresh_interval"`
+}

 type DeleteDSInput struct {
     ForceDelete bool   `json:"force_delete"`
     DsID        string `json:"ds_id"`
@@ -791,6 +805,8 @@ type ListDashboardResponse struct {
     ClusterName *string `json:"cluster_name"`
     DsName      *string `json:"ds_name"`
     DsType      *string `json:"ds_type"`
+    DsURL          *string `json:"ds_url"`
+    DsHealthStatus *string `json:"ds_health_status"`
     PanelGroups []*PanelGroupResponse `json:"panel_groups"`
     EndTime     string                `json:"end_time"`
     StartTime   string                `json:"start_time"`
@@ -801,6 +817,16 @@ type ListDashboardResponse struct {
     UpdatedAt *string `json:"updated_at"`
 }
+
+type MetricDataForPanel struct {
+    PanelID              string                 `json:"panelID"`
+    PanelMetricsResponse []*MetricsPromResponse `json:"PanelMetricsResponse"`
+}
+
+type MetricDataForPanelGroup struct {
+    PanelGroupID              string                `json:"panelGroupID"`
+    PanelGroupMetricsResponse []*MetricDataForPanel `json:"panelGroupMetricsResponse"`
+}

 type MetricsPromResponse struct {
     Queryid string    `json:"queryid"`
     Legends []*string `json:"legends"`
@@ -917,6 +943,16 @@ type PromSeriesResponse struct {
     LabelValues []*LabelValue `json:"labelValues"`
 }
+
+type QueryMapForPanel struct {
+    PanelID  string   `json:"panelID"`
+    QueryIDs []string `json:"queryIDs"`
+}
+
+type QueryMapForPanelGroup struct {
+    PanelGroupID  string              `json:"panelGroupID"`
+    PanelQueryMap []*QueryMapForPanel `json:"panelQueryMap"`
+}

 type Resource struct {
     Kind  string    `json:"kind"`
     Names []*string `json:"names"`
@@ -927,21 +963,27 @@ type ResourceResponse struct {
     Names []*string `json:"names"`
 }
+
+type SubData struct {
+    Date        *float64 `json:"date"`
+    Value       string   `json:"value"`
+    SubDataName string   `json:"subDataName"`
+}

 type UpdateDBInput struct {
     DbID string `json:"db_id"`
-    DsID       string `json:"ds_id"`
-    DbName     string `json:"db_name"`
-    DbTypeName string `json:"db_type_name"`
-    DbTypeID   string `json:"db_type_id"`
+    DsID       *string `json:"ds_id"`
+    DbName     *string `json:"db_name"`
+    DbTypeName *string `json:"db_type_name"`
+    DbTypeID   *string `json:"db_type_id"`
     DbInformation *string `json:"db_information"`
-    ChaosEventQueryTemplate   string `json:"chaos_event_query_template"`
-    ChaosVerdictQueryTemplate string `json:"chaos_verdict_query_template"`
+    ChaosEventQueryTemplate   *string `json:"chaos_event_query_template"`
+    ChaosVerdictQueryTemplate *string `json:"chaos_verdict_query_template"`
     ApplicationMetadataMap []*ApplicationMetadata   `json:"application_metadata_map"`
     PanelGroups            []*UpdatePanelGroupInput `json:"panel_groups"`
-    EndTime     string `json:"end_time"`
-    StartTime   string `json:"start_time"`
-    ClusterID   string `json:"cluster_id"`
-    RefreshRate string `json:"refresh_rate"`
+    EndTime     *string `json:"end_time"`
+    StartTime   *string `json:"start_time"`
+    ClusterID   *string `json:"cluster_id"`
+    RefreshRate *string `json:"refresh_rate"`
 }

 type UpdatePanelGroupInput struct {
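Note: every field that became optional in the GraphQL schema is now a pointer in the regenerated model above, so handler code must nil-check before dereferencing. A minimal, self-contained sketch of that pattern (the struct is a trimmed stand-in for model.UpdateDBInput, and valueOrDefault is a hypothetical helper, not part of this commit):

    package main

    import "fmt"

    // Trimmed stand-in for the generated model.UpdateDBInput above.
    type UpdateDBInput struct {
        DbID   string  // required field: plain value
        DsName *string // optional field: nil when the client omits it
    }

    // valueOrDefault dereferences an optional field, falling back to a default.
    func valueOrDefault(s *string, def string) string {
        if s == nil {
            return def
        }
        return *s
    }

    func main() {
        in := UpdateDBInput{DbID: "db-1"} // DsName omitted by the client
        fmt.Println(valueOrDefault(in.DsName, "<unchanged>"))
    }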
@@ -307,7 +307,9 @@ type Query {

   GetPromSeriesList(ds_details: dsDetails): promSeriesListResponse! @authorized

-  ListDashboard(project_id: String!): [listDashboardResponse] @authorized
+  ListDashboard(project_id: String!
+    cluster_id: String
+    db_id: String): [listDashboardResponse] @authorized

   PortalDashboardData (project_id: String!, hub_name: String!) : [PortalDashboardData!]! @authorized
@@ -418,7 +420,8 @@ type Mutation {

   updateDataSource(datasource: DSInput!): DSResponse! @authorized

-  updateDashboard(dashboard: updateDBInput): String! @authorized
+  updateDashboard(dashboard: updateDBInput!
+    chaosQueryUpdate: Boolean!): String! @authorized

   updatePanel(panelInput: [panel]): String! @authorized
@@ -461,4 +464,8 @@ type Subscription {

   getKubeObject(kubeObjectRequest: KubeObjectRequest!): KubeObjectResponse!
     @authorized
+
+  viewDashboard(promQueries: [promQueryInput!]!
+    dashboardQueryMap: [queryMapForPanelGroup!]!
+    dataVariables: dataVars!): dashboardPromResponse! @authorized
 }
@@ -252,8 +252,8 @@ func (r *mutationResolver) UpdateDataSource(ctx context.Context, datasource mode
     return analyticsHandler.UpdateDataSource(datasource)
 }

-func (r *mutationResolver) UpdateDashboard(ctx context.Context, dashboard *model.UpdateDBInput) (string, error) {
-    return analyticsHandler.UpdateDashBoard(dashboard)
+func (r *mutationResolver) UpdateDashboard(ctx context.Context, dashboard model.UpdateDBInput, chaosQueryUpdate bool) (string, error) {
+    return analyticsHandler.UpdateDashBoard(dashboard, chaosQueryUpdate)
 }

 func (r *mutationResolver) UpdatePanel(ctx context.Context, panelInput []*model.Panel) (string, error) {
@@ -413,7 +413,8 @@ func (r *queryResolver) ListDataSource(ctx context.Context, projectID string) ([
 }

 func (r *queryResolver) GetPromQuery(ctx context.Context, query *model.PromInput) (*model.PromResponse, error) {
-    return analyticsHandler.GetPromQuery(query)
+    promResponseData, _, err := analyticsHandler.GetPromQuery(query)
+    return promResponseData, err
 }

 func (r *queryResolver) GetPromLabelNamesAndValues(ctx context.Context, series *model.PromSeriesInput) (*model.PromSeriesResponse, error) {
@@ -424,8 +425,8 @@ func (r *queryResolver) GetPromSeriesList(ctx context.Context, dsDetails *model.
     return analyticsHandler.GetSeriesList(dsDetails)
 }

-func (r *queryResolver) ListDashboard(ctx context.Context, projectID string) ([]*model.ListDashboardResponse, error) {
-    return analyticsHandler.QueryListDashboard(projectID)
+func (r *queryResolver) ListDashboard(ctx context.Context, projectID string, clusterID *string, dbID *string) ([]*model.ListDashboardResponse, error) {
+    return analyticsHandler.QueryListDashboard(projectID, clusterID, dbID)
 }

 func (r *queryResolver) PortalDashboardData(ctx context.Context, projectID string, hubName string) ([]*model.PortalDashboardData, error) {
@@ -587,6 +588,26 @@ func (r *subscriptionResolver) GetKubeObject(ctx context.Context, kubeObjectRequ
     return kubeObjData, nil
 }

+func (r *subscriptionResolver) ViewDashboard(ctx context.Context, promQueries []*model.PromQueryInput, dashboardQueryMap []*model.QueryMapForPanelGroup, dataVariables model.DataVars) (<-chan *model.DashboardPromResponse, error) {
+    dashboardData := make(chan *model.DashboardPromResponse)
+    viewID := uuid.New()
+    log.Printf("Dashboard view %v created\n", viewID.String())
+    data_store.Store.Mutex.Lock()
+    data_store.Store.DashboardData[viewID.String()] = dashboardData
+    data_store.Store.Mutex.Unlock()
+    go func() {
+        <-ctx.Done()
+        log.Printf("Closed dashboard view %v\n", viewID.String())
+        if _, ok := data_store.Store.DashboardData[viewID.String()]; ok {
+            data_store.Store.Mutex.Lock()
+            delete(data_store.Store.DashboardData, viewID.String())
+            data_store.Store.Mutex.Unlock()
+        }
+    }()
+    go analyticsHandler.DashboardViewer(viewID.String(), promQueries, dashboardQueryMap, dataVariables, *data_store.Store)
+    return dashboardData, nil
+}

 // Mutation returns generated.MutationResolver implementation.
 func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} }
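The resolver above relies on a process-wide store that maps each view ID to its response channel. A sketch of what such a store could look like, inferred from the usage above (the actual pkg/data-store definition may differ):

    package main

    import (
        "fmt"
        "sync"
    )

    // DashboardPromResponse stands in for model.DashboardPromResponse.
    type DashboardPromResponse struct{}

    // StateData holds one channel per active dashboard view, keyed by view ID.
    type StateData struct {
        Mutex         sync.Mutex
        DashboardData map[string]chan *DashboardPromResponse
    }

    // Store is the shared instance the resolver and DashboardViewer both use.
    var Store = StateData{DashboardData: make(map[string]chan *DashboardPromResponse)}

    func main() {
        // Register a view the way the resolver does, then tear it down.
        ch := make(chan *DashboardPromResponse)
        Store.Mutex.Lock()
        Store.DashboardData["view-1"] = ch
        Store.Mutex.Unlock()

        Store.Mutex.Lock()
        delete(Store.DashboardData, "view-1")
        Store.Mutex.Unlock()
        fmt.Println("active views:", len(Store.DashboardData))
    }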
@@ -13,6 +13,7 @@ import (
     "sync"
     "time"

+    store "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/data-store"
     "go.mongodb.org/mongo-driver/mongo"

     "github.com/google/uuid"
@@ -233,105 +234,11 @@ func UpdateDataSource(datasource model.DSInput) (*model.DSResponse, error) {
}

// UpdateDashBoard function updates the dashboard based on its ID
func UpdateDashBoard(dashboard *model.UpdateDBInput) (string, error) {
func UpdateDashBoard(dashboard model.UpdateDBInput, chaosQueryUpdate bool) (string, error) {
    timestamp := strconv.FormatInt(time.Now().Unix(), 10)

    if dashboard.DbID == "" || dashboard.DsID == "" {
        return "could not find the dashboard or the connected data source", errors.New("dashBoard ID or data source ID is nil or empty")
    }

    var (
        newPanelGroups                = make([]dbSchemaAnalytics.PanelGroup, len(dashboard.PanelGroups))
        panelsToCreate                []*dbSchemaAnalytics.Panel
        panelsToUpdate                []*dbSchemaAnalytics.Panel
        newApplicationMetadataMap     []dbSchemaAnalytics.ApplicationMetadata
        updatedDashboardPanelIDs      []string
        updatedDashboardPanelGroupIDs []string
    )

    for _, applicationMetadata := range dashboard.ApplicationMetadataMap {
        var newApplications []*dbSchemaAnalytics.Resource
        for _, application := range applicationMetadata.Applications {
            newApplication := dbSchemaAnalytics.Resource{
                Kind:  application.Kind,
                Names: application.Names,
            }
            newApplications = append(newApplications, &newApplication)
        }
        newApplicationMetadata := dbSchemaAnalytics.ApplicationMetadata{
            Namespace:    applicationMetadata.Namespace,
            Applications: newApplications,
        }

        newApplicationMetadataMap = append(newApplicationMetadataMap, newApplicationMetadata)
    }

    for i, panelGroup := range dashboard.PanelGroups {

        var panelGroupID string

        if panelGroup.PanelGroupID == "" {
            panelGroupID = uuid.New().String()
        } else {
            panelGroupID = panelGroup.PanelGroupID
            updatedDashboardPanelGroupIDs = append(updatedDashboardPanelGroupIDs, panelGroup.PanelGroupID)
        }

        newPanelGroups[i].PanelGroupID = panelGroupID
        newPanelGroups[i].PanelGroupName = panelGroup.PanelGroupName

        for _, panel := range panelGroup.Panels {

            var (
                panelID   string
                createdAt string
                updatedAt string
            )

            if *panel.PanelID == "" {
                panelID = uuid.New().String()
                createdAt = strconv.FormatInt(time.Now().Unix(), 10)
                updatedAt = strconv.FormatInt(time.Now().Unix(), 10)
            } else {
                panelID = *panel.PanelID
                createdAt = *panel.CreatedAt
                updatedAt = strconv.FormatInt(time.Now().Unix(), 10)
                updatedDashboardPanelIDs = append(updatedDashboardPanelIDs, *panel.PanelID)
            }

            var newPromQueries []*dbSchemaAnalytics.PromQuery
            err := copier.Copy(&newPromQueries, &panel.PromQueries)
            if err != nil {
                return "error updating queries", err
            }

            var newPanelOptions dbSchemaAnalytics.PanelOption
            err = copier.Copy(&newPanelOptions, &panel.PanelOptions)
            if err != nil {
                return "error updating options", err
            }

            newPanel := dbSchemaAnalytics.Panel{
                PanelID:      panelID,
                PanelOptions: &newPanelOptions,
                PanelName:    panel.PanelName,
                PanelGroupID: panelGroupID,
                PromQueries:  newPromQueries,
                IsRemoved:    false,
                XAxisDown:    panel.XAxisDown,
                YAxisLeft:    panel.YAxisLeft,
                YAxisRight:   panel.YAxisRight,
                Unit:         panel.Unit,
                CreatedAt:    createdAt,
                UpdatedAt:    updatedAt,
            }

            if *panel.PanelID == "" {
                panelsToCreate = append(panelsToCreate, &newPanel)
            } else {
                panelsToUpdate = append(panelsToUpdate, &newPanel)
            }
        }
    }
    if dashboard.DbID == "" {
        return "could not find the dashboard", errors.New("dashBoard ID is nil or empty")
    }

    query := bson.D{
@@ -339,116 +246,217 @@ func UpdateDashBoard(dashboard *model.UpdateDBInput) (string, error) {
        {"is_removed", false},
    }

    existingDashboard, err := dbOperationsAnalytics.GetDashboard(query)
    if err != nil {
        return "error fetching dashboard details", fmt.Errorf("error on query from dashboard collection by projectid: %v", err)
    }
    var update bson.D

    for _, panelGroup := range existingDashboard.PanelGroups {
        query := bson.D{
            {"panel_group_id", panelGroup.PanelGroupID},
            {"is_removed", false},
        }
        panels, err := dbOperationsAnalytics.ListPanel(query)
        if err != nil {
            return "error fetching panels", fmt.Errorf("error on querying from promquery collection: %v", err)
    if !chaosQueryUpdate {
        var (
            newPanelGroups                = make([]dbSchemaAnalytics.PanelGroup, len(dashboard.PanelGroups))
            panelsToCreate                []*dbSchemaAnalytics.Panel
            panelsToUpdate                []*dbSchemaAnalytics.Panel
            newApplicationMetadataMap     []dbSchemaAnalytics.ApplicationMetadata
            updatedDashboardPanelIDs      []string
            updatedDashboardPanelGroupIDs []string
        )

        for _, applicationMetadata := range dashboard.ApplicationMetadataMap {
            var newApplications []*dbSchemaAnalytics.Resource
            for _, application := range applicationMetadata.Applications {
                newApplication := dbSchemaAnalytics.Resource{
                    Kind:  application.Kind,
                    Names: application.Names,
                }
                newApplications = append(newApplications, &newApplication)
            }
            newApplicationMetadata := dbSchemaAnalytics.ApplicationMetadata{
                Namespace:    applicationMetadata.Namespace,
                Applications: newApplications,
            }

            newApplicationMetadataMap = append(newApplicationMetadataMap, newApplicationMetadata)
        }

        var tempPanels []*model.PanelResponse
        err = copier.Copy(&tempPanels, &panels)
        if err != nil {
            return "error fetching panel details", err
        }
        for i, panelGroup := range dashboard.PanelGroups {

        for _, panel := range tempPanels {
            var panelGroupID string

            if !utils.ContainsString(updatedDashboardPanelIDs, panel.PanelID) || !utils.ContainsString(updatedDashboardPanelGroupIDs, panelGroup.PanelGroupID) {
            if panelGroup.PanelGroupID == "" {
                panelGroupID = uuid.New().String()
            } else {
                panelGroupID = panelGroup.PanelGroupID
                updatedDashboardPanelGroupIDs = append(updatedDashboardPanelGroupIDs, panelGroup.PanelGroupID)
            }

                var promQueriesInPanelToBeDeleted []*dbSchemaAnalytics.PromQuery
                err := copier.Copy(&promQueriesInPanelToBeDeleted, &panel.PromQueries)
            newPanelGroups[i].PanelGroupID = panelGroupID
            newPanelGroups[i].PanelGroupName = panelGroup.PanelGroupName

            for _, panel := range panelGroup.Panels {

                var (
                    panelID   string
                    createdAt string
                    updatedAt string
                )

                if *panel.PanelID == "" {
                    panelID = uuid.New().String()
                    createdAt = strconv.FormatInt(time.Now().Unix(), 10)
                    updatedAt = strconv.FormatInt(time.Now().Unix(), 10)
                } else {
                    panelID = *panel.PanelID
                    createdAt = *panel.CreatedAt
                    updatedAt = strconv.FormatInt(time.Now().Unix(), 10)
                    updatedDashboardPanelIDs = append(updatedDashboardPanelIDs, *panel.PanelID)
                }

                var newPromQueries []*dbSchemaAnalytics.PromQuery
                err := copier.Copy(&newPromQueries, &panel.PromQueries)
                if err != nil {
                    return "error updating queries", err
                }

                var panelOptionsOfPanelToBeDeleted dbSchemaAnalytics.PanelOption
                err = copier.Copy(&panelOptionsOfPanelToBeDeleted, &panel.PanelOptions)
                var newPanelOptions dbSchemaAnalytics.PanelOption
                err = copier.Copy(&newPanelOptions, &panel.PanelOptions)
                if err != nil {
                    return "error updating options", err
                }

                panelToBeDeleted := dbSchemaAnalytics.Panel{
                    PanelID:      panel.PanelID,
                    PanelOptions: &panelOptionsOfPanelToBeDeleted,
                    PanelName:    *panel.PanelName,
                    PanelGroupID: panelGroup.PanelGroupID,
                    PromQueries:  promQueriesInPanelToBeDeleted,
                    IsRemoved:    true,
                newPanel := dbSchemaAnalytics.Panel{
                    PanelID:      panelID,
                    PanelOptions: &newPanelOptions,
                    PanelName:    panel.PanelName,
                    PanelGroupID: panelGroupID,
                    PromQueries:  newPromQueries,
                    IsRemoved:    false,
                    XAxisDown:    panel.XAxisDown,
                    YAxisLeft:    panel.YAxisLeft,
                    YAxisRight:   panel.YAxisRight,
                    Unit:         panel.Unit,
                    CreatedAt:    *panel.CreatedAt,
                    UpdatedAt:    strconv.FormatInt(time.Now().Unix(), 10),
                    CreatedAt:    createdAt,
                    UpdatedAt:    updatedAt,
                }

                panelsToUpdate = append(panelsToUpdate, &panelToBeDeleted)

                if *panel.PanelID == "" {
                    panelsToCreate = append(panelsToCreate, &newPanel)
                } else {
                    panelsToUpdate = append(panelsToUpdate, &newPanel)
                }
            }
        }
    }

        if len(panelsToCreate) > 0 {
            err = dbOperationsAnalytics.InsertPanel(panelsToCreate)
        existingDashboard, err := dbOperationsAnalytics.GetDashboard(query)
            if err != nil {
                return "error creating new panels", fmt.Errorf("error while inserting panel data: %v", err)
            return "error fetching dashboard details", fmt.Errorf("error on query from dashboard collection by projectid: %v", err)
        }
            log.Print("successfully inserted prom query into promquery-collection")
        }

        if len(panelsToUpdate) > 0 {
            for _, panel := range panelsToUpdate {
                timestamp := strconv.FormatInt(time.Now().Unix(), 10)

                if panel.PanelID == "" && panel.PanelGroupID == "" {
                    return "error getting panel and group details", errors.New("panel ID or panel group ID is nil or empty")
        for _, panelGroup := range existingDashboard.PanelGroups {
            query := bson.D{
                {"panel_group_id", panelGroup.PanelGroupID},
                {"is_removed", false},
            }
            panels, err := dbOperationsAnalytics.ListPanel(query)
            if err != nil {
                return "error fetching panels", fmt.Errorf("error on querying from promquery collection: %v", err)
            }

                var newPanelOption dbSchemaAnalytics.PanelOption
                err := copier.Copy(&newPanelOption, &panel.PanelOptions)
            var tempPanels []*model.PanelResponse
            err = copier.Copy(&tempPanels, &panels)
                if err != nil {
                    return "error updating panel option", err
                return "error fetching panel details", err
                }

                var newPromQueries []dbSchemaAnalytics.PromQuery
                err = copier.Copy(&newPromQueries, panel.PromQueries)
                if err != nil {
                    return "error updating panel queries", err
                }
            for _, panel := range tempPanels {

                query := bson.D{{"panel_id", panel.PanelID}}
                if !utils.ContainsString(updatedDashboardPanelIDs, panel.PanelID) || !utils.ContainsString(updatedDashboardPanelGroupIDs, panelGroup.PanelGroupID) {

                update := bson.D{{"$set", bson.D{{"panel_name", panel.PanelName}, {"is_removed", panel.IsRemoved},
                    {"panel_group_id", panel.PanelGroupID}, {"panel_options", newPanelOption},
                    {"prom_queries", newPromQueries}, {"updated_at", timestamp},
                    {"y_axis_left", panel.YAxisLeft}, {"y_axis_right", panel.YAxisRight},
                    {"x_axis_down", panel.XAxisDown}, {"unit", panel.Unit}}}}
                    var promQueriesInPanelToBeDeleted []*dbSchemaAnalytics.PromQuery
                    err := copier.Copy(&promQueriesInPanelToBeDeleted, &panel.PromQueries)
                    if err != nil {
                        return "error updating queries", err
                    }

                err = dbOperationsAnalytics.UpdatePanel(query, update)
                if err != nil {
                    return "error updating panel", err
                    var panelOptionsOfPanelToBeDeleted dbSchemaAnalytics.PanelOption
                    err = copier.Copy(&panelOptionsOfPanelToBeDeleted, &panel.PanelOptions)
                    if err != nil {
                        return "error updating options", err
                    }

                    panelToBeDeleted := dbSchemaAnalytics.Panel{
                        PanelID:      panel.PanelID,
                        PanelOptions: &panelOptionsOfPanelToBeDeleted,
                        PanelName:    *panel.PanelName,
                        PanelGroupID: panelGroup.PanelGroupID,
                        PromQueries:  promQueriesInPanelToBeDeleted,
                        IsRemoved:    true,
                        XAxisDown:    panel.XAxisDown,
                        YAxisLeft:    panel.YAxisLeft,
                        YAxisRight:   panel.YAxisRight,
                        Unit:         panel.Unit,
                        CreatedAt:    *panel.CreatedAt,
                        UpdatedAt:    strconv.FormatInt(time.Now().Unix(), 10),
                    }

                    panelsToUpdate = append(panelsToUpdate, &panelToBeDeleted)

                }
            }
        }

        if len(panelsToCreate) > 0 {
            err = dbOperationsAnalytics.InsertPanel(panelsToCreate)
            if err != nil {
                return "error creating new panels", fmt.Errorf("error while inserting panel data: %v", err)
            }
            log.Print("successfully inserted prom query into promquery-collection")
        }

        if len(panelsToUpdate) > 0 {
            for _, panel := range panelsToUpdate {
                timestamp := strconv.FormatInt(time.Now().Unix(), 10)

                if panel.PanelID == "" && panel.PanelGroupID == "" {
                    return "error getting panel and group details", errors.New("panel ID or panel group ID is nil or empty")
                }

                var newPanelOption dbSchemaAnalytics.PanelOption
                err := copier.Copy(&newPanelOption, &panel.PanelOptions)
                if err != nil {
                    return "error updating panel option", err
                }

                var newPromQueries []dbSchemaAnalytics.PromQuery
                err = copier.Copy(&newPromQueries, panel.PromQueries)
                if err != nil {
                    return "error updating panel queries", err
                }

                query := bson.D{{"panel_id", panel.PanelID}}

                update := bson.D{{"$set", bson.D{{"panel_name", panel.PanelName}, {"is_removed", panel.IsRemoved},
                    {"panel_group_id", panel.PanelGroupID}, {"panel_options", newPanelOption},
                    {"prom_queries", newPromQueries}, {"updated_at", timestamp},
                    {"y_axis_left", panel.YAxisLeft}, {"y_axis_right", panel.YAxisRight},
                    {"x_axis_down", panel.XAxisDown}, {"unit", panel.Unit}}}}

                err = dbOperationsAnalytics.UpdatePanel(query, update)
                if err != nil {
                    return "error updating panel", err
                }
            }
        }

        update = bson.D{{"$set", bson.D{{"ds_id", dashboard.DsID}, {"db_name", dashboard.DbName},
            {"db_type_id", dashboard.DbTypeID}, {"db_type_name", dashboard.DbTypeName},
            {"db_information", dashboard.DbInformation}, {"cluster_id", dashboard.ClusterID},
            {"application_metadata_map", newApplicationMetadataMap}, {"end_time", dashboard.EndTime},
            {"start_time", dashboard.StartTime}, {"refresh_rate", dashboard.RefreshRate},
            {"panel_groups", newPanelGroups}, {"updated_at", timestamp}}}}
    } else {
        update = bson.D{{"$set", bson.D{
            {"chaos_event_query_template", dashboard.ChaosEventQueryTemplate}, {"chaos_verdict_query_template", dashboard.ChaosVerdictQueryTemplate},
            {"updated_at", timestamp}}}}
    }

    update := bson.D{{"$set", bson.D{{"ds_id", dashboard.DsID},
        {"db_name", dashboard.DbName}, {"db_type_id", dashboard.DbTypeID},
        {"db_type_name", dashboard.DbTypeName}, {"db_information", dashboard.DbInformation},
        {"chaos_event_query_template", dashboard.ChaosEventQueryTemplate}, {"chaos_verdict_query_template", dashboard.ChaosEventQueryTemplate},
        {"cluster_id", dashboard.ClusterID}, {"application_metadata_map", newApplicationMetadataMap},
        {"end_time", dashboard.EndTime}, {"start_time", dashboard.StartTime},
        {"refresh_rate", dashboard.RefreshRate}, {"panel_groups", newPanelGroups}, {"updated_at", timestamp}}}}

    err = dbOperationsAnalytics.UpdateDashboard(query, update)
    err := dbOperationsAnalytics.UpdateDashboard(query, update)
    if err != nil {
        return "error updating dashboard", err
    }
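Net effect of the rewrite above: when chaosQueryUpdate is true, only the two chaos query templates (plus updated_at) are written; otherwise the whole dashboard document is rebuilt. A condensed, runnable sketch of that routing, with hypothetical updateTemplates/updateFullDashboard helpers standing in for the two branches:

    package main

    import "fmt"

    type UpdateDBInput struct{ DbID string }

    func updateTemplates(d UpdateDBInput) (string, error)     { return "templates updated", nil }
    func updateFullDashboard(d UpdateDBInput) (string, error) { return "dashboard updated", nil }

    // UpdateDashBoard (condensed): route the update by caller intent.
    func UpdateDashBoard(d UpdateDBInput, chaosQueryUpdate bool) (string, error) {
        if chaosQueryUpdate {
            // Patch only the chaos event/verdict query templates + updated_at.
            return updateTemplates(d)
        }
        // Rebuild panel groups, panels, metadata, time range, refresh rate.
        return updateFullDashboard(d)
    }

    func main() {
        msg, _ := UpdateDashBoard(UpdateDBInput{DbID: "db-1"}, true)
        fmt.Println(msg)
    }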
@@ -643,40 +651,55 @@ func QueryListDataSource(projectID string) ([]*model.DSResponse, error) {
        return nil, err
    }

    var newDatasources []*model.DSResponse
    copier.Copy(&newDatasources, &datasource)
    var (
        newDataSources []*model.DSResponse
        wg             sync.WaitGroup
    )
    err = copier.Copy(&newDataSources, &datasource)
    if err != nil {
        return nil, err
    }

    tsdbHealthCheckMap := make(map[string]string)

    var wg sync.WaitGroup
    wg.Add(len(newDatasources))
    var mutex = &sync.Mutex{}
    wg.Add(len(newDataSources))

    for _, datasource := range newDatasources {
    for _, datasource := range newDataSources {
        datasource := datasource
        go func(val *model.DSResponse) {
            defer wg.Done()

            tsdbHealthCheckMap[*datasource.DsID] = prometheus.TSDBHealthCheck(*datasource.DsURL, *datasource.DsType)

            if _, ok := tsdbHealthCheckMap[*datasource.DsURL]; !ok {
                mutex.Lock()
                tsdbHealthCheckMap[*datasource.DsURL] = prometheus.TSDBHealthCheck(*datasource.DsURL, *datasource.DsType)
                mutex.Unlock()
            }
        }(datasource)
    }

    wg.Wait()

    for _, datasource := range newDatasources {
        datasource.HealthStatus = tsdbHealthCheckMap[*datasource.DsID]
    for _, datasource := range newDataSources {
        datasource.HealthStatus = tsdbHealthCheckMap[*datasource.DsURL]
    }

    return newDatasources, nil
    return newDataSources, nil
}

func GetPromQuery(promInput *model.PromInput) (*model.PromResponse, error) {
// GetPromQuery takes prometheus queries and returns response for annotations and metrics with a query map
func GetPromQuery(promInput *model.PromInput) (*model.PromResponse, map[string]*model.MetricsPromResponse, error) {
    var (
        metrics     []*model.MetricsPromResponse
        annotations []*model.AnnotationsPromResponse
        metrics         []*model.MetricsPromResponse
        annotations     []*model.AnnotationsPromResponse
        wg              sync.WaitGroup
        verdictResponse *model.AnnotationsPromResponse
    )

    var wg sync.WaitGroup
    patchEventWithVerdict := false
    queryResponseMap := make(map[string]*model.MetricsPromResponse)
    var mutex = &sync.Mutex{}

    wg.Add(len(promInput.Queries))
    for _, v := range promInput.Queries {
        go func(val *model.PromQueryInput) {
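The health-check fan-out above runs one goroutine per data source and dedupes probes by data-source URL, guarding the shared map with a mutex. A self-contained sketch of the same pattern (healthCheck stands in for prometheus.TSDBHealthCheck; unlike the code above, the sketch also takes the lock around the membership check):

    package main

    import (
        "fmt"
        "sync"
    )

    // healthCheck stands in for prometheus.TSDBHealthCheck.
    func healthCheck(url, dsType string) string { return "Active" }

    func main() {
        urls := []string{"http://prom-a:9090", "http://prom-b:9090", "http://prom-a:9090"}

        statusByURL := make(map[string]string)
        var (
            wg    sync.WaitGroup
            mutex sync.Mutex
        )

        wg.Add(len(urls))
        for _, url := range urls {
            url := url // capture the loop variable (pre-Go 1.22 semantics)
            go func() {
                defer wg.Done()
                mutex.Lock()
                defer mutex.Unlock()
                // Probe each unique URL once; duplicates reuse the cached result.
                if _, ok := statusByURL[url]; !ok {
                    statusByURL[url] = healthCheck(url, "Prometheus")
                }
            }()
        }
        wg.Wait()

        fmt.Println(statusByURL)
    }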
@@ -694,32 +717,59 @@ func GetPromQuery(promInput *model.PromInput) (*model.PromResponse, error) {
            cacheKey := val.Query + "-" + promInput.DsDetails.Start + "-" + promInput.DsDetails.End + "-" + promInput.DsDetails.URL

            queryType := "metrics"
            if strings.Contains(val.Queryid, "chaos-interval") || strings.Contains(val.Queryid, "chaos-verdict") {
            if strings.Contains(val.Queryid, "chaos-event") || strings.Contains(val.Queryid, "chaos-verdict") {
                queryType = "annotation"
                cacheKey = val.Queryid + "-" + promInput.DsDetails.Start + "-" + promInput.DsDetails.End + "-" + promInput.DsDetails.URL
            }

            if obj, isExist := AnalyticsCache.Get(cacheKey); isExist {
                if queryType == "metrics" {
                    metrics = append(metrics, obj.(*model.MetricsPromResponse))
                    mutex.Lock()
                    queryResponseMap[val.Queryid] = obj.(*model.MetricsPromResponse)
                    mutex.Unlock()
                } else {
                    annotations = append(annotations, obj.(*model.AnnotationsPromResponse))
                    if strings.Contains(val.Queryid, "chaos-event") {
                        annotations = append(annotations, obj.(*model.AnnotationsPromResponse))
                    }
                }
            } else {
                response, err := prometheus.Query(newPromQuery, queryType)

                if err != nil {
                    return
                }

                cacheError := utils.AddCache(AnalyticsCache, cacheKey, response)
                if cacheError != nil {
                    log.Printf("Adding cache: %v\n", cacheError)
                }

                if queryType == "metrics" {
                    metrics = append(metrics, response.(*model.MetricsPromResponse))
                    mutex.Lock()
                    queryResponseMap[val.Queryid] = response.(*model.MetricsPromResponse)
                    mutex.Unlock()
                    cacheError := utils.AddCache(AnalyticsCache, cacheKey, response)
                    if cacheError != nil {
                        errorStr := fmt.Sprintf("%v", cacheError)
                        if strings.Contains(errorStr, "already exists") {
                            cacheError = utils.UpdateCache(AnalyticsCache, cacheKey, response)
                            if cacheError != nil {
                                log.Printf("Error while caching: %v\n", cacheError)
                            }
                        }
                    }
                } else {
                    annotations = append(annotations, response.(*model.AnnotationsPromResponse))
                    if strings.Contains(val.Queryid, "chaos-event") {
                        annotations = append(annotations, response.(*model.AnnotationsPromResponse))
                        cacheError := utils.AddCache(AnalyticsCache, cacheKey, response)
                        if cacheError != nil {
                            errorStr := fmt.Sprintf("%v", cacheError)
                            if strings.Contains(errorStr, "already exists") {
                                cacheError = utils.UpdateCache(AnalyticsCache, cacheKey, response)
                                if cacheError != nil {
                                    log.Printf("Error while caching: %v\n", cacheError)
                                }
                            }
                        }
                    } else if strings.Contains(val.Queryid, "chaos-verdict") {
                        patchEventWithVerdict = true
                        verdictResponse = response.(*model.AnnotationsPromResponse)
                    }
                }
            }
        }(v)
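The add-then-update-on-conflict caching sequence above recurs throughout this commit. A small helper capturing the idea directly on patrickmn/go-cache (a sketch; the actual utils.AddCache/UpdateCache wrappers may behave differently):

    package main

    import (
        "fmt"
        "strings"
        "time"

        "github.com/patrickmn/go-cache"
    )

    // addOrUpdateCache mirrors the handler logic above: try Add, and if the
    // key already exists, fall back to an update (Replace here).
    func addOrUpdateCache(c *cache.Cache, key string, value interface{}) error {
        err := c.Add(key, value, cache.DefaultExpiration)
        if err != nil && strings.Contains(err.Error(), "already exists") {
            return c.Replace(key, value, cache.DefaultExpiration)
        }
        return err
    }

    func main() {
        c := cache.New(5*time.Minute, 10*time.Minute)
        _ = addOrUpdateCache(c, "q1-start-end-url", "first")
        _ = addOrUpdateCache(c, "q1-start-end-url", "second") // hits the update path
        v, _ := c.Get("q1-start-end-url")
        fmt.Println(v)
    }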
@@ -727,12 +777,121 @@ func GetPromQuery(promInput *model.PromInput) (*model.PromResponse, error) {

    wg.Wait()

    if patchEventWithVerdict == true {
        annotations = ops.PatchChaosEventWithVerdict(annotations, verdictResponse, promInput, AnalyticsCache)
    }

    newPromResponse := model.PromResponse{
        MetricsResponse:     metrics,
        AnnotationsResponse: annotations,
    }

    return &newPromResponse, nil
    return &newPromResponse, queryResponseMap, nil
}

// DashboardViewer takes a dashboard view id, prometheus queries, dashboard query map and data variables to query prometheus and send data periodically to the subscribed client
func DashboardViewer(viewID string, promQueries []*model.PromQueryInput, dashboardQueryMap []*model.QueryMapForPanelGroup, dataVariables model.DataVars, r store.StateData) {
    if viewChan, ok := r.DashboardData[viewID]; ok {

        currentTime := time.Now().Unix()
        startTime := strconv.FormatInt(currentTime-int64(dataVariables.RelativeTime), 10)
        endTime := strconv.FormatInt(currentTime, 10)
        relativeCheck := dataVariables.RelativeTime != 0

        var queryType string

        if dataVariables.Start != "" && dataVariables.End != "" {
            queryType = "fixed"
        } else if relativeCheck && dataVariables.RefreshInterval != 0 {
            queryType = "relative"
        } else if relativeCheck && dataVariables.RefreshInterval == 0 {
            queryType = "relatively-fixed"
        } else {
            queryType = "invalid"
        }

        switch queryType {

        case "fixed":
            dsDetails := &model.DsDetails{
                URL:   dataVariables.URL,
                Start: dataVariables.Start,
                End:   dataVariables.End,
            }

            newPromInput := &model.PromInput{
                Queries:   promQueries,
                DsDetails: dsDetails,
            }

            newPromResponse, queryResponseMap, err := GetPromQuery(newPromInput)
            if err != nil {
                log.Printf("Error during data source query of the dashboard view: %v\n", viewID)
            } else {
                dashboardResponse := ops.MapMetricsToDashboard(dashboardQueryMap, newPromResponse, queryResponseMap)
                viewChan <- dashboardResponse
            }

        case "relative":
            for {
                dsDetails := &model.DsDetails{
                    URL:   dataVariables.URL,
                    Start: startTime,
                    End:   endTime,
                }

                newPromInput := &model.PromInput{
                    Queries:   promQueries,
                    DsDetails: dsDetails,
                }

                newPromResponse, queryResponseMap, err := GetPromQuery(newPromInput)
                if err != nil {
                    log.Printf("Error during data source query of the dashboard view: %v at: %v \n", viewID, currentTime)
                    break
                } else {
                    dashboardResponse := ops.MapMetricsToDashboard(dashboardQueryMap, newPromResponse, queryResponseMap)
                    viewChan <- dashboardResponse
                    time.Sleep(time.Duration(int64(dataVariables.RefreshInterval)) * time.Second)
                    currentTime = time.Now().Unix()
                    startTime = strconv.FormatInt(currentTime-int64(dataVariables.RelativeTime), 10)
                    endTime = strconv.FormatInt(currentTime, 10)
                }

                if _, ok := r.DashboardData[viewID]; !ok {
                    break
                }
            }

        case "relatively-fixed":
            dsDetails := &model.DsDetails{
                URL:   dataVariables.URL,
                Start: startTime,
                End:   endTime,
            }

            newPromInput := &model.PromInput{
                Queries:   promQueries,
                DsDetails: dsDetails,
            }

            newPromResponse, queryResponseMap, err := GetPromQuery(newPromInput)
            if err != nil {
                log.Printf("Error during data source query of the dashboard view: %v at: %v \n", viewID, currentTime)
            } else {
                dashboardResponse := ops.MapMetricsToDashboard(dashboardQueryMap, newPromResponse, queryResponseMap)
                viewChan <- dashboardResponse
            }

        case "invalid":
            log.Printf("Wrong parameters for the dashboard view: %v\n", viewID)
        }

        close(viewChan)
        r.Mutex.Lock()
        delete(r.DashboardData, viewID)
        r.Mutex.Unlock()
    }
}

func GetLabelNamesAndValues(promSeriesInput *model.PromSeriesInput) (*model.PromSeriesResponse, error) {
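For the "relative" and "relatively-fixed" modes above, the viewer derives its query window from the current time. A tiny runnable sketch of just that window computation:

    package main

    import (
        "fmt"
        "strconv"
        "time"
    )

    // relativeWindow mirrors the viewer above: a window ending "now" and
    // starting relativeTime seconds earlier, as unix-epoch strings.
    func relativeWindow(relativeTime int64) (start, end string) {
        now := time.Now().Unix()
        return strconv.FormatInt(now-relativeTime, 10), strconv.FormatInt(now, 10)
    }

    func main() {
        start, end := relativeWindow(1800) // last 30 minutes
        fmt.Println(start, end)
    }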
@@ -753,7 +912,13 @@ func GetLabelNamesAndValues(promSeriesInput *model.PromSeriesInput) (*model.Prom

     cacheError := utils.AddCache(AnalyticsCache, cacheKey, response)
     if cacheError != nil {
-        log.Printf("Adding cache: %v\n", cacheError)
+        errorStr := fmt.Sprintf("%v", cacheError)
+        if strings.Contains(errorStr, "already exists") {
+            cacheError = utils.UpdateCache(AnalyticsCache, cacheKey, response)
+            if cacheError != nil {
+                log.Printf("Error while caching: %v\n", cacheError)
+            }
+        }
     }

     newPromSeriesResponse = response
@@ -781,7 +946,13 @@ func GetSeriesList(promSeriesListInput *model.DsDetails) (*model.PromSeriesListR

     cacheError := utils.AddCache(AnalyticsCache, cacheKey, response)
     if cacheError != nil {
-        log.Printf("Adding cache: %v\n", cacheError)
+        errorStr := fmt.Sprintf("%v", cacheError)
+        if strings.Contains(errorStr, "already exists") {
+            cacheError = utils.UpdateCache(AnalyticsCache, cacheKey, response)
+            if cacheError != nil {
+                log.Printf("Error while caching: %v\n", cacheError)
+            }
+        }
     }

     newPromSeriesListResponse = response
@@ -791,10 +962,32 @@ func GetSeriesList(promSeriesListInput *model.DsDetails) (*model.PromSeriesListR
}

// QueryListDashboard lists all the dashboards present in a project using the projectID
func QueryListDashboard(projectID string) ([]*model.ListDashboardResponse, error) {
    query := bson.D{
        {"project_id", projectID},
        {"is_removed", false},
func QueryListDashboard(projectID string, clusterID *string, dbID *string) ([]*model.ListDashboardResponse, error) {

    var (
        query bson.D
        wg    sync.WaitGroup
    )

    if clusterID == nil || *clusterID == "" {
        if dbID != nil && *dbID != "" {
            query = bson.D{
                {"project_id", projectID},
                {"db_id", dbID},
                {"is_removed", false},
            }
        } else {
            query = bson.D{
                {"project_id", projectID},
                {"is_removed", false},
            }
        }
    } else {
        query = bson.D{
            {"project_id", projectID},
            {"cluster_id", clusterID},
            {"is_removed", false},
        }
    }

    dashboards, err := dbOperationsAnalytics.ListDashboard(query)
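The listing filter above enumerates its three query shapes explicitly. An equivalent, more compact construction appends the optional clauses instead (a sketch, not the code in this commit; clause order differs slightly):

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/bson"
    )

    // dashboardFilter builds the same three filter shapes as above by
    // appending optional clauses rather than enumerating each combination.
    func dashboardFilter(projectID string, clusterID, dbID *string) bson.D {
        query := bson.D{{"project_id", projectID}, {"is_removed", false}}
        if clusterID != nil && *clusterID != "" {
            query = append(query, bson.E{Key: "cluster_id", Value: clusterID})
        } else if dbID != nil && *dbID != "" {
            query = append(query, bson.E{Key: "db_id", Value: dbID})
        }
        return query
    }

    func main() {
        id := "cluster-1"
        fmt.Println(dashboardFilter("proj-1", &id, nil))
    }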
@@ -808,12 +1001,68 @@ func QueryListDashboard(projectID string) ([]*model.ListDashboardResponse, error
        return nil, err
    }

    for _, dashboard := range newListDashboard {
        datasource, err := dbOperationsAnalytics.GetDataSourceByID(dashboard.DsID)
        if err != nil {
            return nil, fmt.Errorf("error on querying from datasource collection: %v\n", err)
    dataSourceMap := make(map[string]*dbSchemaAnalytics.DataSource)
    dataSourceHealthCheckMap := make(map[string]string)

    if clusterID != nil && *clusterID != "" {
        var mutex = &sync.Mutex{}
        wg.Add(len(newListDashboard))

        for _, dashboard := range newListDashboard {
            if _, ok := dataSourceMap[dashboard.DsID]; !ok {
                datasource, err := dbOperationsAnalytics.GetDataSourceByID(dashboard.DsID)
                if err != nil {
                    return nil, fmt.Errorf("error on querying from datasource collection: %v\n", err)
                }
                dataSourceMap[dashboard.DsID] = datasource
                go func(val *dbSchemaAnalytics.DataSource) {
                    defer wg.Done()

                    if _, ok := dataSourceHealthCheckMap[val.DsID]; !ok {
                        dataSourceStatus := prometheus.TSDBHealthCheck(datasource.DsURL, datasource.DsType)
                        mutex.Lock()
                        dataSourceHealthCheckMap[val.DsID] = dataSourceStatus
                        mutex.Unlock()
                    } else {
                        return
                    }
                }(datasource)
            } else {
                wg.Done()
            }
        }

        wg.Wait()
    }

    for _, dashboard := range newListDashboard {

        var datasource *dbSchemaAnalytics.DataSource

        if clusterID != nil && *clusterID != "" {
            if dataSourceInfo, ok := dataSourceMap[dashboard.DsID]; ok {
                datasource = dataSourceInfo
            } else {
                return nil, fmt.Errorf("error on querying from datasource collection")
            }
            if dataSourceHealthStatus, ok := dataSourceHealthCheckMap[dashboard.DsID]; ok {
                dashboard.DsHealthStatus = &dataSourceHealthStatus
            } else {
                return nil, fmt.Errorf("error while checking data source health status")
            }
        } else {
            if dataSourceInfo, ok := dataSourceMap[dashboard.DsID]; !ok {
                datasource, err = dbOperationsAnalytics.GetDataSourceByID(dashboard.DsID)
                if err != nil {
                    return nil, fmt.Errorf("error on querying from datasource collection: %v\n", err)
                }
                dataSourceMap[dashboard.DsID] = datasource
            } else {
                datasource = dataSourceInfo
            }
        }

        dashboard.DsURL = &datasource.DsURL
        dashboard.DsType = &datasource.DsType
        dashboard.DsName = &datasource.DsName
@@ -3,10 +3,15 @@ package ops

import (
    "errors"
    "fmt"
    "log"
    "strconv"
    "strings"
    "time"

    "github.com/jinzhu/copier"
    "github.com/litmuschaos/litmus/litmus-portal/graphql-server/graph/model"
    "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils"
    "github.com/patrickmn/go-cache"
)

func CreateDateMap(updatedAt string, filter model.TimeFrequency, statsMap map[string]model.WorkflowStats) error {
@@ -49,3 +54,165 @@ func CreateDateMap(updatedAt string, filter model.TimeFrequency, statsMap map[st
    }
    return nil
}

// PatchChaosEventWithVerdict takes annotations with chaos events, chaos verdict prometheus response, prometheus queries and cache object to patch and update chaos events with chaos verdict
func PatchChaosEventWithVerdict(annotations []*model.AnnotationsPromResponse, verdictResponse *model.AnnotationsPromResponse, promInput *model.PromInput, AnalyticsCache *cache.Cache) []*model.AnnotationsPromResponse {
    var existingAnnotations []*model.AnnotationsPromResponse
    err := copier.Copy(&existingAnnotations, &annotations)
    if err != nil {
        log.Printf("Error parsing existing annotations %v\n", err)
    }

    for annotationIndex, annotation := range existingAnnotations {
        var existingAnnotation model.AnnotationsPromResponse
        err := copier.Copy(&existingAnnotation, &annotation)
        if err != nil {
            log.Printf("Error parsing existing annotation %v\n", err)
        }

        if strings.Contains(existingAnnotation.Queryid, "chaos-event") {
            var newAnnotation model.AnnotationsPromResponse
            err := copier.Copy(&newAnnotation, &verdictResponse)
            if err != nil {
                log.Printf("Error parsing new annotation %v\n", err)
            }

            duplicateEventIndices := make(map[int]int)
            var duplicateEventOffset = 0
            for verdictLegendIndex, verdictLegend := range newAnnotation.Legends {
                verdictLegendName := func(str *string) string { return *str }(verdictLegend)
                var (
                    eventFound             = false
                    duplicateEventsFound   = false
                    firstEventFoundAtIndex = 0
                )

                for eventLegendIndex, eventLegend := range existingAnnotation.Legends {
                    eventLegendName := func(str *string) string { return *str }(eventLegend)

                    if verdictLegendName == eventLegendName {
                        if !eventFound {
                            firstEventFoundAtIndex = eventLegendIndex
                        } else {
                            duplicateEventsFound = true
                            if _, ok := duplicateEventIndices[eventLegendIndex]; !ok {
                                duplicateEventIndices[eventLegendIndex] = duplicateEventOffset
                                duplicateEventOffset++
                            }
                        }
                        eventFound = true
                        var newVerdictSubData []*model.SubData

                        for _, verdictSubData := range verdictResponse.SubDataArray[verdictLegendIndex] {
                            verdictSubDataDate := func(date *float64) float64 { return *date }(verdictSubData.Date)
                            var subDataFound = false
                            for eventSubDataIndex, eventSubData := range annotation.SubDataArray[eventLegendIndex] {
                                if eventSubData != nil {
                                    eventSubDataDate := func(date *float64) float64 { return *date }(eventSubData.Date)
                                    if eventSubData.SubDataName == verdictSubData.SubDataName && eventSubDataDate == verdictSubDataDate {
                                        subDataFound = true
                                        annotations[annotationIndex].SubDataArray[eventLegendIndex][eventSubDataIndex].Value = verdictSubData.Value
                                    }
                                }
                            }

                            if !subDataFound && verdictSubDataDate > 0 {
                                newVerdictSubData = append(newVerdictSubData, verdictSubData)
                            }
                        }
                        annotations[annotationIndex].SubDataArray[eventLegendIndex] = append(annotations[annotationIndex].SubDataArray[eventLegendIndex], newVerdictSubData...)

                        if duplicateEventsFound {
                            existingDates := make(map[float64]bool)
                            for _, tsv := range annotations[annotationIndex].Tsvs[firstEventFoundAtIndex] {
                                existingDates[func(date *float64) float64 { return *date }(tsv.Date)] = true
                            }

                            if _, ok := existingDates[func(date *float64) float64 { return *date }(annotations[annotationIndex].Tsvs[eventLegendIndex][0].Date)]; !ok {
                                annotations[annotationIndex].Tsvs[firstEventFoundAtIndex] = append(annotations[annotationIndex].Tsvs[firstEventFoundAtIndex], annotations[annotationIndex].Tsvs[eventLegendIndex]...)
                            }
                            annotations[annotationIndex].SubDataArray[firstEventFoundAtIndex] = annotations[annotationIndex].SubDataArray[eventLegendIndex]
                        }
                    }
                }

                if !eventFound {
                    verdictValid := false
                    for _, tsv := range verdictResponse.Tsvs[verdictLegendIndex] {
                        if !verdictValid && func(val *int) int { return *val }(tsv.Value) == 1 {
                            verdictValid = true
                        }
                    }

                    if verdictValid {
                        annotations[annotationIndex].Legends = append(annotations[annotationIndex].Legends, verdictLegend)
                        annotations[annotationIndex].SubDataArray = append(annotations[annotationIndex].SubDataArray, verdictResponse.SubDataArray[verdictLegendIndex])
                        annotations[annotationIndex].Tsvs = append(annotations[annotationIndex].Tsvs, nil)
                    }
                }
            }

            if duplicateEventOffset != 0 {
                numberOfEvents := len(annotations[annotationIndex].Legends)
                for i := 0; i < numberOfEvents; i++ {
                    if offset, ok := duplicateEventIndices[i]; ok {
                        annotations[annotationIndex].Legends = append(annotations[annotationIndex].Legends[:i-offset], annotations[annotationIndex].Legends[i-offset+1:]...)
                        annotations[annotationIndex].Tsvs = append(annotations[annotationIndex].Tsvs[:i-offset], annotations[annotationIndex].Tsvs[i-offset+1:]...)
                        annotations[annotationIndex].SubDataArray = append(annotations[annotationIndex].SubDataArray[:i-offset], annotations[annotationIndex].SubDataArray[i-offset+1:]...)
                    }
                }
            }

            eventCacheKey := annotation.Queryid + "-" + promInput.DsDetails.Start + "-" + promInput.DsDetails.End + "-" + promInput.DsDetails.URL
            cacheError := utils.AddCache(AnalyticsCache, eventCacheKey, annotations[annotationIndex])
            if cacheError != nil {
                errorStr := fmt.Sprintf("%v", cacheError)
                if strings.Contains(errorStr, "already exists") {
                    cacheError = utils.UpdateCache(AnalyticsCache, eventCacheKey, annotations[annotationIndex])
                    if cacheError != nil {
                        log.Printf("Error while caching: %v\n", cacheError)
                    }
                }
            }
        }
    }

    return annotations
}

// MapMetricsToDashboard takes dashboard query map, prometheus response and query response map for mapping metrics to the panels for a dashboard
func MapMetricsToDashboard(dashboardQueryMap []*model.QueryMapForPanelGroup, newPromResponse *model.PromResponse, queryResponseMap map[string]*model.MetricsPromResponse) *model.DashboardPromResponse {
    var dashboardMetrics []*model.MetricDataForPanelGroup

    for _, panelGroupQueryMap := range dashboardQueryMap {
        var panelGroupMetrics []*model.MetricDataForPanel
        for _, panelQueryMap := range panelGroupQueryMap.PanelQueryMap {
            var panelQueries []*model.MetricsPromResponse
            for _, queryID := range panelQueryMap.QueryIDs {
                panelQueries = append(panelQueries, queryResponseMap[queryID])
            }
            panelMetricsData := &model.MetricDataForPanel{
                PanelID:              panelQueryMap.PanelID,
                PanelMetricsResponse: panelQueries,
            }
            panelGroupMetrics = append(panelGroupMetrics, panelMetricsData)
        }
        panelGroupMetricsData := &model.MetricDataForPanelGroup{
            PanelGroupID:              panelGroupQueryMap.PanelGroupID,
            PanelGroupMetricsResponse: panelGroupMetrics,
        }
        dashboardMetrics = append(dashboardMetrics, panelGroupMetricsData)
    }

    var promResponse model.PromResponse
    err := copier.Copy(&promResponse, &newPromResponse)
    if err != nil {
        log.Printf("Error parsing annotations %v\n", err)
    }
    dashboardResponse := &model.DashboardPromResponse{
        DashboardMetricsResponse: dashboardMetrics,
        AnnotationsResponse:      promResponse.AnnotationsResponse,
    }

    return dashboardResponse
}
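The patching logic above matches a verdict sub-data entry to an event entry by sub-data name plus date, overwriting the value on a match and appending otherwise. An isolated, runnable sketch of just that matching rule:

    package main

    import "fmt"

    // SubData matches the model above (Date in milliseconds, pointer-valued).
    type SubData struct {
        Date        *float64
        Value       string
        SubDataName string
    }

    // patchSubData mirrors the rule above: a verdict entry overwrites an event
    // entry when both name and date agree; otherwise it is appended.
    func patchSubData(event, verdict []*SubData) []*SubData {
        for _, v := range verdict {
            matched := false
            for _, e := range event {
                if e != nil && e.SubDataName == v.SubDataName && *e.Date == *v.Date {
                    e.Value = v.Value
                    matched = true
                }
            }
            if !matched && *v.Date > 0 {
                event = append(event, v)
            }
        }
        return event
    }

    func main() {
        d := 1625.0
        event := []*SubData{{Date: &d, Value: "Awaited", SubDataName: "Experiment verdict"}}
        verdict := []*SubData{{Date: &d, Value: "Pass", SubDataName: "Experiment verdict"}}
        for _, s := range patchSubData(event, verdict) {
            fmt.Println(s.SubDataName, s.Value)
        }
    }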
@@ -11,6 +11,7 @@ import (
    "time"

    "github.com/jinzhu/copier"
    "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils"
    "github.com/prometheus/client_golang/api"
    apiV1 "github.com/prometheus/client_golang/api/prometheus/v1"
    md "github.com/prometheus/common/model"
@@ -71,47 +72,88 @@ func Query(prom analytics.PromQuery, queryType string) (interface{}, error) {
        log.Printf("Unsupported result format: %s", value.Type().String())
    }

    chaosEventLabels := map[string]string{
        "workflow_name":       "Workflow",
        "chaosengine_context": "Engine context",
    }

    chaosVerdictLabels := map[string]string{
        "workflow_name":            "Workflow",
        "chaosengine_context":      "Engine context",
        "app_namespace":            "App namespace",
        "app_label":                "App label",
        "app_kind":                 "App kind",
        "chaosresult_verdict":      "Experiment verdict",
        "probe_success_percentage": "Probe success percentage",
    }

    var (
        newMetrics         analytics.MetricsResponse
        newAnnotations     analytics.AnnotationsResponse
        newMetricsTSVs     [][]*analytics.MetricsTimeStampValue
        newAnnotationsTSVs [][]*analytics.AnnotationsTimeStampValue
        newMetrics         model.MetricsPromResponse
        newAnnotations     model.AnnotationsPromResponse
        newMetricsTSVs     [][]*model.MetricsTimeStampValue
        newAnnotationsTSVs [][]*model.AnnotationsTimeStampValue
        newLegends         []*string
        newSubDataArray    [][]*model.SubData
    )

    for _, v := range data {

        var (
            tempMetricsTSV     []*analytics.MetricsTimeStampValue
            tempAnnotationsTSV []*analytics.AnnotationsTimeStampValue
            tempMetricsTSV     []*model.MetricsTimeStampValue
            tempAnnotationsTSV []*model.AnnotationsTimeStampValue
            tempLegends        []*string
            tempSubDataArray   []*model.SubData
        )

        if queryType == "metrics" {
            for _, value := range v.Values {
                temp := &analytics.MetricsTimeStampValue{
                    Date:  func(timestamp float64) *float64 { return &timestamp }(map[bool]float64{true: float64(value.Timestamp), false: 0}[float64(value.Timestamp) >= 0.0]),
                    Value: func(val float64) *float64 { return &val }(map[bool]float64{true: float64(value.Value), false: 0.0}[float64(value.Value) >= 0.0]),
                }

                tempMetricsTSV = append(tempMetricsTSV, temp)
            }
            newMetricsTSVs = append(newMetricsTSVs, tempMetricsTSV)
        } else {
            for _, value := range v.Values {
                temp := &analytics.AnnotationsTimeStampValue{
                    Date:  func(timestamp float64) *float64 { return &timestamp }(map[bool]float64{true: float64(value.Timestamp), false: 0}[float64(value.Timestamp) >= 0.0]),
                    Value: func(val int) *int { return &val }(map[bool]int{true: int(value.Value), false: 0}[int(value.Value) >= 0]),
                }

                tempAnnotationsTSV = append(tempAnnotationsTSV, temp)
            }
            newAnnotationsTSVs = append(newAnnotationsTSVs, tempAnnotationsTSV)
        }
        eventValid := false

        if prom.Legend == nil || *prom.Legend == "" {
            tempLegends = append(tempLegends, func(str string) *string { return &str }(fmt.Sprint(v.Metric.String())))
        } else {
            if strings.Contains(prom.Queryid, "chaos-event") || strings.Contains(prom.Queryid, "chaos-verdict") {
                var checkMap map[string]string

                if strings.Contains(prom.Queryid, "chaos-event") {
                    checkMap = chaosEventLabels
                } else if strings.Contains(prom.Queryid, "chaos-verdict") {
                    checkMap = chaosVerdictLabels
                }

                var baseString = utils.Split(v.Metric.String(), "{", "}")
                var keyValueMap = utils.GetKeyValueMapFromQuotedString(baseString)
                var timeStamp float64
                if keyValueMap["chaos_injection_time"] != "" {
                    timeStamp, err = strconv.ParseFloat(keyValueMap["chaos_injection_time"], 64)
                    timeStampInteger := int64(timeStamp)
                    if strings.Contains(prom.Queryid, "chaos-event") && timeStampInteger >= startTime && timeStampInteger <= endTime {
                        eventValid = true
                    }
                } else {
                    timeStamp = 0
                }
                if err != nil {
                    log.Printf("Error parsing chaos injection time: %v\n", err)
                } else {
                    for key, value := range keyValueMap {
                        if nameVal, ok := checkMap[key]; ok {
                            tempSubData := &model.SubData{
                                Date:        func(timestamp float64) *float64 { return &timestamp }(map[bool]float64{true: timeStamp * 1000, false: 0}[timeStamp >= 0.0]),
                                Value:       value,
                                SubDataName: nameVal,
                            }
                            tempSubDataArray = append(tempSubDataArray, tempSubData)
                        }
                    }
                    if strings.Contains(prom.Queryid, "chaos-event") {
                        if eventValid {
                            newSubDataArray = append(newSubDataArray, tempSubDataArray)
                        }
                    } else {
                        newSubDataArray = append(newSubDataArray, tempSubDataArray)
                    }
                }
            }

            r, _ := regexp.Compile(`\{{(.*?)\}}`)
            elements := r.FindAllString(*prom.Legend, -1)
@ -131,7 +173,43 @@ func Query(prom analytics.PromQuery, queryType string) (interface{}, error) {
|
|||
|
||||
tempLegends = append(tempLegends, func(str string) *string { return &str }(fmt.Sprint(filterResponse)))
|
||||
}
|
||||
newLegends = append(newLegends, tempLegends...)
|
||||
|
||||
if strings.Contains(prom.Queryid, "chaos-event") {
|
||||
if eventValid {
|
||||
newLegends = append(newLegends, tempLegends...)
|
||||
}
|
||||
} else {
|
||||
newLegends = append(newLegends, tempLegends...)
|
||||
}
|
||||
|
||||
if queryType == "metrics" {
|
||||
for _, value := range v.Values {
|
||||
temp := &model.MetricsTimeStampValue{
|
||||
Date: func(timestamp float64) *float64 { return ×tamp }(map[bool]float64{true: float64(value.Timestamp), false: 0}[float64(value.Timestamp) >= 0.0]),
|
||||
Value: func(val float64) *float64 { return &val }(map[bool]float64{true: float64(value.Value), false: 0.0}[float64(value.Value) >= 0.0]),
|
||||
}
|
||||
|
||||
tempMetricsTSV = append(tempMetricsTSV, temp)
|
||||
}
|
||||
newMetricsTSVs = append(newMetricsTSVs, tempMetricsTSV)
|
||||
} else {
|
||||
for _, value := range v.Values {
|
||||
temp := &model.AnnotationsTimeStampValue{
|
||||
Date: func(timestamp float64) *float64 { return ×tamp }(map[bool]float64{true: float64(value.Timestamp), false: 0}[float64(value.Timestamp) >= 0.0]),
|
||||
Value: func(val int) *int { return &val }(map[bool]int{true: int(value.Value), false: 0}[int(value.Value) >= 0]),
|
||||
}
|
||||
|
||||
tempAnnotationsTSV = append(tempAnnotationsTSV, temp)
|
||||
}
|
||||
|
||||
if strings.Contains(prom.Queryid, "chaos-event") {
|
||||
if eventValid {
|
||||
newAnnotationsTSVs = append(newAnnotationsTSVs, tempAnnotationsTSV)
|
||||
}
|
||||
} else {
|
||||
newAnnotationsTSVs = append(newAnnotationsTSVs, tempAnnotationsTSV)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if queryType == "metrics" {
|
||||
|
|
@ -153,13 +231,12 @@ func Query(prom analytics.PromQuery, queryType string) (interface{}, error) {
|
|||
newAnnotations.Tsvs = newAnnotationsTSVs
|
||||
newAnnotations.Queryid = prom.Queryid
|
||||
newAnnotations.Legends = newLegends
|
||||
newAnnotations.SubDataArray = newSubDataArray
|
||||
|
||||
var resp model.AnnotationsPromResponse
|
||||
if len(newLegends) != 0 {
|
||||
err := copier.Copy(&resp, &newAnnotations)
|
||||
if err != nil {
|
||||
return &model.AnnotationsPromResponse{}, err
|
||||
}
|
||||
err := copier.Copy(&resp, &newAnnotations)
|
||||
if err != nil {
|
||||
return &model.AnnotationsPromResponse{}, err
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
|
|
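The `map[bool]{...}[cond]` construct that recurs above is a branch-free clamp: it indexes a two-entry literal map with a boolean condition to pick either the raw value or zero, then an immediately-invoked closure returns a pointer to the chosen copy. A standalone sketch of the same idiom, with hypothetical values not taken from the commit:

package main

import "fmt"

// clampToPointer mirrors the idiom used in Query: select the value when it
// is non-negative, otherwise 0, and return a pointer to the selected copy.
func clampToPointer(value float64) *float64 {
	selected := map[bool]float64{true: value, false: 0}[value >= 0.0]
	return &selected
}

func main() {
	fmt.Println(*clampToPointer(42.5)) // 42.5
	fmt.Println(*clampToPointer(-3))   // 0
}

A plain if statement would express the same thing with no per-call map allocation; the map-index form trades that cost for a one-line expression usable inside a struct literal.
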
@@ -194,8 +271,8 @@ func LabelNamesAndValues(prom analytics.PromSeries) (*model.PromSeriesResponse,
	}

	var (
		newResponse    analytics.PromSeriesResponse
		newLabelValues []*analytics.LabelValue
		newResponse    model.PromSeriesResponse
		newLabelValues []*model.LabelValue
	)

	if len(labelNames) >= 1 {
@@ -205,17 +282,17 @@ func LabelNamesAndValues(prom analytics.PromSeries) (*model.PromSeriesResponse,
			if index != 0 {
				go func(index int, label string) {
					defer wg.Done()
					var newValues []*analytics.Option
					var newValues []*model.Option
					values, _, err := client.LabelValues(context.TODO(), label, matcher, start, end)
					if err != nil {
						return
					}

					for _, value := range values {
						newValues = append(newValues, func(str string) *analytics.Option { return &analytics.Option{Name: str} }(fmt.Sprint(value)))
						newValues = append(newValues, func(str string) *model.Option { return &model.Option{Name: str} }(fmt.Sprint(value)))
					}

					tempLabelValues := &analytics.LabelValue{
					tempLabelValues := &model.LabelValue{
						Label:  label,
						Values: newValues,
					}

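The hunk above fans out one goroutine per label name and joins them with a WaitGroup, passing the loop variables as arguments so each goroutine captures its own copy. A self-contained sketch of that fan-out pattern, using a hypothetical fetch function rather than the Prometheus client:

package main

import (
	"fmt"
	"sync"
)

// fetchValues is a hypothetical stand-in for client.LabelValues.
func fetchValues(label string) []string {
	return []string{label + "-a", label + "-b"}
}

func main() {
	labels := []string{"job", "instance", "namespace"}
	results := make([][]string, len(labels))

	var wg sync.WaitGroup
	for i, label := range labels {
		wg.Add(1)
		// pass loop variables as arguments, as the hunk does, so each
		// goroutine works on its own copy and its own result slot
		go func(i int, label string) {
			defer wg.Done()
			results[i] = fetchValues(label)
		}(i, label)
	}
	wg.Wait()

	fmt.Println(results)
}

Writing each result into a distinct slice index keeps the goroutines from sharing mutable state, so no extra locking is needed for the results themselves.
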
@@ -261,7 +338,7 @@ func SeriesList(prom analytics.PromDSDetails) (*model.PromSeriesListResponse, er
	var (
		matcher     []string
		newValues   []*string
		newResponse analytics.PromSeriesListResponse
		newResponse model.PromSeriesListResponse
	)

	labelValues, _, err := client.LabelValues(context.TODO(), "__name__", matcher, start, end)

@@ -2,6 +2,12 @@ package analytics

type STATE string

type PromDSDetails struct {
	URL   string
	Start string
	End   string
}

type PromQuery struct {
	Queryid string
	Query   string

@@ -11,57 +17,11 @@ type PromQuery struct {
	DSdetails *PromDSDetails
}

type MetricsResponse struct {
	Queryid string                     `json:"queryid"`
	Legends []*string                  `json:"legends"`
	Tsvs    [][]*MetricsTimeStampValue `json:"tsvs"`
}

type MetricsTimeStampValue struct {
	Date  *float64 `json:"date"`
	Value *float64 `json:"value"`
}

type AnnotationsResponse struct {
	Queryid string                         `json:"queryid"`
	Legends []*string                      `json:"legends"`
	Tsvs    [][]*AnnotationsTimeStampValue `json:"tsvs"`
}

type AnnotationsTimeStampValue struct {
	Date  *float64 `json:"date"`
	Value *int     `json:"value"`
}

type PromSeries struct {
	Series    string
	DSdetails *PromDSDetails
}

type PromDSDetails struct {
	URL   string
	Start string
	End   string
}

type PromSeriesListResponse struct {
	SeriesList []*string `json:"seriesList"`
}

type LabelValue struct {
	Label  string    `json:"label"`
	Values []*Option `json:"values"`
}

type Option struct {
	Name string `json:"name"`
}

type PromSeriesResponse struct {
	Series      string        `json:"series"`
	LabelValues []*LabelValue `json:"labelValues"`
}

// Portal Dashboard Types
type PortalDashboard struct {
	DashboardID string `json:"dashboardID"`

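For a quick look at what the json tags on these response types serialize to, here is a marshaling sketch with trimmed-down, hypothetical copies of the shapes above (the values are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// trimmed, hypothetical copies of the response shapes, just to show the
// wire format the json tags produce
type metricsTimeStampValue struct {
	Date  *float64 `json:"date"`
	Value *float64 `json:"value"`
}

type metricsResponse struct {
	Queryid string                     `json:"queryid"`
	Legends []*string                  `json:"legends"`
	Tsvs    [][]*metricsTimeStampValue `json:"tsvs"`
}

func main() {
	date, value := 1625227200.0, 0.42
	legend := "Workflow"

	out, _ := json.Marshal(metricsResponse{
		Queryid: "q-1",
		Legends: []*string{&legend},
		Tsvs:    [][]*metricsTimeStampValue{{{Date: &date, Value: &value}}},
	})
	fmt.Println(string(out))
	// {"queryid":"q-1","legends":["Workflow"],"tsvs":[[{"date":1625227200,"value":0.42}]]}
}
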
@@ -13,6 +13,7 @@ type StateData struct {
	WorkflowEventPublish map[string][]chan *model.WorkflowRun
	WorkflowLog          map[string]chan *model.PodLogResponse
	KubeObjectData       map[string]chan *model.KubeObjectResponse
	DashboardData        map[string]chan *model.DashboardPromResponse
	Mutex                *sync.Mutex
}

@@ -23,6 +24,7 @@ func NewStore() *StateData {
		WorkflowEventPublish: make(map[string][]chan *model.WorkflowRun),
		WorkflowLog:          make(map[string]chan *model.PodLogResponse),
		KubeObjectData:       make(map[string]chan *model.KubeObjectResponse),
		DashboardData:        make(map[string]chan *model.DashboardPromResponse),
		Mutex:                &sync.Mutex{},
	}
}

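The new DashboardData map is the channel registry the dashboard subscription publishes through. A minimal sketch of that publish/consume pattern under the shared mutex, with a hypothetical key and payload type rather than the server's actual wiring:

package main

import (
	"fmt"
	"sync"
)

// hypothetical stand-in for model.DashboardPromResponse
type dashboardPromResponse struct {
	Payload string
}

type stateData struct {
	DashboardData map[string]chan *dashboardPromResponse
	Mutex         *sync.Mutex
}

func main() {
	state := &stateData{
		DashboardData: make(map[string]chan *dashboardPromResponse),
		Mutex:         &sync.Mutex{},
	}

	// a subscriber registers a channel under a viewer-specific key;
	// the map write is guarded by the shared mutex
	state.Mutex.Lock()
	state.DashboardData["dashboard-viewer-1"] = make(chan *dashboardPromResponse, 1)
	state.Mutex.Unlock()

	// a publisher looks the channel up under the same lock and sends
	state.Mutex.Lock()
	if ch, ok := state.DashboardData["dashboard-viewer-1"]; ok {
		ch <- &dashboardPromResponse{Payload: "metrics batch"}
	}
	state.Mutex.Unlock()

	fmt.Println((<-state.DashboardData["dashboard-viewer-1"]).Payload)
}

Guarding every map access with one mutex is what keeps concurrent subscribers and publishers from racing on the registry, which matches the concurrency fixes this commit describes.
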
@@ -16,6 +16,11 @@ func AddCache(c *cache.Cache, k string, x interface{}) error {
	return c.Add(k, x, cacheExpiration)
}

// UpdateCache replaces the object cached under the given key
func UpdateCache(c *cache.Cache, k string, x interface{}) error {
	return c.Replace(k, x, cacheExpiration)
}

// NewCache initializes a new cache with a given expiration period and cleanup interval
func NewCache() *cache.Cache {
	return cache.New(cacheExpiration, cleanupInterval)

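These helpers wrap github.com/patrickmn/go-cache, where Add errors if the key already exists and Replace errors if it does not, so AddCache and UpdateCache cannot silently clobber or resurrect entries. A small sketch of that contract (the key and values are illustrative, not the server's cache keys):

package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	c := cache.New(5*time.Minute, 10*time.Minute)

	// Add succeeds only when the key is absent
	fmt.Println(c.Add("ds-1", "healthy", cache.DefaultExpiration)) // <nil>
	fmt.Println(c.Add("ds-1", "healthy", cache.DefaultExpiration)) // non-nil error: key exists

	// Replace succeeds only when the key is present
	fmt.Println(c.Replace("ds-1", "unhealthy", cache.DefaultExpiration)) // <nil>

	v, found := c.Get("ds-1")
	fmt.Println(v, found) // unhealthy true
}
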
@@ -7,6 +7,7 @@ import (
	"net/http"
	"os"
	"strings"
	"unicode"

	dbSchemaCluster "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/cluster"
	"github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/types"

@@ -130,3 +131,45 @@ func ContainsString(s []string, str string) bool {
func Truncate(num float64) float64 {
	return float64(int(num*100)) / 100
}

// Split returns the sub-string of str that lies between the first occurrence
// of the before sub-string and the following after sub-string
func Split(str, before, after string) string {
	a := strings.SplitAfterN(str, before, 2)
	b := strings.SplitAfterN(a[len(a)-1], after, 2)
	if len(b) == 1 {
		return b[0]
	}
	return b[0][0 : len(b[0])-len(after)]
}

// GetKeyValueMapFromQuotedString returns the key-value pairs encoded in a
// string of quoted label assignments such as a Prometheus metric string
func GetKeyValueMapFromQuotedString(quotedString string) map[string]string {
	lastQuote := rune(0)
	f := func(c rune) bool {
		switch {
		case c == lastQuote: // closing quote: leave quoted mode
			lastQuote = rune(0)
			return false
		case lastQuote != rune(0): // inside quotes: never split
			return false
		case unicode.In(c, unicode.Quotation_Mark): // opening quote
			lastQuote = c
			return false
		default: // outside quotes: split on whitespace
			return unicode.IsSpace(c)
		}
	}

	// split the string by spaces while treating quoted sections as single fields
	items := strings.FieldsFunc(quotedString, f)

	// create and fill the map
	m := make(map[string]string)
	for _, item := range items {
		// SplitN tolerates values that themselves contain '='
		x := strings.SplitN(item, "=", 2)
		// strip the trailing comma (if any) and the surrounding quotes
		m[x[0]] = strings.Trim(strings.TrimSuffix(x[1], ","), `"`)
	}

	return m
}

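The quote-aware splitter is the core trick here: strings.FieldsFunc with a stateful closure treats quoted runs as single fields, so label values containing spaces survive the split. A standalone demonstration (the sample label string is illustrative):

package main

import (
	"fmt"
	"strings"
	"unicode"
)

func main() {
	input := `workflow_name="podtato head", chaosresult_verdict="Pass"`

	lastQuote := rune(0)
	f := func(c rune) bool {
		switch {
		case c == lastQuote: // closing quote: leave quoted mode
			lastQuote = rune(0)
			return false
		case lastQuote != rune(0): // inside quotes: never split
			return false
		case unicode.In(c, unicode.Quotation_Mark): // opening quote
			lastQuote = c
			return false
		default: // outside quotes: split on whitespace
			return unicode.IsSpace(c)
		}
	}

	fmt.Printf("%q\n", strings.FieldsFunc(input, f))
	// ["workflow_name=\"podtato head\"," "chaosresult_verdict=\"Pass\""]
}

Trimming the comma before the quotes in the map-building loop matters because the ", "-separated label list leaves a trailing comma on every pair except the last one.
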