Removed k8s client-go dependencies from graphql (#4747)

* removed k8s client-go dependencies

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* Update push.yml

* removed k8s client-go dependencies

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* removed token issues

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* removed token issues

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* removed token issues

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* removed token issues

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* removed token issues

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* removed token issues

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* fixed imports

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* reverted changes for push pipeline

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* updated go mod

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* updated mocks

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* updated manifests

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* updated manifest

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

* updated manifest

Signed-off-by: Saranya-jena <saranya.jena@harness.io>

---------

Signed-off-by: Saranya-jena <saranya.jena@harness.io>
This commit is contained in:
Saranya Jena 2024-07-08 12:52:45 +05:30 committed by GitHub
parent 9d58d8b584
commit a00691fe8b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
23 changed files with 209 additions and 1021 deletions

View File

@ -279,4 +279,4 @@ jobs:
source env-vars
FRONTEND_IMAGE=${{ matrix.frontend.image_name }}
timestamp=`date "+%s"`
make push-frontend
make push-frontend

View File

@ -5,10 +5,6 @@ import (
"regexp"
"strings"
"github.com/sirupsen/logrus"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_infrastructure"
"github.com/gin-gonic/gin"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/utils"
)
@ -29,13 +25,7 @@ func ValidateCors() gin.HandlerFunc {
}
validOrigin := false
endpoint, err := chaos_infrastructure.GetEndpoint("external")
if err != nil {
logrus.Error(err)
} else if endpoint != "" {
allowedOrigins = append(allowedOrigins, endpoint)
}
for _, allowedOrigin := range allowedOrigins {
match, err := regexp.MatchString(allowedOrigin, origin)
if err == nil && match {

View File

@ -30,7 +30,6 @@ require (
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.26.0
k8s.io/apimachinery v0.26.0
k8s.io/client-go v12.0.0+incompatible
sigs.k8s.io/yaml v1.4.0
)
@ -69,10 +68,8 @@ require (
github.com/golang/snappy v0.0.1 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
@ -92,7 +89,6 @@ require (
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/skeema/knownhosts v1.2.2 // indirect
github.com/sosodev/duration v1.3.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
@ -118,6 +114,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/client-go v12.0.0+incompatible // indirect
k8s.io/klog/v2 v2.80.1 // indirect
k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect

View File

@ -324,8 +324,6 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@ -622,7 +620,6 @@ github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTV
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
@ -698,7 +695,6 @@ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/improbable-eng/thanos v0.3.2/go.mod h1:GZewVGILKuJVPNRn7L4Zw+7X96qzFOwj63b22xYGXBE=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=

View File

@ -30,7 +30,13 @@ func (r *mutationResolver) CreateChaosExperiment(ctx context.Context, request mo
return nil, err
}
uiResponse, err := r.chaosExperimentHandler.CreateChaosExperiment(ctx, &request, projectID, ctx.Value("username").(string))
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return nil, err
}
uiResponse, err := r.chaosExperimentHandler.CreateChaosExperiment(ctx, &request, projectID, username)
if err != nil {
return nil, errors.New("could not create experiment, error: " + err.Error())
}
@ -83,7 +89,13 @@ func (r *mutationResolver) SaveChaosExperiment(ctx context.Context, request mode
var uiResponse string
uiResponse, err = r.chaosExperimentHandler.SaveChaosExperiment(ctx, request, projectID, ctx.Value(authorization.AuthKey).(string))
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return "", err
}
uiResponse, err = r.chaosExperimentHandler.SaveChaosExperiment(ctx, request, projectID, username)
if err != nil {
logrus.WithFields(logFields).Error(err)
return "", err
@ -107,7 +119,13 @@ func (r *mutationResolver) UpdateChaosExperiment(ctx context.Context, request mo
return nil, err
}
uiResponse, err := r.chaosExperimentHandler.UpdateChaosExperiment(ctx, request, projectID, data_store.Store, ctx.Value(authorization.AuthKey).(string))
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return nil, err
}
uiResponse, err := r.chaosExperimentHandler.UpdateChaosExperiment(ctx, request, projectID, data_store.Store, username)
if err != nil {
logrus.WithFields(logFields).Error(err)
return nil, err
@ -132,7 +150,13 @@ func (r *mutationResolver) DeleteChaosExperiment(ctx context.Context, experiment
return false, err
}
uiResponse, err := r.chaosExperimentHandler.DeleteChaosExperiment(ctx, projectID, experimentID, experimentRunID, data_store.Store, ctx.Value(authorization.AuthKey).(string))
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return false, err
}
uiResponse, err := r.chaosExperimentHandler.DeleteChaosExperiment(ctx, projectID, experimentID, experimentRunID, data_store.Store, username)
if err != nil {
logrus.WithFields(logFields).Error(err)
return false, err
@ -156,7 +180,13 @@ func (r *mutationResolver) UpdateCronExperimentState(ctx context.Context, experi
return false, err
}
uiResponse, err := r.chaosExperimentHandler.UpdateCronExperimentState(ctx, experimentID, disable, projectID, data_store.Store, ctx.Value(authorization.AuthKey).(string))
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return false, err
}
uiResponse, err := r.chaosExperimentHandler.UpdateCronExperimentState(ctx, experimentID, disable, projectID, data_store.Store, username)
if err != nil {
logrus.WithFields(logFields).Error(err)
return false, err

View File

@ -7,6 +7,9 @@ package graph
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"time"
"github.com/google/uuid"
@ -80,8 +83,22 @@ func (r *mutationResolver) GetManifestWithInfraID(ctx context.Context, projectID
"chaosInfraId": infraID,
}
reqHeader, ok := ctx.Value("request-header").(http.Header)
if !ok {
return "", fmt.Errorf("unable to parse request header")
}
referrer := reqHeader.Get("Referer")
if referrer == "" {
return "", fmt.Errorf("unable to parse referer header")
}
referrerURL, err := url.Parse(referrer)
if err != nil {
return "", err
}
logrus.WithFields(logFields).Info("request received to get chaos infrastructure installation manifest")
manifest, err := r.chaosInfrastructureService.GetManifestWithInfraID(infraID, accessKey)
manifest, err := r.chaosInfrastructureService.GetManifestWithInfraID(fmt.Sprintf("%s://%s", referrerURL.Scheme, referrerURL.Host), infraID, accessKey)
if err != nil {
return "", err
}
@ -165,8 +182,24 @@ func (r *queryResolver) GetInfraManifest(ctx context.Context, infraID string, up
logFields := logrus.Fields{
"projectId": projectID,
}
reqHeader, ok := ctx.Value("request-header").(http.Header)
if !ok {
return "", fmt.Errorf("unable to parse request header")
}
referrer := reqHeader.Get("Referer")
if referrer == "" {
return "", fmt.Errorf("unable to parse referer header")
}
referrerURL, err := url.Parse(referrer)
if err != nil {
return "", err
}
logrus.WithFields(logFields).Info("request received to get chaos infrastructure manifest")
err := authorization.ValidateRole(ctx, projectID,
err = authorization.ValidateRole(ctx, projectID,
authorization.MutationRbacRules[authorization.GetManifest],
model.InvitationAccepted.String())
if err != nil {
@ -178,7 +211,7 @@ func (r *queryResolver) GetInfraManifest(ctx context.Context, infraID string, up
return "", err
}
gcaResponse, err := chaos_infrastructure.GetK8sInfraYaml(getInfra)
gcaResponse, err := chaos_infrastructure.GetK8sInfraYaml(referrerURL.Host, getInfra)
if err != nil {
logrus.WithFields(logFields).Error(err)
return "", err

View File

@ -14,6 +14,7 @@ import (
// AddChaosHub is the resolver for the addChaosHub field.
func (r *mutationResolver) AddChaosHub(ctx context.Context, projectID string, request model.CreateChaosHubRequest) (*model.ChaosHub, error) {
if err := authorization.ValidateRole(ctx, projectID,
authorization.MutationRbacRules[authorization.AddChaosHub],
model.InvitationAccepted.String()); err != nil {

View File

@ -25,7 +25,13 @@ func (r *mutationResolver) CreateEnvironment(ctx context.Context, projectID stri
return nil, err
}
return r.environmentService.CreateEnvironment(ctx, projectID, request, ctx.Value(authorization.AuthKey).(string))
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return nil, err
}
return r.environmentService.CreateEnvironment(ctx, projectID, request, username)
}
// UpdateEnvironment is the resolver for the updateEnvironment field.
@ -42,7 +48,13 @@ func (r *mutationResolver) UpdateEnvironment(ctx context.Context, projectID stri
return "", err
}
return r.environmentService.UpdateEnvironment(ctx, projectID, request, ctx.Value(authorization.AuthKey).(string))
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return "", err
}
return r.environmentService.UpdateEnvironment(ctx, projectID, request, username)
}
// DeleteEnvironment is the resolver for the deleteEnvironment field.
@ -59,7 +71,13 @@ func (r *mutationResolver) DeleteEnvironment(ctx context.Context, projectID stri
return "", err
}
return r.environmentService.DeleteEnvironment(ctx, projectID, environmentID, ctx.Value(authorization.AuthKey).(string))
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return "", err
}
return r.environmentService.DeleteEnvironment(ctx, projectID, environmentID, username)
}
// GetEnvironment is the resolver for the getEnvironment field.

View File

@ -94,11 +94,10 @@ func NewConfig(mongodbOperator mongodb.MongoOperator) generated.Config {
if err != nil {
return nil, err
}
newCtx := context.WithValue(ctx, authorization.UserClaim, user)
newCtx = context.WithValue(ctx, "username", user["username"])
return next(newCtx)
}
return config
}

View File

@ -34,8 +34,10 @@ func Middleware(handler http.Handler, mongoClient *mongo.Client) gin.HandlerFunc
c.Writer.Write([]byte("Error verifying JWT token: Token is revoked"))
return
}
ctx := context.WithValue(c.Request.Context(), AuthKey, jwt)
c.Request = c.Request.WithContext(ctx)
ctx1 := context.WithValue(ctx, "request-header", c.Request.Header)
c.Request = c.Request.WithContext(ctx1)
handler.ServeHTTP(c.Writer, c.Request)
}
}

View File

@ -11,11 +11,12 @@ import (
"strings"
"time"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/authorization"
probeUtils "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/probe/utils"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/utils"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/authorization"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_infrastructure"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/gitops"
"go.mongodb.org/mongo-driver/mongo/options"
@ -793,6 +794,12 @@ func (c *ChaosExperimentRunHandler) RunChaosWorkFlow(ctx context.Context, projec
txnOpts = options.Transaction().SetWriteConcern(wc).SetReadConcern(rc)
)
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return nil, err
}
session, err := mongodb.MgoClient.StartSession()
if err != nil {
logrus.Errorf("failed to start mongo session %v", err)
@ -815,11 +822,11 @@ func (c *ChaosExperimentRunHandler) RunChaosWorkFlow(ctx context.Context, projec
IsRemoved: false,
CreatedAt: currentTime,
CreatedBy: mongodb.UserDetailResponse{
Username: ctx.Value(authorization.AuthKey).(string),
Username: username,
},
UpdatedAt: currentTime,
UpdatedBy: mongodb.UserDetailResponse{
Username: ctx.Value(authorization.AuthKey).(string),
Username: username,
},
},
},
@ -861,11 +868,11 @@ func (c *ChaosExperimentRunHandler) RunChaosWorkFlow(ctx context.Context, projec
IsRemoved: false,
CreatedAt: currentTime,
CreatedBy: mongodb.UserDetailResponse{
Username: ctx.Value(authorization.AuthKey).(string),
Username: username,
},
UpdatedAt: currentTime,
UpdatedBy: mongodb.UserDetailResponse{
Username: ctx.Value(authorization.AuthKey).(string),
Username: username,
},
},
NotifyID: &notifyID,
@ -908,7 +915,7 @@ func (c *ChaosExperimentRunHandler) RunChaosWorkFlow(ctx context.Context, projec
if err != nil {
return nil, fmt.Errorf("failed to generate probes in workflow manifest, err: %v", err)
}
username := ctx.Value(authorization.AuthKey).(string)
manifest, err := yaml.Marshal(workflowManifest)
if err != nil {
return nil, err
@ -991,7 +998,11 @@ func (c *ChaosExperimentRunHandler) RunCronExperiment(ctx context.Context, proje
return err
}
username := ctx.Value(authorization.AuthKey).(string)
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return err
}
if r != nil {
chaos_infrastructure.SendExperimentToSubscriber(projectID, &model.ChaosExperimentRequest{

View File

@ -9,7 +9,6 @@ import (
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph/model"
store "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/data-store"
dbChaosInfra "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_infrastructure"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/k8s"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/utils"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -20,42 +19,25 @@ type SubscriberConfigurations struct {
TLSCert string
}
func GetEndpoint(agentType string) (string, error) {
func GetEndpoint(host string) (string, error) {
// returns endpoint from env, if provided by user
if utils.Config.ChaosCenterUiEndpoint != "" {
return utils.Config.ChaosCenterUiEndpoint + "/ws/query", nil
return utils.Config.ChaosCenterUiEndpoint + "/api/query", nil
}
// generating endpoint based on ChaosCenter Scope & InfraType (Self or External)
agentEndpoint, err := k8s.GetServerEndpoint(utils.Config.ChaosCenterScope, agentType)
if agentEndpoint == "" || err != nil {
return "", fmt.Errorf("failed to retrieve the server endpoint %v", err)
}
return agentEndpoint, err
return host + "/api/query", nil
}
func GetK8sInfraYaml(infra dbChaosInfra.ChaosInfra) ([]byte, error) {
func GetK8sInfraYaml(host string, infra dbChaosInfra.ChaosInfra) ([]byte, error) {
var config SubscriberConfigurations
endpoint, err := GetEndpoint(infra.InfraType)
endpoint, err := GetEndpoint(host)
if err != nil {
return nil, err
}
config.ServerEndpoint = endpoint
var scope = utils.Config.ChaosCenterScope
if scope == ClusterScope && utils.Config.TlsSecretName != "" {
config.TLSCert, err = k8s.GetTLSCert(utils.Config.TlsSecretName)
if err != nil {
return nil, err
}
}
if scope == NamespaceScope {
config.TLSCert = utils.Config.TlsCertB64
}
config.TLSCert = utils.Config.TlsCertB64
var respData []byte
if infra.InfraScope == ClusterScope {
@ -221,13 +203,6 @@ func ManifestParser(infra dbChaosInfra.ChaosInfra, rootPath string, config *Subs
// SendRequestToSubscriber sends events from the graphQL server to the subscribers listening for the requests
func SendRequestToSubscriber(subscriberRequest SubscriberRequests, r store.StateData) {
if utils.Config.ChaosCenterScope == string(model.InfraScopeCluster) {
/*
namespace = Obtain from WorkflowManifest or
from frontend as a separate workflowNamespace field under ChaosWorkFlowRequest model
for CreateChaosWorkflow mutation to be passed to this function.
*/
}
newAction := &model.InfraActionResponse{
ProjectID: subscriberRequest.ProjectID,
Action: &model.ActionPayload{

View File

@ -48,13 +48,8 @@ func (s *InfraService) SendInfraEvent(eventType, eventName, description string,
s.Called(eventType, eventName, description, infra, r)
}
func (s *InfraService) GetManifest(token string) ([]byte, int, error) {
args := s.Called(token)
return args.Get(0).([]byte), args.Int(1), args.Error(2)
}
func (s *InfraService) GetManifestWithInfraID(infraID string, accessKey string) ([]byte, error) {
args := s.Called(infraID, accessKey)
func (s *InfraService) GetManifestWithInfraID(host string, infraID string, accessKey string) ([]byte, error) {
args := s.Called(host, infraID, accessKey)
return args.Get(0).([]byte), args.Error(1)
}

View File

@ -7,6 +7,7 @@ import (
"log"
"math"
"net/http"
"net/url"
"strconv"
"strings"
"time"
@ -15,7 +16,6 @@ import (
store "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/data-store"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/config"
dbEnvironments "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/environments"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/k8s"
"github.com/sirupsen/logrus"
"github.com/google/uuid"
@ -40,13 +40,11 @@ type Service interface {
RegisterInfra(c context.Context, projectID string, input model.RegisterInfraRequest) (*model.RegisterInfraResponse, error)
ConfirmInfraRegistration(request model.InfraIdentity, r store.StateData) (*model.ConfirmInfraRegistrationResponse, error)
VerifyInfra(identity model.InfraIdentity) (*dbChaosInfra.ChaosInfra, error)
//NewClusterEvent(request model.NewClusterEventRequest, r store.StateData) (string, error)
DeleteInfra(ctx context.Context, projectID string, infraId string, r store.StateData) (string, error)
ListInfras(projectID string, request *model.ListInfraRequest) (*model.ListInfraResponse, error)
GetInfraDetails(ctx context.Context, infraID string, projectID string) (*model.Infra, error)
SendInfraEvent(eventType, eventName, description string, infra model.Infra, r store.StateData)
GetManifest(token string) ([]byte, int, error)
GetManifestWithInfraID(infraID string, accessKey string) ([]byte, error)
GetManifestWithInfraID(host string, infraID string, accessKey string) ([]byte, error)
GetInfra(ctx context.Context, projectID string, infraID string) (*model.Infra, error)
GetInfraStats(ctx context.Context, projectID string) (*model.GetInfraStatsResponse, error)
GetVersionDetails() (*model.InfraVersionDetails, error)
@ -99,8 +97,12 @@ func (in *infraService) RegisterInfra(c context.Context, projectID string, input
infraID = uuid.New().String()
currentTime = time.Now()
)
tkn := c.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return nil, err
}
token, err := InfraCreateJWT(infraID)
if err != nil {
@ -187,7 +189,22 @@ func (in *infraService) RegisterInfra(c context.Context, projectID string, input
return nil, err
}
manifestYaml, err := GetK8sInfraYaml(newInfra)
reqHeader, ok := c.Value("request-header").(http.Header)
if !ok {
return nil, fmt.Errorf("unable to parse request header")
}
referrer := reqHeader.Get("Referer")
if referrer == "" {
return nil, fmt.Errorf("unable to parse referer header")
}
referrerURL, err := url.Parse(referrer)
if err != nil {
return nil, err
}
manifestYaml, err := GetK8sInfraYaml(fmt.Sprintf("%s://%s", referrerURL.Scheme, referrerURL.Host), newInfra)
if err != nil {
return nil, err
}
@ -204,6 +221,10 @@ func (in *infraService) RegisterInfra(c context.Context, projectID string, input
func (in *infraService) DeleteInfra(ctx context.Context, projectID string, infraId string, r store.StateData) (string, error) {
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return "", err
}
query := bson.D{
{"infra_id", infraId},
{"project_id", projectID},
@ -285,6 +306,9 @@ func (in *infraService) GetInfra(ctx context.Context, projectID string, infraID
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return nil, err
}
var pipeline mongo.Pipeline
@ -1061,57 +1085,8 @@ func (in *infraService) VerifyInfra(identity model.InfraIdentity) (*dbChaosInfra
return &infra, nil
}
func (in *infraService) GetManifest(token string) ([]byte, int, error) {
infraID, err := InfraValidateJWT(token)
if err != nil {
return nil, http.StatusNotFound, err
}
reqinfra, err := in.infraOperator.GetInfra(infraID)
if err != nil {
return nil, http.StatusInternalServerError, err
}
var configurations SubscriberConfigurations
configurations.ServerEndpoint, err = GetEndpoint(reqinfra.InfraType)
if err != nil {
return nil, http.StatusInternalServerError, err
}
var scope = utils.Config.ChaosCenterScope
if scope == ClusterScope && utils.Config.TlsSecretName != "" {
configurations.TLSCert, err = k8s.GetTLSCert(utils.Config.TlsSecretName)
if err != nil {
return nil, http.StatusInternalServerError, err
}
}
if scope == NamespaceScope {
configurations.TLSCert = utils.Config.TlsCertB64
}
if !reqinfra.IsRegistered {
var respData []byte
if reqinfra.InfraScope == "cluster" {
respData, err = ManifestParser(reqinfra, "manifests/cluster", &configurations)
} else if reqinfra.InfraScope == "namespace" {
respData, err = ManifestParser(reqinfra, "manifests/namespace", &configurations)
} else {
logrus.Error("INFRA_SCOPE env is empty!")
}
if err != nil {
return nil, http.StatusInternalServerError, err
}
return respData, http.StatusOK, nil
} else {
return []byte("infra is already registered"), http.StatusConflict, nil
}
}
// GetManifestWithInfraID returns manifest for a given infra
func (in *infraService) GetManifestWithInfraID(infraID string, accessKey string) ([]byte, error) {
func (in *infraService) GetManifestWithInfraID(host string, infraID string, accessKey string) ([]byte, error) {
reqinfra, err := in.infraOperator.GetInfra(infraID)
if err != nil {
return nil, fmt.Errorf("failed to retrieve the infra %v", err)
@ -1123,22 +1098,12 @@ func (in *infraService) GetManifestWithInfraID(infraID string, accessKey string)
}
var configurations SubscriberConfigurations
configurations.ServerEndpoint, err = GetEndpoint(reqinfra.InfraType)
configurations.ServerEndpoint, err = GetEndpoint(host)
if err != nil {
return nil, fmt.Errorf("failed to retrieve the server endpoint %v", err)
}
var scope = utils.Config.ChaosCenterScope
if scope == ClusterScope && utils.Config.TlsSecretName != "" {
configurations.TLSCert, err = k8s.GetTLSCert(utils.Config.TlsSecretName)
if err != nil {
return nil, fmt.Errorf("failed to retrieve the tls cert %v", err)
}
}
if scope == NamespaceScope {
configurations.TLSCert = utils.Config.TlsCertB64
}
configurations.TLSCert = utils.Config.TlsCertB64
var respData []byte
if reqinfra.InfraScope == ClusterScope {

View File

@ -320,7 +320,6 @@ func (c *chaosHubService) UpdateChaosHub(ctx context.Context, chaosHub model.Upd
SSHPrivateKey: chaosHub.SSHPrivateKey,
IsDefault: false,
}
fmt.Println(chaosHub.SSHPrivateKey)
prevChaosHub, err := c.chaosHubOperator.GetHubByID(ctx, chaosHub.ID, projectID)
if err != nil {
return nil, err
@ -405,6 +404,9 @@ func (c *chaosHubService) UpdateChaosHub(ctx context.Context, chaosHub model.Upd
func (c *chaosHubService) DeleteChaosHub(ctx context.Context, hubID string, projectID string) (bool, error) {
tkn := ctx.Value(authorization.AuthKey).(string)
username, err := authorization.GetUsername(tkn)
if err != nil {
return false, err
}
chaosHub, err := c.chaosHubOperator.GetHubByID(ctx, hubID, projectID)
if err != nil {
log.Error(err)

View File

@ -2,7 +2,6 @@ package probe
import (
"encoding/json"
"fmt"
"strconv"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph/model"
@ -230,7 +229,6 @@ func (probe *Probe) GetOutputProbe() *model.Probe {
if probe.KubernetesCMDProperties.Source != nil {
jsonSource, _ := json.Marshal(probe.KubernetesCMDProperties.Source)
source := string(jsonSource)
fmt.Println("string source", source)
probeResponse.KubernetesCMDProperties.Source = &source
}

View File

@ -1,6 +1,9 @@
package handlers
import (
"fmt"
"net/http"
"net/url"
"strings"
"github.com/gin-gonic/gin"
@ -30,7 +33,29 @@ func FileHandler(mongodbOperator mongodb.MongoOperator) gin.HandlerFunc {
utils.WriteHeaders(&c.Writer, 500)
c.Writer.Write([]byte(err.Error()))
}
response, err := chaos_infrastructure.GetK8sInfraYaml(infra)
reqHeader, ok := c.Value("request-header").(http.Header)
if !ok {
logrus.Error("unable to parse referer header")
utils.WriteHeaders(&c.Writer, 500)
c.Writer.Write([]byte("unable to parse referer header"))
}
referrer := reqHeader.Get("Referer")
if referrer == "" {
logrus.Error("unable to parse referer header")
utils.WriteHeaders(&c.Writer, 500)
c.Writer.Write([]byte("unable to parse referer header"))
}
referrerURL, err := url.Parse(referrer)
if err != nil {
logrus.Error(err)
utils.WriteHeaders(&c.Writer, 500)
c.Writer.Write([]byte(err.Error()))
}
response, err := chaos_infrastructure.GetK8sInfraYaml(fmt.Sprintf("%s://%s", referrerURL.Scheme, referrerURL.Host), infra)
if err != nil {
logrus.Error(err)
utils.WriteHeaders(&c.Writer, 500)

View File

@ -1,52 +0,0 @@
package k8s
import (
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/utils"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
func GetKubeConfig() (*rest.Config, error) {
kubeConfig := utils.Config.KubeConfigFilePath
// Use in-cluster config if kubeconfig path is not specified
if kubeConfig == "" {
return rest.InClusterConfig()
}
return clientcmd.BuildConfigFromFlags("", kubeConfig)
}
func GetGenericK8sClient() (*kubernetes.Clientset, error) {
config, err := GetKubeConfig()
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(config)
}
// GetDynamicAndDiscoveryClient This function returns dynamic client and discovery client
func GetDynamicAndDiscoveryClient() (discovery.DiscoveryInterface, dynamic.Interface, error) {
// returns a config object which uses the service account kubernetes gives to pods
config, err := GetKubeConfig()
if err != nil {
return nil, nil, err
}
// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config
discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
return nil, nil, err
}
// NewForConfig creates a new dynamic client or returns an error.
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
return nil, nil, err
}
return discoveryClient, dynamicClient, nil
}

View File

@ -1,267 +0,0 @@
package k8s
import (
"context"
"encoding/base64"
"errors"
"fmt"
"log"
"strconv"
"strings"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/utils"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/serializer/yaml"
memory "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/restmapper"
)
var (
decUnstructured = yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
dr dynamic.ResourceInterface
)
// InfraResource This function handles cluster operations
func InfraResource(manifest string, namespace string) (*unstructured.Unstructured, error) {
// Getting dynamic and discovery client
ctx := context.TODO()
discoveryClient, dynamicClient, err := GetDynamicAndDiscoveryClient()
if err != nil {
return nil, err
}
// Create a mapper using dynamic client
mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(discoveryClient))
// Decode YAML manifest into unstructured.Unstructured
obj := &unstructured.Unstructured{}
_, gvk, err := decUnstructured.Decode([]byte(manifest), nil, obj)
if err != nil {
return nil, err
}
// Find GVR
mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, err
}
// Obtain REST interface for the GVR
if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
// namespaced resources should specify the namespace
dr = dynamicClient.Resource(mapping.Resource).Namespace(namespace)
} else {
// for cluster-wide resources
dr = dynamicClient.Resource(mapping.Resource)
}
response, err := dr.Create(ctx, obj, metaV1.CreateOptions{})
if k8serrors.IsAlreadyExists(err) {
// This doesnt ever happen even if it does already exist
log.Print("Already exists")
return nil, nil
}
if err != nil {
return nil, err
}
log.Println("Resource successfully created")
return response, nil
}
/*
GetServerEndpoint returns the endpoint of the server by which external agents can communicate.

The order of generating the endpoint is based on different network type:
- Ingress
- LoadBalancer > NodePort > ClusterIP

portalScope is the installation scope ("cluster" or "namespace"); agentType is
"internal" for the self-agent (which always gets the in-cluster service FQDN)
and anything else for external agents.
*/
func GetServerEndpoint(portalScope, agentType string) (string, error) {
	var (
		NodePort          int32
		Port              int32
		InternalIP        string
		IngressPath       string
		IPAddress         string
		Scheme            string
		FinalUrl          string
		ServerServiceName = utils.Config.ServerServiceName
		NodeName          = utils.Config.NodeName
		LitmusPortalNS    = utils.Config.LitmusPortalNamespace
		Ingress           = utils.Config.Ingress
		IngressName       = utils.Config.IngressName
	)

	ctx := context.TODO()
	clientset, err := GetGenericK8sClient()
	if err != nil {
		return "", err
	}

	svc, err := clientset.CoreV1().Services(LitmusPortalNS).Get(ctx, ServerServiceName, metaV1.GetOptions{})
	if err != nil {
		return "", err
	}

	// Pick up both the in-cluster port and the node port of the GraphQL server.
	for _, port := range svc.Spec.Ports {
		if port.Name == "graphql-server" {
			NodePort = port.NodePort
			Port = port.Port
		}
	}

	// If current agent is self-agent, then servicename FQDN will be used irrespective of service type.
	if agentType == "internal" {
		FinalUrl = "http://" + ServerServiceName + "." + LitmusPortalNS + ":" + strconv.Itoa(int(Port)) + "/query"
		return FinalUrl, nil
	}

	// Ingress endpoint will be generated for external agents only.
	if Ingress == "true" {
		getIng, err := clientset.NetworkingV1().Ingresses(LitmusPortalNS).Get(ctx, IngressName, metaV1.GetOptions{})
		if err != nil {
			return "", err
		}

		/*
			Priorities of retrieving Ingress endpoint
			1. hostname
			2. IPAddress
		*/
		if len(getIng.Spec.Rules) > 0 && getIng.Spec.Rules[0].Host != "" {
			IPAddress = getIng.Spec.Rules[0].Host
		} else if len(getIng.Status.LoadBalancer.Ingress) > 0 && getIng.Status.LoadBalancer.Ingress[0].IP != "" {
			IPAddress = getIng.Status.LoadBalancer.Ingress[0].IP
		} else if len(getIng.Status.LoadBalancer.Ingress) > 0 && getIng.Status.LoadBalancer.Ingress[0].Hostname != "" {
			IPAddress = getIng.Status.LoadBalancer.Ingress[0].Hostname
		} else {
			return "", errors.New("IP Address or HostName not generated")
		}

		if IPAddress == "" {
			return "", errors.New("IP Address or Hostname is not available in the ingress of " + IngressName)
		}

		// Derive the ingress path that routes to the server service and make
		// sure it ends in the "query" segment expected by the GraphQL API.
		for _, rule := range getIng.Spec.Rules {
			// rule.HTTP is optional in networking.k8s.io/v1; skip rules
			// without an HTTP section instead of panicking on a nil deref.
			if rule.HTTP == nil {
				continue
			}
			for _, path := range rule.HTTP.Paths {
				// path.Backend.Service is also optional (a path may use a
				// Resource backend instead), so guard before dereferencing.
				if path.Backend.Service != nil && path.Backend.Service.Name == ServerServiceName {
					isSlash := func(c rune) bool {
						return c == '/'
					}
					pathSegments := strings.FieldsFunc(path.Path, isSlash)
					if len(pathSegments) > 0 {
						if pathSegments[len(pathSegments)-1] == "(.*)" {
							// Replace a regex catch-all segment with the concrete endpoint.
							pathSegments[len(pathSegments)-1] = "query"
						} else {
							pathSegments = append(pathSegments, "query")
						}
					} else {
						pathSegments = append(pathSegments, "query")
					}
					IngressPath = strings.Join(pathSegments, "/")
				}
			}
		}

		if len(getIng.Spec.TLS) > 0 {
			Scheme = "https"
		} else {
			Scheme = "http"
		}
		FinalUrl = Scheme + "://" + wrapIPV6(IPAddress) + "/" + IngressPath
	} else if Ingress == "false" || Ingress == "" {
		exp := strings.ToLower(string(svc.Spec.Type))
		switch exp {
		case "loadbalancer":
			if len(svc.Status.LoadBalancer.Ingress) > 0 {
				if svc.Status.LoadBalancer.Ingress[0].Hostname != "" {
					IPAddress = svc.Status.LoadBalancer.Ingress[0].Hostname
				} else if svc.Status.LoadBalancer.Ingress[0].IP != "" {
					IPAddress = svc.Status.LoadBalancer.Ingress[0].IP
				} else {
					return "", errors.New("LoadBalancerIP/Hostname not present for loadbalancer service type")
				}
			} else {
				return "", errors.New("LoadBalancerIP/Hostname not present for loadbalancer service type")
			}
			FinalUrl = "http://" + wrapIPV6(IPAddress) + ":" + strconv.Itoa(int(Port)) + "/query"
		case "nodeport":
			// Cannot fetch Node Ip Address when ChaosCenter is installed in Namespaced scope
			if portalScope == "namespace" {
				return "", errors.New("Cannot get NodeIP in namespaced mode")
			}

			nodeIP, err := clientset.CoreV1().Nodes().Get(ctx, NodeName, metaV1.GetOptions{})
			if err != nil {
				return "", err
			}

			for _, addr := range nodeIP.Status.Addresses {
				if strings.ToLower(string(addr.Type)) == "externalip" && addr.Address != "" {
					IPAddress = addr.Address
				} else if strings.ToLower(string(addr.Type)) == "internalip" && addr.Address != "" {
					InternalIP = addr.Address
				}
			}

			// Whichever one of External IP and Internal IP is present, that will be selected for Server Endpoint
			if IPAddress != "" {
				FinalUrl = "http://" + wrapIPV6(IPAddress) + ":" + strconv.Itoa(int(NodePort)) + "/query"
			} else if InternalIP != "" {
				FinalUrl = "http://" + wrapIPV6(InternalIP) + ":" + strconv.Itoa(int(NodePort)) + "/query"
			} else {
				return "", errors.New("Both ExternalIP and InternalIP aren't present for NodePort service type")
			}
		case "clusterip":
			log.Print("External agents can't be connected to the server if the service type is set to ClusterIP\n")
			if svc.Spec.ClusterIP == "" {
				return "", errors.New("ClusterIP is not present")
			}
			FinalUrl = "http://" + wrapIPV6(svc.Spec.ClusterIP) + ":" + strconv.Itoa(int(Port)) + "/query"
		default:
			return "", errors.New("No service type found")
		}
	} else {
		return "", errors.New("Ingress value is not correct")
	}

	log.Print("Server endpoint: ", FinalUrl)
	return FinalUrl, nil
}
// wrapIPV6 encloses addr in square brackets when it looks like an IPv6
// literal (i.e. it contains at least one colon) so it can be embedded in a
// URL; IPv4 addresses and hostnames are returned unchanged.
func wrapIPV6(addr string) string {
	if !strings.Contains(addr, ":") {
		return addr
	}
	return "[" + addr + "]"
}
// GetTLSCert fetches the named TLS secret from the ChaosCenter namespace and
// returns its "tls.crt" entry as a base64-encoded string. An error is
// returned when the secret cannot be fetched or lacks a tls.crt key.
func GetTLSCert(secretName string) (string, error) {
	clientset, err := GetGenericK8sClient()
	if err != nil {
		return "", err
	}

	secret, err := clientset.CoreV1().Secrets(utils.Config.LitmusPortalNamespace).Get(context.Background(), secretName, metaV1.GetOptions{})
	if err != nil {
		return "", err
	}

	cert, ok := secret.Data["tls.crt"]
	if !ok {
		return "", fmt.Errorf("could not find tls.crt value in provided TLS Secret %v", secretName)
	}
	return base64.StdEncoding.EncodeToString(cert), nil
}

View File

@ -8,10 +8,8 @@ type Configuration struct {
Version string `required:"true"`
InfraDeployments string `required:"true" split_words:"true"`
DbServer string `required:"true" split_words:"true"`
LitmusPortalNamespace string `required:"true" split_words:"true"`
DbUser string `required:"true" split_words:"true"`
DbPassword string `required:"true" split_words:"true"`
ChaosCenterScope string `required:"true" split_words:"true"`
SubscriberImage string `required:"true" split_words:"true"`
EventTrackerImage string `required:"true" split_words:"true"`
ArgoWorkflowControllerImage string `required:"true" split_words:"true"`
@ -21,20 +19,13 @@ type Configuration struct {
LitmusChaosExporterImage string `required:"true" split_words:"true"`
ContainerRuntimeExecutor string `required:"true" split_words:"true"`
WorkflowHelperImageVersion string `required:"true" split_words:"true"`
ServerServiceName string `split_words:"true"`
NodeName string `split_words:"true"`
Ingress string `split_words:"true"`
IngressName string `split_words:"true"`
ChaosCenterUiEndpoint string `split_words:"true" default:"localhost:8080"`
ChaosCenterUiEndpoint string `split_words:"true" default:"https://localhost:8080"`
TlsCertB64 string `split_words:"true"`
TlsSecretName string `split_words:"true"`
LitmusAuthGrpcEndpoint string `split_words:"true" default:"localhost"`
LitmusAuthGrpcPort string `split_words:"true" default:":3030"`
KubeConfigFilePath string `split_words:"true"`
RemoteHubMaxSize string `split_words:"true"`
SkipSslVerify string `split_words:"true"`
SelfInfraNodeSelector string `split_words:"true"`
SelfInfraTolerations string `split_words:"true"`
HttpPort string `split_words:"true" default:"8080"`
RpcPort string `split_words:"true" default:"8000"`
InfraCompatibleVersions string `required:"true" split_words:"true"`

View File

@ -1,414 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: litmus-server-cr
rules:
- apiGroups: [networking.k8s.io, extensions]
resources: [ingresses]
verbs: [get]
- apiGroups: [""]
resources: [services, nodes, pods/log] # Will have to remove this for namespaced scope
verbs: [get, watch]
- apiGroups: [""] # To get TLS Cert from secrets incase of cluster scope
resources: [secrets]
verbs: [get]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: litmus-server-crb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: litmus-server-cr
subjects:
- kind: ServiceAccount
name: litmus-server-account
namespace: litmus
## Control plane manifests
---
apiVersion: v1
kind: Namespace
metadata:
name: litmus
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: litmus-server-account
namespace: litmus
---
apiVersion: v1
kind: Secret
metadata:
name: litmus-portal-admin-secret
namespace: litmus
stringData:
JWT_SECRET: "litmus-portal@123"
DB_USER: "root"
DB_PASSWORD: "1234"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: litmus-portal-admin-config
namespace: litmus
data:
DB_SERVER: mongodb://my-release-mongodb-0.my-release-mongodb-headless:27017,my-release-mongodb-1.my-release-mongodb-headless:27017,my-release-mongodb-2.my-release-mongodb-headless:27017/admin
VERSION: "ci"
SKIP_SSL_VERIFY: "false"
# Configurations if you are using dex for OAuth
DEX_ENABLED: "false"
OIDC_ISSUER: "http://<Your Domain>:32000"
DEX_OAUTH_CALLBACK_URL: "http://<litmus-portal frontend exposed URL>:8080/auth/dex/callback"
DEX_OAUTH_CLIENT_ID: "LitmusPortalAuthBackend"
DEX_OAUTH_CLIENT_SECRET: "ZXhhbXBsZS1hcHAtc2VjcmV0"
OAuthJwtSecret: "litmus-oauth@123"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: litmusportal-frontend-nginx-configuration
namespace: litmus
data:
nginx.conf: |
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
client_body_temp_path /tmp/client_temp;
proxy_temp_path /tmp/proxy_temp_path;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
server_tokens off;
include /etc/nginx/mime.types;
gzip on;
gzip_disable "msie6";
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
server {
listen 8185 default_server;
root /opt/chaos;
location /health {
return 200;
}
location / {
proxy_http_version 1.1;
add_header Cache-Control "no-cache";
try_files $uri /index.html;
autoindex on;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
location /auth/ {
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass "http://litmusportal-auth-server-service:9003/";
}
location /api/ {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass "http://litmusportal-server-service:9002/";
}
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-frontend
namespace: litmus
labels:
component: litmusportal-frontend
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-frontend
template:
metadata:
labels:
component: litmusportal-frontend
spec:
automountServiceAccountToken: false
containers:
- name: litmusportal-frontend
image: litmuschaos/litmusportal-frontend:ci
imagePullPolicy: Always
# securityContext:
# runAsUser: 2000
# allowPrivilegeEscalation: false
# runAsNonRoot: true
ports:
- containerPort: 8185
resources:
requests:
memory: "150Mi"
cpu: "125m"
ephemeral-storage: "500Mi"
limits:
memory: "512Mi"
cpu: "550m"
ephemeral-storage: "1Gi"
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
volumes:
- name: nginx-config
configMap:
name: litmusportal-frontend-nginx-configuration
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-frontend-service
namespace: litmus
spec:
type: NodePort
ports:
- name: http
port: 9091
targetPort: 8185
selector:
component: litmusportal-frontend
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-server
namespace: litmus
labels:
component: litmusportal-server
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-server
template:
metadata:
labels:
component: litmusportal-server
spec:
volumes:
- name: gitops-storage
emptyDir: {}
- name: hub-storage
emptyDir: {}
containers:
- name: graphql-server
image: litmuschaos/litmusportal-server:ci
volumeMounts:
- mountPath: /tmp/
name: gitops-storage
- mountPath: /tmp/version
name: hub-storage
securityContext:
runAsUser: 2000
allowPrivilegeEscalation: false
runAsNonRoot: true
readOnlyRootFilesystem: true
envFrom:
- configMapRef:
name: litmus-portal-admin-config
- secretRef:
name: litmus-portal-admin-secret
env:
# if self-signed certificates are used, pass the k8s tls secret name created in the portal namespace, to allow agents to use tls for communication
- name: TLS_SECRET_NAME
value: ""
- name: LITMUS_PORTAL_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CHAOS_CENTER_SCOPE
value: "cluster"
- name: ENABLE_GQL_INTROSPECTION
value: "false"
- name: SUBSCRIBER_IMAGE
value: "litmuschaos/litmusportal-subscriber:ci"
- name: EVENT_TRACKER_IMAGE
value: "litmuschaos/litmusportal-event-tracker:ci"
- name: ARGO_WORKFLOW_CONTROLLER_IMAGE
value: "litmuschaos/workflow-controller:v3.3.1"
- name: ARGO_WORKFLOW_EXECUTOR_IMAGE
value: "litmuschaos/argoexec:v3.3.1"
- name: LITMUS_CHAOS_OPERATOR_IMAGE
value: "litmuschaos/chaos-operator:ci"
- name: LITMUS_CHAOS_RUNNER_IMAGE
value: "litmuschaos/chaos-runner:ci"
- name: LITMUS_CHAOS_EXPORTER_IMAGE
value: "litmuschaos/chaos-exporter:ci"
- name: SERVER_SERVICE_NAME
value: "litmusportal-server-service"
- name: INFRA_DEPLOYMENTS
value: '["app=chaos-exporter", "name=chaos-operator", "app=workflow-controller", "app=event-tracker"]'
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CHAOS_CENTER_UI_ENDPOINT
value: ""
- name: INGRESS
value: "false"
- name: INGRESS_NAME
value: "litmus-ingress"
- name: CONTAINER_RUNTIME_EXECUTOR
value: "k8sapi"
- name: DEFAULT_HUB_BRANCH_NAME
value: "master"
- name: LITMUS_AUTH_GRPC_ENDPOINT
value: "litmusportal-auth-server-service"
- name: LITMUS_AUTH_GRPC_PORT
value: ":3030"
- name: WORKFLOW_HELPER_IMAGE_VERSION
value: "3.7.0"
- name: REMOTE_HUB_MAX_SIZE
value: "5000000"
- name: INFRA_COMPATIBLE_VERSIONS
value: '["ci"]'
- name: ALLOWED_ORIGINS
value: ^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-server-service(:[0-9]+|)?
ports:
- containerPort: 8080
- containerPort: 8000
imagePullPolicy: Always
resources:
requests:
memory: "250Mi"
cpu: "225m"
ephemeral-storage: "500Mi"
limits:
memory: "712Mi"
cpu: "550m"
ephemeral-storage: "1Gi"
serviceAccountName: litmus-server-account
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-server-service
namespace: litmus
spec:
type: NodePort
ports:
- name: graphql-server
port: 9002
targetPort: 8080
- name: graphql-rpc-server
port: 8000
targetPort: 8000
selector:
component: litmusportal-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-auth-server
namespace: litmus
labels:
component: litmusportal-auth-server
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-auth-server
template:
metadata:
labels:
component: litmusportal-auth-server
spec:
automountServiceAccountToken: false
containers:
- name: auth-server
image: litmuschaos/litmusportal-auth-server:ci
securityContext:
runAsUser: 2000
allowPrivilegeEscalation: false
runAsNonRoot: true
readOnlyRootFilesystem: true
envFrom:
- configMapRef:
name: litmus-portal-admin-config
- secretRef:
name: litmus-portal-admin-secret
env:
- name: STRICT_PASSWORD_POLICY
value: "false"
- name: ADMIN_USERNAME
value: "admin"
- name: ADMIN_PASSWORD
value: "litmus"
- name: LITMUS_GQL_GRPC_ENDPOINT
value: "litmusportal-server-service"
- name: LITMUS_GQL_GRPC_PORT
value: ":8000"
- name: ALLOWED_ORIGINS
value: ^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-auth-server-service(:[0-9]+|)?
resources:
requests:
memory: "250Mi"
cpu: "225m"
ephemeral-storage: "500Mi"
limits:
memory: "712Mi"
cpu: "550m"
ephemeral-storage: "1Gi"
ports:
- containerPort: 3000
- containerPort: 3030
imagePullPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-auth-server-service
namespace: litmus
spec:
type: NodePort
ports:
- name: auth-server
port: 9003
targetPort: 3000
- name: auth-rpc-server
port: 3030
targetPort: 3030
selector:
component: litmusportal-auth-server

View File

@ -1,33 +1,4 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: litmus-server-role
rules:
- apiGroups: [networking.k8s.io, extensions]
resources: [ingresses]
verbs: [get]
- apiGroups: [""]
resources: [services, pods/log]
verbs: [get, watch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: litmus-server-rb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: litmus-server-role
subjects:
- kind: ServiceAccount
name: litmus-server-account
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: litmus-server-account
---
apiVersion: v1
kind: Secret
metadata:
@ -45,6 +16,13 @@ data:
DB_SERVER: mongodb://my-release-mongodb-0.my-release-mongodb-headless:27017,my-release-mongodb-1.my-release-mongodb-headless:27017,my-release-mongodb-2.my-release-mongodb-headless:27017/admin
VERSION: "ci"
SKIP_SSL_VERIFY: "false"
# Configurations if you are using dex for OAuth
DEX_ENABLED: "false"
OIDC_ISSUER: "http://<Your Domain>:32000"
DEX_OAUTH_CALLBACK_URL: "http://<litmus-portal frontend exposed URL>:8080/auth/dex/callback"
DEX_OAUTH_CLIENT_ID: "LitmusPortalAuthBackend"
DEX_OAUTH_CLIENT_SECRET: "ZXhhbXBsZS1hcHAtc2VjcmV0"
OAuthJwtSecret: "litmus-oauth@123"
---
apiVersion: v1
kind: ConfigMap
@ -203,6 +181,7 @@ spec:
labels:
component: litmusportal-server
spec:
automountServiceAccountToken: false
volumes:
- name: gitops-storage
emptyDir: {}
@ -227,25 +206,13 @@ spec:
- secretRef:
name: litmus-portal-admin-secret
env:
- name: LITMUS_PORTAL_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: INFRA_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# if self-signed certificates are used, pass the base64 tls certificate, to allow agents to use tls for communication
- name: TLS_CERT_B64
value: ""
- name: CHAOS_CENTER_SCOPE
value: "namespace"
- name: ENABLE_GQL_INTROSPECTION
value: "false"
- name: INFRA_DEPLOYMENTS
value: '["app=chaos-exporter", "name=chaos-operator", "app=workflow-controller", "app=event-tracker"]'
- name: SERVER_SERVICE_NAME
value: "litmusportal-server-service"
- name: CHAOS_CENTER_UI_ENDPOINT
value: ""
- name: SUBSCRIBER_IMAGE
@ -271,17 +238,13 @@ spec:
- name: LITMUS_AUTH_GRPC_PORT
value: ":3030"
- name: WORKFLOW_HELPER_IMAGE_VERSION
value: "3.7.0"
value: "ci"
- name: REMOTE_HUB_MAX_SIZE
value: "5000000"
- name: INGRESS
value: "false"
- name: INGRESS_NAME
value: "litmus-ingress"
- name: INFRA_COMPATIBLE_VERSIONS
value: '["ci"]'
- name: ALLOWED_ORIGINS
value: ^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-server-service(:[0-9]+|)?
value: ".*"
ports:
- containerPort: 8080
- containerPort: 8000
@ -295,7 +258,6 @@ spec:
memory: "712Mi"
cpu: "550m"
ephemeral-storage: "1Gi"
serviceAccountName: litmus-server-account
---
apiVersion: v1
kind: Service
@ -355,7 +317,7 @@ spec:
- name: LITMUS_GQL_GRPC_PORT
value: ":8000"
- name: ALLOWED_ORIGINS
value: ^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-auth-server-service(:[0-9]+|)?
value: ".*"
ports:
- containerPort: 3000
- containerPort: 3030

View File

@ -1,46 +1,8 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: litmus-server-cr
rules:
- apiGroups: [networking.k8s.io, extensions]
resources: [ingresses]
verbs: [get]
- apiGroups: [""]
resources: [services, nodes, pods/log]
verbs: [get, watch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: litmus-server-crb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: litmus-server-cr
subjects:
- kind: ServiceAccount
name: litmus-server-account
namespace: litmus
## Control plane manifests
---
apiVersion: v1
kind: Namespace
metadata:
name: litmus
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: litmus-server-account
namespace: litmus
---
apiVersion: v1
kind: Secret
metadata:
name: litmus-portal-admin-secret
namespace: litmus
stringData:
JWT_SECRET: "litmus-portal@123"
DB_USER: "root"
@ -50,24 +12,15 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: litmus-portal-admin-config
namespace: litmus
data:
DB_SERVER: mongodb://my-release-mongodb-0.my-release-mongodb-headless:27017,my-release-mongodb-1.my-release-mongodb-headless:27017,my-release-mongodb-2.my-release-mongodb-headless:27017/admin
VERSION: "ci"
SKIP_SSL_VERIFY: "false"
# Configurations if you are using dex for OAuth
DEX_ENABLED: "false"
OIDC_ISSUER: "http://<Your Domain>:32000"
DEX_OAUTH_CALLBACK_URL: "http://<litmus-portal frontend exposed URL>:8080/auth/dex/callback"
DEX_OAUTH_CLIENT_ID: "LitmusPortalAuthBackend"
DEX_OAUTH_CLIENT_SECRET: "ZXhhbXBsZS1hcHAtc2VjcmV0"
OAuthJwtSecret: "litmus-oauth@123"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: litmusportal-frontend-nginx-configuration
namespace: litmus
data:
nginx.conf: |
pid /tmp/nginx.pid;
@ -151,7 +104,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-frontend
namespace: litmus
labels:
component: litmusportal-frontend
spec:
@ -168,11 +120,11 @@ spec:
containers:
- name: litmusportal-frontend
image: litmuschaos/litmusportal-frontend:ci
imagePullPolicy: Always
# securityContext:
# runAsUser: 2000
# allowPrivilegeEscalation: false
# runAsNonRoot: true
imagePullPolicy: Always
ports:
- containerPort: 8185
volumeMounts:
@ -188,7 +140,6 @@ apiVersion: v1
kind: Service
metadata:
name: litmusportal-frontend-service
namespace: litmus
spec:
type: NodePort
ports:
@ -202,7 +153,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-server
namespace: litmus
labels:
component: litmusportal-server
spec:
@ -215,6 +165,7 @@ spec:
labels:
component: litmusportal-server
spec:
automountServiceAccountToken: false
volumes:
- name: gitops-storage
emptyDir: {}
@ -239,17 +190,15 @@ spec:
- secretRef:
name: litmus-portal-admin-secret
env:
# if self-signed certificate are used pass the k8s tls secret name created in portal ns, to allow agents to use tls for communication
- name: TLS_SECRET_NAME
# if self-signed certificate are used pass the base64 tls certificate, to allow agents to use tls for communication
- name: TLS_CERT_B64
value: ""
- name: LITMUS_PORTAL_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CHAOS_CENTER_SCOPE
value: "cluster"
- name: ENABLE_GQL_INTROSPECTION
value: "false"
- name: INFRA_DEPLOYMENTS
value: '["app=chaos-exporter", "name=chaos-operator", "app=workflow-controller", "app=event-tracker"]'
- name: CHAOS_CENTER_UI_ENDPOINT
value: ""
- name: SUBSCRIBER_IMAGE
value: "litmuschaos/litmusportal-subscriber:ci"
- name: EVENT_TRACKER_IMAGE
@ -264,20 +213,6 @@ spec:
value: "litmuschaos/chaos-runner:ci"
- name: LITMUS_CHAOS_EXPORTER_IMAGE
value: "litmuschaos/chaos-exporter:ci"
- name: SERVER_SERVICE_NAME
value: "litmusportal-server-service"
- name: INFRA_DEPLOYMENTS
value: '["app=chaos-exporter", "name=chaos-operator", "app=workflow-controller", "app=event-tracker"]'
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CHAOS_CENTER_UI_ENDPOINT
value: ""
- name: INGRESS
value: "false"
- name: INGRESS_NAME
value: "litmus-ingress"
- name: CONTAINER_RUNTIME_EXECUTOR
value: "k8sapi"
- name: DEFAULT_HUB_BRANCH_NAME
@ -287,24 +222,22 @@ spec:
- name: LITMUS_AUTH_GRPC_PORT
value: ":3030"
- name: WORKFLOW_HELPER_IMAGE_VERSION
value: "3.7.0"
value: "ci"
- name: REMOTE_HUB_MAX_SIZE
value: "5000000"
- name: INFRA_COMPATIBLE_VERSIONS
value: '["ci"]'
- name: ALLOWED_ORIGINS
value: ^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-server-service(:[0-9]+|)?
value: ".*"
ports:
- containerPort: 8080
- containerPort: 8000
imagePullPolicy: Always
serviceAccountName: litmus-server-account
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-server-service
namespace: litmus
spec:
type: NodePort
ports:
@ -321,7 +254,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-auth-server
namespace: litmus
labels:
component: litmusportal-auth-server
spec:
@ -360,7 +292,7 @@ spec:
- name: LITMUS_GQL_GRPC_PORT
value: ":8000"
- name: ALLOWED_ORIGINS
value: ^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-auth-server-service(:[0-9]+|)?
value: ".*"
ports:
- containerPort: 3000
- containerPort: 3030
@ -370,7 +302,6 @@ apiVersion: v1
kind: Service
metadata:
name: litmusportal-auth-server-service
namespace: litmus
spec:
type: NodePort
ports: