Merge branch 'master' into infra-prom-collector-2

Signed-off-by: Bruno Ledesma <led.bruno@gmail.com>
Bruno Ledesma authored 2024-09-12 15:39:11 -03:00, committed by GitHub
commit 2b4a5c1939
82 changed files with 13808 additions and 461 deletions

View File

@ -2,6 +2,7 @@ name: Litmus-CI
on:
issue_comment:
types: [created]
push:
branches:
- master
@ -16,9 +17,7 @@ jobs:
- uses: octokit/request-action@v2.x
id: get_PR_commits
with:
route: GET /repos/:repo/pulls/:pull_number/commits
repo: ${{ github.repository }}
pull_number: ${{ github.event.issue.number }}
route: GET /repos/${{ github.repository }}/pulls/${{ github.event.issue.number }}/commits
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -217,3 +216,4 @@ jobs:
- name: Deleting KinD cluster
if: always()
run: kind delete cluster

View File

@ -156,7 +156,7 @@ Litmus is licensed under the Apache License, Version 2.0. See [LICENSE](./LICENS
Litmus Chaos is part of the CNCF Projects.
[![CNCF](https://github.com/cncf/artwork/blob/master/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/?selected=litmus)
[![CNCF](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/?selected=litmus)
## Important Links
@ -165,5 +165,5 @@ Litmus Chaos is part of the CNCF Projects.
</a>
<br>
<a href="https://landscape.cncf.io/?selected=litmus">
CNCF Landscape <img src="https://landscape.cncf.io/images/left-logo.svg" alt="Litmus on CNCF Landscape" height="15">
CNCF Landscape <img src="https://landscape.cncf.io/images/cncf-landscape-horizontal-color.svg" alt="Litmus on CNCF Landscape" height="15">
</a>

View File

@ -0,0 +1,11 @@
# Wingie Enuygun Company
[Wingie Enuygun Company](https://www.wingie.com/) is a leading travel and technology company providing seamless travel solutions across various platforms.
## Why do we use Litmus
We use Litmus to identify bottlenecks in our systems, detect issues early, and foresee potential errors. This allows us to take proactive measures and maintain the resilience and performance of our infrastructure.
## How do we use Litmus
Litmus is integrated into our QA cycles, where it plays a crucial role in catching bugs and verifying the overall resilience of our systems.
## Benefits of using Litmus
Litmus chaos experiments are straightforward to implement and can be easily customized or extended to meet our specific requirements, enabling us to effectively manage and optimize our systems at Wingie Enuygun.

View File

@ -263,7 +263,7 @@ func InviteUsers(service services.ApplicationService) gin.HandlerFunc {
// @Failure 400 {object} response.ErrInvalidRequest
// @Failure 400 {object} response.ErrUserNotFound
// @Failure 400 {object} response.ErrUserDeactivated
// @Failure 400 {object} response.ErrInvalidCredentials
// @Failure 401 {object} response.ErrInvalidCredentials
// @Failure 500 {object} response.ErrServerError
// @Success 200 {object} response.LoginResponse{}
// @Router /login [post]

View File

@ -12,10 +12,10 @@ require (
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.9.0
github.com/swaggo/swag v1.16.3
go.mongodb.org/mongo-driver v1.15.1
golang.org/x/crypto v0.25.0
golang.org/x/oauth2 v0.20.0
google.golang.org/grpc v1.65.0
go.mongodb.org/mongo-driver v1.16.1
golang.org/x/crypto v0.26.0
golang.org/x/oauth2 v0.21.0
google.golang.org/grpc v1.66.0
google.golang.org/protobuf v1.34.2
)
@ -36,7 +36,7 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.20.0 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.0 // indirect
@ -46,7 +46,7 @@ require (
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
@ -58,12 +58,12 @@ require (
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.22.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.23.0 // indirect
golang.org/x/text v0.17.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@ -44,8 +44,8 @@ github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzq
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@ -79,8 +79,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@ -119,15 +119,15 @@ github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gi
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.15.1 h1:l+RvoUOoMXFmADTLfYDm7On9dRm7p4T80/lEQM+r7HU=
go.mongodb.org/mongo-driver v1.15.1/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8=
go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
@ -137,16 +137,16 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -156,16 +156,16 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
@ -174,10 +174,10 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c=
google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -16,7 +16,7 @@ func SanitizeString(input string) string {
/*
ValidateStrictPassword represents and checks for the following patterns:
- Input is at least 8 characters long and at most 16 characters long
- Input contains at least one special character of these @$!%*?_&
- Input contains at least one special character of these @$!%*?_&#
- Input contains at least one digit
- Input contains at least one uppercase alphabet
- Input contains at least one lowercase alphabet
@ -33,7 +33,7 @@ func ValidateStrictPassword(input string) error {
digits := `[0-9]{1}`
lowerAlphabets := `[a-z]{1}`
capitalAlphabets := `[A-Z]{1}`
specialCharacters := `[@$!%*?_&]{1}`
specialCharacters := `[@$!%*?_&#]{1}`
if b, err := regexp.MatchString(digits, input); !b || err != nil {
return fmt.Errorf("password does not contain digits")
}
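For reference, a minimal runnable sketch of how the widened special-character class behaves; checkSpecialChar is an illustrative helper (not the actual Litmus function), using the same regexp.MatchString pattern as above:

package main

import (
	"fmt"
	"regexp"
)

// checkSpecialChar mirrors the special-character rule above: the input must
// contain at least one of @$!%*?_&# (the '#' is newly allowed).
func checkSpecialChar(input string) bool {
	matched, err := regexp.MatchString(`[@$!%*?_&#]{1}`, input)
	return err == nil && matched
}

func main() {
	fmt.Println(checkSpecialChar("Str0ngPass#")) // true: '#' now satisfies the rule
	fmt.Println(checkSpecialChar("Str0ngPass"))  // false: no special character
}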

View File

@ -358,11 +358,11 @@ type KubeObjectResponse {
"""
Type of the Kubernetes object
"""
kubeObj: [KubeObject]!
kubeObj: KubeObject!
}
"""
KubeObject consists of the namespace and the available resources in the same
KubeObject consists of the available resources in a namespace
"""
type KubeObject {
"""
@ -404,16 +404,75 @@ input KubeObjectRequest {
GVR Request
"""
kubeObjRequest: KubeGVRRequest
"""
Namespace in which the Kubernetes object is present
"""
namespace: String!
objectType: String!
workloads: [Workload]
}
"""
Defines details for fetching Kubernetes namespace data
"""
input KubeNamespaceRequest {
"""
ID of the infra
"""
infraID: ID!
}
"""
Defines a namespace in the infra (only the name for now; more fields may be added later)
"""
type KubeNamespace{
"""
Name of the namespace
"""
name: String!
}
input KubeGVRRequest {
group: String!
version: String!
resource: String!
}
"""
Response received for querying Kubernetes Namespaces
"""
type KubeNamespaceResponse {
"""
ID of the infra in which the Kubernetes namespace is present
"""
infraID: ID!
"""
List of Kubernetes namespaces
"""
kubeNamespace: [KubeNamespace]!
}
"""
Defines the details of Kubernetes namespace
"""
input KubeNamespaceData {
"""
Unique request ID for fetching Kubernetes namespace details
"""
requestID: ID!
"""
ID of the infra in which the Kubernetes namespace is present
"""
infraID: InfraIdentity!
"""
List of KubeNamespace returned by the subscriber
"""
kubeNamespace: String!
}
"""
Defines the details of Kubernetes object
"""
@ -638,6 +697,12 @@ extend type Mutation {
"""
# authorized directive not required
kubeObj(request: KubeObjectData!): String!
"""
Receives Kubernetes namespace data from the subscriber
"""
# authorized directive not required
kubeNamespace(request: KubeNamespaceData!): String!
}
extend type Subscription {
@ -663,4 +728,9 @@ extend type Subscription {
Returns a kubernetes object given an input
"""
getKubeObject(request: KubeObjectRequest!): KubeObjectResponse!
"""
Returns Kubernetes namespaces given an input
"""
getKubeNamespace(request: KubeNamespaceRequest!): KubeNamespaceResponse!
}
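Since KubeNamespaceData types the subscriber payload as a plain String!, here is a hedged Go sketch of how a subscriber might serialize its namespace list before sending it; the JSON wire format is an assumption for illustration, and KubeNamespace below is a local stand-in for the generated model:

package main

import (
	"encoding/json"
	"fmt"
)

// KubeNamespace is a local stand-in for the generated model: only the name for now.
type KubeNamespace struct {
	Name string `json:"name"`
}

func main() {
	// Assumed encoding: a JSON array of namespaces carried in the
	// KubeNamespaceData.kubeNamespace string field.
	namespaces := []KubeNamespace{{Name: "default"}, {Name: "litmus"}}
	payload, err := json.Marshal(namespaces)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // [{"name":"default"},{"name":"litmus"}]
}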

View File

@ -31,6 +31,10 @@ type ChaosHub implements ResourceDetails & Audit {
"""
repoBranch: String!
"""
Connected Hub of remote repository
"""
remoteHub: String!
"""
ID of the project in which the chaos hub is present
"""
projectID: ID!
@ -206,6 +210,10 @@ type ChaosHubStatus implements ResourceDetails & Audit {
"""
repoBranch: String!
"""
Connected Hub of remote repository
"""
remoteHub: String!
"""
Bool value indicating whether the hub is available or not.
"""
isAvailable: Boolean!
@ -320,6 +328,10 @@ input CreateChaosHubRequest {
"""
repoBranch: String!
"""
Connected Hub of remote repository
"""
remoteHub: String!
"""
Bool value indicating whether the hub is private or not.
"""
isPrivate: Boolean!
@ -382,6 +394,10 @@ input CloningInput {
"""
repoURL: String!
"""
Connected Hub of remote repository
"""
remoteHub: String!
"""
Bool value indicating whether the hub is private or not.
"""
isPrivate: Boolean!
@ -426,6 +442,10 @@ input CreateRemoteChaosHub {
URL of the git repository
"""
repoURL: String!
"""
Connected Hub of remote repository
"""
remoteHub: String!
}
@ -455,6 +475,10 @@ input UpdateChaosHubRequest {
"""
repoBranch: String!
"""
Connected Hub of remote repository
"""
remoteHub: String!
"""
Bool value indicating whether the hub is private or not.
"""
isPrivate: Boolean!

View File

@ -3,7 +3,7 @@ module github.com/litmuschaos/litmus/chaoscenter/graphql/server
go 1.22.0
require (
github.com/99designs/gqlgen v0.17.47
github.com/99designs/gqlgen v0.17.49
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
github.com/argoproj/argo-workflows/v3 v3.3.5
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
@ -21,10 +21,10 @@ require (
github.com/prometheus/client_golang v1.12.1
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.9.0
github.com/tidwall/gjson v1.17.1
github.com/tidwall/gjson v1.17.3
github.com/tidwall/sjson v1.2.5
github.com/vektah/gqlparser/v2 v2.5.12
go.mongodb.org/mongo-driver v1.15.0
github.com/vektah/gqlparser/v2 v2.5.16
go.mongodb.org/mongo-driver v1.16.1
golang.org/x/crypto v0.24.0
google.golang.org/grpc v1.64.1
google.golang.org/protobuf v1.34.2
@ -68,7 +68,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
@ -86,7 +86,7 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
@ -107,7 +107,7 @@ require (
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/oauth2 v0.18.0 // indirect
golang.org/x/sync v0.7.0 // indirect
@ -115,7 +115,7 @@ require (
golang.org/x/term v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
golang.org/x/tools v0.22.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect

View File

@ -43,8 +43,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/99designs/gqlgen v0.17.47 h1:M9DTK8X3+3ATNBfZlHBwMwNngn4hhZWDxNmTiuQU5tQ=
github.com/99designs/gqlgen v0.17.47/go.mod h1:ejVkldSdtmuudqmtfaiqjwlGXWAhIv0DKXGXFY25F04=
github.com/99designs/gqlgen v0.17.49 h1:b3hNGexHd33fBSAd4NDT/c3NCcQzcAVkknhN9ym36YQ=
github.com/99designs/gqlgen v0.17.49/go.mod h1:tC8YFVZMed81x7UJ7ORUwXF4Kn6SXuucFqQBhN8+BU0=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@ -548,8 +548,9 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
@ -878,8 +879,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
github.com/mrunalp/fileutils v0.0.0-20160930181131-4ee1cc9a8058/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
@ -1128,8 +1129,8 @@ github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U=
github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.17.3 h1:bwWLZU7icoKRG+C+0PNwIKC6FCJO/Q3p2pZvuP0jN94=
github.com/tidwall/gjson v1.17.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@ -1157,8 +1158,8 @@ github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk
github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/vektah/gqlparser/v2 v2.5.12 h1:COMhVVnql6RoaF7+aTBWiTADdpLGyZWU3K/NwW0ph98=
github.com/vektah/gqlparser/v2 v2.5.12/go.mod h1:WQQjFc+I1YIzoPvZBhUQX7waZgg3pMLi0r8KymvAE2w=
github.com/vektah/gqlparser/v2 v2.5.16 h1:1gcmLTvs3JLKXckwCwlUagVn/IlV2bwqle0vJ0vy5p8=
github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww=
github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
@ -1207,8 +1208,8 @@ go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc=
go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8=
go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@ -1307,8 +1308,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1631,8 +1632,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -29,7 +29,7 @@ func (r *mutationResolver) RunChaosExperiment(ctx context.Context, experimentID
logrus.WithFields(logFields).Info("request received to run chaos experiment")
err := authorization.ValidateRole(ctx, projectID,
authorization.MutationRbacRules[authorization.CreateChaosExperiment],
authorization.MutationRbacRules[authorization.ReRunChaosExperiment],
model.InvitationAccepted.String())
if err != nil {
return nil, err

View File

@ -116,6 +116,11 @@ func (r *mutationResolver) KubeObj(ctx context.Context, request model.KubeObject
return r.chaosInfrastructureService.KubeObj(request, *data_store.Store)
}
// KubeNamespace is the resolver for the kubeNamespace field.
func (r *mutationResolver) KubeNamespace(ctx context.Context, request model.KubeNamespaceData) (string, error) {
return r.chaosInfrastructureService.KubeNamespace(request, *data_store.Store)
}
// GetInfra is the resolver for the getInfra field.
func (r *queryResolver) GetInfra(ctx context.Context, projectID string, infraID string) (*model.Infra, error) {
logFields := logrus.Fields{
@ -270,8 +275,10 @@ func (r *subscriptionResolver) InfraConnect(ctx context.Context, request model.I
return infraAction, err
}
data_store.Store.Mutex.Lock()
if _, ok := data_store.Store.ConnectedInfra[request.InfraID]; ok {
if infra_channel, ok := data_store.Store.ConnectedInfra[request.InfraID]; ok {
data_store.Store.Mutex.Unlock()
logrus.Print("ALREADY CONNECTED, FORCED DISCONNECT: ", request.InfraID)
close(infra_channel)
return infraAction, errors.New("CLUSTER ALREADY CONNECTED")
}
data_store.Store.ConnectedInfra[request.InfraID] = infraAction
@ -279,7 +286,7 @@ func (r *subscriptionResolver) InfraConnect(ctx context.Context, request model.I
go func() {
<-ctx.Done()
verifiedInfra.IsActive = false
logrus.Print("Context Done, will handle disconnection for: ", request.InfraID)
newVerifiedInfra := model.Infra{}
copier.Copy(&newVerifiedInfra, &verifiedInfra)
@ -348,6 +355,24 @@ func (r *subscriptionResolver) GetKubeObject(ctx context.Context, request model.
return kubeObjData, nil
}
// GetKubeNamespace is the resolver for the getKubeNamespace field.
func (r *subscriptionResolver) GetKubeNamespace(ctx context.Context, request model.KubeNamespaceRequest) (<-chan *model.KubeNamespaceResponse, error) {
logrus.Print("NEW NAMESPACE REQUEST", request.InfraID)
kubeNamespaceData := make(chan *model.KubeNamespaceResponse)
reqID := uuid.New()
data_store.Store.Mutex.Lock()
data_store.Store.KubeNamespaceData[reqID.String()] = kubeNamespaceData
data_store.Store.Mutex.Unlock()
go func() {
<-ctx.Done()
logrus.Println("Closed KubeNamespace Listener")
delete(data_store.Store.KubeNamespaceData, reqID.String())
}()
go r.chaosExperimentHandler.GetKubeNamespaceData(reqID.String(), request, *data_store.Store)
return kubeNamespaceData, nil
}
// Subscription returns generated.SubscriptionResolver implementation.
func (r *Resolver) Subscription() generated.SubscriptionResolver { return &subscriptionResolver{r} }
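To connect the two halves, a hedged sketch of how the kubeNamespace mutation could route the subscriber's payload back to the channel that GetKubeNamespace registered under the request ID; the types below are local stand-ins for the data store and generated model (not the Litmus code), and the JSON payload format is assumed:

package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

// Local stand-ins for the generated model and data-store types; the names
// follow the resolver above, but this is an illustrative sketch only.
type KubeNamespace struct {
	Name string `json:"name"`
}

type KubeNamespaceResponse struct {
	InfraID       string
	KubeNamespace []*KubeNamespace
}

type stateData struct {
	Mutex             sync.Mutex
	KubeNamespaceData map[string]chan *KubeNamespaceResponse
}

// deliverNamespaces mimics what the kubeNamespace mutation could do: look up
// the channel registered under the request ID and push the decoded payload.
func deliverNamespaces(store *stateData, requestID, infraID, payload string) error {
	var namespaces []*KubeNamespace
	if err := json.Unmarshal([]byte(payload), &namespaces); err != nil {
		return err
	}
	store.Mutex.Lock()
	ch, ok := store.KubeNamespaceData[requestID]
	store.Mutex.Unlock()
	if !ok {
		return fmt.Errorf("no subscriber waiting for request %s", requestID)
	}
	ch <- &KubeNamespaceResponse{InfraID: infraID, KubeNamespace: namespaces}
	return nil
}

func main() {
	store := &stateData{KubeNamespaceData: map[string]chan *KubeNamespaceResponse{}}
	ch := make(chan *KubeNamespaceResponse, 1)
	store.KubeNamespaceData["req-1"] = ch

	if err := deliverNamespaces(store, "req-1", "infra-1", `[{"name":"default"}]`); err != nil {
		panic(err)
	}
	resp := <-ch
	fmt.Println(resp.InfraID, resp.KubeNamespace[0].Name) // infra-1 default
}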

File diff suppressed because it is too large

View File

@ -138,6 +138,8 @@ type ChaosHub struct {
RepoURL string `json:"repoURL"`
// Branch of the git repository
RepoBranch string `json:"repoBranch"`
// Connected Hub of remote repository
RemoteHub string `json:"remoteHub"`
// ID of the project in which the chaos hub is present
ProjectID string `json:"projectID"`
// Default Hub Identifier
@ -213,6 +215,8 @@ type ChaosHubStatus struct {
RepoURL string `json:"repoURL"`
// Branch of the git repository
RepoBranch string `json:"repoBranch"`
// Connected Hub of remote repository
RemoteHub string `json:"remoteHub"`
// Bool value indicating whether the hub is available or not.
IsAvailable bool `json:"isAvailable"`
// Total number of experiments in the hub
@ -292,6 +296,8 @@ type CloningInput struct {
RepoBranch string `json:"repoBranch"`
// URL of the git repository
RepoURL string `json:"repoURL"`
// Connected Hub of remote repository
RemoteHub string `json:"remoteHub"`
// Bool value indicating whether the hub is private or not.
IsPrivate bool `json:"isPrivate"`
// Type of authentication used: BASIC, SSH, TOKEN
@ -344,6 +350,8 @@ type CreateChaosHubRequest struct {
RepoURL string `json:"repoURL"`
// Branch of the git repository
RepoBranch string `json:"repoBranch"`
// Connected Hub of remote repository
RemoteHub string `json:"remoteHub"`
// Bool value indicating whether the hub is private or not.
IsPrivate bool `json:"isPrivate"`
// Type of authentication used: BASIC, SSH, TOKEN
@ -377,6 +385,8 @@ type CreateRemoteChaosHub struct {
Description *string `json:"description,omitempty"`
// URL of the git repository
RepoURL string `json:"repoURL"`
// Connected Hub of remote repository
RemoteHub string `json:"remoteHub"`
}
// Defines the start date and end date for the filtering the data
@ -1149,7 +1159,37 @@ type KubeGVRRequest struct {
Resource string `json:"resource"`
}
// KubeObject consists of the namespace and the available resources in the same
// Defines a namespace in the infra (only the name for now; more fields may be added later)
type KubeNamespace struct {
// Name of the namespace
Name string `json:"name"`
}
// Defines the details of Kubernetes namespace
type KubeNamespaceData struct {
// Unique request ID for fetching Kubernetes namespace details
RequestID string `json:"requestID"`
// ID of the infra in which the Kubernetes namespace is present
InfraID *InfraIdentity `json:"infraID"`
// List of KubeNamespace returned by the subscriber
KubeNamespace string `json:"kubeNamespace"`
}
// Defines details for fetching Kubernetes namespace data
type KubeNamespaceRequest struct {
// ID of the infra
InfraID string `json:"infraID"`
}
// Response received for querying Kubernetes Namespaces
type KubeNamespaceResponse struct {
// ID of the infra in which the Kubernetes namespace is present
InfraID string `json:"infraID"`
// List of Kubernetes namespaces
KubeNamespace []*KubeNamespace `json:"kubeNamespace"`
}
// KubeObject consists of the available resources in a namespace
type KubeObject struct {
// Namespace of the resource
Namespace string `json:"namespace"`
@ -1173,8 +1213,10 @@ type KubeObjectRequest struct {
InfraID string `json:"infraID"`
// GVR Request
KubeObjRequest *KubeGVRRequest `json:"kubeObjRequest,omitempty"`
ObjectType string `json:"objectType"`
Workloads []*Workload `json:"workloads,omitempty"`
// Namespace in which the Kubernetes object is present
Namespace string `json:"namespace"`
ObjectType string `json:"objectType"`
Workloads []*Workload `json:"workloads,omitempty"`
}
// Response received for querying Kubernetes Object
@ -1182,7 +1224,7 @@ type KubeObjectResponse struct {
// ID of the infra in which the Kubernetes object is present
InfraID string `json:"infraID"`
// Type of the Kubernetes object
KubeObj []*KubeObject `json:"kubeObj"`
KubeObj *KubeObject `json:"kubeObj"`
}
// Defines the CMD probe properties
@ -1945,6 +1987,8 @@ type UpdateChaosHubRequest struct {
RepoURL string `json:"repoURL"`
// Branch of the git repository
RepoBranch string `json:"repoBranch"`
// Connected Hub of remote repository
RemoteHub string `json:"remoteHub"`
// Bool value indicating whether the hub is private or not.
IsPrivate bool `json:"isPrivate"`
// Type of authentication used: BASIC, SSH, TOKEN

View File

@ -1234,7 +1234,33 @@ func (c *ChaosExperimentHandler) GetKubeObjData(reqID string, kubeObject model.K
} else if reqChan, ok := r.KubeObjectData[reqID]; ok {
resp := model.KubeObjectResponse{
InfraID: kubeObject.InfraID,
KubeObj: []*model.KubeObject{},
KubeObj: &model.KubeObject{},
}
reqChan <- &resp
close(reqChan)
}
}
func (c *ChaosExperimentHandler) GetKubeNamespaceData(reqID string, kubeNamespace model.KubeNamespaceRequest, r store.StateData) {
reqType := "namespace"
data, err := json.Marshal(kubeNamespace)
if err != nil {
logrus.Print("ERROR WHILE MARSHALLING POD DETAILS")
}
externalData := string(data)
payload := model.InfraActionResponse{
Action: &model.ActionPayload{
RequestID: reqID,
RequestType: reqType,
ExternalData: &externalData,
},
}
if clusterChan, ok := r.ConnectedInfra[kubeNamespace.InfraID]; ok {
clusterChan <- &payload
} else if reqChan, ok := r.KubeNamespaceData[reqID]; ok {
resp := model.KubeNamespaceResponse{
InfraID: kubeNamespace.InfraID,
KubeNamespace: []*model.KubeNamespace{},
}
reqChan <- &resp
close(reqChan)

View File

@ -0,0 +1,145 @@
package fuzz_tests
import (
"context"
"testing"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_experiment_run"
dbChaosExperiment "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_experiment"
dbChaosExperimentRun "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_experiment_run"
dbChaosInfra "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_infrastructure"
dbMocks "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/mocks"
fuzz "github.com/AdaLogics/go-fuzz-headers"
store "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/data-store"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb"
"github.com/stretchr/testify/mock"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
type MockServices struct {
ChaosExperimentOperator *dbChaosExperiment.Operator
ChaosExperimentRunOperator *dbChaosExperimentRun.Operator
ChaosInfrastructureOperator *dbChaosInfra.Operator
MongodbOperator *dbMocks.MongoOperator
ChaosExperimentRunService chaos_experiment_run.Service
}
func NewMockServices() *MockServices {
var (
mongodbMockOperator = new(dbMocks.MongoOperator)
chaosExperimentOperator = dbChaosExperiment.NewChaosExperimentOperator(mongodbMockOperator)
chaosExperimentRunOperator = dbChaosExperimentRun.NewChaosExperimentRunOperator(mongodbMockOperator)
chaosInfrastructureOperator = dbChaosInfra.NewInfrastructureOperator(mongodbMockOperator)
chaosExperimentRunService chaos_experiment_run.Service = chaos_experiment_run.NewChaosExperimentRunService(
chaosExperimentOperator,
chaosInfrastructureOperator,
chaosExperimentRunOperator,
)
)
return &MockServices{
ChaosExperimentOperator: chaosExperimentOperator,
ChaosExperimentRunOperator: chaosExperimentRunOperator,
ChaosInfrastructureOperator: chaosInfrastructureOperator,
MongodbOperator: mongodbMockOperator,
ChaosExperimentRunService: chaosExperimentRunService,
}
}
func FuzzProcessExperimentRunDelete(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
Query bson.D
WorkflowRunID *string
ExperimentRun dbChaosExperimentRun.ChaosExperimentRun
Workflow dbChaosExperiment.ChaosExperimentRequest
Username string
StoreStateData *store.StateData
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
mockServices.MongodbOperator.On("Update", mock.Anything, mongodb.ChaosExperimentRunsCollection, mock.Anything, mock.Anything, mock.Anything).Return(&mongo.UpdateResult{}, nil).Once()
err = mockServices.ChaosExperimentRunService.ProcessExperimentRunDelete(
context.Background(),
targetStruct.Query,
targetStruct.WorkflowRunID,
targetStruct.ExperimentRun,
targetStruct.Workflow,
targetStruct.Username,
targetStruct.StoreStateData,
)
if err != nil {
t.Errorf("ProcessExperimentRunDelete() error = %v", err)
}
})
}
func FuzzProcessExperimentRunStop(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
Query bson.D
ExperimentRunID *string
Experiment dbChaosExperiment.ChaosExperimentRequest
Username string
ProjectID string
StoreStateData *store.StateData
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
mockServices.MongodbOperator.On("Update", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&mongo.UpdateResult{}, nil).Once()
err = mockServices.ChaosExperimentRunService.ProcessExperimentRunStop(
context.Background(),
targetStruct.Query,
targetStruct.ExperimentRunID,
targetStruct.Experiment,
targetStruct.Username,
targetStruct.ProjectID,
targetStruct.StoreStateData,
)
if err != nil {
t.Errorf("ProcessExperimentRunStop() error = %v", err)
}
})
}
func FuzzProcessCompletedExperimentRun(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
ExecData chaos_experiment_run.ExecutionData
WfID string
RunID string
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
findResult := []interface{}{bson.D{
{Key: "experiment_id", Value: targetStruct.WfID},
}}
mockServices := NewMockServices()
singleResult := mongo.NewSingleResultFromDocument(findResult[0], nil, nil)
mockServices.MongodbOperator.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(singleResult, nil).Once()
_, err = mockServices.ChaosExperimentRunService.ProcessCompletedExperimentRun(
targetStruct.ExecData,
targetStruct.WfID,
targetStruct.RunID,
)
if err != nil {
t.Errorf("ProcessCompletedExperimentRun() error = %v", err)
}
})
}

View File

@ -0,0 +1,242 @@
package fuzz_tests
import (
"context"
"strings"
"testing"
"time"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_experiment_run/handler"
chaosInfraMocks "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_infrastructure/model/mocks"
dbChaosExperiment "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_experiment"
dbChaosExperimentRun "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_experiment_run"
dbMocks "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/mocks"
dbGitOpsMocks "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/gitops/model/mocks"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"github.com/google/uuid"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph/model"
typesMocks "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_experiment_run/model/mocks"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb"
"github.com/stretchr/testify/mock"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
type MockServices struct {
ChaosExperimentRunService *typesMocks.ChaosExperimentRunService
InfrastructureService *chaosInfraMocks.InfraService
GitOpsService *dbGitOpsMocks.GitOpsService
ChaosExperimentOperator *dbChaosExperiment.Operator
ChaosExperimentRunOperator *dbChaosExperimentRun.Operator
MongodbOperator *dbMocks.MongoOperator
ChaosExperimentRunHandler *handler.ChaosExperimentRunHandler
}
func NewMockServices() *MockServices {
var (
mongodbMockOperator = new(dbMocks.MongoOperator)
infrastructureService = new(chaosInfraMocks.InfraService)
gitOpsService = new(dbGitOpsMocks.GitOpsService)
chaosExperimentRunService = new(typesMocks.ChaosExperimentRunService)
chaosExperimentOperator = dbChaosExperiment.NewChaosExperimentOperator(mongodbMockOperator)
chaosExperimentRunOperator = dbChaosExperimentRun.NewChaosExperimentRunOperator(mongodbMockOperator)
)
var chaosExperimentRunHandler = handler.NewChaosExperimentRunHandler(
chaosExperimentRunService,
infrastructureService,
gitOpsService,
chaosExperimentOperator,
chaosExperimentRunOperator,
mongodbMockOperator,
)
return &MockServices{
ChaosExperimentRunService: chaosExperimentRunService,
InfrastructureService: infrastructureService,
GitOpsService: gitOpsService,
ChaosExperimentOperator: chaosExperimentOperator,
ChaosExperimentRunOperator: chaosExperimentRunOperator,
MongodbOperator: mongodbMockOperator,
ChaosExperimentRunHandler: chaosExperimentRunHandler,
}
}
func FuzzGetExperimentRun(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
ProjectID string
ExperimentRunID string
NotifyID string
}{}
targetStruct.ProjectID = uuid.New().String()
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
ctx := context.Background()
mockServices := NewMockServices()
findResult := []interface{}{bson.D{
{Key: "experiment_run_id", Value: targetStruct.ExperimentRunID},
{Key: "project_id", Value: targetStruct.ProjectID},
{Key: "infra_id", Value: "mockInfraID"},
{Key: "kubernetesInfraDetails", Value: bson.A{
bson.D{
{Key: "InfraID", Value: "mockInfraID"},
{Key: "Name", Value: "MockInfra"},
{Key: "EnvironmentID", Value: "mockEnvID"},
{Key: "Description", Value: "Mock Infrastructure"},
{Key: "PlatformName", Value: "Kubernetes"},
{Key: "IsActive", Value: true},
{Key: "UpdatedAt", Value: time.Now().Unix()},
{Key: "CreatedAt", Value: time.Now().Unix()},
},
}},
{Key: "experiment", Value: bson.A{
bson.D{
{Key: "ExperimentName", Value: "MockExperiment"},
{Key: "ExperimentType", Value: "MockType"},
{Key: "Revision", Value: bson.A{
bson.D{
{Key: "RevisionID", Value: uuid.NewString()},
{Key: "ExperimentManifest", Value: "mockManifest"},
{Key: "Weightages", Value: bson.A{
bson.D{{Key: "FaultName", Value: "fault1"}, {Key: "Weightage", Value: 10}},
bson.D{{Key: "FaultName", Value: "fault2"}, {Key: "Weightage", Value: 20}},
}},
},
}},
},
}},
}}
cursor, _ := mongo.NewCursorFromDocuments(findResult, nil, nil)
mockServices.MongodbOperator.On("Aggregate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(cursor, nil).Once()
res, err := mockServices.ChaosExperimentRunHandler.GetExperimentRun(ctx, targetStruct.ProjectID, &targetStruct.ExperimentRunID, &targetStruct.NotifyID)
if err != nil {
t.Errorf("ChaosExperimentRunHandler.GetExperimentRun() error = %v", err)
return
}
if res == nil {
t.Errorf("Returned response is nil")
}
})
}
func FuzzListExperimentRun(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
ProjectID string
Request model.ListExperimentRunRequest
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
findResult := []interface{}{bson.D{
{Key: "project_id", Value: targetStruct.ProjectID},
{Key: "infra_id", Value: "abc"},
{
Key: "revision", Value: []dbChaosExperiment.ExperimentRevision{
{
RevisionID: uuid.NewString(),
},
},
},
}}
cursor, _ := mongo.NewCursorFromDocuments(findResult, nil, nil)
mockServices.MongodbOperator.On("Aggregate", mock.Anything, mongodb.ChaosExperimentRunsCollection, mock.Anything, mock.Anything).Return(cursor, nil).Once()
res, err := mockServices.ChaosExperimentRunHandler.ListExperimentRun(targetStruct.ProjectID, targetStruct.Request)
if err != nil {
t.Errorf("ListExperimentRun() error = %v", err)
return
}
if res == nil {
t.Errorf("Returned response is nil")
}
})
}
func FuzzRunChaosWorkFlow(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
ProjectID string
Workflow dbChaosExperiment.ChaosExperimentRequest
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
mockServices.MongodbOperator.On("StartSession").Return(mock.Anything, nil).Once()
mockServices.MongodbOperator.On("Update", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&mongo.UpdateResult{}, nil).Once()
mockServices.MongodbOperator.On("CommitTransaction", mock.Anything).Return(nil).Once()
mockServices.MongodbOperator.On("AbortTransaction", mock.Anything).Return(nil).Once()
findResult := []interface{}{bson.D{
{Key: "infra_id", Value: targetStruct.ProjectID},
}}
singleResult := mongo.NewSingleResultFromDocument(findResult[0], nil, nil)
mockServices.MongodbOperator.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(singleResult, nil).Once()
res, err := mockServices.ChaosExperimentRunHandler.RunChaosWorkFlow(context.Background(), targetStruct.ProjectID, targetStruct.Workflow, nil)
if err != nil && strings.Contains(err.Error(), "inactive infra") {
t.Log("Handled expected error due to inactive infrastructure: ", err)
return
}
if res == nil {
t.Errorf("Returned response is nil")
}
})
}
func FuzzGetExperimentRunStats(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
ProjectID string
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
targetStruct.ProjectID = uuid.New().String()
mockServices := NewMockServices()
findResult := []interface{}{bson.D{
{Key: "project_id", Value: targetStruct.ProjectID},
{Key: "infra_id", Value: "abc"},
{
Key: "revision", Value: []dbChaosExperiment.ExperimentRevision{
{
RevisionID: uuid.NewString(),
},
},
},
}}
cursor, _ := mongo.NewCursorFromDocuments(findResult, nil, nil)
mockServices.MongodbOperator.On("Aggregate", mock.Anything, mongodb.ChaosExperimentRunsCollection, mock.Anything, mock.Anything).Return(cursor, nil).Once()
res, err := mockServices.ChaosExperimentRunHandler.GetExperimentRunStats(context.Background(), targetStruct.ProjectID)
if err != nil {
t.Errorf("GetExperimentRunStats() error = %v", err)
return
}
if res == nil {
t.Errorf("Returned response is nil")
}
})
}

View File

@ -0,0 +1,656 @@
package test
import (
"context"
"testing"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph/model"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_experiment/handler"
chaosExperimentMocks "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_experiment/model/mocks"
chaosExperimentRunMocks "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_experiment_run/model/mocks"
chaosInfraMocks "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaos_infrastructure/model/mocks"
dbChaosExperiment "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_experiment"
dbChaosExperimentRun "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_experiment_run"
dbMocks "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/mocks"
dbGitOpsMocks "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/gitops/model/mocks"
"go.mongodb.org/mongo-driver/bson"
dbChaosInfra "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_infrastructure"
dbOperationsEnvironment "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/environments"
fuzz "github.com/AdaLogics/go-fuzz-headers"
store "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/data-store"
)
type MockServices struct {
ChaosExperimentService *chaosExperimentMocks.ChaosExperimentService
ChaosExperimentRunService *chaosExperimentRunMocks.ChaosExperimentRunService
InfrastructureService *chaosInfraMocks.InfraService
GitOpsService *dbGitOpsMocks.GitOpsService
ChaosExperimentOperator *dbChaosExperiment.Operator
ChaosExperimentRunOperator *dbChaosExperimentRun.Operator
MongodbOperator *dbMocks.MongoOperator
ChaosExperimentHandler *handler.ChaosExperimentHandler
}
func NewMockServices() *MockServices {
var (
mongodbMockOperator = new(dbMocks.MongoOperator)
infrastructureService = new(chaosInfraMocks.InfraService)
chaosExperimentRunService = new(chaosExperimentRunMocks.ChaosExperimentRunService)
gitOpsService = new(dbGitOpsMocks.GitOpsService)
chaosExperimentOperator = dbChaosExperiment.NewChaosExperimentOperator(mongodbMockOperator)
chaosExperimentRunOperator = dbChaosExperimentRun.NewChaosExperimentRunOperator(mongodbMockOperator)
chaosExperimentService = new(chaosExperimentMocks.ChaosExperimentService)
)
var chaosExperimentHandler = handler.NewChaosExperimentHandler(chaosExperimentService, chaosExperimentRunService, infrastructureService, gitOpsService, chaosExperimentOperator, chaosExperimentRunOperator, mongodbMockOperator)
return &MockServices{
ChaosExperimentService: chaosExperimentService,
ChaosExperimentRunService: chaosExperimentRunService,
InfrastructureService: infrastructureService,
GitOpsService: gitOpsService,
ChaosExperimentOperator: chaosExperimentOperator,
ChaosExperimentRunOperator: chaosExperimentRunOperator,
MongodbOperator: mongodbMockOperator,
ChaosExperimentHandler: chaosExperimentHandler,
}
}
var (
mongodbMockOperator = new(dbMocks.MongoOperator)
environmentOperator = dbOperationsEnvironment.NewEnvironmentOperator(mongodbMockOperator)
)
func stringPointer(v string) *string { return &v }
func FuzzRegisterInfra(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
projectID string
request model.RegisterInfraRequest
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
ctx := context.Background()
mockServices := NewMockServices()
mockResponse := &model.RegisterInfraResponse{
Token: "test-token",
InfraID: "test-infra-id",
Name: targetStruct.request.Name,
Manifest: "test-manifest",
}
mockServices.InfrastructureService.
On("RegisterInfra", ctx, targetStruct.projectID, targetStruct.request).
Return(mockResponse, nil)
response, err := mockServices.InfrastructureService.RegisterInfra(ctx, targetStruct.projectID, targetStruct.request)
if response.Name != targetStruct.request.Name {
t.Errorf("Chaos Infrastructure Name is %s Return %s", response.Name, targetStruct.request.Name)
}
if err != nil {
t.Errorf("ChaosInfrastructure.RegisterInfra() error = %v", err)
return
}
if response == nil {
t.Errorf("Returned environment is nil")
}
})
}
func FuzzDeleteInfra(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
projectID string
infraID string
r store.StateData
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
ctx := context.Background()
mockServices := NewMockServices()
mockServices.InfrastructureService.
On("DeleteInfra", ctx, targetStruct.projectID, targetStruct.infraID, targetStruct.r).
Return("infra deleted successfully", nil)
response, err := mockServices.InfrastructureService.DeleteInfra(ctx, targetStruct.projectID, targetStruct.infraID, targetStruct.r)
if err != nil {
t.Errorf("ChaosInfrastructure.RegisterInfra() error = %v", err)
return
}
if response == "" {
t.Errorf("Returned environment is nil")
}
})
}
func FuzzGetInfraTest(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
projectID string
infraID string
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
ctx := context.Background()
mockServices := NewMockServices()
mockResponse := &model.Infra{
InfraID: targetStruct.infraID,
ProjectID: targetStruct.projectID,
Name: "TestInfraName",
Description: nil,
Tags: []string{"tag1", "tag2"},
EnvironmentID: "test-env-id",
PlatformName: "test-platform",
IsActive: true,
IsInfraConfirmed: true,
IsRemoved: false,
UpdatedAt: "1680000000",
CreatedAt: "1670000000",
Token: "test-token",
InfraNamespace: nil,
ServiceAccount: nil,
InfraScope: "test-scope",
StartTime: "1675000000",
Version: "1.0.0",
CreatedBy: &model.UserDetails{Username: "test-user"},
UpdatedBy: &model.UserDetails{Username: "test-user"},
NoOfExperiments: nil,
NoOfExperimentRuns: nil,
LastExperimentTimestamp: nil,
UpdateStatus: "UpToDate",
}
mockServices.InfrastructureService.
On("GetInfra", context.Background(), targetStruct.projectID, targetStruct.infraID).
Return(mockResponse, nil)
infra, err := mockServices.InfrastructureService.GetInfra(ctx, targetStruct.projectID, targetStruct.infraID)
if err != nil {
t.Errorf("ChaosInfrastructure.GetInfra() error = %v", err)
return
}
if infra.InfraID != targetStruct.infraID {
t.Errorf("ProjectID mismatch: got %v, want %v", infra.InfraID, targetStruct.infraID)
}
})
}
func FuzzListInfras(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
projectID string
request *model.ListInfraRequest
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
mockResponse := &model.ListInfraResponse{
TotalNoOfInfras: 10,
Infras: []*model.Infra{
{
InfraID: "infra1",
ProjectID: targetStruct.projectID,
Name: "Test Infra",
EnvironmentID: "env1",
Description: stringPointer("Test description"),
PlatformName: "Test Platform",
IsActive: true,
IsInfraConfirmed: true,
UpdatedAt: "1622527200",
CreatedAt: "1622523600",
Token: "test-token",
InfraNamespace: stringPointer("test-namespace"),
ServiceAccount: stringPointer("test-service-account"),
InfraScope: "test-scope",
StartTime: "1622520000",
Version: "v1.0",
Tags: []string{"tag1", "tag2"},
IsRemoved: false,
},
},
}
mockServices.InfrastructureService.On("ListInfras", targetStruct.projectID, targetStruct.request).
Return(mockResponse, nil)
response, err := mockServices.InfrastructureService.ListInfras(targetStruct.projectID, targetStruct.request)
if err != nil {
t.Errorf("ChaosInfrastructure.DeleteInfra() error = %v", err)
return
}
if response.TotalNoOfInfras < 0 {
t.Errorf("Invalid TotalNoOfInfras: %d", response.TotalNoOfInfras)
}
for _, infra := range response.Infras {
if infra.InfraID == "" {
t.Errorf("InfraID should not be empty")
}
if infra.ProjectID != targetStruct.projectID {
t.Errorf("ProjectID mismatch: got %v, want %v", infra.ProjectID, targetStruct.projectID)
}
}
})
}
func FuzzGetInfraDetails(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
InfraID string
projectID string
request *model.ListInfraRequest
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
mockResponse := &model.Infra{
InfraID: targetStruct.InfraID,
ProjectID: targetStruct.projectID,
Name: "TestInfraName",
Description: nil,
Tags: []string{"tag1", "tag2"},
EnvironmentID: "test-env-id",
PlatformName: "test-platform",
IsActive: true,
IsInfraConfirmed: true,
IsRemoved: false,
UpdatedAt: "1680000000",
CreatedAt: "1670000000",
Token: "test-token",
InfraNamespace: nil,
ServiceAccount: nil,
InfraScope: "test-scope",
StartTime: "1675000000",
Version: "1.0.0",
CreatedBy: &model.UserDetails{Username: "test-user"},
UpdatedBy: &model.UserDetails{Username: "test-user"},
NoOfExperiments: nil,
NoOfExperimentRuns: nil,
LastExperimentTimestamp: nil,
UpdateStatus: "UpToDate",
}
mockServices.InfrastructureService.
On("GetInfra", context.Background(), targetStruct.projectID, targetStruct.InfraID).
Return(mockResponse, nil)
mockServices.InfrastructureService.
On("GetInfraDetails", context.Background(), targetStruct.InfraID, targetStruct.projectID).
Return(mockResponse, nil)
ctx := context.Background()
response, err := mockServices.InfrastructureService.GetInfraDetails(ctx, targetStruct.InfraID, targetStruct.projectID)
if err != nil {
t.Errorf("ChaosInfrastructure.DeleteInfra() error = %v", err)
return
}
if response.InfraID != targetStruct.InfraID {
t.Errorf("InfraID mismatch: got %v, want %v", response.InfraID, targetStruct.InfraID)
}
})
}
func FuzzGetInfraStats(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
projectID string
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
mockResponse := &model.GetInfraStatsResponse{
TotalInfrastructures: 10,
TotalActiveInfrastructure: 7,
TotalInactiveInfrastructures: 3,
TotalConfirmedInfrastructure: 8,
TotalNonConfirmedInfrastructures: 2,
}
mockServices.InfrastructureService.
On("GetInfraStats", context.Background(), targetStruct.projectID).
Return(mockResponse, nil)
ctx := context.Background()
response, err := mockServices.InfrastructureService.GetInfraStats(ctx, targetStruct.projectID)
if err != nil {
t.Errorf("ChaosInfrastructure.DeleteInfra() error = %v", err)
return
}
if response.TotalInfrastructures != mockResponse.TotalInfrastructures {
t.Errorf("TotalInfrastructures mismatch: got %v, want %v", response.TotalInfrastructures, mockResponse.TotalInfrastructures)
}
if response.TotalActiveInfrastructure != mockResponse.TotalActiveInfrastructure {
t.Errorf("TotalActiveInfrastructure mismatch: got %v, want %v", response.TotalActiveInfrastructure, mockResponse.TotalActiveInfrastructure)
}
if response.TotalInactiveInfrastructures != mockResponse.TotalInactiveInfrastructures {
t.Errorf("TotalInactiveInfrastructures mismatch: got %v, want %v", response.TotalInactiveInfrastructures, mockResponse.TotalInactiveInfrastructures)
}
if response.TotalConfirmedInfrastructure != mockResponse.TotalConfirmedInfrastructure {
t.Errorf("TotalConfirmedInfrastructure mismatch: got %v, want %v", response.TotalConfirmedInfrastructure, mockResponse.TotalConfirmedInfrastructure)
}
if response.TotalNonConfirmedInfrastructures != mockResponse.TotalNonConfirmedInfrastructures {
t.Errorf("TotalNonConfirmedInfrastructures mismatch: got %v, want %v", response.TotalNonConfirmedInfrastructures, mockResponse.TotalNonConfirmedInfrastructures)
}
})
}
func FuzzGetVersionDetails(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
mockServices := NewMockServices()
mockResponse := &model.InfraVersionDetails{
LatestVersion: "testVersion1",
CompatibleVersions: []string{"compatibleVersion1", "compatibleVersion2"},
}
mockServices.InfrastructureService.On("GetVersionDetails").Return(mockResponse, nil)
response, err := mockServices.InfrastructureService.GetVersionDetails()
if err != nil {
t.Errorf("infraService.GetVersionDetails() error = %v", err)
return
}
if response == nil {
t.Errorf("Expected a non-nil response")
return
}
if response.LatestVersion == "" {
t.Errorf("Expected a valid latest version")
}
})
}
func FuzzQueryServerVersion(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
ctx := context.Background()
mockResponse := &model.ServerVersionResponse{
Key: "version",
Value: string(data),
}
mockServices := NewMockServices()
mockServices.InfrastructureService.On("GetConfig", ctx, "version").Return(mockResponse, nil)
mockServices.InfrastructureService.On("QueryServerVersion", ctx).Return(mockResponse, nil)
response, err := mockServices.InfrastructureService.QueryServerVersion(ctx)
if err != nil {
t.Errorf("QueryServerVersion() error = %v", err)
return
}
if response == nil {
t.Errorf("Expected a non-nil response")
return
}
if response.Key != "version" {
t.Errorf("Expected Key to be 'version', got %s", response.Key)
}
if response.Value != string(data) {
t.Errorf("Expected Value to be %s, got %s", string(data), response.Value)
}
})
}
func FuzzPodLog(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
request model.PodLog
r store.StateData
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
mockServices.InfrastructureService.
On("PodLog", targetStruct.request, targetStruct.r).
Return("LOGS SENT SUCCESSFULLY", nil)
response, err := mockServices.InfrastructureService.PodLog(targetStruct.request, targetStruct.r)
if err != nil {
t.Errorf("ChaosInfrastructure.PodLog() error = %v", err)
return
}
if response == "" {
t.Errorf("Returned environment is nil")
}
})
}
func FuzzKubeObj(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
request model.KubeObjectData
r store.StateData
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
mockServices.InfrastructureService.
On("KubeObj", targetStruct.request, targetStruct.r).
Return("KubeData sent successfully", nil)
response, err := mockServices.InfrastructureService.KubeObj(targetStruct.request, targetStruct.r)
if err != nil {
t.Errorf("ChaosInfrastructure.KubeObj() error = %v", err)
return
}
if response == "" {
t.Errorf("Returned environment is nil")
}
})
}
func FuzzSendInfraEvent(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
EventType string
EventName string
Description string
Infra model.Infra
R store.StateData
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
targetStruct.R.InfraEventPublish = make(map[string][]chan *model.InfraEventResponse)
projectID := targetStruct.Infra.ProjectID
if projectID != "" {
targetStruct.R.InfraEventPublish[projectID] = append(targetStruct.R.InfraEventPublish[projectID], make(chan *model.InfraEventResponse, 1))
}
mockServices.InfrastructureService.SendInfraEvent(targetStruct.EventType, targetStruct.EventName, targetStruct.Description, targetStruct.Infra, targetStruct.R)
if projectID != "" {
select {
case event := <-targetStruct.R.InfraEventPublish[projectID][0]:
if event == nil {
t.Errorf("Expected non-nil event")
}
if event.EventType != targetStruct.EventType {
t.Errorf("Expected EventType to be %s, got %s", targetStruct.EventType, event.EventType)
}
if event.EventName != targetStruct.EventName {
t.Errorf("Expected EventName to be %s, got %s", targetStruct.EventName, event.EventName)
}
if event.Description != targetStruct.Description {
t.Errorf("Expected Description to be %s, got %s", targetStruct.Description, event.Description)
}
if event.Infra != &targetStruct.Infra {
t.Errorf("Expected Infra to be %+v, got %+v", targetStruct.Infra, event.Infra)
}
default:
t.Errorf("Expected an event to be published")
}
}
})
}
func FuzzConfirmInfraRegistration(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
request model.InfraIdentity
r store.StateData
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
mockServices.InfrastructureService.
On("ConfirmInfraRegistration", targetStruct.request, targetStruct.r).
Return(&model.ConfirmInfraRegistrationResponse{
IsInfraConfirmed: true,
NewAccessKey: &targetStruct.request.AccessKey,
InfraID: &targetStruct.request.InfraID,
}, nil)
response, err := mockServices.InfrastructureService.ConfirmInfraRegistration(targetStruct.request, targetStruct.r)
if err != nil {
t.Errorf("ChaosInfrastructure.ConfirmInfraRegistration() error = %v", err)
return
}
if response == nil {
t.Errorf("Returned environment is nil")
}
})
}
func FuzzVerifyInfra(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
request := model.InfraIdentity{}
err := fuzzConsumer.GenerateStruct(&request)
if err != nil {
return
}
mockServices := NewMockServices()
expectedInfra := &dbChaosInfra.ChaosInfra{
InfraID: request.InfraID,
AccessKey: request.AccessKey,
IsRegistered: true,
Version: request.Version,
}
mockServices.InfrastructureService.
On("VerifyInfra", request).
Return(expectedInfra, nil)
response, err := mockServices.InfrastructureService.VerifyInfra(request)
if err != nil {
t.Errorf("ChaosInfrastructure.VerifyInfra() error = %v", err)
return
}
if response == nil {
t.Errorf("Returned environment is nil")
}
})
}
func FuzzUpdateInfra(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
targetStruct := &struct {
query bson.D
update bson.D
}{}
err := fuzzConsumer.GenerateStruct(targetStruct)
if err != nil {
return
}
mockServices := NewMockServices()
mockServices.InfrastructureService.
On("UpdateInfra", targetStruct.query, targetStruct.update).
Return(nil)
err = mockServices.InfrastructureService.UpdateInfra(targetStruct.query, targetStruct.update)
if err != nil {
t.Errorf("ChaosInfrastructure.UpdateInfra() error = %v", err)
return
}
})
}
func FuzzGetDBInfra(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
var infraID string
err := fuzzConsumer.GenerateStruct(&infraID)
if err != nil {
return
}
mockServices := NewMockServices()
expectedInfra := dbChaosInfra.ChaosInfra{
InfraID: infraID,
}
mockServices.InfrastructureService.
On("GetDBInfra", infraID).
Return(expectedInfra, nil)
response, err := mockServices.InfrastructureService.GetDBInfra(infraID)
if err != nil {
t.Errorf("ChaosInfrastructure.GetDBInfra() error = %v", err)
return
}
if response.InfraID != infraID {
t.Errorf("InfraID mismatch: got %v, want %v", response.InfraID, infraID)
}
})
}

View File

@ -88,6 +88,11 @@ func (s *InfraService) KubeObj(request model.KubeObjectData, r store.StateData)
return args.String(0), args.Error(1)
}
func (s *InfraService) KubeNamespace(request model.KubeNamespaceData, r store.StateData) (string, error) {
args := s.Called(request, r)
return args.String(0), args.Error(1)
}
func (s *InfraService) UpdateInfra(query bson.D, update bson.D) error {
args := s.Called(query, update)
return args.Error(0)

View File

@ -51,6 +51,7 @@ type Service interface {
GetVersionDetails() (*model.InfraVersionDetails, error)
QueryServerVersion(ctx context.Context) (*model.ServerVersionResponse, error)
PodLog(request model.PodLog, r store.StateData) (string, error)
KubeNamespace(request model.KubeNamespaceData, r store.StateData) (string, error)
KubeObj(request model.KubeObjectData, r store.StateData) (string, error)
UpdateInfra(query bson.D, update bson.D) error
GetDBInfra(infraID string) (dbChaosInfra.ChaosInfra, error)
@ -938,6 +939,9 @@ func fetchLatestVersion(versions map[int]string) int {
// updateVersionFormat converts a version string to an int by removing the decimal points: 1.0.0 is returned as 100, 0.1.0 as 10 and 0.0.1 as 1
func updateVersionFormat(str string) (int, error) {
if str == CIVersion {
return 0, nil
}
var versionInt int
versionSlice := strings.Split(str, ".")
for i, val := range versionSlice {
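Read as an algorithm, the comment above says each dot-separated component is scaled by a power of ten. A rough, self-contained sketch of that conversion (illustration only; the server implementation additionally special-cases CIVersion, as shown above):

package sketch

import (
    "math"
    "strconv"
    "strings"
)

// versionToInt mirrors the documented behaviour: "1.0.0" -> 100,
// "0.1.0" -> 10 and "0.0.1" -> 1.
func versionToInt(v string) (int, error) {
    parts := strings.Split(v, ".")
    total := 0
    for i, p := range parts {
        n, err := strconv.Atoi(p)
        if err != nil {
            return 0, err
        }
        total += n * int(math.Pow10(len(parts)-i-1))
    }
    return total, nil
}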
@ -991,7 +995,7 @@ func (in *infraService) KubeObj(request model.KubeObjectData, r store.StateData)
return "", err
}
if reqChan, ok := r.KubeObjectData[request.RequestID]; ok {
var kubeObjData []*model.KubeObject
var kubeObjData *model.KubeObject
err = json.Unmarshal([]byte(request.KubeObj), &kubeObjData)
if err != nil {
return "", fmt.Errorf("failed to unmarshal kubeObj data %w", err)
@ -1008,6 +1012,31 @@ func (in *infraService) KubeObj(request model.KubeObjectData, r store.StateData)
return "KubeData sent successfully", nil
}
// KubeNamespace receives Kubernetes Namespace data from subscriber
func (in *infraService) KubeNamespace(request model.KubeNamespaceData, r store.StateData) (string, error) {
_, err := in.VerifyInfra(*request.InfraID)
if err != nil {
log.Print("Error", err)
return "", err
}
if reqChan, ok := r.KubeNamespaceData[request.RequestID]; ok {
var kubeNamespaceData []*model.KubeNamespace
err = json.Unmarshal([]byte(request.KubeNamespace), &kubeNamespaceData)
if err != nil {
return "", fmt.Errorf("failed to unmarshal kubeNamespace data %w", err)
}
resp := model.KubeNamespaceResponse{
InfraID: request.InfraID.InfraID,
KubeNamespace: kubeNamespaceData,
}
reqChan <- &resp
close(reqChan)
return "KubeData sent successfully", nil
}
return "KubeData sent successfully", nil
}
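For context, the resolver that asked for this data is expected to have registered a channel under the request ID in the shared state store before contacting the subscriber; KubeNamespace above then pushes the response and closes the channel. A minimal sketch of such a consumer, using stand-in types because the real model/store packages are not part of this hunk:

package sketch

import (
    "sync"
    "time"
)

// Stand-ins for the real model/store types, for illustration only.
type KubeNamespace struct{ Name string }
type KubeNamespaceResponse struct {
    InfraID       string
    KubeNamespace []*KubeNamespace
}
type StateData struct {
    KubeNamespaceData map[string]chan *KubeNamespaceResponse
    Mutex             *sync.Mutex
}

// waitForNamespaces registers a response channel for requestID and blocks
// until the handler publishes on it or a timeout fires.
func waitForNamespaces(r *StateData, requestID string) *KubeNamespaceResponse {
    respChan := make(chan *KubeNamespaceResponse, 1)
    r.Mutex.Lock()
    r.KubeNamespaceData[requestID] = respChan
    r.Mutex.Unlock()

    select {
    case resp := <-respChan:
        return resp
    case <-time.After(30 * time.Second):
        return nil
    }
}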
// SendInfraEvent sends events from the infras to the appropriate users listening for the events
func (in *infraService) SendInfraEvent(eventType, eventName, description string, infra model.Infra, r store.StateData) {
newEvent := model.InfraEventResponse{
@ -1077,8 +1106,8 @@ func (in *infraService) VerifyInfra(identity model.InfraIdentity) (*dbChaosInfra
} else {
splitCPVersion := strings.Split(currentVersion, ".")
splitSubVersion := strings.Split(identity.Version, ".")
if len(splitSubVersion) != 3 || splitSubVersion[0] != splitCPVersion[0] || splitSubVersion[1] != splitCPVersion[1] {
return nil, fmt.Errorf("ERROR: infra VERSION MISMATCH (need %v.%v.x got %v)", splitCPVersion[0], splitCPVersion[1], identity.Version)
if len(splitSubVersion) != 3 || splitSubVersion[0] != splitCPVersion[0] {
return nil, fmt.Errorf("ERROR: infra VERSION MISMATCH (need %v.x.x got %v)", splitCPVersion[0], identity.Version)
}
}
infra, err := in.infraOperator.GetInfra(identity.InfraID)
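The relaxed check above requires only the major version to match, so a subscriber on, say, 3.1.x is now accepted by a 3.9.x control plane while 2.x is still rejected. A self-contained sketch of the same rule (names are illustrative, not the server's):

package sketch

import (
    "fmt"
    "strings"
)

// majorMatches mirrors the relaxed VerifyInfra rule: the subscriber version
// must have three components and share the control plane's major version.
func majorMatches(controlPlane, subscriber string) error {
    cp := strings.Split(controlPlane, ".")
    sub := strings.Split(subscriber, ".")
    if len(sub) != 3 || sub[0] != cp[0] {
        return fmt.Errorf("infra version mismatch (need %v.x.x got %v)", cp[0], subscriber)
    }
    return nil
}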

View File

@ -11,6 +11,10 @@ type KubeObjData struct {
Data []ObjectData `json:"data"`
}
type KubeNamespace struct {
Name string `json:"Name"`
}
type ObjectData struct {
Name string `json:"name"`
UID types.UID `json:"uid"`

View File

@ -0,0 +1,291 @@
package handler
import (
"archive/zip"
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"github.com/google/uuid"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph/model"
)
func FuzzGetChartsPath(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzConsumer := fuzz.NewConsumer(data)
chartsInput := model.CloningInput{}
err := fuzzConsumer.GenerateStruct(&chartsInput)
if err != nil {
return
}
projectID, _ := fuzzConsumer.GetString()
isDefault, _ := fuzzConsumer.GetBool()
result := GetChartsPath(chartsInput, projectID, isDefault)
if isDefault {
expected := DefaultPath + "default/" + chartsInput.Name + "/faults/"
if result != expected {
t.Errorf("Expected %s, got %s", expected, result)
}
} else {
expected := DefaultPath + projectID + "/" + chartsInput.Name + "/faults/"
if result != expected {
t.Errorf("Expected %s, got %s", expected, result)
}
}
})
}
func FuzzReadExperimentFile(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte, filename string) {
fuzzConsumer := fuzz.NewConsumer(data)
// Create a temporary directory
tmpDir, err := os.MkdirTemp("", "*-fuzztest")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir) // clean up
// Ensure the filename is valid and unique
safeFilename := filepath.Clean(filepath.Base(filename))
if isInvalidFilename(safeFilename) {
safeFilename = "test.yaml"
}
filePath := filepath.Join(tmpDir, safeFilename)
content := ChaosChart{}
err = fuzzConsumer.GenerateStruct(&content)
if err != nil {
return
}
jsonContent, _ := json.Marshal(content)
err = os.WriteFile(filePath, jsonContent, 0644)
if err != nil {
t.Fatal(err)
}
_, err = ReadExperimentFile(filePath)
if err != nil && !isInvalidYAML(jsonContent) {
t.Errorf("UnExpected error for valid YAML, got error: %v", err)
}
if err == nil && isInvalidYAML(jsonContent) {
t.Errorf("Expected error for invalid YAML, got nil")
}
_, err = ReadExperimentFile("./not_exist_file.yaml")
if err == nil {
t.Errorf("Expected error for file does not exist, got nil")
}
})
}
func FuzzGetExperimentData(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte, filename string) {
fuzzConsumer := fuzz.NewConsumer(data)
// Create a temporary directory
tmpDir, err := os.MkdirTemp("", "*-fuzztest")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir) // clean up
// Ensure the filename is valid and unique
safeFilename := filepath.Clean(filepath.Base(filename))
if isInvalidFilename(safeFilename) {
safeFilename = "test.yaml"
}
filePath := filepath.Join(tmpDir, safeFilename)
content := ChaosChart{}
err = fuzzConsumer.GenerateStruct(&content)
if err != nil {
return
}
jsonContent, _ := json.Marshal(content)
err = os.WriteFile(filePath, jsonContent, 0644)
if err != nil {
t.Fatal(err)
}
_, err = GetExperimentData(filePath)
if err != nil && !isInvalidYAML(jsonContent) && json.Valid(jsonContent) {
t.Errorf("UnExpected error for valid YAML, got error: %v", err)
}
if err == nil && isInvalidYAML(jsonContent) {
t.Errorf("Expected error for invalid YAML, got nil")
}
_, err = GetExperimentData("./not_exist_file.yaml")
if err == nil {
t.Errorf("Expected error for file does not exist, got nil")
}
})
}
func FuzzReadExperimentYAMLFile(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte, filename string) {
fuzzConsumer := fuzz.NewConsumer(data)
// Create a temporary directory
tmpDir, err := os.MkdirTemp("", "*-fuzztest")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir) // clean up
// Ensure the filename is valid and unique
safeFilename := filepath.Clean(filepath.Base(filename))
if isInvalidFilename(safeFilename) {
safeFilename = "test.yaml"
}
filePath := filepath.Join(tmpDir, safeFilename)
content := ChaosChart{}
err = fuzzConsumer.GenerateStruct(&content)
if err != nil {
return
}
jsonContent, _ := json.Marshal(content)
err = os.WriteFile(filePath, jsonContent, 0644)
if err != nil {
t.Fatal(err)
}
_, err = ReadExperimentYAMLFile(filePath)
if err != nil {
t.Errorf("UnExpected error for valid YAML, got error: %v", err)
}
_, err = ReadExperimentYAMLFile("./not_exist_file.yaml")
if err == nil {
t.Errorf("Expected error for file does not exist, got nil")
}
})
}
func FuzzUnzipRemoteHub(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte, filename string, projectID string) {
// Create a temporary directory
tmpDir, err := os.MkdirTemp("", "*-fuzztest")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir) // clean up
// Ensure the filename is valid and unique
safeFilename := filepath.Clean(filepath.Base(filename))
if isInvalidFilename(safeFilename) {
safeFilename = "test.zip"
}
if !strings.HasSuffix(safeFilename, ".zip") {
safeFilename += ".zip"
}
if isInvalidFilename(projectID) {
projectID = uuid.New().String()
}
filePath := filepath.Join(tmpDir, safeFilename)
// Create a valid zip file
err = createValidZipFile(filePath, data)
if err != nil {
t.Fatal(err)
}
err = UnzipRemoteHub(filePath, projectID)
if err != nil {
t.Errorf("UnExpected error for valid zip, got error: %v", err)
}
// Test with non-existent file
err = UnzipRemoteHub("./not_exist_file.zip", projectID)
if err == nil {
t.Errorf("Expected error for file does not exist, got nil")
}
// Test with non-zip file
nonZipPath := filepath.Join(tmpDir, "no_zip")
err = os.WriteFile(nonZipPath, []byte("not a zip file"), 0644)
if err != nil {
t.Fatal(err)
}
err = UnzipRemoteHub(nonZipPath, projectID)
if err == nil {
t.Errorf("Expected error for no zip, got nil")
}
})
}
func FuzzIsFileExisting(f *testing.F) {
f.Fuzz(func(t *testing.T, filename string) {
// Create a temporary directory
tmpDir, err := os.MkdirTemp("", "*-fuzztest")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir) // clean up
// Ensure the filename is valid and unique
safeFilename := filepath.Clean(filepath.Base(filename))
if isInvalidFilename(safeFilename) {
safeFilename = "test.yaml"
}
filePath := filepath.Join(tmpDir, safeFilename)
_, _ = os.Create(filePath)
result, err := IsFileExisting(filePath)
if err != nil {
t.Errorf("Unexpected error for existing file: %v", err)
}
if !result {
t.Errorf("Expected true for existing file, got false")
}
result, _ = IsFileExisting("./not_exist_file.yaml")
if result {
t.Errorf("Expected false for not existing file, got true")
}
})
}
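// isInvalidFilename rejects names the fuzz targets above cannot safely create in the temp directory: NUL bytes, empty or dot-only names, a bare path separator, or names longer than 255 characters.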
func isInvalidFilename(filename string) bool {
return strings.IndexByte(filename, 0) != -1 || filename == "" || filename == "." || filename == ".." || filename == "/" || len(filename) > 255
}
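// isInvalidYAML is a crude heuristic rather than a real YAML validator: any control character (or DEL) in the marshalled content is treated as data that should fail to parse.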
func isInvalidYAML(data []byte) bool {
for _, b := range data {
if b < 32 || b == 127 {
return true
}
}
return false
}
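// createValidZipFile writes a minimal zip archive containing a single test.txt entry holding the fuzzed bytes, so UnzipRemoteHub always receives a structurally valid archive.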
func createValidZipFile(filename string, data []byte) error {
zipFile, err := os.Create(filename)
if err != nil {
return err
}
defer zipFile.Close()
zipWriter := zip.NewWriter(zipFile)
defer zipWriter.Close()
f, err := zipWriter.Create("test.txt")
if err != nil {
return err
}
_, err = f.Write(data)
if err != nil {
return err
}
return nil
}

View File

@ -72,11 +72,6 @@ func TestReadExperimentFile(t *testing.T) {
filePath: "./temp1.yaml",
isError: true,
},
{
name: "failure: file is not a yaml",
filePath: "./types.go",
isError: true,
},
}
for _, tc := range testcases {
// when

View File

@ -6,6 +6,7 @@ func NewCloningInputFrom(chaosHub model.CreateChaosHubRequest) model.CloningInpu
return model.CloningInput{
RepoBranch: chaosHub.RepoBranch,
RepoURL: chaosHub.RepoURL,
RemoteHub: chaosHub.RemoteHub,
Name: chaosHub.Name,
IsPrivate: chaosHub.IsPrivate,
UserName: chaosHub.UserName,

View File

@ -9,6 +9,7 @@ import (
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/go-git/go-git/v5/plumbing/transport/ssh"
@ -276,6 +277,10 @@ func (c ChaosHubConfig) gitPullPrivateRepo() error {
// generateAuthMethod creates AuthMethod for private repos
func (c ChaosHubConfig) generateAuthMethod() (transport.AuthMethod, error) {
transport.UnsupportedCapabilities = []capability.Capability{
capability.ThinPack,
}
var auth transport.AuthMethod
if c.AuthType == model.AuthTypeToken {
auth = &http.BasicAuth{

View File

@ -86,6 +86,7 @@ func (c *chaosHubService) AddChaosHub(ctx context.Context, chaosHub model.Create
ProjectID: projectID,
RepoURL: chaosHub.RepoURL,
RepoBranch: chaosHub.RepoBranch,
RemoteHub: chaosHub.RemoteHub,
ResourceDetails: mongodb.ResourceDetails{
Name: chaosHub.Name,
Description: description,
@ -155,6 +156,7 @@ func (c *chaosHubService) AddRemoteChaosHub(ctx context.Context, chaosHub model.
ProjectID: projectID,
RepoURL: chaosHub.RepoURL,
RepoBranch: "",
RemoteHub: chaosHub.RemoteHub,
ResourceDetails: mongodb.ResourceDetails{
Name: chaosHub.Name,
Description: description,
@ -226,6 +228,7 @@ func (c *chaosHubService) SaveChaosHub(ctx context.Context, chaosHub model.Creat
ProjectID: projectID,
RepoURL: chaosHub.RepoURL,
RepoBranch: chaosHub.RepoBranch,
RemoteHub: chaosHub.RemoteHub,
ResourceDetails: mongodb.ResourceDetails{
Name: chaosHub.Name,
Description: description,
@ -273,6 +276,7 @@ func (c *chaosHubService) SyncChaosHub(ctx context.Context, hubID string, projec
Name: chaosHub.Name,
RepoURL: chaosHub.RepoURL,
RepoBranch: chaosHub.RepoBranch,
RemoteHub: chaosHub.RemoteHub,
IsPrivate: chaosHub.IsPrivate,
UserName: chaosHub.UserName,
Password: chaosHub.Password,
@ -311,6 +315,7 @@ func (c *chaosHubService) UpdateChaosHub(ctx context.Context, chaosHub model.Upd
cloneHub := model.CloningInput{
RepoBranch: chaosHub.RepoBranch,
RepoURL: chaosHub.RepoURL,
RemoteHub: chaosHub.RemoteHub,
Name: chaosHub.Name,
IsPrivate: chaosHub.IsPrivate,
UserName: chaosHub.UserName,
@ -326,10 +331,11 @@ func (c *chaosHubService) UpdateChaosHub(ctx context.Context, chaosHub model.Upd
}
clonePath := DefaultPath + prevChaosHub.ProjectID + "/" + prevChaosHub.Name
if prevChaosHub.HubType == string(model.HubTypeRemote) {
if prevChaosHub.Name != chaosHub.Name || prevChaosHub.RepoURL != chaosHub.RepoURL {
if prevChaosHub.Name != chaosHub.Name || prevChaosHub.RepoURL != chaosHub.RepoURL || prevChaosHub.RemoteHub != chaosHub.RemoteHub {
remoteHub := model.CreateRemoteChaosHub{
Name: chaosHub.Name,
RepoURL: chaosHub.RepoURL,
Name: chaosHub.Name,
RepoURL: chaosHub.RepoURL,
RemoteHub: chaosHub.RemoteHub,
}
err = os.RemoveAll(clonePath)
if err != nil {
@ -342,7 +348,7 @@ func (c *chaosHubService) UpdateChaosHub(ctx context.Context, chaosHub model.Upd
}
} else {
// Syncing/Cloning the repository at a path from ChaosHub link structure.
if prevChaosHub.Name != chaosHub.Name || prevChaosHub.RepoURL != chaosHub.RepoURL || prevChaosHub.RepoBranch != chaosHub.RepoBranch || prevChaosHub.IsPrivate != chaosHub.IsPrivate || prevChaosHub.AuthType != chaosHub.AuthType.String() {
if prevChaosHub.Name != chaosHub.Name || prevChaosHub.RepoURL != chaosHub.RepoURL || prevChaosHub.RepoBranch != chaosHub.RepoBranch || prevChaosHub.IsPrivate != chaosHub.IsPrivate || prevChaosHub.AuthType != chaosHub.AuthType.String() || prevChaosHub.RemoteHub != chaosHub.RemoteHub {
err = os.RemoveAll(clonePath)
if err != nil {
return nil, err
@ -368,6 +374,7 @@ func (c *chaosHubService) UpdateChaosHub(ctx context.Context, chaosHub model.Upd
{"$set", bson.D{
{"repo_url", chaosHub.RepoURL},
{"repo_branch", chaosHub.RepoBranch},
{"remote_hub", chaosHub.RemoteHub},
{"name", chaosHub.Name},
{"description", chaosHub.Description},
{"tags", chaosHub.Tags},
@ -454,6 +461,7 @@ func (c *chaosHubService) ListChaosFaults(ctx context.Context, hubID string, pro
Name: hub.Name,
RepoURL: hub.RepoURL,
RepoBranch: hub.RepoBranch,
RemoteHub: hub.RemoteHub,
}
ChartsPath := handler.GetChartsPath(chartsInput, projectID, hub.IsDefault)
@ -516,6 +524,7 @@ func (c *chaosHubService) ListChaosHubs(ctx context.Context, projectID string, r
},
RepoURL: defaultHub.RepoURL,
RepoBranch: defaultHub.RepoBranch,
RemoteHub: defaultHub.RemoteHub,
IsDefault: true,
}
@ -651,6 +660,7 @@ func (c *chaosHubService) ListChaosHubs(ctx context.Context, projectID string, r
UpdatedAt: strconv.Itoa(int(hub.UpdatedAt)),
CreatedBy: &model.UserDetails{Username: hub.CreatedBy.Username},
UpdatedBy: &model.UserDetails{Username: hub.UpdatedBy.Username},
RemoteHub: hub.RemoteHub,
}
hubDetails = append(hubDetails, hubDetail)
}
@ -711,6 +721,7 @@ func (c *chaosHubService) GetChaosHub(ctx context.Context, chaosHubID string, pr
UpdatedAt: strconv.Itoa(int(hub.UpdatedAt)),
CreatedBy: &model.UserDetails{Username: hub.CreatedBy.Username},
UpdatedBy: &model.UserDetails{Username: hub.UpdatedBy.Username},
RemoteHub: hub.RemoteHub,
}
return hubDetail, nil
@ -762,6 +773,7 @@ func (c *chaosHubService) getChaosHubDetails(ctx context.Context, hubID string,
ProjectID: hub.ProjectID,
RepoURL: hub.RepoURL,
RepoBranch: hub.RepoBranch,
RemoteHub: hub.RemoteHub,
AuthType: model.AuthType(hub.AuthType),
Name: hub.Name,
CreatedAt: strconv.Itoa(int(hub.CreatedAt)),
@ -879,6 +891,7 @@ func (c *chaosHubService) RecurringHubSync() {
Name: chaosHub.Name,
RepoURL: chaosHub.RepoURL,
RepoBranch: chaosHub.RepoBranch,
RemoteHub: chaosHub.RemoteHub,
IsPrivate: chaosHub.IsPrivate,
AuthType: chaosHub.AuthType,
Token: chaosHub.Token,

View File

@ -13,6 +13,7 @@ type StateData struct {
ExperimentEventPublish map[string][]chan *model.ExperimentRun
ExperimentLog map[string]chan *model.PodLogResponse
KubeObjectData map[string]chan *model.KubeObjectResponse
KubeNamespaceData map[string]chan *model.KubeNamespaceResponse
Mutex *sync.Mutex
}
@ -23,6 +24,7 @@ func NewStore() *StateData {
ExperimentEventPublish: make(map[string][]chan *model.ExperimentRun),
ExperimentLog: make(map[string]chan *model.PodLogResponse),
KubeObjectData: make(map[string]chan *model.KubeObjectResponse),
KubeNamespaceData: make(map[string]chan *model.KubeNamespaceResponse),
Mutex: &sync.Mutex{},
}
}

View File

@ -15,6 +15,7 @@ type ChaosHub struct {
mongodb.Audit `bson:",inline"`
RepoURL string `bson:"repo_url"`
RepoBranch string `bson:"repo_branch"`
RemoteHub string `bson:"remote_hub"`
IsPrivate bool `bson:"is_private"`
AuthType string `bson:"auth_type"`
HubType string `bson:"hub_type"`
@ -34,6 +35,7 @@ func (c *ChaosHub) GetOutputChaosHub() *model.ChaosHub {
ProjectID: c.ProjectID,
RepoURL: c.RepoURL,
RepoBranch: c.RepoBranch,
RemoteHub: c.RemoteHub,
Name: c.Name,
Description: &c.Description,
Tags: c.Tags,

View File

@ -1,46 +1,39 @@
package main
import (
"regexp"
"strconv"
"google.golang.org/grpc/credentials"
"github.com/gin-gonic/gin"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/api/middleware"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaoshub"
handler2 "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaoshub/handler"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb"
dbSchemaChaosHub "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_hub"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/projects"
"github.com/prometheus/client_golang/prometheus/promhttp"
"context"
"fmt"
"net"
"net/http"
"regexp"
"runtime"
"strconv"
"time"
"github.com/kelseyhightower/envconfig"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/authorization"
"github.com/99designs/gqlgen/graphql/handler/extension"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/utils"
"github.com/99designs/gqlgen/graphql/handler"
"github.com/99designs/gqlgen/graphql/handler/extension"
"github.com/99designs/gqlgen/graphql/handler/transport"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph/generated"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/config"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/handlers"
pb "github.com/litmuschaos/litmus/chaoscenter/graphql/server/protos"
"github.com/sirupsen/logrus"
"github.com/kelseyhightower/envconfig"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/api/middleware"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/graph/generated"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/authorization"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaoshub"
handler2 "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/chaoshub/handler"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb"
dbSchemaChaosHub "github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/chaos_hub"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/database/mongodb/config"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/handlers"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/pkg/projects"
"github.com/prometheus/client_golang/prometheus/promhttp"
pb "github.com/litmuschaos/litmus/chaoscenter/graphql/server/protos"
"github.com/litmuschaos/litmus/chaoscenter/graphql/server/utils"
)
func init() {
@ -53,7 +46,6 @@ func init() {
if err != nil {
log.Fatal(err)
}
}
func validateVersion() error {
@ -108,15 +100,14 @@ func main() {
enableHTTPSConnection, err := strconv.ParseBool(utils.Config.EnableInternalTls)
if err != nil {
logrus.Errorf("unable to parse boolean value %v", err)
log.Errorf("unable to parse boolean value %v", err)
}
if enableHTTPSConnection {
if utils.Config.TlsCertPath != "" && utils.Config.TlsKeyPath != "" {
go startGRPCServerWithTLS(mongodbOperator) // start GRPC serve
} else {
if utils.Config.TlsCertPath == "" || utils.Config.TlsKeyPath == "" {
log.Fatalf("Failure to start chaoscenter authentication REST server due to empty TLS cert file path and TLS key path")
}
go startGRPCServerWithTLS(mongodbOperator) // start GRPC serve
} else {
go startGRPCServer(utils.Config.GrpcPort, mongodbOperator) // start GRPC serve
}
@ -146,7 +137,8 @@ func main() {
enableIntrospection, err := strconv.ParseBool(utils.Config.EnableGQLIntrospection)
if err != nil {
log.Errorf("unable to parse boolean value %v", err)
} else if err == nil && enableIntrospection == true {
}
if enableIntrospection {
srv.Use(extension.Introspection{})
}
@ -172,28 +164,26 @@ func main() {
go projects.ProjectEvents(projectEventChannel, mongodb.MgoClient, mongodbOperator)
if enableHTTPSConnection {
if utils.Config.TlsCertPath != "" && utils.Config.TlsKeyPath != "" {
log.Infof("graphql server running at https://localhost:%s", utils.Config.RestPort)
// configuring TLS config based on provided certificates & keys
conf := utils.GetTlsConfig(utils.Config.TlsCertPath, utils.Config.TlsKeyPath, true)
server := http.Server{
Addr: ":" + utils.Config.RestPort,
Handler: router,
TLSConfig: conf,
}
err := server.ListenAndServeTLS("", "")
if err != nil {
log.Fatalf("Failure to start litmus-portal graphql REST server due to %v", err)
}
} else {
if utils.Config.TlsCertPath == "" || utils.Config.TlsKeyPath == "" {
log.Fatalf("Failure to start chaoscenter authentication GRPC server due to empty TLS cert file path and TLS key path")
}
log.Infof("graphql server running at https://localhost:%s", utils.Config.RestPort)
// configuring TLS config based on provided certificates & keys
conf := utils.GetTlsConfig(utils.Config.TlsCertPath, utils.Config.TlsKeyPath, true)
server := http.Server{
Addr: ":" + utils.Config.RestPort,
Handler: router,
TLSConfig: conf,
}
if err := server.ListenAndServeTLS("", ""); err != nil {
log.Fatalf("Failure to start litmus-portal graphql REST server due to %v", err)
}
} else {
log.Infof("graphql server running at http://localhost:%s", utils.Config.RestPort)
log.Fatal(http.ListenAndServe(":"+utils.Config.RestPort, router))
}
}
// startGRPCServer initializes, registers services to and starts the gRPC server for RPC calls

View File

@ -18,10 +18,13 @@ type SubscriberK8s interface {
CreatePodLog(podLog types.PodLogRequest) (types.PodLog, error)
SendPodLogs(infraData map[string]string, podLog types.PodLogRequest)
GenerateLogPayload(cid, accessKey, version string, podLog types.PodLogRequest) ([]byte, error)
GetKubernetesObjects(request types.KubeObjRequest) ([]*types.KubeObject, error)
GetKubernetesNamespaces(request types.KubeNamespaceRequest) ([]*types.KubeNamespace, error)
GetKubernetesObjects(request types.KubeObjRequest) (*types.KubeObject, error)
GetObjectDataByNamespace(namespace string, dynamicClient dynamic.Interface, resourceType schema.GroupVersionResource) ([]types.ObjectData, error)
GenerateKubeObject(cid string, accessKey, version string, kubeobjectrequest types.KubeObjRequest) ([]byte, error)
GenerateKubeNamespace(cid string, accessKey, version string, kubenamespacerequest types.KubeNamespaceRequest) ([]byte, error)
SendKubeObjects(infraData map[string]string, kubeobjectrequest types.KubeObjRequest) error
SendKubeNamespaces(infraData map[string]string, kubenamespacerequest types.KubeNamespaceRequest) error
CheckComponentStatus(componentEnv string) error
IsAgentConfirmed() (bool, string, error)
AgentRegister(accessKey string) (bool, error)

View File

@ -23,17 +23,51 @@ var (
InfraScope = os.Getenv("INFRA_SCOPE")
)
// GetKubernetesObjects is used to get the Kubernetes Object details according to the request type
func (k8s *k8sSubscriber) GetKubernetesObjects(request types.KubeObjRequest) ([]*types.KubeObject, error) {
conf, err := k8s.GetKubeConfig()
if err != nil {
return nil, err
}
clientset, err := kubernetes.NewForConfig(conf)
if err != nil {
return nil, err
}
// GetKubernetesNamespaces is used to get the list of Kubernetes Namespaces
func (k8s *k8sSubscriber) GetKubernetesNamespaces(request types.KubeNamespaceRequest) ([]*types.KubeNamespace, error) {
var namespaceData []*types.KubeNamespace
if strings.ToLower(InfraScope) == "namespace" {
// In case of namespace scope, only one namespace is available
KubeNamespace := &types.KubeNamespace{
Name: InfraNamespace,
}
namespaceData = append(namespaceData, KubeNamespace)
} else {
// In case of cluster scope, get all the namespaces
conf, err := k8s.GetKubeConfig()
if err != nil {
return nil, err
}
clientset, err := kubernetes.NewForConfig(conf)
if err != nil {
return nil, err
}
namespace, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
if len(namespace.Items) > 0 {
for _, namespace := range namespace.Items {
KubeNamespace := &types.KubeNamespace{
Name: namespace.GetName(),
}
namespaceData = append(namespaceData, KubeNamespace)
}
} else {
return nil, errors.New("No namespace available")
}
}
//TODO Maybe add marshal/unmarshal here
return namespaceData, nil
}
// GetKubernetesObjects is used to get the Kubernetes Object details according to the request type
func (k8s *k8sSubscriber) GetKubernetesObjects(request types.KubeObjRequest) (*types.KubeObject, error) {
resourceType := schema.GroupVersionResource{
Group: request.KubeGVRRequest.Group,
Version: request.KubeGVRRequest.Version,
@ -43,43 +77,18 @@ func (k8s *k8sSubscriber) GetKubernetesObjects(request types.KubeObjRequest) ([]
if err != nil {
return nil, err
}
var ObjData []*types.KubeObject
if strings.ToLower(InfraScope) == "namespace" {
dataList, err := k8s.GetObjectDataByNamespace(InfraNamespace, dynamicClient, resourceType)
if err != nil {
return nil, err
}
KubeObj := &types.KubeObject{
Namespace: InfraNamespace,
Data: dataList,
}
ObjData = append(ObjData, KubeObj)
} else {
namespace, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
if len(namespace.Items) > 0 {
for _, namespace := range namespace.Items {
podList, err := k8s.GetObjectDataByNamespace(namespace.GetName(), dynamicClient, resourceType)
if err != nil {
return nil, err
}
KubeObj := &types.KubeObject{
Namespace: namespace.GetName(),
Data: podList,
}
ObjData = append(ObjData, KubeObj)
}
} else {
return nil, errors.New("No namespace available")
}
dataList, err := k8s.GetObjectDataByNamespace(request.Namespace, dynamicClient, resourceType)
if err != nil {
return nil, err
}
kubeData, _ := json.Marshal(ObjData)
var kubeObjects []*types.KubeObject
KubeObj := &types.KubeObject{
Namespace: InfraNamespace,
Data: dataList,
}
kubeData, _ := json.Marshal(KubeObj)
var kubeObjects *types.KubeObject
err = json.Unmarshal(kubeData, &kubeObjects)
if err != nil {
return nil, err
@ -118,6 +127,22 @@ func (k8s *k8sSubscriber) updateLabels(labels map[string]string) []string {
return updatedLabels
}
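// GenerateKubeNamespace marshals the namespace list from GetKubernetesNamespaces and wraps it in the kubeNamespace GraphQL mutation payload that is reported back to the control plane.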
func (k8s *k8sSubscriber) GenerateKubeNamespace(cid string, accessKey, version string, kubenamespacerequest types.KubeNamespaceRequest) ([]byte, error) {
infraID := `{infraID: \"` + cid + `\", version: \"` + version + `\", accessKey: \"` + accessKey + `\"}`
kubeObj, err := k8s.GetKubernetesNamespaces(kubenamespacerequest)
if err != nil {
return nil, err
}
processed, err := k8s.gqlSubscriberServer.MarshalGQLData(kubeObj)
if err != nil {
return nil, err
}
mutation := `{ infraID: ` + infraID + `, requestID:\"` + kubenamespacerequest.RequestID + `\", kubeNamespace:\"` + processed[1:len(processed)-1] + `\"}`
var payload = []byte(`{"query":"mutation { kubeNamespace(request:` + mutation + ` )}"}`)
return payload, nil
}
func (k8s *k8sSubscriber) GenerateKubeObject(cid string, accessKey, version string, kubeobjectrequest types.KubeObjRequest) ([]byte, error) {
infraID := `{infraID: \"` + cid + `\", version: \"` + version + `\", accessKey: \"` + accessKey + `\"}`
kubeObj, err := k8s.GetKubernetesObjects(kubeobjectrequest)
@ -134,6 +159,25 @@ func (k8s *k8sSubscriber) GenerateKubeObject(cid string, accessKey, version stri
return payload, nil
}
// SendKubeNamespaces generates a GraphQL mutation to send Kubernetes namespace data to the GraphQL server
func (k8s *k8sSubscriber) SendKubeNamespaces(infraData map[string]string, kubenamespacerequest types.KubeNamespaceRequest) error {
// generate graphql payload
payload, err := k8s.GenerateKubeNamespace(infraData["INFRA_ID"], infraData["ACCESS_KEY"], infraData["VERSION"], kubenamespacerequest)
if err != nil {
logrus.WithError(err).Print("Error while getting KubeObject Data")
return err
}
body, err := k8s.gqlSubscriberServer.SendRequest(infraData["SERVER_ADDR"], payload)
if err != nil {
logrus.Print(err.Error())
return err
}
logrus.Println("Response", body)
return nil
}
// SendKubeObjects generates graphql mutation to send kubernetes objects data to graphql server
func (k8s *k8sSubscriber) SendKubeObjects(infraData map[string]string, kubeobjectrequest types.KubeObjRequest) error {
// generate graphql payload

View File

@ -69,7 +69,7 @@ func (req *subscriberRequests) AgentConnect(infraData map[string]string) {
for {
_, message, err := c.ReadMessage()
if err != nil {
logrus.WithError(err).Fatal("Failed to read message")
logrus.WithError(err).Panic("Failed to read message")
}
var r types.RawData
@ -87,6 +87,7 @@ func (req *subscriberRequests) AgentConnect(infraData map[string]string) {
}
if r.Payload.Errors != nil {
logrus.Error("Error response from the server : ", string(message))
panicWhen("ALREADY CONNECTED", message)
continue
}
@ -97,6 +98,12 @@ func (req *subscriberRequests) AgentConnect(infraData map[string]string) {
}
}
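// panicWhen panics when the server response contains the given error string (for example "ALREADY CONNECTED"), letting the subscriber process restart and re-establish its connection.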
func panicWhen(errorMessage string, message []byte) {
if strings.Contains(string(message), errorMessage) {
logrus.Panic("Server error: ", errorMessage)
}
}
func (req *subscriberRequests) RequestProcessor(infraData map[string]string, r types.RawData) error {
if strings.Index("kubeobject kubeobjects", strings.ToLower(r.Payload.Data.InfraConnect.Action.RequestType)) >= 0 {
KubeObjRequest := types.KubeObjRequest{
@ -113,6 +120,21 @@ func (req *subscriberRequests) RequestProcessor(infraData map[string]string, r t
return errors.New("error getting kubernetes object data: " + err.Error())
}
}
if strings.Index("kubenamespace kubenamespaces", strings.ToLower(r.Payload.Data.InfraConnect.Action.RequestType)) >= 0 {
KubeNamespaceRequest := types.KubeNamespaceRequest{
RequestID: r.Payload.Data.InfraConnect.Action.RequestID,
}
err := json.Unmarshal([]byte(r.Payload.Data.InfraConnect.Action.ExternalData), &KubeNamespaceRequest)
if err != nil {
return errors.New("failed to json unmarshal: " + err.Error())
}
err = req.subscriberK8s.SendKubeNamespaces(infraData, KubeNamespaceRequest)
if err != nil {
return errors.New("error getting kubernetes namespace data: " + err.Error())
}
}
if strings.ToLower(r.Payload.Data.InfraConnect.Action.RequestType) == "logs" {
podRequest := types.PodLogRequest{
RequestID: r.Payload.Data.InfraConnect.Action.RequestID,

View File

@ -14,6 +14,7 @@ type KubeObject struct {
type KubeObjRequest struct {
RequestID string
InfraID string `json:"infraID"`
Namespace string `json:"namespace"`
ObjectType string `json:"objectType"`
KubeGVRRequest KubeGVRRequest `json:"kubeObjRequest"`
}
@ -24,6 +25,16 @@ type KubeGVRRequest struct {
Resource string `json:"resource"`
}
// KubeNamespace currently carries only the namespace name; additional fields may be added in the future
type KubeNamespace struct {
Name string `json:"name"`
}
type KubeNamespaceRequest struct {
RequestID string
InfraID string `json:"infraID"`
}
type ObjectData struct {
Name string `json:"name"`
UID types.UID `json:"uid"`

View File

@ -1,4 +1,4 @@
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10
RUN microdnf module enable nginx:1.20
RUN microdnf install nginx
RUN microdnf update --refresh --best --noplugins --setopt=install_weak_deps=0

View File

@ -12,13 +12,14 @@ export interface KubeObjRequest {
infraID: string;
objectType: string;
kubeObjRequest?: KubeGVRRequest;
namespace: string;
};
}
export interface KubeObjResponse {
getKubeObject: {
infraID: string;
kubeObj: Array<KubeObj>;
kubeObj: KubeObj;
};
}
@ -32,13 +33,30 @@ interface KubeObjData {
name: string;
}
export function kubeObjectSubscription({
interface KubeNamespace {
name: string;
}
export interface KubeNamespaceRequest {
request: {
infraID: string;
};
}
export interface KubeNamespaceResponse {
getKubeNamespace: {
infraID: string;
kubeNamespace: Array<KubeNamespace>;
};
}
export const kubeObjectSubscription = ({
request,
...options
}: GqlAPISubscriptionRequest<KubeObjResponse, KubeObjRequest>): GqlAPISubscriptionResponse<
KubeObjResponse,
KubeObjRequest
> {
> => {
const { data, loading, error } = useSubscription<KubeObjResponse, KubeObjRequest>(
gql`
subscription getKubeObject($request: KubeObjectRequest!) {
@ -59,6 +77,7 @@ export function kubeObjectSubscription({
request: {
infraID: request.infraID,
kubeObjRequest: request.kubeObjRequest,
namespace: request.namespace,
objectType: request.objectType
}
},
@ -67,4 +86,35 @@ export function kubeObjectSubscription({
);
return { data, loading, error };
}
};
export const kubeNamespaceSubscription = ({
request,
...options
}: GqlAPISubscriptionRequest<KubeNamespaceResponse, KubeNamespaceRequest>): GqlAPISubscriptionResponse<
KubeNamespaceResponse,
KubeNamespaceRequest
> => {
const { data, loading, error } = useSubscription<KubeNamespaceResponse, KubeNamespaceRequest>(
gql`
subscription getKubeNamespace($request: KubeNamespaceRequest!) {
getKubeNamespace(request: $request) {
infraID
kubeNamespace {
name
}
}
}
`,
{
variables: {
request: {
infraID: request.infraID
}
},
...options
}
);
return { data, loading, error };
};

View File

@ -23,3 +23,9 @@ export interface EnvironmentSortInput {
field: SortType;
ascending: boolean;
}
export interface EnvironmentDetail {
envName: string;
envID: string;
totalInfra?: number | null;
}

View File

@ -10,7 +10,6 @@ import { useStrings } from '@strings';
import ProjectDashboardCardMenuController from '@controllers/ProjectDashboardCardMenu';
import { setUserDetails, toSentenceCase } from '@utils';
import { useAppStore } from '@context';
import { useRouteWithBaseUrl } from '@hooks';
import css from './ProjectDashboardCardContainer.module.scss';
interface ProjectDashboardCardProps {
@ -25,16 +24,16 @@ export default function ProjectDashboardCardContainer(props: ProjectDashboardCar
const [projectIdToDelete, setProjectIdToDelete] = useState<string>();
const { getString } = useStrings();
const history = useHistory();
const { updateAppStore } = useAppStore();
const paths = useRouteWithBaseUrl();
const { updateAppStore, currentUserInfo } = useAppStore();
const handleProjectSelect = (project: Project): void => {
const projectRole = project.members?.find(member => member.userID === currentUserInfo?.ID)?.role;
updateAppStore({ projectID: project.projectID, projectName: project.name });
setUserDetails({
projectRole,
projectID: project.projectID
});
history.push(paths.toRoot());
history.replace(`/`);
};
return (

View File

@ -6,7 +6,7 @@ export const USERNAME_REGEX = /^[a-zA-Z][a-zA-Z0-9_-]{2,15}$/;
// ^(?=.*[a-z]) # At least one lowercase letter
// (?=.*[A-Z]) # At least one uppercase letter
// (?=.*\d) # At least one digit
// (?=.*[@$!%*?_&]) # At least one special character @$!%*?_&
// [A-Za-z\d@$!%*?_&] # Allowed characters: letters, digits, special characters @$!%*?_&
// (?=.*[@$!%*?_&#]) # At least one special character @$!%*?_&#
// [A-Za-z\d@$!%*?_&#] # Allowed characters: letters, digits, special characters @$!%*?_&#
// {8,16}$ # Length between 8 to 16 characters
export const PASSWORD_REGEX = /^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*?_&])[A-Za-z\d@$!%*?_&]{8,16}$/;
export const PASSWORD_REGEX = /^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*?_&#])[A-Za-z\d@$!%*?_&#]{8,16}$/;

View File

@ -64,8 +64,9 @@ const EnvironmentController: React.FC = () => {
itemCount: totalEnvironments ?? 0,
pageCount: totalEnvironments ? Math.ceil(totalEnvironments / limit) : 1,
pageIndex: page,
pageSizeOptions: [...new Set([15, 30, limit])].sort(),
pageSizeOptions: [...new Set([5, 10, 15, 30, limit])].sort(),
pageSize: limit,
showPagination: true,
onPageSizeChange: event => setLimit(event)
}
};

View File

@ -3,31 +3,55 @@ import React from 'react';
import { listChaosInfra } from '@api/core';
import { getScope } from '@utils';
import ChaosInfrastructureReferenceFieldView from '@views/ChaosInfrastructureReferenceField';
import type { ChaosInfrastructureReferenceFieldProps } from '@models';
import { AllEnv, type ChaosInfrastructureReferenceFieldProps } from '@models';
import type { InfrastructureDetails } from '@views/ChaosInfrastructureReferenceField/ChaosInfrastructureReferenceField';
import { listEnvironment } from '@api/core/environments';
function KubernetesChaosInfrastructureReferenceFieldController({
setFieldValue,
initialInfrastructureID
initialInfrastructureID,
initialEnvironmentID
}: ChaosInfrastructureReferenceFieldProps): React.ReactElement {
const scope = getScope();
const { showError } = useToaster();
const [searchInfrastructure, setSearchInfrastructure] = React.useState<string>('');
const [page, setPage] = React.useState<number>(0);
const limit = 8;
const [limit, setLimit] = React.useState<number>(5);
const [envID, setEnvID] = React.useState<string>(AllEnv.AllEnv);
const [initialAllInfrastructureLength, setInitialAllInfrastructureLength] = React.useState<number>(0);
const { data: listChaosInfraData, loading: listChaosInfraLoading } = listChaosInfra({
...scope,
filter: { name: searchInfrastructure, isActive: true },
environmentIDs: envID === AllEnv.AllEnv ? undefined : [envID],
filter: { name: searchInfrastructure },
pagination: { page, limit },
options: { onError: error => showError(error.message) }
});
const { data: listEnvironmentData } = listEnvironment({
...scope,
options: {
onError: err => showError(err.message)
}
});
const environmentList = listEnvironmentData?.listEnvironments?.environments;
React.useEffect(() => {
if (envID === AllEnv.AllEnv) {
setInitialAllInfrastructureLength(listChaosInfraData?.listInfras.totalNoOfInfras || 0);
}
}, [listChaosInfraData]);
const preSelectedEnvironment = listEnvironmentData?.listEnvironments?.environments?.find(
({ environmentID }) => environmentID === initialEnvironmentID
);
// TODO: replace with get API as this becomes empty during edit
const preSelectedInfrastructure = listChaosInfraData?.listInfras.infras.find(
({ infraID }) => infraID === initialInfrastructureID
);
const preSelectedInfrastructureDetails: InfrastructureDetails | undefined = preSelectedInfrastructure && {
id: preSelectedInfrastructure?.infraID,
name: preSelectedInfrastructure?.name,
@ -38,6 +62,16 @@ function KubernetesChaosInfrastructureReferenceFieldController({
environmentID: preSelectedInfrastructure?.environmentID
};
React.useEffect(() => {
setPage(0);
}, [envID]);
React.useEffect(() => {
if (preSelectedEnvironment) {
setEnvID(preSelectedEnvironment?.environmentID);
}
}, [preSelectedEnvironment, setFieldValue]);
React.useEffect(() => {
if (preSelectedInfrastructure) {
setFieldValue('chaosInfrastructure.id', preSelectedInfrastructure.infraID, true);
@ -69,7 +103,10 @@ function KubernetesChaosInfrastructureReferenceFieldController({
pageSize={limit}
pageCount={Math.ceil(totalNoOfInfras / limit)}
pageIndex={page}
gotoPage={pageNumber => setPage(pageNumber)}
gotoPage={setPage}
showPagination={true}
pageSizeOptions={[5, 10, 15]}
onPageSizeChange={setLimit}
/>
);
};
@ -87,6 +124,10 @@ function KubernetesChaosInfrastructureReferenceFieldController({
}}
searchInfrastructure={searchInfrastructure}
setSearchInfrastructure={setSearchInfrastructure}
allInfrastructureLength={initialAllInfrastructureLength}
environmentList={environmentList}
envID={envID}
setEnvID={setEnvID}
loading={{
listChaosInfra: listChaosInfraLoading
}}

View File

@ -22,7 +22,7 @@ export default function KubernetesChaosInfrastructureUpgradeController({
...scope,
options: {
skip: !isUpgradeAvailable,
onError: err => showError(err)
onError: err => showError(err.message)
}
});

View File

@ -1,5 +1,5 @@
import React from 'react';
import { KubeGVRRequest, kubeObjectSubscription } from '@api/core';
import { KubeGVRRequest, kubeObjectSubscription, kubeNamespaceSubscription } from '@api/core';
import type { ChaosEngine, FaultData } from '@models';
import { TargetApplicationTab } from '@views/ExperimentCreationFaultConfiguration/Tabs';
import type { AppInfoData, TargetApplicationData } from './types';
@ -16,17 +16,26 @@ export default function TargetApplicationTabController({
infrastructureID,
setFaultData
}: TargetApplicationControllerProps): React.ReactElement {
const [appInfoData, setAppInfoData] = React.useState<AppInfoData[]>([]);
const [namespaceData, setNamespaceData] = React.useState<string[]>([]);
const [appInfoData, setAppInfoData] = React.useState<AppInfoData>({ appLabel: [] });
const [targetApp, setTargetApp] = React.useState<TargetApplicationData>({
...engineCR?.spec?.appinfo
});
const [selectedGVR, setSelectedGVR] = React.useState<KubeGVRRequest>();
const { data: result, loading } = kubeObjectSubscription({
const { data: resultNamespace, loading: loadingNamespace } = kubeNamespaceSubscription({
request: {
infraID: infrastructureID ?? ''
},
shouldResubscribe: true,
skip: targetApp?.appkind === undefined || selectedGVR === undefined,
skip: targetApp?.appkind === undefined || selectedGVR === undefined
});
const { data: resultObject, loading: loadingObject } = kubeObjectSubscription({
shouldResubscribe: true,
skip: targetApp?.appns === undefined || targetApp?.appns === '',
request: {
infraID: infrastructureID ?? '',
kubeObjRequest: selectedGVR,
namespace: targetApp?.appns ?? '',
objectType: 'kubeobject'
}
});
@ -37,51 +46,44 @@ export default function TargetApplicationTabController({
if (data.resource === targetApp?.appkind) {
setSelectedGVR({
group: data.group,
version: data.version,
resource: `${data.resource}s`
resource: `${data.resource}s`,
version: data.version
});
}
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [targetApp?.appkind]);
/**
* UseEffect to filter the labels according to the namespace provided
* Required to populate the appLabels dropdown
*/
React.useEffect(() => {
if (result?.getKubeObject) {
const appInfo: AppInfoData[] = [];
const kubeData = result.getKubeObject.kubeObj;
kubeData.forEach(obj => {
const applabels: string[] = [];
obj.data.forEach(objData => {
if (objData.labels) {
applabels.push(...objData.labels.filter(() => obj.namespace === targetApp?.appns));
}
});
/**
* Push these labels corresponding to their namespaces
*/
appInfo.push({
namespace: obj.namespace,
appLabel: applabels
});
});
if (resultNamespace?.getKubeNamespace) {
setNamespaceData(resultNamespace.getKubeNamespace.kubeNamespace.map(data => data.name));
}
}, [resultNamespace?.getKubeNamespace, targetApp?.appkind]);
React.useEffect(() => {
if (resultObject?.getKubeObject) {
const applabels: string[] = [];
resultObject.getKubeObject.kubeObj.data.forEach(objData => {
if (objData.labels) {
applabels.push(...objData.labels);
}
});
const appInfo: AppInfoData = { appLabel: applabels };
setAppInfoData(appInfo);
}
}, [result?.getKubeObject, targetApp?.appns]);
}, [resultObject?.getKubeObject, targetApp?.appns]);
return (
<TargetApplicationTab
appInfoData={appInfoData}
namespaceData={namespaceData}
targetApp={targetApp}
setTargetApp={setTargetApp}
engineCR={engineCR}
setFaultData={setFaultData}
infrastructureID={infrastructureID}
loading={loading}
loadingNamespace={loadingNamespace}
loadingObject={loadingObject}
/>
);
}

View File

@ -4,7 +4,10 @@ export interface TargetApplicationData {
applabel?: string;
}
export interface NamespaceData {
namespace: string[];
}
export interface AppInfoData {
namespace: string;
appLabel: string[];
}

View File

@ -31,6 +31,7 @@ export function getChaosInfrastructureStatus(
export interface ChaosInfrastructureReferenceFieldProps {
setFieldValue: FormikHelpers<ExperimentMetadata>['setFieldValue'];
initialInfrastructureID: string | undefined;
initialEnvironmentID: string | undefined;
}
export enum DeploymentScopeOptions {
@ -64,6 +65,10 @@ export interface InitialValueProps {
tolerationValues?: Array<Toleration>;
}
export enum AllEnv {
AllEnv = 'All'
}
export interface DeploymentScopeItem extends CollapsableSelectOptions {
type: DeploymentScopeOptions;
name: string;

View File

@ -486,6 +486,7 @@ infrastructureRegistered: >-
Environment -> Infrastructure list.
infrastructureStates: Learn more about the states of Infrastructure
infrastructureType: Infrastructure type
infrastructures: Infrastructures
initialDelay: Initial Delay
initialDelaySeconds: Initial Delay Seconds
insecureSkipVerify: Insecure skip verify

View File

@ -409,6 +409,7 @@ export interface StringsMap {
'infrastructureRegistered': unknown
'infrastructureStates': unknown
'infrastructureType': unknown
'infrastructures': unknown
'initialDelay': unknown
'initialDelaySeconds': unknown
'insecureSkipVerify': unknown

View File

@ -2,7 +2,7 @@
padding: var(--spacing-xlarge) !important;
&.dialog {
width: 833px;
width: 912px;
height: 86vh;
max-height: 989px;
}
@ -60,14 +60,13 @@
background: #effbff;
border: 1.5px solid #0278d5;
box-shadow: 0px 0px 1px rgba(40, 41, 61, 0.04), 0px 2px 4px rgba(96, 97, 112, 0.16);
border-radius: 4px;
border-radius: 8px;
}
.notSelected {
background: #fafbfc;
border: 1px solid rgba(40, 41, 61, 0.3);
box-shadow: 0px 0px 1px rgba(40, 41, 61, 0.04), 0px 2px 4px rgba(96, 97, 112, 0.16);
border-radius: 4px;
border-radius: 8px;
cursor: pointer;
}
@ -78,14 +77,13 @@
.agentListInnerContainer {
flex-grow: 1;
overflow: auto;
gap: 1rem;
max-height: calc(100% - 48px);
overflow: auto;
}
.item {
display: grid;
grid-template-columns: 5fr 4fr 25px;
grid-template-columns: 3fr 4fr 25px;
align-items: center;
gap: 0.5rem;
@ -95,7 +93,6 @@
}
.iconCheck {
visibility: hidden;
margin-right: var(--spacing-xsmall);
margin-left: var(--spacing-xsmall);
cursor: pointer;
@ -104,7 +101,15 @@
> svg {
> path {
stroke-width: 1;
stroke: var(--grey-500);
stroke: var(--grey-100);
}
}
}
.iconCheck:hover {
> svg {
> path {
stroke-width: 1;
stroke: var(--green-500);
}
}
}
@ -134,7 +139,7 @@
.gitInfo {
display: grid;
grid-template-columns: 4fr 5fr;
padding: 6px 8px;
padding: 4px 8px;
background: var(--grey-100) !important;
border-radius: 8px !important;
width: 100%;
@ -170,11 +175,6 @@
position: fixed;
}
.gap-4 {
gap: 1rem;
overflow: auto;
}
.paginationContainer {
padding-top: 8px;
overflow: hidden;
@ -190,3 +190,37 @@
}
}
}
.listEnvContainer {
background: var(--primary-1);
box-shadow: 0px 0px 1px rgba(40, 41, 61, 0.04), 0px 2px 4px rgba(96, 97, 112, 0.16);
border-radius: 8px;
cursor: pointer;
}
.itemEnv {
width: 100%;
display: grid;
grid-template-columns: 1fr 25px;
align-items: center;
gap: 0.5rem;
}
.activeEnv {
border: 1px solid var(--primary-7);
}
.center {
display: flex;
flex-direction: column;
justify-content: center;
align-self: center;
img {
width: 200px;
}
}
.rounded {
border-radius: 999px;
}

View File

@ -1,24 +1,28 @@
declare namespace ChaosInfrastructureReferenceFieldModuleScssNamespace {
export interface IChaosInfrastructureReferenceFieldModuleScss {
activeEnv: string;
agentList: string;
agentListInnerContainer: string;
center: string;
container: string;
dialog: string;
editBtn: string;
fixed: string;
gap4: string;
gitBranchIcon: string;
gitInfo: string;
greenStatus: string;
iconCheck: string;
iconChecked: string;
item: string;
itemEnv: string;
leftInfo: string;
listEnvContainer: string;
notSelected: string;
paginationContainer: string;
placeholder: string;
redStatus: string;
referenceSelect: string;
rounded: string;
selected: string;
status: string;
}

View File

@ -8,7 +8,8 @@ import {
ExpandingSearchInput,
Layout,
Text,
useToaster
useToaster,
useToggleOpen
} from '@harnessio/uicore';
import { Icon } from '@harnessio/icons';
import cx from 'classnames';
@ -19,6 +20,7 @@ import FallbackBox from '@images/FallbackBox.svg';
import CustomTagsPopover from '@components/CustomTagsPopover';
import Loader from '@components/Loader';
import { useRouteWithBaseUrl } from '@hooks';
import { Environment, EnvironmentDetail } from '@api/entities';
import css from './ChaosInfrastructureReferenceField.module.scss';
export interface InfrastructureDetails {
@ -34,10 +36,14 @@ export interface InfrastructureDetails {
interface ChaosInfrastructureReferenceFieldViewProps {
infrastructureList: InfrastructureDetails[] | undefined;
allInfrastructureLength: number | null;
environmentList: Environment[] | undefined;
preSelectedInfrastructure?: InfrastructureDetails;
setInfrastructureValue: (infrastructure: InfrastructureDetails | undefined) => void;
searchInfrastructure: string;
setSearchInfrastructure: React.Dispatch<React.SetStateAction<string>>;
setEnvID: (id: string) => void;
envID: string | undefined;
loading: {
listChaosInfra: boolean;
};
@ -46,32 +52,83 @@ interface ChaosInfrastructureReferenceFieldViewProps {
function ChaosInfrastructureReferenceFieldView({
infrastructureList,
environmentList,
allInfrastructureLength,
preSelectedInfrastructure,
setInfrastructureValue,
searchInfrastructure,
setSearchInfrastructure,
envID,
setEnvID,
loading,
pagination
}: ChaosInfrastructureReferenceFieldViewProps): JSX.Element {
const [isOpen, setOpen] = React.useState(false);
const paths = useRouteWithBaseUrl();
const history = useHistory();
const [selectedInfrastructure, setSelectedInfrastructure] = React.useState<InfrastructureDetails | undefined>(
preSelectedInfrastructure
);
// const searchParams = useSearchParams();
// const infrastructureType =
// (searchParams.get('infrastructureType') as InfrastructureType | undefined) ?? InfrastructureType.KUBERNETES;
const { isOpen, open, close } = useToggleOpen();
const { showError } = useToaster();
const { getString } = useStrings();
const listItem = ({ infrastructure }: { infrastructure: InfrastructureDetails }): JSX.Element => {
const EnvListItem = ({ envDetail }: { envDetail: EnvironmentDetail }): JSX.Element => {
return (
<Container
key={envDetail.envID}
flex
padding="small"
className={cx(css.listEnvContainer, { [css.activeEnv]: envID === envDetail.envID })}
onClick={() => {
setEnvID(envDetail.envID);
}}
>
<div className={css.itemEnv}>
<Layout.Horizontal padding={{ left: 'small' }} spacing="medium" className={css.leftInfo}>
<Text lineClamp={1} color={Color.GREY_800} font={{ variation: FontVariation.H6 }}>
{envDetail.envName}
</Text>
</Layout.Horizontal>
<Text
font={{ variation: FontVariation.SMALL }}
color={envID === envDetail.envID ? Color.WHITE : Color.PRIMARY_7}
background={envID === envDetail.envID ? Color.PRIMARY_7 : Color.PRIMARY_BG}
height={24}
width={24}
flex={{ alignItems: 'center', justifyContent: 'center' }}
className={css.rounded}
>
{envDetail.totalInfra ?? 0}
</Text>
</div>
</Container>
);
};
const EnvironmentList = ({ env }: { env: Environment }): JSX.Element => {
return (
<EnvListItem
envDetail={{
envName: env.name,
envID: env.environmentID,
totalInfra: env.infraIDs.length
}}
/>
);
};
const InfrastructureListItem = ({ infrastructure }: { infrastructure: InfrastructureDetails }): JSX.Element => {
const isSelected =
selectedInfrastructure?.id === infrastructure.id || preSelectedInfrastructure?.id === infrastructure.id;
return (
<Container
key={infrastructure.id}
padding="medium"
padding="small"
background={Color.WHITE}
className={selectedInfrastructure?.id === infrastructure.id ? css.selected : css.notSelected}
className={cx({ [css.selected]: isSelected, [css.notSelected]: !isSelected })}
onClick={() => {
infrastructure.isActive
? setSelectedInfrastructure(infrastructure)
@ -85,7 +142,6 @@ function ChaosInfrastructureReferenceFieldView({
size={12}
name="pipeline-approval"
/>
{/* <Icon name={`service-${infrastructureType.toLocaleLowerCase()}` as IconName} size={23} /> */}
<Text lineClamp={1} color={Color.GREY_800} font={{ variation: FontVariation.H6 }}>
{infrastructure.name}
</Text>
@ -124,6 +180,26 @@ function ChaosInfrastructureReferenceFieldView({
);
};
const NoInfraComponent = (): JSX.Element => {
return (
<Layout.Vertical flex={{ justifyContent: 'center' }} spacing="medium" padding={{ top: 'xlarge' }}>
<img src={FallbackBox} alt={getString('latestRun')} />
<Text font={{ variation: FontVariation.BODY1 }} color={Color.GREY_500}>
{searchInfrastructure === '' ? getString('newUserNoInfra.title') : getString('noFilteredActiveInfra')}
</Text>
{searchInfrastructure === '' && (
<Button
variation={ButtonVariation.PRIMARY}
text={getString('enableChaosInfraButton')}
onClick={() => {
history.push(paths.toEnvironments());
}}
/>
)}
</Layout.Vertical>
);
};
return (
<FormGroup label={getString('selectChaosInfrastructureFormLabel')}>
<Button
@ -133,7 +209,7 @@ function ChaosInfrastructureReferenceFieldView({
withoutCurrentColor={true}
rightIcon="chevron-down"
iconProps={{ size: 14 }}
onClick={() => setOpen(true)}
onClick={open}
disabled={loading.listChaosInfra}
>
<span className={css.placeholder}>
@ -148,84 +224,104 @@ function ChaosInfrastructureReferenceFieldView({
canEscapeKeyClose
canOutsideClickClose
onClose={() => {
setOpen(false);
close();
setSearchInfrastructure('');
}}
className={cx(css.referenceSelect, css.dialog)}
title={
<Layout.Horizontal spacing={'small'} flex={{ distribution: 'space-between' }}>
<Layout.Horizontal>
<Text font={{ variation: FontVariation.H3 }}>{getString('selectChaosInfrastructure')}</Text>
</Layout.Horizontal>
}
>
<Layout.Vertical
flex={{ alignItems: 'flex-start', justifyContent: 'flex-start' }}
padding={{ bottom: 'medium' }}
height={'calc(100% - 32px)'}
className={css.gap4}
>
<Container width={'100%'}>
<ExpandingSearchInput
alwaysExpanded
throttle={500}
placeholder={getString('search')}
onChange={e => setSearchInfrastructure(e)}
/>
</Container>
<Loader
loading={loading.listChaosInfra}
noData={{
when: () => !infrastructureList,
messageTitle: getString('noData.title'),
message: getString('noData.message')
}}
>
<Layout.Vertical width="100%" className={css.agentList}>
<Layout.Vertical width="100%" className={css.agentListInnerContainer}>
{infrastructureList && infrastructureList.length > 0 ? (
infrastructureList.map(infrastructure => listItem({ infrastructure: infrastructure }))
) : (
<Layout.Vertical flex={{ justifyContent: 'center' }} spacing="medium" height={'100%'}>
<img src={FallbackBox} alt={getString('latestRun')} />
<Text font={{ variation: FontVariation.BODY1 }} color={Color.GREY_500}>
{searchInfrastructure === ''
? getString('newUserNoInfra.title')
: getString('noFilteredActiveInfra')}
</Text>
{searchInfrastructure === '' && (
<Button
variation={ButtonVariation.PRIMARY}
text={getString('enableChaosInfraButton')}
onClick={() => {
history.push(paths.toEnvironments());
}}
/>
)}
{environmentList && environmentList.length > 0 ? (
<Layout.Vertical height={'100%'}>
<Layout.Horizontal flex padding={{ bottom: 'small' }} border={{ bottom: true }}>
<Container
width={'50%'}
padding={{ left: 'medium', right: 'medium' }}
flex={{ justifyContent: 'space-between' }}
>
<Text color={Color.GREY_800} font={{ variation: FontVariation.H5 }}>
{getString('environments')}
</Text>
<Text color={Color.GREY_800} font={{ variation: FontVariation.H5 }}>
{getString('infrastructures')}
</Text>
</Container>
<ExpandingSearchInput
alwaysExpanded
throttle={500}
placeholder={getString('search')}
onChange={setSearchInfrastructure}
/>
</Layout.Horizontal>
<Layout.Horizontal height={'calc(100% - 80px)'}>
<Layout.Horizontal width={'30%'} padding={{ top: 'small' }} border={{ right: true }}>
<Layout.Vertical
width={'30%'}
padding={{ left: 'medium', right: 'small', top: 'small', bottom: 'medium' }}
className={cx(css.agentList, css.agentListInnerContainer)}
>
<EnvListItem
envDetail={{
envName: getString('all'),
envID: getString('all'),
totalInfra: allInfrastructureLength
}}
/>
{environmentList.map(env => (
<EnvironmentList key={env.environmentID} env={env} />
))}
</Layout.Vertical>
</Layout.Horizontal>
<Layout.Horizontal width={'70%'} padding={{ top: 'small' }}>
<Layout.Vertical width={'70%'} className={css.agentList}>
<Layout.Vertical
height={'80%'}
width={'100%'}
padding={'small'}
className={cx(css.agentList, css.agentListInnerContainer)}
>
<Loader
loading={loading.listChaosInfra}
noData={{
when: () => !infrastructureList,
messageTitle: getString('noData.title'),
message: getString('noData.message')
}}
>
{infrastructureList && infrastructureList.length > 0 ? (
infrastructureList.map(infrastructure => (
<InfrastructureListItem key={infrastructure.id} infrastructure={infrastructure} />
))
) : (
<NoInfraComponent />
)}
</Loader>
</Layout.Vertical>
)}
</Layout.Vertical>
<Container className={css.paginationContainer}>{pagination}</Container>
</Layout.Vertical>
</Loader>
</Layout.Vertical>
<Layout.Horizontal className={cx(css.gap4, css.fixed)}>
<Button
variation={ButtonVariation.PRIMARY}
text={getString('apply')}
onClick={() => {
setOpen(false);
setInfrastructureValue(selectedInfrastructure);
}}
disabled={!selectedInfrastructure}
/>
<Button
variation={ButtonVariation.TERTIARY}
text={getString('cancel')}
onClick={() => {
setOpen(false);
}}
/>
</Layout.Horizontal>
<Layout.Horizontal flex={{ justifyContent: 'center' }}>
{pagination && <Container className={css.paginationContainer}>{pagination}</Container>}
</Layout.Horizontal>
</Layout.Vertical>
</Layout.Horizontal>
</Layout.Horizontal>
<Layout.Horizontal spacing="small" padding={{ right: 'medium' }} flex={{ justifyContent: 'flex-end' }}>
<Button
variation={ButtonVariation.PRIMARY}
text={getString('apply')}
onClick={() => {
close();
setInfrastructureValue(selectedInfrastructure);
}}
disabled={!selectedInfrastructure}
/>
<Button variation={ButtonVariation.TERTIARY} text={getString('cancel')} onClick={close} />
</Layout.Horizontal>
</Layout.Vertical>
) : (
<NoInfraComponent />
)}
</Dialog>
</FormGroup>
);

View File

@ -7,24 +7,28 @@ import { useStrings } from '@strings';
import type { AppInfoData, TargetApplicationData } from '@controllers/TargetApplicationTab/types';
interface TargetApplicationViewProps {
appInfoData: AppInfoData[];
appInfoData: AppInfoData;
namespaceData: string[];
targetApp: TargetApplicationData | undefined;
setTargetApp: React.Dispatch<React.SetStateAction<TargetApplicationData>>;
engineCR: ChaosEngine | undefined;
setFaultData: React.Dispatch<React.SetStateAction<FaultData | undefined>>;
// getKubeObjectLazyQueryFunction: LazyQueryFunction<KubeObjResponse, KubeObjRequest>;
infrastructureID: string | undefined;
loading: boolean;
loadingNamespace: boolean;
loadingObject: boolean;
}
export default function TargetApplicationTab({
appInfoData,
namespaceData,
targetApp,
setTargetApp,
engineCR,
setFaultData,
// getKubeObjectLazyQueryFunction,
loading
loadingNamespace,
loadingObject
}: TargetApplicationViewProps): React.ReactElement {
const { getString } = useStrings();
@ -36,18 +40,19 @@ export default function TargetApplicationTab({
}
function getAppNamespaceItems(): SelectOption[] {
return appInfoData.map(data => ({
label: data.namespace,
value: data.namespace
if (loadingNamespace) return [];
return namespaceData.map(data => ({
label: data,
value: data
}));
}
function getAppLabelItems(): SelectOption[] {
if (loading) return [];
const filteredAppInfo = appInfoData.filter(data => data.namespace === targetApp?.appns)[0];
return filteredAppInfo?.appLabel.map(label => ({
label: label,
value: label
if (loadingObject) return [];
// const filteredAppInfo = appInfoData.filter(data => data.namespace === targetApp?.appns)[0];
return appInfoData?.appLabel.map(data => ({
label: data,
value: data
}));
}
@ -66,8 +71,8 @@ export default function TargetApplicationTab({
onChange={selectedItem => {
setTargetApp({
appkind: selectedItem.label,
appns: '',
applabel: ''
applabel: '',
appns: ''
});
if (engineCR?.spec?.appinfo?.appkind !== undefined) engineCR.spec.appinfo.appkind = selectedItem.label;
setFaultData(faultData => {

View File

@ -34,6 +34,11 @@
align-items: center;
width: 50%;
height: 230px;
img {
max-width: 250px;
height: 131px;
}
}
}

View File

@ -128,6 +128,7 @@ export default function StudioOverviewView({
<Layout.Vertical background={Color.WHITE} padding="medium" spacing="large">
{getChaosInfrastructureReferenceField({
initialInfrastructureID: formikProps.values.chaosInfrastructure.id,
initialEnvironmentID: formikProps.values.chaosInfrastructure.environmentID,
setFieldValue: formikProps.setFieldValue
})}
<ErrorMessage name="chaosInfrastructure.id">

View File

@ -1051,16 +1051,16 @@
"@types/node" "*"
"@types/bonjour@^3.5.9":
version "3.5.10"
resolved "https://registry.yarnpkg.com/@types/bonjour/-/bonjour-3.5.10.tgz#0f6aadfe00ea414edc86f5d106357cda9701e275"
integrity sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==
version "3.5.13"
resolved "https://registry.yarnpkg.com/@types/bonjour/-/bonjour-3.5.13.tgz#adf90ce1a105e81dd1f9c61fdc5afda1bfb92956"
integrity sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==
dependencies:
"@types/node" "*"
"@types/connect-history-api-fallback@^1.3.5":
version "1.3.5"
resolved "https://registry.yarnpkg.com/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.3.5.tgz#d1f7a8a09d0ed5a57aee5ae9c18ab9b803205dae"
integrity sha512-h8QJa8xSb1WD4fpKBDcATDNGXghFj6/3GRWG6dhmRcu0RX1Ubasur2Uvx5aeEwlf0MwblEC2bMzzMQntxnw/Cw==
version "1.5.4"
resolved "https://registry.yarnpkg.com/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz#7de71645a103056b48ac3ce07b3520b819c1d5b3"
integrity sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==
dependencies:
"@types/express-serve-static-core" "*"
"@types/node" "*"
@ -1324,11 +1324,6 @@
resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.182.tgz#05301a4d5e62963227eaafe0ce04dd77c54ea5c2"
integrity sha512-/THyiqyQAP9AfARo4pF+aCGcyiQ94tX/Is2I7HofNRqoYLgN1PBoOWu2/zTA5zMxzP5EFutMtWtGAFRKUe961Q==
"@types/mime@*":
version "3.0.4"
resolved "https://registry.yarnpkg.com/@types/mime/-/mime-3.0.4.tgz#2198ac274de6017b44d941e00261d5bc6a0e0a45"
integrity sha512-iJt33IQnVRkqeqC7PzBHPTC6fDlRNRW8vjrgqtScAhrmMwe8c4Eo7+fUGTa+XdWrpEgpyKWMYmi2dIwMAYRzPw==
"@types/mime@^1":
version "1.3.2"
resolved "https://registry.yarnpkg.com/@types/mime/-/mime-1.3.2.tgz#93e25bf9ee75fe0fd80b594bc4feb0e862111b5a"
@ -1435,10 +1430,10 @@
"@types/scheduler" "*"
csstype "^3.0.2"
"@types/retry@^0.12.0":
version "0.12.1"
resolved "https://registry.yarnpkg.com/@types/retry/-/retry-0.12.1.tgz#d8f1c0d0dc23afad6dc16a9e993a0865774b4065"
integrity sha512-xoDlM2S4ortawSWORYqsdU+2rxdh4LRW9ytc3zmT37RIKQh6IHyKwwtKhKis9ah8ol07DCkZxPt8BBvPjC6v4g==
"@types/retry@0.12.0":
version "0.12.0"
resolved "https://registry.yarnpkg.com/@types/retry/-/retry-0.12.0.tgz#2b35eccfcee7d38cd72ad99232fbd58bffb3c84d"
integrity sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==
"@types/scheduler@*":
version "0.16.2"
@ -1459,9 +1454,9 @@
"@types/node" "*"
"@types/serve-index@^1.9.1":
version "1.9.1"
resolved "https://registry.yarnpkg.com/@types/serve-index/-/serve-index-1.9.1.tgz#1b5e85370a192c01ec6cec4735cf2917337a6278"
integrity sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==
version "1.9.4"
resolved "https://registry.yarnpkg.com/@types/serve-index/-/serve-index-1.9.4.tgz#e6ae13d5053cb06ed36392110b4f9a49ac4ec898"
integrity sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==
dependencies:
"@types/express" "*"
@ -1474,18 +1469,18 @@
"@types/node" "*"
"@types/serve-static@^1.13.10":
version "1.15.5"
resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.15.5.tgz#15e67500ec40789a1e8c9defc2d32a896f05b033"
integrity sha512-PDRk21MnK70hja/YF8AHfC7yIsiQHn1rcXx7ijCFBX/k+XQJhQT/gw3xekXKJvx+5SXaMMS8oqQy09Mzvz2TuQ==
version "1.15.7"
resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.15.7.tgz#22174bbd74fb97fe303109738e9b5c2f3064f714"
integrity sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==
dependencies:
"@types/http-errors" "*"
"@types/mime" "*"
"@types/node" "*"
"@types/send" "*"
"@types/sockjs@^0.3.33":
version "0.3.33"
resolved "https://registry.yarnpkg.com/@types/sockjs/-/sockjs-0.3.33.tgz#570d3a0b99ac995360e3136fd6045113b1bd236f"
integrity sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==
version "0.3.36"
resolved "https://registry.yarnpkg.com/@types/sockjs/-/sockjs-0.3.36.tgz#ce322cf07bcc119d4cbf7f88954f3a3bd0f67535"
integrity sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==
dependencies:
"@types/node" "*"
@ -4408,9 +4403,9 @@ fs-monkey@1.0.3:
integrity sha512-cybjIfiiE+pTWicSCLFHSrXZ6EilF30oh91FDP9S2B051prEa7QWfrVTQm10/dDpswBDXZugPa1Ogu8Yh+HV0Q==
fs-monkey@^1.0.4:
version "1.0.5"
resolved "https://registry.yarnpkg.com/fs-monkey/-/fs-monkey-1.0.5.tgz#fe450175f0db0d7ea758102e1d84096acb925788"
integrity sha512-8uMbBjrhzW76TYgEV27Y5E//W2f/lTFmx78P2w19FZSxarhI/798APGQyuGCwmkNxgwGRhrLfvWyLBvNtuOmew==
version "1.0.6"
resolved "https://registry.yarnpkg.com/fs-monkey/-/fs-monkey-1.0.6.tgz#8ead082953e88d992cf3ff844faa907b26756da2"
integrity sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==
fs.realpath@^1.0.0:
version "1.0.0"
@ -4703,9 +4698,9 @@ html-encoding-sniffer@^2.0.1:
whatwg-encoding "^1.0.5"
html-entities@^2.3.2:
version "2.3.2"
resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-2.3.2.tgz#760b404685cb1d794e4f4b744332e3b00dcfe488"
integrity sha512-c3Ab/url5ksaT0WyleslpBEthOzWhrjQbg75y7XUsfSzi3Dgzt0l8w5e7DylRn15MTlMMD58dTfzddNS2kcAjQ==
version "2.5.2"
resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-2.5.2.tgz#201a3cf95d3a15be7099521620d19dfb4f65359f"
integrity sha512-K//PSRMQk4FZ78Kyau+mZurHn3FH0Vwr+H36eE0rPbeYkRRi9YxceYPhuN60UwWorxyKHhqoAJl2OFKa4BVtaA==
html-escaper@^2.0.0:
version "2.0.2"
@ -4947,9 +4942,9 @@ ipaddr.js@1.9.1:
integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==
ipaddr.js@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-2.0.1.tgz#eca256a7a877e917aeb368b0a7497ddf42ef81c0"
integrity sha512-1qTgH9NG+IIJ4yfKs2e6Pp1bZg8wbDbKHT21HrLIeYBTRLgMYKnMTPAuI3Lcs61nfx5h1xlXnbJtH1kX5/d/ng==
version "2.2.0"
resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-2.2.0.tgz#d33fa7bac284f4de7af949638c9d68157c6b92e8"
integrity sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==
is-arguments@^1.0.4:
version "1.1.1"
@ -6491,9 +6486,9 @@ onetime@^5.1.0, onetime@^5.1.2:
mimic-fn "^2.1.0"
open@^8.0.9:
version "8.4.0"
resolved "https://registry.yarnpkg.com/open/-/open-8.4.0.tgz#345321ae18f8138f82565a910fdc6b39e8c244f8"
integrity sha512-XgFPPM+B28FtCCgSb9I+s9szOC1vZRSwgWsRUA5ylIxRTgKozqjOCrVOqGsYABPYK5qnfqClxZTFBa8PKt2v6Q==
version "8.4.2"
resolved "https://registry.yarnpkg.com/open/-/open-8.4.2.tgz#5b5ffe2a8f793dcd2aad73e550cb87b59cb084f9"
integrity sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==
dependencies:
define-lazy-prop "^2.0.0"
is-docker "^2.1.1"
@ -6589,11 +6584,11 @@ p-map@^4.0.0:
aggregate-error "^3.0.0"
p-retry@^4.5.0:
version "4.6.1"
resolved "https://registry.yarnpkg.com/p-retry/-/p-retry-4.6.1.tgz#8fcddd5cdf7a67a0911a9cf2ef0e5df7f602316c"
integrity sha512-e2xXGNhZOZ0lfgR9kL34iGlU8N/KO0xZnQxVEwdeOvpqNDQfdnxIYizvWtK8RglUa3bGqI8g0R/BdfzLMxRkiA==
version "4.6.2"
resolved "https://registry.yarnpkg.com/p-retry/-/p-retry-4.6.2.tgz#9baae7184057edd4e17231cee04264106e092a16"
integrity sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==
dependencies:
"@types/retry" "^0.12.0"
"@types/retry" "0.12.0"
retry "^0.13.1"
p-try@^1.0.0:
@ -8523,7 +8518,7 @@ webpack-cli@^4.9.0:
rechoir "^0.7.0"
webpack-merge "^5.7.3"
webpack-dev-middleware@^5.3.1:
webpack-dev-middleware@^5.3.4:
version "5.3.4"
resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz#eb7b39281cbce10e104eb2b8bf2b63fce49a3517"
integrity sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==
@ -8535,9 +8530,9 @@ webpack-dev-middleware@^5.3.1:
schema-utils "^4.0.0"
webpack-dev-server@^4.15.1:
version "4.15.1"
resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-4.15.1.tgz#8944b29c12760b3a45bdaa70799b17cb91b03df7"
integrity sha512-5hbAst3h3C3L8w6W4P96L5vaV0PxSmJhxZvWKYIdgxOQm8pNZ5dEOmmSLBVpP85ReeyRt6AS1QJNyo/oFFPeVA==
version "4.15.2"
resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz#9e0c70a42a012560860adb186986da1248333173"
integrity sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==
dependencies:
"@types/bonjour" "^3.5.9"
"@types/connect-history-api-fallback" "^1.3.5"
@ -8567,7 +8562,7 @@ webpack-dev-server@^4.15.1:
serve-index "^1.9.1"
sockjs "^0.3.24"
spdy "^4.0.2"
webpack-dev-middleware "^5.3.1"
webpack-dev-middleware "^5.3.4"
ws "^8.13.0"
webpack-merge@^5.7.3:
@ -8745,6 +8740,7 @@ ws@^8.13.0:
resolved "https://registry.yarnpkg.com/ws/-/ws-8.17.1.tgz#9293da530bb548febc95371d90f9c878727d919b"
integrity sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==
xml-name-validator@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a"

View File

@ -0,0 +1,414 @@
---
apiVersion: v1
kind: Secret
metadata:
name: litmus-portal-admin-secret
stringData:
DB_USER: "root"
DB_PASSWORD: "1234"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: litmus-portal-admin-config
data:
DB_SERVER: mongodb://my-release-mongodb-0.my-release-mongodb-headless:27017,my-release-mongodb-1.my-release-mongodb-headless:27017,my-release-mongodb-2.my-release-mongodb-headless:27017/admin
VERSION: "3.10.0"
SKIP_SSL_VERIFY: "false"
# Configurations if you are using dex for OAuth
DEX_ENABLED: "false"
OIDC_ISSUER: "http://<Your Domain>:32000"
DEX_OAUTH_CALLBACK_URL: "http://<litmus-portal frontend exposed URL>:8080/auth/dex/callback"
DEX_OAUTH_CLIENT_ID: "LitmusPortalAuthBackend"
DEX_OAUTH_CLIENT_SECRET: "ZXhhbXBsZS1hcHAtc2VjcmV0"
OAuthJwtSecret: "litmus-oauth@123"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: litmusportal-frontend-nginx-configuration
data:
nginx.conf: |
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
client_body_temp_path /tmp/client_temp;
proxy_temp_path /tmp/proxy_temp_path;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
server_tokens off;
include /etc/nginx/mime.types;
gzip on;
gzip_disable "msie6";
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
server {
listen 8185 default_server;
root /opt/chaos;
location /health {
return 200;
}
location / {
proxy_http_version 1.1;
add_header Cache-Control "no-cache";
try_files $uri /index.html;
autoindex on;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
location /auth/ {
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass "http://litmusportal-auth-server-service:9003/";
}
location /api/ {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass "http://litmusportal-server-service:9002/";
}
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-frontend
labels:
component: litmusportal-frontend
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-frontend
template:
metadata:
labels:
component: litmusportal-frontend
spec:
automountServiceAccountToken: false
containers:
- name: litmusportal-frontend
image: litmuschaos/litmusportal-frontend:3.10.0
# securityContext:
# runAsUser: 2000
# allowPrivilegeEscalation: false
# runAsNonRoot: true
imagePullPolicy: Always
ports:
- containerPort: 8185
resources:
requests:
memory: "250Mi"
cpu: "125m"
ephemeral-storage: "500Mi"
limits:
memory: "512Mi"
cpu: "550m"
ephemeral-storage: "1Gi"
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
volumes:
- name: nginx-config
configMap:
name: litmusportal-frontend-nginx-configuration
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-frontend-service
spec:
type: NodePort
ports:
- name: http
port: 9091
targetPort: 8185
selector:
component: litmusportal-frontend
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-server
labels:
component: litmusportal-server
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-server
template:
metadata:
labels:
component: litmusportal-server
spec:
automountServiceAccountToken: false
volumes:
- name: gitops-storage
emptyDir: {}
- name: hub-storage
emptyDir: {}
containers:
- name: graphql-server
image: litmuschaos/litmusportal-server:3.10.0
volumeMounts:
- mountPath: /tmp/
name: gitops-storage
- mountPath: /tmp/version
name: hub-storage
securityContext:
runAsUser: 2000
allowPrivilegeEscalation: false
runAsNonRoot: true
readOnlyRootFilesystem: true
envFrom:
- configMapRef:
name: litmus-portal-admin-config
- secretRef:
name: litmus-portal-admin-secret
env:
# if self-signed certificates are used, pass the base64 tls certificate to allow agents to use tls for communication
- name: TLS_CERT_B64
value: ""
- name: ENABLE_GQL_INTROSPECTION
value: "false"
- name: INFRA_DEPLOYMENTS
value: '["app=chaos-exporter", "name=chaos-operator", "app=workflow-controller", "app=event-tracker"]'
- name: CHAOS_CENTER_UI_ENDPOINT
value: ""
- name: SUBSCRIBER_IMAGE
value: "litmuschaos/litmusportal-subscriber:3.10.0"
- name: EVENT_TRACKER_IMAGE
value: "litmuschaos/litmusportal-event-tracker:3.10.0"
- name: ARGO_WORKFLOW_CONTROLLER_IMAGE
value: "litmuschaos/workflow-controller:v3.3.1"
- name: ARGO_WORKFLOW_EXECUTOR_IMAGE
value: "litmuschaos/argoexec:v3.3.1"
- name: LITMUS_CHAOS_OPERATOR_IMAGE
value: "litmuschaos/chaos-operator:3.10.0"
- name: LITMUS_CHAOS_RUNNER_IMAGE
value: "litmuschaos/chaos-runner:3.10.0"
- name: LITMUS_CHAOS_EXPORTER_IMAGE
value: "litmuschaos/chaos-exporter:3.10.0"
- name: CONTAINER_RUNTIME_EXECUTOR
value: "k8sapi"
- name: DEFAULT_HUB_BRANCH_NAME
value: "3.10.x"
- name: LITMUS_AUTH_GRPC_ENDPOINT
value: "litmusportal-auth-server-service"
- name: LITMUS_AUTH_GRPC_PORT
value: "3030"
- name: WORKFLOW_HELPER_IMAGE_VERSION
value: "3.10.0"
- name: REMOTE_HUB_MAX_SIZE
value: "5000000"
- name: INFRA_COMPATIBLE_VERSIONS
value: '["3.10.0"]'
- name: ALLOWED_ORIGINS
value: ".*" #eg: ^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-server-service(:[0-9]+|)?
- name: ENABLE_INTERNAL_TLS
value: "false"
- name: TLS_CERT_PATH
value: ""
- name: TLS_KEY_PATH
value: ""
- name: CA_CERT_TLS_PATH
value: ""
- name: REST_PORT
value: "8080"
- name: GRPC_PORT
value: "8000"
ports:
- containerPort: 8080
- containerPort: 8000
imagePullPolicy: Always
resources:
requests:
memory: "250Mi"
cpu: "225m"
ephemeral-storage: "500Mi"
limits:
memory: "712Mi"
cpu: "550m"
ephemeral-storage: "1Gi"
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: litmusportal-server
namespace: litmus
labels:
component: litmusportal-server
spec:
policyTypes:
- Ingress
podSelector:
matchLabels:
component: litmusportal-server
ingress:
- from:
- podSelector:
matchLabels:
component: litmusportal-frontend
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-server-service
spec:
type: NodePort
ports:
- name: graphql-server
port: 9002
targetPort: 8080
- name: graphql-rpc-server
port: 8000
targetPort: 8000
selector:
component: litmusportal-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-auth-server
labels:
component: litmusportal-auth-server
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-auth-server
template:
metadata:
labels:
component: litmusportal-auth-server
spec:
automountServiceAccountToken: false
containers:
- name: auth-server
image: litmuschaos/litmusportal-auth-server:3.10.0
securityContext:
runAsUser: 2000
allowPrivilegeEscalation: false
runAsNonRoot: true
readOnlyRootFilesystem: true
envFrom:
- configMapRef:
name: litmus-portal-admin-config
- secretRef:
name: litmus-portal-admin-secret
env:
- name: STRICT_PASSWORD_POLICY
value: "false"
- name: ADMIN_USERNAME
value: "admin"
- name: ADMIN_PASSWORD
value: "litmus"
- name: LITMUS_GQL_GRPC_ENDPOINT
value: "litmusportal-server-service"
- name: LITMUS_GQL_GRPC_PORT
value: "8000"
- name: ALLOWED_ORIGINS
value: ".*" #eg: ^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-server-service(:[0-9]+|)?
- name: ENABLE_INTERNAL_TLS
value: "false"
- name: TLS_CERT_PATH
value: ""
- name: TLS_KEY_PATH
value: ""
- name: CA_CERT_TLS_PATH
value: ""
- name: REST_PORT
value: "3000"
- name: GRPC_PORT
value: "3030"
ports:
- containerPort: 3000
- containerPort: 3030
imagePullPolicy: Always
resources:
requests:
memory: "250Mi"
cpu: "125m"
ephemeral-storage: "500Mi"
limits:
memory: "712Mi"
cpu: "550m"
ephemeral-storage: "1Gi"
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: litmusportal-auth-server
namespace: litmus
labels:
component: litmusportal-auth-server
spec:
policyTypes:
- Ingress
podSelector:
matchLabels:
component: litmusportal-auth-server
ingress:
- from:
- podSelector:
matchLabels:
component: litmusportal-frontend
- from:
- podSelector:
matchLabels:
component: litmusportal-server
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-auth-server-service
spec:
type: NodePort
ports:
- name: auth-server
port: 9003
targetPort: 3000
- name: auth-rpc-server
port: 3030
targetPort: 3030
selector:
component: litmusportal-auth-server

View File

@ -0,0 +1,447 @@
---
apiVersion: v1
kind: Secret
metadata:
name: litmus-portal-admin-secret
stringData:
DB_USER: "root"
DB_PASSWORD: "1234"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: litmus-portal-admin-config
data:
DB_SERVER: mongodb://my-release-mongodb-0.my-release-mongodb-headless:27017,my-release-mongodb-1.my-release-mongodb-headless:27017,my-release-mongodb-2.my-release-mongodb-headless:27017/admin
VERSION: "3.10.0"
SKIP_SSL_VERIFY: "false"
# Configurations if you are using dex for OAuth
DEX_ENABLED: "false"
OIDC_ISSUER: "http://<Your Domain>:32000"
DEX_OAUTH_CALLBACK_URL: "http://<litmus-portal frontend exposed URL>:8080/auth/dex/callback"
DEX_OAUTH_CLIENT_ID: "LitmusPortalAuthBackend"
DEX_OAUTH_CLIENT_SECRET: "ZXhhbXBsZS1hcHAtc2VjcmV0"
OAuthJwtSecret: "litmus-oauth@123"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: litmusportal-frontend-nginx-configuration
data:
nginx.conf: |
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
client_body_temp_path /tmp/client_temp;
proxy_temp_path /tmp/proxy_temp_path;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
server_tokens off;
include /etc/nginx/mime.types;
gzip on;
gzip_disable "msie6";
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
server {
listen 8185 ssl;
ssl_certificate /etc/tls/tls.crt;
ssl_certificate_key /etc/tls/tls.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_client_certificate /etc/tls/ca.crt;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
root /opt/chaos;
location /health {
return 200;
}
location / {
proxy_http_version 1.1;
add_header Cache-Control "no-cache";
try_files $uri /index.html;
autoindex on;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
location /auth/ {
proxy_ssl_verify off;
proxy_ssl_session_reuse on;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass "https://litmusportal-auth-server-service:9005/";
proxy_ssl_certificate /etc/tls/tls.crt;
proxy_ssl_certificate_key /etc/tls/tls.key;
}
location /api/ {
proxy_ssl_verify off;
proxy_ssl_session_reuse on;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass "https://litmusportal-server-service:9004/";
proxy_ssl_certificate /etc/tls/tls.crt;
proxy_ssl_certificate_key /etc/tls/tls.key;
}
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-frontend
labels:
component: litmusportal-frontend
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-frontend
template:
metadata:
labels:
component: litmusportal-frontend
spec:
automountServiceAccountToken: false
containers:
- name: litmusportal-frontend
image: litmuschaos/litmusportal-frontend:3.10.0
# securityContext:
# runAsUser: 2000
# allowPrivilegeEscalation: false
# runAsNonRoot: true
imagePullPolicy: Always
ports:
- containerPort: 8185
resources:
requests:
memory: "250Mi"
cpu: "125m"
ephemeral-storage: "500Mi"
limits:
memory: "512Mi"
cpu: "550m"
ephemeral-storage: "1Gi"
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
- mountPath: /etc/tls
name: tls-secret
volumes:
- name: nginx-config
configMap:
name: litmusportal-frontend-nginx-configuration
- name: tls-secret
secret:
secretName: tls-secret
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-frontend-service
spec:
type: NodePort
ports:
- name: http
port: 9091
targetPort: 8185
selector:
component: litmusportal-frontend
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-server
labels:
component: litmusportal-server
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-server
template:
metadata:
labels:
component: litmusportal-server
spec:
automountServiceAccountToken: false
volumes:
- name: gitops-storage
emptyDir: {}
- name: hub-storage
emptyDir: {}
- name: tls-secret
secret:
secretName: tls-secret
containers:
- name: graphql-server
image: litmuschaos/litmusportal-server:3.10.0
volumeMounts:
- mountPath: /tmp/
name: gitops-storage
- mountPath: /tmp/version
name: hub-storage
- mountPath: /etc/tls
name: tls-secret
securityContext:
runAsUser: 2000
allowPrivilegeEscalation: false
runAsNonRoot: true
readOnlyRootFilesystem: true
envFrom:
- configMapRef:
name: litmus-portal-admin-config
- secretRef:
name: litmus-portal-admin-secret
env:
# if self-signed certificates are used, pass the base64 tls certificate to allow agents to use tls for communication
- name: TLS_CERT_B64
value: ""
- name: ENABLE_GQL_INTROSPECTION
value: "false"
- name: INFRA_DEPLOYMENTS
value: '["app=chaos-exporter", "name=chaos-operator", "app=workflow-controller", "app=event-tracker"]'
- name: CHAOS_CENTER_UI_ENDPOINT
value: ""
- name: SUBSCRIBER_IMAGE
value: "litmuschaos/litmusportal-subscriber:3.10.0"
- name: EVENT_TRACKER_IMAGE
value: "litmuschaos/litmusportal-event-tracker:3.10.0"
- name: ARGO_WORKFLOW_CONTROLLER_IMAGE
value: "litmuschaos/workflow-controller:v3.3.1"
- name: ARGO_WORKFLOW_EXECUTOR_IMAGE
value: "litmuschaos/argoexec:v3.3.1"
- name: LITMUS_CHAOS_OPERATOR_IMAGE
value: "litmuschaos/chaos-operator:3.10.0"
- name: LITMUS_CHAOS_RUNNER_IMAGE
value: "litmuschaos/chaos-runner:3.10.0"
- name: LITMUS_CHAOS_EXPORTER_IMAGE
value: "litmuschaos/chaos-exporter:3.10.0"
- name: CONTAINER_RUNTIME_EXECUTOR
value: "k8sapi"
- name: DEFAULT_HUB_BRANCH_NAME
value: "3.10.x"
- name: LITMUS_AUTH_GRPC_ENDPOINT
value: "litmusportal-auth-server-service"
- name: LITMUS_AUTH_GRPC_PORT
value: "3030"
- name: WORKFLOW_HELPER_IMAGE_VERSION
value: "3.10.0"
- name: REMOTE_HUB_MAX_SIZE
value: "5000000"
- name: INFRA_COMPATIBLE_VERSIONS
value: '["3.10.0"]'
- name: ALLOWED_ORIGINS
value: "^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-server-service(:[0-9]+|)?"
- name: ENABLE_INTERNAL_TLS
value: "true"
- name: TLS_CERT_PATH
value: "/etc/tls/tls.crt"
- name: TLS_KEY_PATH
value: "/etc/tls/tls.key"
- name: CA_CERT_TLS_PATH
value: "/etc/tls/ca.crt"
- name: REST_PORT
value: "8081"
- name: GRPC_PORT
value: "8001"
ports:
- containerPort: 8081
- containerPort: 8001
imagePullPolicy: Always
resources:
requests:
memory: "250Mi"
cpu: "225m"
ephemeral-storage: "500Mi"
limits:
memory: "712Mi"
cpu: "550m"
ephemeral-storage: "1Gi"
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: litmusportal-server
namespace: litmus
labels:
component: litmusportal-server
spec:
policyTypes:
- Ingress
podSelector:
matchLabels:
component: litmusportal-server
ingress:
- from:
- podSelector:
matchLabels:
component: litmusportal-frontend
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-server-service
spec:
type: NodePort
ports:
- name: graphql-server-https
port: 9004
targetPort: 8081
- name: graphql-rpc-server-https
port: 8001
targetPort: 8001
selector:
component: litmusportal-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-auth-server
labels:
component: litmusportal-auth-server
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-auth-server
template:
metadata:
labels:
component: litmusportal-auth-server
spec:
volumes:
- name: tls-secret
secret:
secretName: tls-secret
automountServiceAccountToken: false
containers:
- name: auth-server
volumeMounts:
- mountPath: /etc/tls
name: tls-secret
image: litmuschaos/litmusportal-auth-server:3.10.0
securityContext:
runAsUser: 2000
allowPrivilegeEscalation: false
runAsNonRoot: true
readOnlyRootFilesystem: true
envFrom:
- configMapRef:
name: litmus-portal-admin-config
- secretRef:
name: litmus-portal-admin-secret
env:
- name: STRICT_PASSWORD_POLICY
value: "false"
- name: ADMIN_USERNAME
value: "admin"
- name: ADMIN_PASSWORD
value: "litmus"
- name: LITMUS_GQL_GRPC_ENDPOINT
value: "litmusportal-server-service"
- name: LITMUS_GQL_GRPC_PORT
value: "8000"
- name: ALLOWED_ORIGINS
value: "^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-server-service(:[0-9]+|)?" #ip needs to added here
- name: ENABLE_INTERNAL_TLS
value: "true"
- name: TLS_CERT_PATH
value: "/etc/tls/tls.crt"
- name: TLS_KEY_PATH
value: "/etc/tls/ctls.key"
- name: CA_CERT_TLS_PATH
value: "/etc/tls/ca.crt"
- name: REST_PORT
value: "3001"
- name: GRPC_PORT
value: "3031"
ports:
- containerPort: 3001
- containerPort: 3031
imagePullPolicy: Always
resources:
requests:
memory: "250Mi"
cpu: "125m"
ephemeral-storage: "500Mi"
limits:
memory: "712Mi"
cpu: "550m"
ephemeral-storage: "1Gi"
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: litmusportal-auth-server
namespace: litmus
labels:
component: litmusportal-auth-server
spec:
policyTypes:
- Ingress
podSelector:
matchLabels:
component: litmusportal-auth-server
ingress:
- from:
- podSelector:
matchLabels:
component: litmusportal-frontend
- from:
- podSelector:
matchLabels:
component: litmusportal-server
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-auth-server-service
spec:
type: NodePort
ports:
- name: auth-server-https
port: 9005
targetPort: 3001
- name: auth-rpc-server-https
port: 3031
targetPort: 3031
selector:
component: litmusportal-auth-server

File diff suppressed because it is too large

View File

@ -0,0 +1,420 @@
---
apiVersion: v1
kind: Secret
metadata:
name: litmus-portal-admin-secret
stringData:
DB_USER: "root"
DB_PASSWORD: "1234"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: litmus-portal-admin-config
data:
DB_SERVER: mongodb://my-release-mongodb-0.my-release-mongodb-headless:27017,my-release-mongodb-1.my-release-mongodb-headless:27017,my-release-mongodb-2.my-release-mongodb-headless:27017/admin
VERSION: "3.10.0"
SKIP_SSL_VERIFY: "false"
# Configurations if you are using dex for OAuth
DEX_ENABLED: "false"
OIDC_ISSUER: "http://<Your Domain>:32000"
DEX_OAUTH_CALLBACK_URL: "http://<litmus-portal frontend exposed URL>:8080/auth/dex/callback"
DEX_OAUTH_CLIENT_ID: "LitmusPortalAuthBackend"
DEX_OAUTH_CLIENT_SECRET: "ZXhhbXBsZS1hcHAtc2VjcmV0"
OAuthJwtSecret: "litmus-oauth@123"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: litmusportal-frontend-nginx-configuration
data:
nginx.conf: |
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
client_body_temp_path /tmp/client_temp;
proxy_temp_path /tmp/proxy_temp_path;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
server_tokens off;
include /etc/nginx/mime.types;
gzip on;
gzip_disable "msie6";
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
server {
listen 8185 ssl;
ssl_certificate /etc/tls/tls.crt;
ssl_certificate_key /etc/tls/tls.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_client_certificate /etc/tls/ca.crt;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
root /opt/chaos;
location /health {
return 200;
}
location / {
proxy_http_version 1.1;
add_header Cache-Control "no-cache";
try_files $uri /index.html;
autoindex on;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
location /auth/ {
proxy_ssl_verify off;
proxy_ssl_session_reuse on;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass "https://litmusportal-auth-server-service:9005/";
proxy_ssl_certificate /etc/tls/tls.crt;
proxy_ssl_certificate_key /etc/tls/tls.key;
}
location /api/ {
proxy_ssl_verify off;
proxy_ssl_session_reuse on;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass "https://litmusportal-server-service:9004/";
proxy_ssl_certificate /etc/tls/tls.crt;
proxy_ssl_certificate_key /etc/tls/tls.key;
}
}
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-frontend
labels:
component: litmusportal-frontend
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-frontend
template:
metadata:
labels:
component: litmusportal-frontend
spec:
automountServiceAccountToken: false
containers:
- name: litmusportal-frontend
image: litmuschaos/litmusportal-frontend:3.10.0
# securityContext:
# runAsUser: 2000
# allowPrivilegeEscalation: false
# runAsNonRoot: true
imagePullPolicy: Always
ports:
- containerPort: 8185
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
- mountPath: /etc/tls
name: tls-secret
volumes:
- name: nginx-config
configMap:
name: litmusportal-frontend-nginx-configuration
- name: tls-secret
secret:
secretName: tls-secret
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-frontend-service
spec:
type: NodePort
ports:
- name: http
port: 9091
targetPort: 8185
selector:
component: litmusportal-frontend
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-server
labels:
component: litmusportal-server
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-server
template:
metadata:
labels:
component: litmusportal-server
spec:
automountServiceAccountToken: false
volumes:
- name: gitops-storage
emptyDir: {}
- name: hub-storage
emptyDir: {}
- name: tls-secret
secret:
secretName: tls-secret
containers:
- name: graphql-server
image: litmuschaos/litmusportal-server:3.10.0
volumeMounts:
- mountPath: /tmp/
name: gitops-storage
- mountPath: /tmp/version
name: hub-storage
- mountPath: /etc/tls
name: tls-secret
securityContext:
runAsUser: 2000
allowPrivilegeEscalation: false
runAsNonRoot: true
readOnlyRootFilesystem: true
envFrom:
- configMapRef:
name: litmus-portal-admin-config
- secretRef:
name: litmus-portal-admin-secret
env:
# if self-signed certificates are used, pass the base64 tls certificate to allow agents to use tls for communication
- name: TLS_CERT_B64
value: ""
- name: ENABLE_GQL_INTROSPECTION
value: "false"
- name: INFRA_DEPLOYMENTS
value: '["app=chaos-exporter", "name=chaos-operator", "app=workflow-controller", "app=event-tracker"]'
- name: CHAOS_CENTER_UI_ENDPOINT
value: ""
- name: SUBSCRIBER_IMAGE
value: "litmuschaos/litmusportal-subscriber:3.10.0"
- name: EVENT_TRACKER_IMAGE
value: "litmuschaos/litmusportal-event-tracker:3.10.0"
- name: ARGO_WORKFLOW_CONTROLLER_IMAGE
value: "litmuschaos/workflow-controller:v3.3.1"
- name: ARGO_WORKFLOW_EXECUTOR_IMAGE
value: "litmuschaos/argoexec:v3.3.1"
- name: LITMUS_CHAOS_OPERATOR_IMAGE
value: "litmuschaos/chaos-operator:3.10.0"
- name: LITMUS_CHAOS_RUNNER_IMAGE
value: "litmuschaos/chaos-runner:3.10.0"
- name: LITMUS_CHAOS_EXPORTER_IMAGE
value: "litmuschaos/chaos-exporter:3.10.0"
- name: CONTAINER_RUNTIME_EXECUTOR
value: "k8sapi"
- name: DEFAULT_HUB_BRANCH_NAME
value: "3.10.x"
- name: LITMUS_AUTH_GRPC_ENDPOINT
value: "litmusportal-auth-server-service"
- name: LITMUS_AUTH_GRPC_PORT
value: "3030"
- name: WORKFLOW_HELPER_IMAGE_VERSION
value: "3.10.0"
- name: REMOTE_HUB_MAX_SIZE
value: "5000000"
- name: INFRA_COMPATIBLE_VERSIONS
value: '["3.10.0"]'
- name: ALLOWED_ORIGINS
value: ".*" #eg: ^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-server-service(:[0-9]+|)?
- name: ENABLE_INTERNAL_TLS
value: "true"
- name: TLS_CERT_PATH
value: "/etc/tls/tls.crt"
- name: TLS_KEY_PATH
value: "/etc/tls/tls.key"
- name: CA_CERT_TLS_PATH
value: "/etc/tls/ca.crt"
- name: REST_PORT
value: "8081"
- name: GRPC_PORT
value: "8001"
ports:
- containerPort: 8081
- containerPort: 8001
imagePullPolicy: Always
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: litmusportal-server
namespace: litmus
labels:
component: litmusportal-server
spec:
policyTypes:
- Ingress
podSelector:
matchLabels:
component: litmusportal-server
ingress:
- from:
- podSelector:
matchLabels:
component: litmusportal-frontend
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-server-service
spec:
type: NodePort
ports:
- name: graphql-server-https
port: 9004
targetPort: 8081
- name: graphql-rpc-server-https
port: 8001
targetPort: 8001
selector:
component: litmusportal-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: litmusportal-auth-server
labels:
component: litmusportal-auth-server
spec:
replicas: 1
selector:
matchLabels:
component: litmusportal-auth-server
template:
metadata:
labels:
component: litmusportal-auth-server
spec:
volumes:
- name: tls-secret
secret:
secretName: tls-secret
automountServiceAccountToken: false
containers:
- name: auth-server
volumeMounts:
- mountPath: /etc/tls
name: tls-secret
image: litmuschaos/litmusportal-auth-server:3.10.0
securityContext:
runAsUser: 2000
allowPrivilegeEscalation: false
runAsNonRoot: true
readOnlyRootFilesystem: true
envFrom:
- configMapRef:
name: litmus-portal-admin-config
- secretRef:
name: litmus-portal-admin-secret
env:
- name: STRICT_PASSWORD_POLICY
value: "false"
- name: ADMIN_USERNAME
value: "admin"
- name: ADMIN_PASSWORD
value: "litmus"
- name: LITMUS_GQL_GRPC_ENDPOINT
value: "litmusportal-server-service"
- name: LITMUS_GQL_GRPC_PORT
value: "8000"
- name: ALLOWED_ORIGINS
value: "^(http://|https://|)litmuschaos.io(:[0-9]+|)?,^(http://|https://|)litmusportal-server-service(:[0-9]+|)?" #ip needs to added here
- name: ENABLE_INTERNAL_TLS
value: "true"
- name: TLS_CERT_PATH
value: "/etc/tls/tls.crt"
- name: TLS_KEY_PATH
value: "/etc/tls/ctls.key"
- name: CA_CERT_TLS_PATH
value: "/etc/tls/ca.crt"
- name: REST_PORT
value: "3001"
- name: GRPC_PORT
value: "3031"
ports:
- containerPort: 3001
- containerPort: 3031
imagePullPolicy: Always
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: litmusportal-auth-server
namespace: litmus
labels:
component: litmusportal-auth-server
spec:
policyTypes:
- Ingress
podSelector:
matchLabels:
component: litmusportal-auth-server
ingress:
- from:
- podSelector:
matchLabels:
component: litmusportal-frontend
- from:
- podSelector:
matchLabels:
component: litmusportal-server
---
apiVersion: v1
kind: Service
metadata:
name: litmusportal-auth-server-service
spec:
type: NodePort
ports:
- name: auth-server-https
port: 9005
targetPort: 3001
- name: auth-rpc-server-https
port: 3031
targetPort: 3031
selector:
component: litmusportal-auth-server

View File

@ -4,7 +4,7 @@
"schemes": ["https", "http"],
"info": {
"title": "Litmus Portal Authentication API",
"version": "2.7.0",
"version": "3.0.0",
"description": "Litmus Portal Authentication APIs are used to authenticate the identity of a user and to perform several user-specific tasks like:\n <li>Update Profile</li>\n <li>Change Password</li>\n <li>Reset Password</li>\n <li>Create new users etc.</li>\n"
},
"paths": {

View File

@ -7,7 +7,7 @@
],
"info": {
"title": "Litmus Portal Authentication API",
"version": "2.7.0",
"version": "3.9.0",
"description": "Litmus Portal Authentication APIs are used to authenticate the identity of a user and to perform several user-specific tasks like:\n <li>Update Profile</li>\n <li>Change Password</li>\n <li>Reset Password</li>\n <li>Create new users etc.</li>\n"
},
"paths": {

File diff suppressed because it is too large

View File

@ -31,7 +31,7 @@ spec:
- name: REGION
value: '<region for instances>'
# tag of the ec2 instance
- name: INSTANCE_TAG
- name: EC2_INSTANCE_TAG
value: 'key:value'
- name: TOTAL_CHAOS_DURATION
value: '60'
@ -67,7 +67,7 @@ spec:
value: '60'
- name: REGION
value: '<region for instances>'
- name: INSTANCE_TAG
- name: EC2_INSTANCE_TAG
value: 'key:value'
```

View File

@ -12,7 +12,7 @@ spec:
spec:
components:
env:
# delay between each iteration of chaos
# delay between each iteration of chaos
- name: CHAOS_INTERVAL
value: '15'
# time duration for the chaos execution
@ -20,6 +20,6 @@ spec:
value: '60'
- name: REGION
value: '<region for instances>'
- name: INSTANCE_TAG
- name: EC2_INSTANCE_TAG
value: 'key:value'

View File

@ -21,7 +21,7 @@ spec:
- name: REGION
value: '<region for instances>'
# tag of the ec2 instance
- name: INSTANCE_TAG
- name: EC2_INSTANCE_TAG
value: 'key:value'
- name: TOTAL_CHAOS_DURATION
value: '60'

View File

@ -137,9 +137,9 @@ When the MANAGED_NODEGROUP is enable then the experiment will not try to start t
<th> Notes </th>
</tr>
<tr>
<td> INSTANCE_TAG </td>
<td> EC2_INSTANCE_TAG </td>
<td> Instance Tag to filter the target ec2 instance.</td>
<td> The <code>INSTANCE_TAG</code> should be provided as <code>key:value</code> ex: <code>team:devops</code></td>
<td> The <code>EC2_INSTANCE_TAG</code> should be provided as <code>key:value</code> ex: <code>team:devops</code></td>
</tr>
<tr>
<td> REGION </td>
@ -196,7 +196,7 @@ Refer the [common attributes](../common/common-tunables-for-all-experiments.md)
### Target single instance
It will stop a random single ec2 instance with the given `INSTANCE_TAG` tag and the `REGION` region.
It will stop a random single ec2 instance with the given `EC2_INSTANCE_TAG` tag and the `REGION` region.
Use the following example to tune this:
@ -217,7 +217,7 @@ spec:
components:
env:
# tag of the ec2 instance
- name: INSTANCE_TAG
- name: EC2_INSTANCE_TAG
value: 'key:value'
# region for the ec2 instance
- name: REGION
@ -228,7 +228,7 @@ spec:
### Target Percent of instances
It will stop the `INSTANCE_AFFECTED_PERC` percentage of ec2 instances with the given `INSTANCE_TAG` tag and `REGION` region.
It will stop the `INSTANCE_AFFECTED_PERC` percentage of ec2 instances with the given `EC2_INSTANCE_TAG` tag and `REGION` region.
Use the following example to tune this:
@ -252,7 +252,7 @@ spec:
- name: INSTANCE_AFFECTED_PERC
value: '100'
# tag of the ec2 instance
- name: INSTANCE_TAG
- name: EC2_INSTANCE_TAG
value: 'key:value'
# region for the ec2 instance
- name: REGION
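
The hunks above only show the renamed variable. Purely for illustration, a complete ChaosEngine tuning that combines `EC2_INSTANCE_TAG`, `REGION`, and `INSTANCE_AFFECTED_PERC` might look like the sketch below; the engine name, service account, and experiment name (`ec2-terminate-by-tag`) are hypothetical placeholders, not values taken from this diff.

```yaml
# Illustrative sketch, assuming an experiment named ec2-terminate-by-tag;
# all names and values are placeholders.
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: ec2-chaos
  namespace: default
spec:
  engineState: 'active'
  chaosServiceAccount: ec2-terminate-by-tag-sa
  experiments:
    - name: ec2-terminate-by-tag
      spec:
        components:
          env:
            # percentage of tagged instances to stop
            - name: INSTANCE_AFFECTED_PERC
              value: '100'
            # tag of the ec2 instances, in key:value form
            - name: EC2_INSTANCE_TAG
              value: 'team:devops'
            # region of the target instances
            - name: REGION
              value: 'us-east-1'
            # total chaos duration in seconds
            - name: TOTAL_CHAOS_DURATION
              value: '60'
```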

View File

@ -16,7 +16,7 @@ spec:
- name: INSTANCE_AFFECTED_PERC
value: '100'
# tag of the ec2 instance
- name: INSTANCE_TAG
- name: EC2_INSTANCE_TAG
value: 'key:value'
# region for the ec2 instance
- name: REGION

View File

@ -13,7 +13,7 @@ spec:
components:
env:
# tag of the ec2 instance
- name: INSTANCE_TAG
- name: EC2_INSTANCE_TAG
value: 'key:value'
# region for the ec2 instance
- name: REGION

View File

@ -28,19 +28,19 @@ hide:
### I encountered the concept of namespace and cluster scope during the installation. What is meant by the scopes, and how does it affect experiments to be performed outside or inside the litmus Namespace?
The scope of control plane (portal) installation can be tuned by the env PORTAL_SCOPE in the litmusportal-server deployment. Its value can be kept as a “namespace” if you want to provide restricted access to litmus. It is useful in strictly multi-tenant environments in which users have namespace-level permissions and need to set up their own chaos-center instances. This is also the case in certain popular SaaS environments like Okteto cloud.
The scope of the control plane (portal) installation can be tuned via the env 'PORTAL_SCOPE' in the 'litmusportal-server' deployment. Its value can be set to "namespace" if you want to provide restricted access to Litmus. This is useful in strictly multi-tenant environments in which users have namespace-level permissions and need to set up their own chaos-center instances, as is the case in certain popular SaaS environments like Okteto Cloud.
This setting can be used in combination with a flag, AGENT_SCOPE in the litmus-portal-admin-config configmap to limit the purview of the corresponding self-agent (the execution plane pods on the cluster/namespace where the control plane is installed) to the current namespace, which means the user can perform chaos experiments only in chose installation namespace. By default, both are set up for cluster-wide access, by which microservices across the cluster can be subjected to chaos.
This setting can be used in combination with the 'AGENT_SCOPE' flag in the 'litmus-portal-admin-config' ConfigMap to limit the purview of the corresponding self-agent (the execution-plane pods on the cluster/namespace where the control plane is installed) to the current namespace, which means the user can perform chaos experiments only in the chosen installation namespace (see the sketch after this answer). By default, both are set up for cluster-wide access, so microservices across the cluster can be subjected to chaos.
In case of external-agents, i.e., the targets being connected to the chaos-center, you can choose the agents scope to either cluster or namespace via a litmusctl flag (when using it in non-interactive mode) or by providing the appropriate input (in interactive mode).
In the case of external agents, i.e., the targets connected to the chaos-center, you can set the agent's scope to either cluster or namespace via a 'litmusctl' flag (when using it in non-interactive mode) or by providing the appropriate input (in interactive mode).
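
For illustration, the two knobs described above could be pinned to a single namespace as in the sketch below; the Deployment and ConfigMap names come from this answer, while the namespace and values shown are assumptions rather than defaults.

```yaml
# Sketch only: restrict both the portal and the self-agent to one namespace.
apiVersion: v1
kind: ConfigMap
metadata:
  name: litmus-portal-admin-config
  namespace: litmus
data:
  AGENT_SCOPE: "namespace"
---
# Excerpt of the litmusportal-server Deployment's container spec (assumed):
# env:
#   - name: PORTAL_SCOPE
#     value: "namespace"
```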
### Does Litmus 2.0 maintain backward compatibility with Kubernetes?
Yes Litmus maintains a separate CRD manifest to support backward compatibility.
Yes, Litmus maintains a separate CRD manifest to support backward compatibility.
### Can I run LitmusChaos Outside of my Kubernetes clusters?
You can run the chaos experiments outside of the k8s cluster(as a container) which is dockerized. But other components such as chaos-operator,chaos-exporter, and runner are Kubernetes native. They require k8s cluster to run on it.
You can run the chaos experiments outside of the k8s cluster as a dockerized container. However, other components such as chaos-operator, chaos-exporter, and the runner are Kubernetes-native and require a k8s cluster to run.
### What is the minimum system requirement to run Portal and agent together?
@ -48,11 +48,11 @@ To run LitmusPortal you need to have a minimum of 1 GiB memory and 1 core of CPU
### Can I use LitmusChaos in Production?
Yes, you can use Litmuschaos in production. Litmus has a wide variety of experiments and is designed as per the principles of chaos. But, if you are new to Chaos Engineering, we would recommend you to first try Litmus on your dev environment, and then after getting the confidence, you should use it in Production.
Yes, you can use LitmusChaos in production. Litmus has a wide variety of experiments and is designed according to the principles of chaos engineering. However, if you are new to Chaos Engineering, we recommend first trying Litmus in your dev environment and moving to production once you have gained confidence.
### Why should I use Litmus? What is its distinctive feature?
Litmus is a toolset to do cloud-native Chaos Engineering. Litmus provides tools to orchestrate chaos on Kubernetes to help developers and SREs find weaknesses in their application deployments. Litmus can be used to run chaos experiments initially in the staging environment and eventually in production to find bugs, vulnerabilities. Fixing the weaknesses leads to increased resilience of the system. Litmus adopts a “Kubernetes-native” approach to define chaos intent in a declarative manner via custom resources.
Litmus is a toolset for performing cloud-native Chaos Engineering. Litmus provides tools to orchestrate chaos on Kubernetes to help developers and SREs find weaknesses in their application deployments. Litmus can be used to run chaos experiments initially in the staging environment and eventually in production to find bugs and vulnerabilities. Fixing the weaknesses leads to increased resilience of the system. Litmus adopts a “Kubernetes-native” approach to define chaos intent in a declarative manner via custom resources.
### What licensing model does Litmus use?
@ -60,7 +60,7 @@ Litmus is developed under Apache License 2.0 license at the project level. Some
### What are the prerequisites to get started with Litmus?
For getting started with Litmus the only prerequisites is to have Kubernetes 1.11+ cluster. While most pod/container level experiments are supported on any Kubernetes platform, some of the infrastructure chaos experiments are supported on specific platforms. To find the list of supported platforms for an experiment, view the "Platforms" section on the sidebar in the experiment page.
To get started with Litmus, the only prerequisite is a Kubernetes 1.11+ cluster. While most pod/container-level experiments are supported on any Kubernetes platform, some of the infrastructure chaos experiments are supported only on specific platforms. To find the list of supported platforms for an experiment, see the "Platforms" section in the sidebar of the experiment page.
### How to Install Litmus on the Kubernetes Cluster?

View File

@ -7,7 +7,7 @@ metadata:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator-serviceaccount
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl
@ -22,7 +22,7 @@ metadata:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator-role
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl
@ -59,7 +59,7 @@ metadata:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator-rolebinding
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl
@ -81,7 +81,7 @@ metadata:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl
@ -97,7 +97,7 @@ spec:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl
@ -106,13 +106,13 @@ spec:
serviceAccountName: litmus
containers:
- name: chaos-operator
image: litmuschaos.docker.scarf.sh/litmuschaos/chaos-operator:3.9.0
image: litmuschaos.docker.scarf.sh/litmuschaos/chaos-operator:3.10.0
command:
- chaos-operator
imagePullPolicy: Always
env:
- name: CHAOS_RUNNER_IMAGE
value: "litmuschaos.docker.scarf.sh/litmuschaos/chaos-runner:3.9.0"
value: "litmuschaos.docker.scarf.sh/litmuschaos/chaos-runner:3.10.0"
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:

View File

@ -16,7 +16,7 @@ spec:
containers:
- name: chaos-scheduler
# Replace this with the built image name
image: litmuschaos.docker.scarf.sh/litmuschaos/chaos-scheduler:3.9.0
image: litmuschaos.docker.scarf.sh/litmuschaos/chaos-scheduler:3.10.0
command:
- chaos-scheduler
imagePullPolicy: IfNotPresent

View File

@ -7,7 +7,7 @@ metadata:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator-serviceaccount
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl
@ -22,7 +22,7 @@ metadata:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator-role
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl
@ -59,7 +59,7 @@ metadata:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator-rolebinding
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl

View File

@ -7,7 +7,7 @@ metadata:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator-serviceaccount
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl
@ -22,7 +22,7 @@ metadata:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator-role
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl
@ -62,7 +62,7 @@ metadata:
app.kubernetes.io/name: litmus
# provide unique instance-id if applicable
# app.kubernetes.io/instance: litmus-abcxzy
app.kubernetes.io/version: v3.9.0
app.kubernetes.io/version: v3.10.0
app.kubernetes.io/component: operator-rolebinding
app.kubernetes.io/part-of: litmus
app.kubernetes.io/managed-by: kubectl

File diff suppressed because it is too large

View File

@ -70,7 +70,7 @@ Check out the <a href="https://github.com/litmuschaos/community-charts/blob/mast
Litmus Chaos 已隶属CNCF.
[![CNCF](https://github.com/cncf/artwork/blob/master/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
[![CNCF](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
## 社区
@ -91,5 +91,5 @@ Litmus Chaos 已隶属CNCF.
</a>
<br>
<a href="https://landscape.cncf.io/selected=litmus">
CNCF Landscape <img src="https://landscape.cncf.io/images/left-logo.svg" alt="Litmus on CNCF Landscape" height="15">
CNCF Landscape <img src="https://landscape.cncf.io/images/cncf-landscape-horizontal-color.svg" alt="Litmus on CNCF Landscape" height="15">
</a>

View File

@ -76,7 +76,7 @@ Litmos está licenciado bajo la Licencia Apache, versión 2.0. Ver el texto comp
Litmus Chaos forma parte de los projectos CNCF.
[![CNCF](https://github.com/cncf/artwork/blob/master/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
[![CNCF](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
## Communidad
@ -97,5 +97,5 @@ Recursos de la comunidad:
</a>
<br>
<a href="https://landscape.cncf.io/selected=litmus">
CNCF Landscape <img src="https://landscape.cncf.io/images/left-logo.svg" alt="Litmus on CNCF Landscape" height="15">
CNCF Landscape <img src="https://landscape.cncf.io/images/cncf-landscape-horizontal-color.svg" alt="Litmus on CNCF Landscape" height="15">
</a>

View File

@ -75,7 +75,7 @@ Litmus est concédé sous licence Apache, version 2.0. Voir [LICENCE](./LICENSE)
Litmus Chaos fait partie des projets CNCF.
[![CNCF](https://github.com/cncf/artwork/blob/master/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
[![CNCF](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
## Communauté
@ -95,5 +95,5 @@ Ressources communautaires:
</a>
<br>
<a href="https://landscape.cncf.io/selected=litmus">
Paysage CNCF <img src="https://landscape.cncf.io/images/left-logo.svg" alt="Litmus on CNCF Landscape" height="15">
Paysage CNCF <img src="https://landscape.cncf.io/images/cncf-landscape-horizontal-color.svg" alt="Litmus on CNCF Landscape" height="15">
</a>

View File

@ -98,7 +98,7 @@ Bitte schaue bei den jeweiligen Projekt nach.
Litmus Chaos ist Teil der CNCF Projekte.
[![CNCF](https://github.com/cncf/artwork/blob/master/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
[![CNCF](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
## Gemeinschaft
@ -120,5 +120,5 @@ Kommunikationskanäle zum Austausch und für weitere Informationen:
</a>
<br>
<a href="https://landscape.cncf.io/selected=litmus">
CNCF Landscape <img src="https://landscape.cncf.io/images/left-logo.svg" alt="Litmus on CNCF Landscape" height="15">
CNCF Landscape <img src="https://landscape.cncf.io/images/cncf-landscape-horizontal-color.svg" alt="Litmus on CNCF Landscape" height="15">
</a>

View File

@ -74,7 +74,7 @@
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Flitmuschaos%2Flitmus.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Flitmuschaos%2Flitmus?ref=badge_large)
लिटमस कैओस सीएनसीएफ परियोजनाओं का हिस्सा है।
[![CNCF](https://github.com/cncf/artwork/blob/master/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
[![CNCF](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
## समुदाय
@ -95,5 +95,5 @@
</a>
<br>
<a href="https://landscape.cncf.io/selected=litmus">
सीएनसीएफ लैंडस्केप <img src="https://landscape.cncf.io/images/left-logo.svg" alt="Litmus on CNCF Landscape" height="15">
सीएनसीएफ लैंडस्केप <img src="https://landscape.cncf.io/images/cncf-landscape-horizontal-color.svg" alt="Litmus on CNCF Landscape" height="15">
</a>

View File

@ -73,7 +73,7 @@ Litmus は Apache License, Version 2.0 の下でライセンスされていま
Litmus Chaos はCNCFプロジェクトの一部です。
[![CNCF](https://github.com/cncf/artwork/blob/master/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
[![CNCF](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
## コミュニティ
@ -94,5 +94,5 @@ Litmusコミュニティミーティングは毎月第3水曜日の10:00PM IST/9
</a>
<br>
<a href="https://landscape.cncf.io/selected=litmus">
CNCF Landscape <img src="https://landscape.cncf.io/images/left-logo.svg" alt="Litmus on CNCF Landscape" height="15">
CNCF Landscape <img src="https://landscape.cncf.io/images/cncf-landscape-horizontal-color.svg" alt="Litmus on CNCF Landscape" height="15">
</a>

View File

@ -158,5 +158,5 @@ LitmusChaos는 CNCF 프로젝트의 일부입니다.
</a>
<br>
<a href="https://landscape.cncf.io/?selected=litmus">
CNCF Landscape <img src="https://raw.githubusercontent.com/cncf/landscape/34050e7ca713650f7e2813f53c3b0a697cbb274b/hosted_logos/cncf.svg" alt="Litmus on CNCF Landscape" height="15">
CNCF Landscape <img src="https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png" alt="CNCF Landscape의 리트머스" height="15">
</a>

View File

@ -72,7 +72,7 @@ Litmus é licenciado através da Apache License, Version 2.0. Veja [LICENSE](../
Litmus Chaos faz parte dos projetos CNCF.
[![CNCF](https://github.com/cncf/artwork/blob/master/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
[![CNCF](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
## Comunidade
@ -95,5 +95,5 @@ Recursos da comunidade:
</a>
<br>
<a href="https://landscape.cncf.io/selected=litmus">
CNCF Landscape <img src="https://landscape.cncf.io/images/left-logo.svg" alt="Litmus on CNCF Landscape" height="15">
CNCF Landscape <img src="https://landscape.cncf.io/images/cncf-landscape-horizontal-color.svg" alt="Litmus on CNCF Landscape" height="15">
</a>

View File

@ -70,7 +70,7 @@ Litmus находится под Apache License, Version 2.0. Полный те
Litmus Chaos является частью проектов CNCF.
[![CNCF](https://github.com/cncf/artwork/blob/master/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
[![CNCF](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png)](https://landscape.cncf.io/selected=litmus)
## Комьюнити
@ -91,5 +91,5 @@ Litmus Chaos является частью проектов CNCF.
</a>
<br>
<a href="https://landscape.cncf.io/selected=litmus">
CNCF Landscape <img src="https://landscape.cncf.io/images/left-logo.svg" alt="Litmus on CNCF Landscape" height="15">
CNCF Landscape <img src="https://landscape.cncf.io/images/cncf-landscape-horizontal-color.svg" alt="Litmus on CNCF Landscape" height="15">
</a>