Compare commits


22 Commits

Author SHA1 Message Date
LeiZhou e1e498819f
Getting started: fix some typo (#45)
Signed-off-by: Zhou,Lei <lei.zhou@intel.com>
2022-10-06 13:39:24 +05:30
Abhishek Dubey d403e947c8
[Development][Add] Added support for keystore in elasticsearch (#44)
* Added support for elastic keystore
* Added example for keystore
* Updated documentation for v0.4.0

Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-09-03 13:29:44 +05:30
Abhishek Dubey c1a2a62465
[Development][Add] Added plugins support for elasticsearch (#42)
* Added plugins support for elasticsearch
* Updated version of operator
* Added examples for elasticsearch plugin
* Fixed code for plugin installation
* Changed plugin installation architecture

Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-09-02 11:12:19 +05:30
Abhishek Dubey 6f1f2c8f20
[Development][Add] Added support for 8.X version as well in elasticsearch (#41)
* Added support for 8.X version as well in elasticsearch
* Fixed logic for sa creation

Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-08-07 02:16:14 +05:30
Abhishek Dubey 70ec8436c6
Add files via upload 2022-08-07 00:22:35 +05:30
Abhishek Dubey 9ba46d486d
[LICENSE][Add] Added LICENSE for the project (#38) 2022-07-04 21:02:57 +05:30
Abhishek Dubey 56535fd84e
[Documentation][Update] Updated documentation with latest information (#37)
* Added initial documentation
* Added documentation for fluentd
* Updated documentation with latest information

Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-07-04 20:51:10 +05:30
Abhishek Dubey 7144b8b463
[Documentation][Add] Added documentation for fluentd (#36)
* Added initial documentation
* Added documentation for fluentd

Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-06-09 13:41:24 +05:30
Abhishek Dubey 426f61139f
Add files via upload 2022-06-06 19:32:59 +05:30
Abhishek Dubey 2c331c4d30
[Documentation][Add] Added initial documentation (#35)
Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-06-06 19:00:13 +05:30
Abhishek Dubey 06743df10e
Add files via upload 2022-06-02 16:29:33 +05:30
Abhishek Dubey e6907e08a3
Add files via upload 2022-06-02 12:59:31 +05:30
Abhishek Dubey 7155df7d7c
[Documentation][Add] Added badges and readme with information (#34)
* Added some basic badges inside README.md
* Added badge of azure devops and architecture
* Added block for supported features
* Added getting started and prerequisite
* Added installation information

Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-05-29 01:24:55 +05:30
Abhishek Dubey aaaa1a5eb1 Updated correct branch name 2022-05-29 01:24:05 +05:30
iamabhishek-dubey dd6b5ca173 Added pipeline for CI of operator
Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-05-28 00:48:50 +05:30
iamabhishek-dubey 911e921038 Added pipeline for CI of operator
Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-05-28 00:40:55 +05:30
Abhishek Dubey d9d3dd51a7
Fixed golang ci linter warnings (#32)
Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-05-28 00:25:44 +05:30
Abhishek Dubey 0f34fb9af2
Added code security information 2022-05-27 00:58:04 +05:30
Abhishek Dubey 0ac5d5dc8c
Added security scanning 2022-05-27 00:56:25 +05:30
Abhishek Dubey 8e0fbdc727
[Revamped][Change] Changed the logging operator structure (#20)
* Updated logo design for logging operator
* Revamped Folder structure
* Added revamped code for logging operator
* Added default values for elasticsearch
* Added example for basic elasticsearch cluster
* Added code for container definition
* Added secret creation logic
* Updated examples
* Updated manifest for logging operator
* Fixed secret creation logic
* Added logic for es master service
* Added default values for master
* Modified code for master creation
* Fixed master creation logic
* Fixed TLS secret reference
* Added example for resources
* Added example for affinity and selector
* Added examples for tolerations and priority
* Added examples for multinode
* Additional printer column is added
* Added status fields in elasticsearch
* Added logic for updating status for elasticsearch
* Added example for additional config
* Added types and roles for fluentd
* Added capability to create serviceaccount
* Added support for fluentd clusterroles
* Configured RBAC for fluentd
* Added configmap creation methods
* Added codebase for managing daemonset
* Added fluentd setup codebase
* Added status fields for fluentd
* Added examples for fluentd
* Added definition for kibana
* Added examples for kibana
* Updated example for kibana
* Added some basic info for installation
* Updated CRD definition for kibana
* Fixed elasticsearch version

Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2022-05-27 00:50:28 +05:30
Abhishek Dubey a6026954ca
Fixed extra env support (#16)
Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2021-08-19 12:18:54 +05:30
Abhishek Dubey daf136070e
[Development][Update] Updated Index template and documentation (#14)
* Updated lifecycle CRD
* Updated CRD for index template
* Updated Documentation
* Added function to generate index template
* Added elastic connection details
* Added code for index template
* Added controller for index template
* Updated helm charts for latest code
* Updated Documentation for latest code
* Added initial updated template
* Updated CRD definition

Signed-off-by: iamabhishek-dubey <abhishekbhardwaj510@gmail.com>
2020-09-10 11:09:25 +05:30
7685 changed files with 433614 additions and 14309 deletions


@@ -0,0 +1,28 @@
---
trigger:
- master
pr:
branches:
include:
- master
variables:
- group: RuntimeVariables
resources:
repositories:
- repository: golang-template
type: github
name: opstree/azure-devops-template
endpoint: OT-CONTAINER-KIT
extends:
template: operator-ci.yaml@golang-template
parameters:
ApplicationName: logging-operator
QuayImageName: opstree/logging-operator
GithubImageName: ot-container-kit/logging-operator/logging-operator
BuildDocs: false
AppVersion: "0.3.1"
GolangVersion: "1.17"


@@ -1,80 +0,0 @@
---
version: 2
jobs:
code-quality:
docker:
- image: circleci/golang:1.13
working_directory: /go/src/logging-operator
steps:
- checkout
- run: make fmt
- run: make vet
code-build:
docker:
- image: circleci/golang:1.13
working_directory: /go/src/logging-operator
steps:
- checkout
- run: make manager
image-build:
docker:
- image: circleci/golang:1.13
working_directory: /go/src/logging-operator
steps:
- checkout
- setup_remote_docker
- run: make bundle-build
k8s-validation:
machine:
image: circleci/classic:201808-01
environment:
K8S_VERSION: v1.15.7
HELM_VERSION: v3.1.0
KUBECONFIG: /home/circleci/.kube/config
MINIKUBE_VERSION: v1.7.3
MINIKUBE_WANTUPDATENOTIFICATION: false
MINIKUBE_WANTREPORTERRORPROMPT: false
MINIKUBE_HOME: /home/circleci
CHANGE_MINIKUBE_NONE_USER: true
steps:
- checkout
- run:
name: Setup kubectl
command: |
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
mkdir -p ${HOME}/.kube
touch ${HOME}/.kube/config
- run:
name: Setup minikube
command: |
curl -Lo minikube https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/
- run:
name: Start minikube
command: |
sudo -E minikube start --vm-driver=none --cpus 2 --memory 2048 --kubernetes-version=${K8S_VERSION}
- run:
name: Setup helm
command: |
curl -Lo helm-${HELM_VERSION}-linux-amd64.tar.gz https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz \
&& tar -xvzf helm-${HELM_VERSION}-linux-amd64.tar.gz && chmod +x linux-amd64/helm \
&& sudo mv linux-amd64/helm /usr/local/bin/
- run:
name: Setup Namespace
command: |
kubectl create namespace logging-operator
- run:
name: Validating logging operator
command: |
./scripts/k8s-validate.sh
workflows:
version: 2
main:
jobs:
- code-quality
- code-build
- image-build
- k8s-validation

.dockerignore Normal file

@@ -0,0 +1,4 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore build and test binaries.
bin/
testbin/


@@ -1,15 +0,0 @@
name: ci
on:
push:
branches:
- master
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.x
- run: pip install mkdocs-material
- run: mkdocs gh-deploy --force

.github/workflows/codeql-analysis.yml vendored Normal file

@@ -0,0 +1,72 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ master ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
schedule:
- cron: '19 1 * * 6'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

.gitignore vendored

@@ -6,6 +6,7 @@
*.so
*.dylib
bin
testbin/*
# Test binary, build with `go test -c`
*.test


@@ -1,18 +1,29 @@
FROM golang:1.13 as builder
WORKDIR /go/src/logging-operator
# Build the manager binary
FROM golang:1.17 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
COPY . /go/src/logging-operator
# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY controllers/ controllers/
COPY k8sgo/ k8sgo/
COPY elasticgo/ elasticgo/
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /go/src/logging-operator/manager .
USER nonroot:nonroot
COPY --from=builder /workspace/manager .
USER 65532:65532
ENTRYPOINT ["/manager"]

LICENSE

@@ -1,13 +1,201 @@
Copyright 2020 Opstree Solutions.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
http://www.apache.org/licenses/LICENSE-2.0
1. Definitions.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [2022] [Opstree Solutions]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Makefile

@@ -1,20 +1,55 @@
# Current Operator version
VERSION ?= 0.2.0
# Default bundle image tag
BUNDLE_IMG ?= logging-operator:$(VERSION)
# Options for 'bundle-build'
# VERSION defines the project version for the bundle.
# Update this value when you upgrade the version of your project.
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= v0.4.0
# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
# To re-generate a bundle for other specific channels without changing the standard setup, you can:
# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable)
# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable")
ifneq ($(origin CHANNELS), undefined)
BUNDLE_CHANNELS := --channels=$(CHANNELS)
endif
# DEFAULT_CHANNEL defines the default channel used in the bundle.
# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable")
# To re-generate a bundle for any other default channel without changing the default setup, you can:
# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable)
# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable")
ifneq ($(origin DEFAULT_CHANNEL), undefined)
BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images.
# This variable is used to construct full image tags for bundle and catalog images.
#
# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
# logging.opstreelabs.in/logging-operator-bundle:$VERSION and logging.opstreelabs.in/logging-operator-catalog:$VERSION.
IMAGE_TAG_BASE ?= logging.opstreelabs.in/logging-operator
# BUNDLE_IMG defines the image:tag used for the bundle.
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command
BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests
# You can enable this value if you would like to use SHA Based Digests
# To enable set flag to true
USE_IMAGE_DIGESTS ?= false
ifeq ($(USE_IMAGE_DIGESTS), true)
BUNDLE_GEN_FLAGS += --use-image-digests
endif
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true"
IMG ?= quay.io/opstree/logging-operator:$(VERSION)
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.23
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -23,103 +58,176 @@ else
GOBIN=$(shell go env GOBIN)
endif
all: manager
# Setting SHELL to bash allows bash commands to be executed by recipes.
# This is a requirement for 'setup-envtest.sh' in the test target.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec
# Run tests
test: generate fmt vet manifests
go test ./... -coverprofile cover.out
.PHONY: all
all: build
# Build manager binary
manager: generate fmt vet
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
.PHONY: help
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Development
.PHONY: manifests
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
$(CONTROLLER_GEN) rbac:roleName=logging-operator crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
.PHONY: generate
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
.PHONY: fmt
fmt: ## Run go fmt against code.
go fmt ./...
.PHONY: vet
vet: ## Run go vet against code.
go vet ./...
.PHONY: test
test: manifests generate fmt vet envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
##@ Build
.PHONY: build
build: fmt
go build -o bin/manager main.go
# Run against the configured Kubernetes cluster in ~/.kube/config
run: generate fmt vet manifests
.PHONY: run
run: manifests generate fmt vet ## Run a controller from your host.
go run ./main.go
# Install CRDs into a cluster
install: manifests kustomize
.PHONY: docker-build
docker-build: ## Build docker image with the manager.
docker build -t ${IMG} .
.PHONY: docker-push
docker-push: ## Push docker image with the manager.
docker push ${IMG}
##@ Deployment
ifndef ignore-not-found
ignore-not-found = false
endif
.PHONY: install
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl apply -f -
# Uninstall CRDs from a cluster
uninstall: manifests kustomize
$(KUSTOMIZE) build config/crd | kubectl delete -f -
.PHONY: uninstall
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
deploy: manifests kustomize
.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f -
# Generate manifests e.g. CRD, RBAC etc.
manifests: controller-gen
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
.PHONY: undeploy
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
# Run go fmt against code
fmt:
go fmt ./...
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
.PHONY: controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0)
# Run go vet against code
vet:
go vet ./...
KUSTOMIZE = $(shell pwd)/bin/kustomize
.PHONY: kustomize
kustomize: ## Download kustomize locally if necessary.
$(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7)
# Generate code
generate: controller-gen
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
ENVTEST = $(shell pwd)/bin/setup-envtest
.PHONY: envtest
envtest: ## Download envtest-setup locally if necessary.
$(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest)
# Build the docker image
docker-build: test
docker build . -t ${IMG}
# go-get-tool will 'go get' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
define go-get-tool
@[ -f $(1) ] || { \
set -e ;\
TMP_DIR=$$(mktemp -d) ;\
cd $$TMP_DIR ;\
go mod init tmp ;\
echo "Downloading $(2)" ;\
GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\
rm -rf $$TMP_DIR ;\
}
endef
# Push the docker image
docker-push:
docker push ${IMG}
# find or download controller-gen
# download controller-gen if necessary
controller-gen:
ifeq (, $(shell which controller-gen))
@{ \
set -e ;\
CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
cd $$CONTROLLER_GEN_TMP_DIR ;\
go mod init tmp ;\
go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.0 ;\
rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
}
CONTROLLER_GEN=$(GOBIN)/controller-gen
else
CONTROLLER_GEN=$(shell which controller-gen)
endif
kustomize:
ifeq (, $(shell which kustomize))
@{ \
set -e ;\
KUSTOMIZE_GEN_TMP_DIR=$$(mktemp -d) ;\
cd $$KUSTOMIZE_GEN_TMP_DIR ;\
go mod init tmp ;\
go get sigs.k8s.io/kustomize/kustomize/v3@v3.5.4 ;\
rm -rf $$KUSTOMIZE_GEN_TMP_DIR ;\
}
KUSTOMIZE=$(GOBIN)/kustomize
else
KUSTOMIZE=$(shell which kustomize)
endif
# Generate bundle manifests and metadata, then validate generated files.
bundle: manifests
.PHONY: bundle
bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
operator-sdk generate kustomize manifests -q
kustomize build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle $(BUNDLE_GEN_FLAGS)
operator-sdk bundle validate ./bundle
# Build the bundle image.
bundle-build:
docker build -f Dockerfile -t $(BUNDLE_IMG) .
.PHONY: bundle-build
bundle-build: ## Build the bundle image.
docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
# Template the helm chart
helm-template:
cd helm-charts && helm template logging-operator ./
.PHONY: bundle-push
bundle-push: ## Push the bundle image.
$(MAKE) docker-push IMG=$(BUNDLE_IMG)
# Install using the helm chart
helm-install:
cd helm-charts && helm upgrade logging-operator ./ -f values.yaml --namespace logging-operator --install
.PHONY: opm
OPM = ./bin/opm
opm: ## Download opm locally if necessary.
ifeq (,$(wildcard $(OPM)))
ifeq (,$(shell which opm 2>/dev/null))
@{ \
set -e ;\
mkdir -p $(dir $(OPM)) ;\
OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.19.1/$${OS}-$${ARCH}-opm ;\
chmod +x $(OPM) ;\
}
else
OPM = $(shell which opm)
endif
endif
# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
# These images MUST exist in a registry and be pull-able.
BUNDLE_IMGS ?= $(BUNDLE_IMG)
# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
ifneq ($(origin CATALOG_BASE_IMG), undefined)
FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
endif
# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
.PHONY: catalog-build
catalog-build: opm ## Build a catalog image.
$(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
# Push the catalog image.
.PHONY: catalog-push
catalog-push: ## Push a catalog image.
$(MAKE) docker-push IMG=$(CATALOG_IMG)

PROJECT

@@ -1,22 +1,55 @@
domain: opstreelabs.in
layout: go.kubebuilder.io/v2
domain: logging.opstreelabs.in
layout:
- go.kubebuilder.io/v3
plugins:
manifests.sdk.operatorframework.io/v2: {}
scorecard.sdk.operatorframework.io/v2: {}
projectName: logging-operator
repo: logging-operator
resources:
- group: logging
- api:
crdVersion: v1
namespaced: true
controller: true
domain: logging.opstreelabs.in
group: logging
kind: Elasticsearch
version: v1alpha1
- group: logging
kind: Kibana
version: v1alpha1
- group: logging
path: logging-operator/api/v1beta1
version: v1beta1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: logging.opstreelabs.in
group: logging
kind: Fluentd
version: v1alpha1
- group: logging
kind: IndexLifecycle
version: v1alpha1
- group: logging
path: logging-operator/api/v1beta1
version: v1beta1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: logging.opstreelabs.in
group: logging
kind: Kibana
path: logging-operator/api/v1beta1
version: v1beta1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: logging.opstreelabs.in
group: logging
kind: IndexLifeCycle
path: logging-operator/api/v1beta1
version: v1beta1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: logging.opstreelabs.in
group: logging
kind: IndexTemplate
version: v1alpha1
version: 3-alpha
plugins:
go.operator-sdk.io/v2-alpha: {}
path: logging-operator/api/v1beta1
version: v1beta1
version: "3"

README.md

@@ -1,95 +1,112 @@
<p align="center">
<img src="./static/logging-operator-logo2.svg" height="220" width="220">
<img src="./static/logging-operator-logo.svg" height="220" width="220">
</p>
<p align="center">
<a href="https://circleci.com/gh/OT-CONTAINER-KIT/logging-operator/tree/master">
<img src="https://circleci.com/gh/OT-CONTAINER-KIT/logging-operator/tree/master.svg?style=shield" alt="CircleCI">
<a href="https://dev.azure.com/opstreedevops/DevOps/_build/latest?definitionId=8&repoName=OT-CONTAINER-KIT%logging-operator&branchName=master">
<img src="https://dev.azure.com/opstreedevops/DevOps/_apis/build/status/logging-operator?repoName=OT-CONTAINER-KIT%logging-operator&branchName=master" alt="Azure Pipelines">
</a>
<a href="https://goreportcard.com/report/github.com/OT-CONTAINER-KIT/logging-operator">
<img src="https://goreportcard.com/badge/github.com/OT-CONTAINER-KIT/logging-operator" alt="Go Report Card">
<img src="https://goreportcard.com/badge/github.com/OT-CONTAINER-KIT/logging-operator" alt="GoReportCard">
</a>
<a href="http://golang.org">
<img src="https://img.shields.io/github/go-mod/go-version/OT-CONTAINER-KIT/logging-operator" alt="GitHub go.mod Go version (subdirectory of monorepo)">
</a>
<a href="https://quay.io/repository/opstree/logging-operator">
<img src="https://img.shields.io/badge/container-ready-green" alt="Docker Repository on Quay">
<img src="https://img.shields.io/badge/container-ready-green" alt="Docker">
</a>
<a href="https://github.com/OT-CONTAINER-KIT/logging-operator/blob/master/LICENSE">
<img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg" alt="Apache License">
</a>
<a href="https://codeclimate.com/github/OT-CONTAINER-KIT/logging-operator/maintainability">
<img src="https://api.codeclimate.com/v1/badges/f9e99ffcba997de51eaa/maintainability" alt="Maintainability">
</a>
<a href="https://github.com/OT-CONTAINER-KIT/logging-operator/releases">
<img src="https://img.shields.io/github/v/release/OT-CONTAINER-KIT/logging-operator" alt="GitHub release (latest by date)">
<a href="https://github.com/OT-CONTAINER-KIT/logging-operator/master/LICENSE">
<img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg" alt="License">
</a>
</p>
## Logging Operator
Logging Operator is an operator created in Golang to set up and manage an EFK (Elasticsearch, Fluentd, and Kibana) cluster inside Kubernetes and Openshift environments. This operator is capable of setting up each individual component of the EFK cluster separately.
A Golang-based CRD operator to set up and manage a logging stack (Elasticsearch, Fluentd, and Kibana) in the Kubernetes cluster. It helps to set up each component of the EFK stack separately.
For documentation, please refer to [https://ot-logging-operator.netlify.app/](https://ot-logging-operator.netlify.app/)
> The K8s API name is "logging.opstreelabs.in/v1alpha1"
## Architecture
### Documentation
[Documentation](https://docs.opstreelabs.in/logging-operator)
### Supported Features
The "Logging Operator" includes these features:-
- Elasticsearch different node types, like:-
- **Master Node** => A node that has the master role (default), which makes it eligible to be elected as the master node, which controls the cluster.
- **Data Node** => A node that has the data role (default). Data nodes hold data and perform data related operations such as CRUD, search, and aggregations.
- **Ingestion Node** => A node that has the ingest role (default). Ingest nodes are able to apply an ingest pipeline to a document in order to transform and enrich the document before indexing. With a heavy ingest load, it makes sense to use dedicated ingest nodes and to not include the ingest role from nodes that have the master or data roles.
- **Client or Coordinator Node** => Requests like search requests or bulk-indexing requests may involve data held on different data nodes. A search request, for example, is executed in two phases which are coordinated by the node which receives the client requestthe coordinating node.
- Elasticsearch setup with or without TLS on Transport and HTTP Layer
- Customizable elasticsearch configuration and configurable heap size
- Fluentd as a lightweight log-shipper and JSON field seperation support
- Kibana integration with elasticsearch for logs visualization
- Seamless upgrade for Elasticsearch, Fluentd, and Kibana stack
- Inculcated best practices for Kubernetes setup like `SecurityContext` and `Privilege Control`
- Loosely coupled setup, i.e. Elasticsearch, Fluentd, and Kibana setup can be done individually as well
- Index Lifecycle support to manage rollover and cleanup of indexes
- Index template support for configuring index settings like:- policy, replicas, shards etc.
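For illustration, here is a minimal sketch of an Elasticsearch object enabling these node roles. It is based on the v1alpha1 `ElasticsearchSpec` fields visible at the end of this diff; the name, image tag, and node counts are assumptions, not values taken from this repository.

```yaml
apiVersion: logging.opstreelabs.in/v1alpha1
kind: Elasticsearch
metadata:
  name: example-es                 # hypothetical object name
spec:
  clusterName: example-es          # maps to ElasticsearchSpec.ClusterName
  image: docker.elastic.co/elasticsearch/elasticsearch:7.16.0  # assumed tag
  master:
    enabled: true
    count: 3                       # master-eligible nodes that control the cluster
  data:
    enabled: true
    count: 2                       # nodes holding data and serving CRUD/search
  ingestion:
    enabled: true
    count: 1                       # dedicated ingest-pipeline nodes
  client:
    enabled: true
    count: 1                       # coordinating-only nodes for client requests
```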
### Architecture
Architecture of logging operator looks like this:-
<div align="center">
<img src="./static/logging-operator-arch.png">
<img src="./static/logging-operator-arc.png">
</div>
### Purpose
## Purpose
The purpose behind creating this CRD operator was to provide an easy and production-grade logging setup on Kubernetes. But it doesn't mean this can be used only for logging setup.
The aim and purpose of creating this Logging Operator is to provide an easy and extensible interface for setting up a logging stack such as EFK (Elasticsearch, Fluentd, and Kibana). It helps in setting up different nodes of the elasticsearch cluster, fluentd as a log shipper, and kibana for visualization.
> The operator blocks Elasticsearch, Fluentd, and Kibana are loosely coupled, so they can be set up individually as well. For example, if we need elasticsearch as an application database, we can set up only elasticsearch by using this operator.
## Supported Features
### Prerequisites
- Setup of elasticsearch cluster with different node types:- master, data, ingestion, and client.
- Customizable configuration for the elasticsearch and fluentd stack.
- Setup of fluentd as a lightweight log-shipper deployed as a DaemonSet.
- Kibana will be set up as a visualization tool for the elastic stack.
- Seamless upgrades of elasticsearch, fluentd, and kibana.
- Security best practices support for the complete stack, such as TLS and elastic security.
- Kubernetes resource objects support like:- resources, securityContext, affinity, tolerations, etc.
The "Logging Operator" needs a Kubernetes/Openshift cluster of version `>=1.8.0`. If you have just started using Operators, it's highly recommended to use the latest version of Kubernetes.
## Prerequisites
The cluster size selection should be done on the basis of requirements and resources.
Logging Operator requires a Kubernetes cluster of version `>=1.16.0`. If you have just started with CRDs and Operators, it is highly recommended to use the latest version of the Kubernetes cluster.
### Logging Operator Installation
## Getting Started
For the "Logging Operator" installation, we have categorized the steps in 3 parts:-
If you want to start using the logging-operator in a quickstart mode, you can begin with the [documentation](https://ot-logging-operator.netlify.app/). It will help you and guide you through the setup of Elasticsearch, Fluentd, and Kibana step-by-step.
- Namespace Setup for operator
- CRD setup in Kubernetes cluster
- RBAC setup for an operator to create resources in Kubernetes
- Operator deployment and validation
The configuration for EFK (Elasticsearch, Fluentd, Kibana) set up is defined inside the CRD manifests. But all the examples manifests can be found in the [example](./examples) directory.
The detailed installation steps are present in [Documentation Guide](https://docs.opstreelabs.in/logging-operator)
## Quickstart
### Examples
### Logging Operator Setup
All the examples are present inside the [config/samples/](./config/samples/) directory. These manifests can be applied with the `kubectl` command line. The configurations have some dummy values which can be changed and customized as per needs and requirements. A sketch of one such sample follows.
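For instance, a Kibana object matching the v1alpha1 `KibanaSpec` fields visible near the end of this diff might look like the sketch below; the image tag, endpoint, and credentials are placeholder assumptions (the revamped operator moves these APIs to v1beta1, so treat config/samples as the authoritative source).

```yaml
apiVersion: logging.opstreelabs.in/v1alpha1
kind: Kibana
metadata:
  name: kibana
spec:
  replicas: 1                                     # KibanaSpec.Replicas
  image: docker.elastic.co/kibana/kibana:7.16.0   # assumed tag
  imagePullPolicy: IfNotPresent
  elasticsearch:                                  # KibanaElasticsearch block
    host: https://example-es-master:9200          # hypothetical endpoint
    username: elastic                             # illustrative credentials only
    password: changeme
    tlsEnabled: true
```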
The setup can be done by using Helm. The logging-operator can easily be installed using helm commands.
### Contact Information
```shell
# Add the helm chart
$ helm repo add ot-helm https://ot-container-kit.github.io/helm-charts/
...
"ot-helm" has been added to your repositories
```
```shell
# Deploy the Logging Operator
$ helm upgrade logging-operator ot-helm/logging-operator \
--install --namespace ot-operators
...
Release "logging-operator" does not exist. Installing it now.
NAME: logging-operator
LAST DEPLOYED: Sun May 29 01:06:58 2022
NAMESPACE: ot-operators
STATUS: deployed
REVISION: 1
```
After the deployment, verify the installation of the operator.
```shell
# Testing Operator
$ helm test logging-operator --namespace ot-operators
...
NAME: logging-operator
LAST DEPLOYED: Sun May 29 01:06:58 2022
NAMESPACE: ot-operators
STATUS: deployed
REVISION: 1
TEST SUITE: logging-operator-test-connection
Last Started: Sun May 29 01:07:56 2022
Last Completed: Sun May 29 01:08:02 2022
Phase: Succeeded
```
## Upcoming Features
- Plugins support for elasticsearch and kibana
- Plugins support for fluentd
- Index template support in operator
- Index lifecycle support in operator
## Contact
This project is managed by [OpsTree Solutions](https://opstree.com/). If you have any queries or suggestions, mail us at [opensource@opstree.com](mailto:opensource@opstree.com).
This project is managed by [OpsTree Solutions](https://opstree.com). If you have any queries or suggestions, mail us at opensource@opstree.com


@@ -1,5 +0,0 @@
- Add CHANGELOG.md file to release track
- Add DEVELOPMENT.md for development of operator
- Add helm integration for deployment
- Add documentation site with detailed information
- Publish the operator at Operatorhub

SECURITY.md Normal file

@@ -0,0 +1,16 @@
# Security Policy
## Supported Versions
Use this section to tell people about which versions of your project are
currently being supported with security updates.
| Version | Supported |
| ------- | ------------------ |
| 0.3.0 | :white_check_mark: |
| 0.2.0 | :x: |
| 0.1.0 | :x: |
## Reporting a Vulnerability
If you find any security vulnerability inside the project, please open an issue at https://github.com/OT-CONTAINER-KIT/redis-operator/issues


@@ -1,112 +0,0 @@
/*
Copyright 2020 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ElasticsearchSpec defines the desired state of Elasticsearch
type ElasticsearchSpec struct {
ClusterName string `json:"clusterName"`
Image string `json:"image"`
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
Security Security `json:"security,omitempty"`
Plugins []*string `json:"plugins,omitempty"`
Master NodeSpec `json:"master,omitempty"`
Data NodeSpec `json:"data,omitempty"`
Ingestion NodeSpec `json:"ingestion,omitempty"`
Client NodeSpec `json:"client,omitempty"`
}
// ElasticsearchStatus defines the observed state of Elasticsearch
type ElasticsearchStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
ClusterName string `json:"clusterName"`
Master *int32 `json:"master"`
Data *int32 `json:"data"`
Ingestion *int32 `json:"ingestion"`
Client *int32 `json:"client"`
ClusterState string `json:"clusterState"`
}
// Security defines the security of elasticsearch
type Security struct {
TLSEnabled *bool `json:"tlsEnabled,omitempty"`
Password string `json:"password,omitempty"`
}
// NodeSpec define the state of elasticsearch nodes
type NodeSpec struct {
Enabled bool `json:"enabled,omitempty"`
Count *int32 `json:"count,omitempty"`
Resources *Resources `json:"resources,omitempty"`
ExtraEnvVariables *map[string]string `json:"extraEnvVariables,omitempty"`
Storage *Storage `json:"storage,omitempty"`
JVMOptions JVMOptions `json:"jvmOptions,omitempty"`
Affinity *corev1.Affinity `json:"affinity,omitempty"`
}
// JVMOptions define the JVM size for elasticsearch nodes
type JVMOptions struct {
Max string `json:"Xmx,omitempty"`
Min string `json:"Xms,omitempty"`
}
// Resources describes requests and limits for the cluster resources.
type Resources struct {
ResourceRequests ResourceDescription `json:"requests,omitempty"`
ResourceLimits ResourceDescription `json:"limits,omitempty"`
}
// Storage is the interface to add pvc and pv support in elasticsearch
type Storage struct {
VolumeClaimTemplate corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
}
// ResourceDescription describes CPU and memory resources defined for a cluster.
type ResourceDescription struct {
CPU string `json:"cpu"`
Memory string `json:"memory"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// Elasticsearch is the Schema for the elasticsearches API
type Elasticsearch struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ElasticsearchSpec `json:"spec,omitempty"`
Status ElasticsearchStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// ElasticsearchList contains a list of Elasticsearch
type ElasticsearchList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Elasticsearch `json:"items"`
}
func init() {
SchemeBuilder.Register(&Elasticsearch{}, &ElasticsearchList{})
}
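For illustration, a minimal sketch of a manifest instantiating the spec above, exercising the security, plugin, JVM, resource, and storage fields it defines; the image tag, password, plugin name, and sizing values are assumptions, not values from this repository.

```yaml
apiVersion: logging.opstreelabs.in/v1alpha1
kind: Elasticsearch
metadata:
  name: es-cluster
spec:
  clusterName: es-cluster
  image: docker.elastic.co/elasticsearch/elasticsearch:7.16.0  # assumed tag
  imagePullPolicy: IfNotPresent
  security:
    tlsEnabled: true             # Security.TLSEnabled
    password: changeme           # illustrative only; use a real secret value
  plugins:
    - repository-s3              # hypothetical plugin name
  master:
    enabled: true
    count: 3
    jvmOptions:
      Xmx: 1g                    # JVMOptions.Max (json tag "Xmx")
      Xms: 1g                    # JVMOptions.Min (json tag "Xms")
    resources:
      requests:
        cpu: 500m
        memory: 2Gi
      limits:
        cpu: "1"
        memory: 2Gi
    storage:
      volumeClaimTemplate:       # corev1.PersistentVolumeClaim
        spec:
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 10Gi
```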


@@ -1,78 +0,0 @@
/*
Copyright 2020 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// IndexLifecycleSpec defines the desired state of IndexLifecycle
type IndexLifecycleSpec struct {
Rollover Rollover `json:"rollover,omitempty"`
Delete Delete `json:"delete,omitempty"`
Enabled *bool `json:"enabled,omitempty"`
Elasticsearch ManagementElasticsearch `json:"elasticsearch,omitempty"`
}
// Rollover is the struct for index rollover
type Rollover struct {
MaxSize string `json:"maxSize,omitempty"`
MaxAge string `json:"maxAge,omitempty"`
}
// ManagementElasticsearch is the struct for elasticsearch configuration used by index management resources
type ManagementElasticsearch struct {
Host *string `json:"host,omitempty"`
Username *string `json:"username,omitempty"`
Password *string `json:"password,omitempty"`
}
// Delete is the struct for index deletion
type Delete struct {
MinAge string `json:"minAge,omitempty"`
}
// IndexLifecycleStatus defines the observed state of IndexLifecycle
type IndexLifecycleStatus struct {
Rollover Rollover `json:"rollover,omitempty"`
Delete Delete `json:"delete,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// IndexLifecycle is the Schema for the indexlifecycles API
type IndexLifecycle struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IndexLifecycleSpec `json:"spec,omitempty"`
Status IndexLifecycleStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// IndexLifecycleList contains a list of IndexLifecycle
type IndexLifecycleList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []IndexLifecycle `json:"items"`
}
func init() {
SchemeBuilder.Register(&IndexLifecycle{}, &IndexLifecycleList{})
}
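A matching v1alpha1 IndexLifecycle manifest, for reference, would have been shaped like this sketch (host and credentials are placeholders):

apiVersion: logging.opstreelabs.in/v1alpha1
kind: IndexLifecycle
metadata:
  name: example-lifecycle
spec:
  enabled: true
  rollover:
    maxSize: 20gb
    maxAge: 30d
  delete:
    minAge: 90d
  elasticsearch:
    host: "https://example-es:9200"   # placeholder endpoint
    username: elastic
    password: changeme                # placeholder credential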

View File

@ -1,76 +0,0 @@
/*
Copyright 2020 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// KibanaSpec defines the desired state of Kibana
type KibanaSpec struct {
Replicas *int32 `json:"replicas"`
Image string `json:"image"`
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
KibanaElasticsearch KibanaElasticsearch `json:"elasticsearch,omitempty"`
ElasticSecretName *string `json:"elasticSecretName,omitempty"`
Resources *Resources `json:"resources,omitempty"`
Affinity *corev1.Affinity `json:"affinity,omitempty"`
}
// KibanaElasticsearch is the struct for elasticsearch configuration for Kibana
type KibanaElasticsearch struct {
Host string `json:"host,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
TLSEnabled bool `json:"tlsEnabled,omitempty"`
}
// KibanaStatus defines the observed state of Kibana
type KibanaStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
Elasticsearch string `json:"elasticsearch,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// Kibana is the Schema for the kibanas API
type Kibana struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec KibanaSpec `json:"spec,omitempty"`
Status KibanaStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// KibanaList contains a list of Kibana
type KibanaList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Kibana `json:"items"`
}
func init() {
SchemeBuilder.Register(&Kibana{}, &KibanaList{})
}
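The removed Kibana v1alpha1 type, likewise, would have been driven by a manifest of roughly this shape (image and host are placeholders):

apiVersion: logging.opstreelabs.in/v1alpha1
kind: Kibana
metadata:
  name: example-kibana
spec:
  replicas: 1
  image: example-registry/kibana:7.17.0   # placeholder image
  elasticsearch:
    host: "https://example-es:9200"       # placeholder endpoint
    username: elastic
    password: changeme
    tlsEnabled: true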

View File

@ -0,0 +1,38 @@
/*
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
)
// KubernetesConfig defines the Kubernetes-specific properties
type KubernetesConfig struct {
Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
Affinity *corev1.Affinity `json:"elasticAffinity,omitempty"`
NodeSelector map[string]string `json:"nodeSelectors,omitempty"`
Tolerations *[]corev1.Toleration `json:"tolerations,omitempty"`
PriorityClassName *string `json:"priorityClassName,omitempty"`
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
}
// Storage is the interface to add pvc and pv support in elasticsearch
type Storage struct {
AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
StorageClassName *string `json:"storageClass,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
StorageSize string `json:"storageSize,omitempty" protobuf:"bytes,6,opt,name=storageSize"`
}
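These shared blocks are embedded by the node-level types in the files that follow; as a sketch, the YAML stanzas they accept look like this (all values are illustrative assumptions):

kubernetesConfig:
  resources:
    requests:
      cpu: 500m
      memory: 1Gi
    limits:
      cpu: "1"
      memory: 2Gi
  nodeSelectors:
    kubernetes.io/os: linux
  priorityClassName: example-priority   # placeholder class name
storage:
  accessModes: ["ReadWriteOnce"]
  storageClass: standard                # placeholder StorageClass
  storageSize: 10Gi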

View File

@ -0,0 +1,100 @@
/*
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +kubebuilder:object:root=true
// ElasticsearchSpec defines the desired state of Elasticsearch
type ElasticsearchSpec struct {
ClusterName string `json:"esClusterName"`
ESVersion string `json:"esVersion"`
Security *Security `json:"esSecurity,omitempty"`
// +kubebuilder:default:={storage:{accessModes: {ReadWriteOnce}, storageSize: "1Gi"},jvmMaxMemory: "1g", jvmMinMemory: "1g", replicas: 3}
ESMaster *NodeSpecificConfig `json:"esMaster,omitempty"`
ESData *NodeSpecificConfig `json:"esData,omitempty"`
ESIngestion *NodeSpecificConfig `json:"esIngestion,omitempty"`
ESClient *NodeSpecificConfig `json:"esClient,omitempty"`
ESPlugins *[]string `json:"esPlugins,omitempty"`
ESKeystoreSecret *string `json:"esKeystoreSecret,omitempty"`
}
// NodeSpecificConfig defines the properties for elasticsearch nodes
type NodeSpecificConfig struct {
KubernetesConfig *KubernetesConfig `json:"kubernetesConfig,omitempty"`
Replicas *int32 `json:"replicas,omitempty"`
CustomConfig *string `json:"customConfig,omitempty"`
Storage *Storage `json:"storage,omitempty"`
JvmMaxMemory *string `json:"jvmMaxMemory,omitempty"`
JvmMinMemory *string `json:"jvmMinMemory,omitempty"`
}
// Security defines the security config of Elasticsearch
type Security struct {
ExistingSecret *string `json:"existingSecret,omitempty"`
TLSEnabled *bool `json:"tlsEnabled,omitempty"`
AutoGeneratePassword *bool `json:"autoGeneratePassword,omitempty"`
}
//+kubebuilder:subresource:status
// ElasticsearchStatus defines the observed state of Elasticsearch
type ElasticsearchStatus struct {
ESVersion string `json:"esVersion,omitempty"`
ClusterState string `json:"esClusterState,omitempty"`
ActiveShards *int32 `json:"activeShards,omitempty"`
Indices *int32 `json:"indices,omitempty"`
ESMaster *int32 `json:"esMaster,omitempty"`
ESData *int32 `json:"esData,omitempty"`
ESClient *int32 `json:"esClient,omitempty"`
ESIngestion *int32 `json:"esIngestion,omitempty"`
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Version",type=string,priority=0,JSONPath=`.status.esVersion`
// +kubebuilder:printcolumn:name="State",type=string,priority=0,JSONPath=`.status.esClusterState`
// +kubebuilder:printcolumn:name="Shards",type=integer,priority=0,JSONPath=`.status.activeShards`
// +kubebuilder:printcolumn:name="Indices",type=integer,priority=0,JSONPath=`.status.indices`
// +kubebuilder:printcolumn:name="Master",type=integer,priority=1,JSONPath=`.status.esMaster`
// +kubebuilder:printcolumn:name="Data",type=integer,priority=1,JSONPath=`.status.esClient`
// +kubebuilder:printcolumn:name="Client",type=integer,priority=1,JSONPath=`.status.esMaster`
// +kubebuilder:printcolumn:name="Ingestion",type=integer,priority=1,JSONPath=`.status.esIngestion`
// Elasticsearch is the Schema for the elasticsearches API
type Elasticsearch struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ElasticsearchSpec `json:"spec,omitempty"`
Status ElasticsearchStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// ElasticsearchList contains a list of Elasticsearch
type ElasticsearchList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Elasticsearch `json:"items"`
}
func init() {
SchemeBuilder.Register(&Elasticsearch{}, &ElasticsearchList{})
}
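Putting the spec together, a minimal v1beta1 Elasticsearch manifest would look like the sketch below; the name, version string, and esData count are illustrative assumptions, while the esMaster values mirror the kubebuilder defaults declared above:

apiVersion: logging.logging.opstreelabs.in/v1beta1
kind: Elasticsearch
metadata:
  name: example-es              # illustrative name
spec:
  esClusterName: example-es
  esVersion: "7.17.0"           # placeholder version
  esSecurity:
    tlsEnabled: true
    autoGeneratePassword: true
  esMaster:
    replicas: 3
    storage:
      accessModes: ["ReadWriteOnce"]
      storageSize: 1Gi
    jvmMaxMemory: 1g
    jvmMinMemory: 1g
  esData:
    replicas: 2
  esPlugins: ["repository-s3"]  # illustrative plugin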

View File

@ -1,5 +1,5 @@
/*
Copyright 2020 Opstree Solutions.
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,40 +14,42 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// FluentdSpec defines the desired state of Fluentd
type FluentdSpec struct {
FluentdElasticsearch FluentdElasticsearch `json:"elasticsearch,omitempty"`
Image string `json:"image"`
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
NodeSelector *map[string]string `json:"nodeSelector,omitempty"`
LogPrefix *string `json:"logPrefix,omitempty"`
CustomConfiguration *map[string]string `json:"customConfiguration,omitempty"`
Resources *Resources `json:"resources,omitempty"`
ElasticConfig ElasticConfig `json:"esCluster"`
KubernetesConfig *KubernetesConfig `json:"kubernetesConfig,omitempty"`
Security *Security `json:"esSecurity,omitempty"`
// +kubebuilder:default:=namespace_name
// +kubebuilder:validation:Pattern=`namespace_name$|pod_name$`
IndexNameStrategy *string `json:"indexNameStrategy,omitempty"`
CustomConfig *string `json:"customConfig,omitempty"`
AdditionalConfig *string `json:"additionalConfig,omitempty"`
}
// FluentdElasticsearch is the struct for elasticsearch configuration for fluentd
type FluentdElasticsearch struct {
Host string `json:"host,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
TLSEnabled bool `json:"tlsEnabled,omitempty"`
}
// ElasticConfig is the struct for elasticsearch configuration
type ElasticConfig struct {
Host *string `json:"host"`
ClusterName string `json:"clusterName,omitempty"`
ESVersion string `json:"esVersion,omitempty"`
}
// FluentdStatus defines the observed state of Fluentd
type FluentdStatus struct {
Elasticsearch string `json:"elasticsearch,omitempty"`
TotalAgents *int32 `json:"totalAgents,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Elasticsearch Host",type=string,priority=0,JSONPath=`.spec.esCluster.host`
// +kubebuilder:printcolumn:name="Total Agents",type=string,priority=0,JSONPath=`.status.totalAgents`
// Fluentd is the Schema for the fluentds API
type Fluentd struct {
metav1.TypeMeta `json:",inline"`
@ -57,7 +59,7 @@ type Fluentd struct {
Status FluentdStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
//+kubebuilder:object:root=true
// FluentdList contains a list of Fluentd
type FluentdList struct {
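Against the reworked spec, a minimal v1beta1 Fluentd manifest would be shaped like this sketch (host, cluster name, and secret name are placeholders; indexNameStrategy shows the declared default):

apiVersion: logging.logging.opstreelabs.in/v1beta1
kind: Fluentd
metadata:
  name: example-fluentd
spec:
  esCluster:
    host: example-es-master             # placeholder host
    clusterName: example-es
    esVersion: "7.17.0"                 # placeholder version
  esSecurity:
    tlsEnabled: true
    existingSecret: example-es-secret   # placeholder secret name
  indexNameStrategy: namespace_name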

View File

@ -1,5 +1,5 @@
/*
Copyright 2020 Opstree Solutions.
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 contains API Schema definitions for the logging v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=logging.opstreelabs.in
package v1alpha1
// Package v1beta1 contains API Schema definitions for the logging v1beta1 API group
//+kubebuilder:object:generate=true
//+groupName=logging.logging.opstreelabs.in
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
@ -26,7 +26,7 @@ import (
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "logging.opstreelabs.in", Version: "v1alpha1"}
GroupVersion = schema.GroupVersion{Group: "logging.logging.opstreelabs.in", Version: "v1beta1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

View File

@ -0,0 +1,64 @@
/*
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// IndexLifeCycleSpec defines the desired state of IndexLifeCycle
type IndexLifeCycleSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// Foo is an example field of IndexLifeCycle. Edit indexlifecycle_types.go to remove/update
Foo string `json:"foo,omitempty"`
}
// IndexLifeCycleStatus defines the observed state of IndexLifeCycle
type IndexLifeCycleStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// IndexLifeCycle is the Schema for the indexlifecycles API
type IndexLifeCycle struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IndexLifeCycleSpec `json:"spec,omitempty"`
Status IndexLifeCycleStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// IndexLifeCycleList contains a list of IndexLifeCycle
type IndexLifeCycleList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []IndexLifeCycle `json:"items"`
}
func init() {
SchemeBuilder.Register(&IndexLifeCycle{}, &IndexLifeCycleList{})
}
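Because this version of the type is still kubebuilder scaffolding, a conforming manifest carries only the placeholder field (the name is illustrative):

apiVersion: logging.logging.opstreelabs.in/v1beta1
kind: IndexLifeCycle
metadata:
  name: example-lifecycle
spec:
  foo: bar   # scaffold example field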

View File

@ -1,5 +1,5 @@
/*
Copyright 2020 Opstree Solutions.
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,36 +14,32 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// IndexTemplateSpec defines the desired state of IndexTemplate
type IndexTemplateSpec struct {
Enabled *bool `json:"enabled,omitempty"`
IndexPatterns []string `json:"indexPatterns,omitempty"`
IndexTemplateSettings IndexTemplateSettings `json:"settings,omitempty"`
Elasticsearch ManagementElasticsearch `json:"elasticsearch,omitempty"`
}
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// IndexTemplateSettings defines the desired state for index settings
type IndexTemplateSettings struct {
Shards int32 `json:"shards,omitempty"`
Replicas int32 `json:"replicas,omitempty"`
IndexLifecycleName string `json:"indexLifeCycleName,omitempty"`
RollOverAlias string `json:"rolloverAlias,omitempty"`
// Foo is an example field of IndexTemplate. Edit indextemplate_types.go to remove/update
Foo string `json:"foo,omitempty"`
}
// IndexTemplateStatus defines the observed state of IndexTemplate
type IndexTemplateStatus struct {
IndexPatterns []string `json:"indexPatterns,omitempty"`
IndexTemplateSettings IndexTemplateSettings `json:"settings,omitempty"`
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// IndexTemplate is the Schema for the indextemplates API
type IndexTemplate struct {
@ -54,7 +50,7 @@ type IndexTemplate struct {
Status IndexTemplateStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
//+kubebuilder:object:root=true
// IndexTemplateList contains a list of IndexTemplate
type IndexTemplateList struct {

View File

@ -0,0 +1,60 @@
/*
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// KibanaSpec defines the desired state of Kibana
type KibanaSpec struct {
// +kubebuilder:default:=1
Replicas *int32 `json:"replicas,omitempty"`
ElasticConfig ElasticConfig `json:"esCluster"`
Security *Security `json:"esSecurity,omitempty"`
KubernetesConfig *KubernetesConfig `json:"kubernetesConfig,omitempty"`
}
// KibanaStatus defines the observed state of Kibana
type KibanaStatus struct {
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Version",type=string,priority=0,JSONPath=`.spec.esCluster.esVersion`
// +kubebuilder:printcolumn:name="Es Cluster",type=string,priority=0,JSONPath=`.spec.esCluster.clusterName`
// Kibana is the Schema for the kibanas API
type Kibana struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec KibanaSpec `json:"spec,omitempty"`
Status KibanaStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// KibanaList contains a list of Kibana
type KibanaList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Kibana `json:"items"`
}
func init() {
SchemeBuilder.Register(&Kibana{}, &KibanaList{})
}
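A minimal v1beta1 Kibana manifest against this spec would look roughly like so (host, cluster name, and secret name are assumptions; replicas shows the declared default):

apiVersion: logging.logging.opstreelabs.in/v1beta1
kind: Kibana
metadata:
  name: example-kibana
spec:
  replicas: 1
  esCluster:
    host: example-es-master             # placeholder host
    clusterName: example-es
    esVersion: "7.17.0"                 # placeholder version
  esSecurity:
    tlsEnabled: true
    existingSecret: example-es-secret   # placeholder secret name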

View File

@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 Opstree Solutions.
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -18,35 +19,20 @@ limitations under the License.
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
package v1beta1
import (
"k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Delete) DeepCopyInto(out *Delete) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Delete.
func (in *Delete) DeepCopy() *Delete {
if in == nil {
return nil
}
out := new(Delete)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Elasticsearch) DeepCopyInto(out *Elasticsearch) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Elasticsearch.
@ -102,22 +88,31 @@ func (in *ElasticsearchList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ElasticsearchSpec) DeepCopyInto(out *ElasticsearchSpec) {
*out = *in
in.Security.DeepCopyInto(&out.Security)
if in.Plugins != nil {
in, out := &in.Plugins, &out.Plugins
*out = make([]*string, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(string)
**out = **in
}
}
if in.Security != nil {
in, out := &in.Security, &out.Security
*out = new(Security)
(*in).DeepCopyInto(*out)
}
if in.ESMaster != nil {
in, out := &in.ESMaster, &out.ESMaster
*out = new(NodeSpecificConfig)
(*in).DeepCopyInto(*out)
}
if in.ESData != nil {
in, out := &in.ESData, &out.ESData
*out = new(NodeSpecificConfig)
(*in).DeepCopyInto(*out)
}
if in.ESIngestion != nil {
in, out := &in.ESIngestion, &out.ESIngestion
*out = new(NodeSpecificConfig)
(*in).DeepCopyInto(*out)
}
if in.ESClient != nil {
in, out := &in.ESClient, &out.ESClient
*out = new(NodeSpecificConfig)
(*in).DeepCopyInto(*out)
}
in.Master.DeepCopyInto(&out.Master)
in.Data.DeepCopyInto(&out.Data)
in.Ingestion.DeepCopyInto(&out.Ingestion)
in.Client.DeepCopyInto(&out.Client)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSpec.
@ -133,26 +128,6 @@ func (in *ElasticsearchSpec) DeepCopy() *ElasticsearchSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ElasticsearchStatus) DeepCopyInto(out *ElasticsearchStatus) {
*out = *in
if in.Master != nil {
in, out := &in.Master, &out.Master
*out = new(int32)
**out = **in
}
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = new(int32)
**out = **in
}
if in.Ingestion != nil {
in, out := &in.Ingestion, &out.Ingestion
*out = new(int32)
**out = **in
}
if in.Client != nil {
in, out := &in.Client, &out.Client
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchStatus.
@ -170,7 +145,7 @@ func (in *Fluentd) DeepCopyInto(out *Fluentd) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Spec = in.Spec
out.Status = in.Status
}
@ -192,21 +167,6 @@ func (in *Fluentd) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FluentdElasticsearch) DeepCopyInto(out *FluentdElasticsearch) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdElasticsearch.
func (in *FluentdElasticsearch) DeepCopy() *FluentdElasticsearch {
if in == nil {
return nil
}
out := new(FluentdElasticsearch)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FluentdList) DeepCopyInto(out *FluentdList) {
*out = *in
@ -242,39 +202,6 @@ func (in *FluentdList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FluentdSpec) DeepCopyInto(out *FluentdSpec) {
*out = *in
out.FluentdElasticsearch = in.FluentdElasticsearch
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
if in.LogPrefix != nil {
in, out := &in.LogPrefix, &out.LogPrefix
*out = new(string)
**out = **in
}
if in.CustomConfiguration != nil {
in, out := &in.CustomConfiguration, &out.CustomConfiguration
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(Resources)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdSpec.
@ -303,26 +230,26 @@ func (in *FluentdStatus) DeepCopy() *FluentdStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexLifecycle) DeepCopyInto(out *IndexLifecycle) {
func (in *IndexLifeCycle) DeepCopyInto(out *IndexLifeCycle) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Spec = in.Spec
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexLifecycle.
func (in *IndexLifecycle) DeepCopy() *IndexLifecycle {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexLifeCycle.
func (in *IndexLifeCycle) DeepCopy() *IndexLifeCycle {
if in == nil {
return nil
}
out := new(IndexLifecycle)
out := new(IndexLifeCycle)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IndexLifecycle) DeepCopyObject() runtime.Object {
func (in *IndexLifeCycle) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@ -330,31 +257,31 @@ func (in *IndexLifecycle) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexLifecycleList) DeepCopyInto(out *IndexLifecycleList) {
func (in *IndexLifeCycleList) DeepCopyInto(out *IndexLifeCycleList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]IndexLifecycle, len(*in))
*out = make([]IndexLifeCycle, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexLifecycleList.
func (in *IndexLifecycleList) DeepCopy() *IndexLifecycleList {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexLifeCycleList.
func (in *IndexLifeCycleList) DeepCopy() *IndexLifeCycleList {
if in == nil {
return nil
}
out := new(IndexLifecycleList)
out := new(IndexLifeCycleList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IndexLifecycleList) DeepCopyObject() runtime.Object {
func (in *IndexLifeCycleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
@ -362,41 +289,31 @@ func (in *IndexLifecycleList) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexLifecycleSpec) DeepCopyInto(out *IndexLifecycleSpec) {
func (in *IndexLifeCycleSpec) DeepCopyInto(out *IndexLifeCycleSpec) {
*out = *in
out.Rollover = in.Rollover
out.Delete = in.Delete
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = new(bool)
**out = **in
}
in.Elasticsearch.DeepCopyInto(&out.Elasticsearch)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexLifecycleSpec.
func (in *IndexLifecycleSpec) DeepCopy() *IndexLifecycleSpec {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexLifeCycleSpec.
func (in *IndexLifeCycleSpec) DeepCopy() *IndexLifeCycleSpec {
if in == nil {
return nil
}
out := new(IndexLifecycleSpec)
out := new(IndexLifeCycleSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexLifecycleStatus) DeepCopyInto(out *IndexLifecycleStatus) {
func (in *IndexLifeCycleStatus) DeepCopyInto(out *IndexLifeCycleStatus) {
*out = *in
out.Rollover = in.Rollover
out.Delete = in.Delete
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexLifecycleStatus.
func (in *IndexLifecycleStatus) DeepCopy() *IndexLifecycleStatus {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexLifeCycleStatus.
func (in *IndexLifeCycleStatus) DeepCopy() *IndexLifeCycleStatus {
if in == nil {
return nil
}
out := new(IndexLifecycleStatus)
out := new(IndexLifeCycleStatus)
in.DeepCopyInto(out)
return out
}
@ -406,8 +323,8 @@ func (in *IndexTemplate) DeepCopyInto(out *IndexTemplate) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
out.Spec = in.Spec
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplate.
@ -460,36 +377,9 @@ func (in *IndexTemplateList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexTemplateSettings) DeepCopyInto(out *IndexTemplateSettings) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplateSettings.
func (in *IndexTemplateSettings) DeepCopy() *IndexTemplateSettings {
if in == nil {
return nil
}
out := new(IndexTemplateSettings)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexTemplateSpec) DeepCopyInto(out *IndexTemplateSpec) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = new(bool)
**out = **in
}
if in.IndexPatterns != nil {
in, out := &in.IndexPatterns, &out.IndexPatterns
*out = make([]string, len(*in))
copy(*out, *in)
}
out.IndexTemplateSettings = in.IndexTemplateSettings
in.Elasticsearch.DeepCopyInto(&out.Elasticsearch)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplateSpec.
@ -505,12 +395,6 @@ func (in *IndexTemplateSpec) DeepCopy() *IndexTemplateSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexTemplateStatus) DeepCopyInto(out *IndexTemplateStatus) {
*out = *in
if in.IndexPatterns != nil {
in, out := &in.IndexPatterns, &out.IndexPatterns
*out = make([]string, len(*in))
copy(*out, *in)
}
out.IndexTemplateSettings = in.IndexTemplateSettings
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexTemplateStatus.
@ -523,27 +407,12 @@ func (in *IndexTemplateStatus) DeepCopy() *IndexTemplateStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JVMOptions) DeepCopyInto(out *JVMOptions) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JVMOptions.
func (in *JVMOptions) DeepCopy() *JVMOptions {
if in == nil {
return nil
}
out := new(JVMOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Kibana) DeepCopyInto(out *Kibana) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Spec = in.Spec
out.Status = in.Status
}
@ -565,21 +434,6 @@ func (in *Kibana) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KibanaElasticsearch) DeepCopyInto(out *KibanaElasticsearch) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KibanaElasticsearch.
func (in *KibanaElasticsearch) DeepCopy() *KibanaElasticsearch {
if in == nil {
return nil
}
out := new(KibanaElasticsearch)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KibanaList) DeepCopyInto(out *KibanaList) {
*out = *in
@ -615,27 +469,6 @@ func (in *KibanaList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KibanaSpec) DeepCopyInto(out *KibanaSpec) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
out.KibanaElasticsearch = in.KibanaElasticsearch
if in.ElasticSecretName != nil {
in, out := &in.ElasticSecretName, &out.ElasticSecretName
*out = new(string)
**out = **in
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(Resources)
**out = **in
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KibanaSpec.
@ -664,125 +497,92 @@ func (in *KibanaStatus) DeepCopy() *KibanaStatus {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManagementElasticsearch) DeepCopyInto(out *ManagementElasticsearch) {
func (in *KubernetesConfig) DeepCopyInto(out *KubernetesConfig) {
*out = *in
if in.Host != nil {
in, out := &in.Host, &out.Host
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(v1.ResourceRequirements)
(*in).DeepCopyInto(*out)
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = new([]v1.Toleration)
if **in != nil {
in, out := *in, *out
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
if in.PriorityClassName != nil {
in, out := &in.PriorityClassName, &out.PriorityClassName
*out = new(string)
**out = **in
}
if in.Username != nil {
in, out := &in.Username, &out.Username
*out = new(string)
**out = **in
}
if in.Password != nil {
in, out := &in.Password, &out.Password
*out = new(string)
**out = **in
if in.SecurityContext != nil {
in, out := &in.SecurityContext, &out.SecurityContext
*out = new(v1.PodSecurityContext)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementElasticsearch.
func (in *ManagementElasticsearch) DeepCopy() *ManagementElasticsearch {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesConfig.
func (in *KubernetesConfig) DeepCopy() *KubernetesConfig {
if in == nil {
return nil
}
out := new(ManagementElasticsearch)
out := new(KubernetesConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
func (in *NodeSpecificConfig) DeepCopyInto(out *NodeSpecificConfig) {
*out = *in
if in.Count != nil {
in, out := &in.Count, &out.Count
if in.KubernetesConfig != nil {
in, out := &in.KubernetesConfig, &out.KubernetesConfig
*out = new(KubernetesConfig)
(*in).DeepCopyInto(*out)
}
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(Resources)
if in.CustomConfig != nil {
in, out := &in.CustomConfig, &out.CustomConfig
*out = new(string)
**out = **in
}
if in.ExtraEnvVariables != nil {
in, out := &in.ExtraEnvVariables, &out.ExtraEnvVariables
*out = new(map[string]string)
if **in != nil {
in, out := *in, *out
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
if in.Storage != nil {
in, out := &in.Storage, &out.Storage
*out = new(Storage)
(*in).DeepCopyInto(*out)
}
out.JVMOptions = in.JVMOptions
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(v1.Affinity)
(*in).DeepCopyInto(*out)
if in.JvmMaxMemory != nil {
in, out := &in.JvmMaxMemory, &out.JvmMaxMemory
*out = new(string)
**out = **in
}
if in.JvmMinMemory != nil {
in, out := &in.JvmMinMemory, &out.JvmMinMemory
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
func (in *NodeSpec) DeepCopy() *NodeSpec {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpecificConfig.
func (in *NodeSpecificConfig) DeepCopy() *NodeSpecificConfig {
if in == nil {
return nil
}
out := new(NodeSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDescription.
func (in *ResourceDescription) DeepCopy() *ResourceDescription {
if in == nil {
return nil
}
out := new(ResourceDescription)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Resources) DeepCopyInto(out *Resources) {
*out = *in
out.ResourceRequests = in.ResourceRequests
out.ResourceLimits = in.ResourceLimits
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources.
func (in *Resources) DeepCopy() *Resources {
if in == nil {
return nil
}
out := new(Resources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Rollover) DeepCopyInto(out *Rollover) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rollover.
func (in *Rollover) DeepCopy() *Rollover {
if in == nil {
return nil
}
out := new(Rollover)
out := new(NodeSpecificConfig)
in.DeepCopyInto(out)
return out
}
@ -790,11 +590,21 @@ func (in *Rollover) DeepCopy() *Rollover {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Security) DeepCopyInto(out *Security) {
*out = *in
if in.ExistingSecret != nil {
in, out := &in.ExistingSecret, &out.ExistingSecret
*out = new(string)
**out = **in
}
if in.TLSEnabled != nil {
in, out := &in.TLSEnabled, &out.TLSEnabled
*out = new(bool)
**out = **in
}
if in.AutoGeneratePassword != nil {
in, out := &in.AutoGeneratePassword, &out.AutoGeneratePassword
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Security.
@ -810,7 +620,16 @@ func (in *Security) DeepCopy() *Security {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Storage) DeepCopyInto(out *Storage) {
*out = *in
in.VolumeClaimTemplate.DeepCopyInto(&out.VolumeClaimTemplate)
if in.AccessModes != nil {
in, out := &in.AccessModes, &out.AccessModes
*out = make([]v1.PersistentVolumeAccessMode, len(*in))
copy(*out, *in)
}
if in.StorageClassName != nil {
in, out := &in.StorageClassName, &out.StorageClassName
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage.

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,56 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.8.0
creationTimestamp: null
name: indexlifecycles.logging.logging.opstreelabs.in
spec:
group: logging.logging.opstreelabs.in
names:
kind: IndexLifeCycle
listKind: IndexLifeCycleList
plural: indexlifecycles
singular: indexlifecycle
scope: Namespaced
versions:
- name: v1beta1
schema:
openAPIV3Schema:
description: IndexLifeCycle is the Schema for the indexlifecycles API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IndexLifeCycleSpec defines the desired state of IndexLifeCycle
properties:
foo:
description: Foo is an example field of IndexLifeCycle. Edit indexlifecycle_types.go
to remove/update
type: string
type: object
status:
description: IndexLifeCycleStatus defines the observed state of IndexLifeCycle
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@ -0,0 +1,56 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.8.0
creationTimestamp: null
name: indextemplates.logging.logging.opstreelabs.in
spec:
group: logging.logging.opstreelabs.in
names:
kind: IndexTemplate
listKind: IndexTemplateList
plural: indextemplates
singular: indextemplate
scope: Namespaced
versions:
- name: v1beta1
schema:
openAPIV3Schema:
description: IndexTemplate is the Schema for the indextemplates API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IndexTemplateSpec defines the desired state of IndexTemplate
properties:
foo:
description: Foo is an example field of IndexTemplate. Edit indextemplate_types.go
to remove/update
type: string
type: object
status:
description: IndexTemplateStatus defines the observed state of IndexTemplate
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,124 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.3.0
creationTimestamp: null
name: fluentds.logging.opstreelabs.in
spec:
additionalPrinterColumns:
- JSONPath: .spec.elasticsearch.host
name: Elasticsearch
type: string
- JSONPath: .spec.elasticsearch.tlsEnabled
name: TLS Enabled
type: boolean
group: logging.opstreelabs.in
names:
kind: Fluentd
listKind: FluentdList
plural: fluentds
singular: fluentd
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
description: Fluentd is the Schema for the fluentds API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: FluentdSpec defines the desired state of Fluentd
properties:
customConfiguration:
additionalProperties:
type: string
type: object
elasticsearch:
description: FluentdElasticsearch is the struct for elasticsearch configuration
for fluentd
properties:
host:
type: string
password:
type: string
tlsEnabled:
type: boolean
username:
type: string
type: object
image:
type: string
imagePullPolicy:
description: PullPolicy describes a policy for if/when to pull a container
image
type: string
logPrefix:
type: string
nodeSelector:
additionalProperties:
type: string
type: object
resources:
description: Resources describes requests and limits for the cluster
resources.
properties:
limits:
description: ResourceDescription describes CPU and memory resources
defined for a cluster.
properties:
cpu:
type: string
memory:
type: string
required:
- cpu
- memory
type: object
requests:
description: ResourceDescription describes CPU and memory resources
defined for a cluster.
properties:
cpu:
type: string
memory:
type: string
required:
- cpu
- memory
type: object
type: object
required:
- image
type: object
status:
description: FluentdStatus defines the observed state of Fluentd
properties:
elasticsearch:
type: string
type: object
type: object
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
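For the record, a manifest satisfying this removed v1alpha1 Fluentd schema would have looked roughly like the sketch below; only image was required, and the image reference, host, and credentials are placeholders:

apiVersion: logging.opstreelabs.in/v1alpha1
kind: Fluentd
metadata:
  name: example-fluentd
spec:
  image: example-registry/fluentd:v1.0   # placeholder image
  imagePullPolicy: IfNotPresent
  logPrefix: kubernetes
  elasticsearch:
    host: example-es        # placeholder host
    username: elastic
    password: changeme      # placeholder credential
    tlsEnabled: true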

View File

@ -1,105 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.3.0
creationTimestamp: null
name: indexlifecycles.logging.opstreelabs.in
spec:
group: logging.opstreelabs.in
names:
kind: IndexLifecycle
listKind: IndexLifecycleList
plural: indexlifecycles
singular: indexlifecycle
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: IndexLifecycle is the Schema for the indexlifecycles API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IndexLifecycleSpec defines the desired state of IndexLifecycle
properties:
delete:
description: Delete is the struct for index deletion
properties:
minAge:
type: string
type: object
elasticsearch:
description: ManagementElasticsearch is the struct for elasticsearch
configuration used by index management resources
properties:
host:
type: string
password:
type: string
username:
type: string
type: object
enabled:
type: boolean
rollover:
description: Rollover is the struct for index rollover
properties:
maxAge:
type: string
maxSize:
type: string
type: object
type: object
status:
description: IndexLifecycleStatus defines the observed state of IndexLifecycle
properties:
delete:
description: Delete is the struct for index deletion
properties:
minAge:
type: string
type: object
rollover:
description: Rollover is the struct for index rollover
properties:
maxAge:
type: string
maxSize:
type: string
type: object
type: object
type: object
additionalPrinterColumns:
- jsonPath: .spec.rollover.maxSize
name: Rollover Size
type: string
- jsonPath: .spec.rollover.maxAge
name: Rollover Age
type: string
- jsonPath: .spec.delete.minAge
name: Deletion Age
type: string
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@ -1,117 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.3.0
creationTimestamp: null
name: indextemplates.logging.opstreelabs.in
spec:
group: logging.opstreelabs.in
names:
kind: IndexTemplate
listKind: IndexTemplateList
plural: indextemplates
singular: indextemplate
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: IndexTemplate is the Schema for the indextemplates API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: IndexTemplateSpec defines the desired state of IndexTemplate
properties:
elasticsearch:
description: ManagementElasticsearch is the struct for elasticsearch
configuration used by index management resources
properties:
host:
type: string
password:
type: string
username:
type: string
type: object
enabled:
type: boolean
indexPatterns:
items:
type: string
type: array
settings:
description: IndexTemplateSettings defines the desired state for index
settings
properties:
indexLifeCycleName:
type: string
replicas:
format: int32
type: integer
rolloverAlias:
type: string
shards:
format: int32
type: integer
type: object
type: object
status:
description: IndexTemplateStatus defines the observed state of IndexTemplate
properties:
indexPatterns:
items:
type: string
type: array
settings:
description: IndexTemplateSettings defines the desired state for index
settings
properties:
indexLifeCycleName:
type: string
replicas:
format: int32
type: integer
rolloverAlias:
type: string
shards:
format: int32
type: integer
type: object
type: object
type: object
additionalPrinterColumns:
- jsonPath: .spec.settings.shards
name: Shards
type: integer
format: int32
- jsonPath: .spec.settings.replicas
name: Replicas
type: integer
format: int32
- jsonPath: .spec.settings.indexLifeCycleName
name: Lifecycle Policy
type: string
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
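A manifest satisfying this removed v1alpha1 IndexTemplate schema would have looked roughly like the sketch below (patterns, counts, and endpoint are placeholders):

apiVersion: logging.opstreelabs.in/v1alpha1
kind: IndexTemplate
metadata:
  name: example-template
spec:
  enabled: true
  indexPatterns:
    - "kubernetes-*"
  settings:
    shards: 3
    replicas: 1
    indexLifeCycleName: example-lifecycle
    rolloverAlias: kubernetes
  elasticsearch:
    host: "https://example-es:9200"   # placeholder endpoint
    username: elastic
    password: changeme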

View File

@ -1,695 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.3.0
creationTimestamp: null
name: kibanas.logging.opstreelabs.in
spec:
additionalPrinterColumns:
- JSONPath: .spec.elasticsearch.host
name: Elasticsearch
type: string
- JSONPath: .spec.elasticsearch.tlsEnabled
name: TLS Enabled
type: boolean
group: logging.opstreelabs.in
names:
kind: Kibana
listKind: KibanaList
plural: kibanas
singular: kibana
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
description: Kibana is the Schema for the kibanas API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: KibanaSpec defines the desired state of Kibana
properties:
affinity:
description: Affinity is a group of affinity scheduling rules.
properties:
nodeAffinity:
description: Describes node affinity scheduling rules for the pod.
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to nodes
that satisfy the affinity expressions specified by this field,
but it may choose a node that violates one or more of the
expressions. The node that is most preferred is the one with
the greatest sum of weights, i.e. for each node that meets
all of the scheduling requirements (resource request, requiredDuringScheduling
affinity expressions, etc.), compute a sum by iterating through
the elements of this field and adding "weight" to the sum
if the node matches the corresponding matchExpressions; the
node(s) with the highest sum are the most preferred.
items:
description: An empty preferred scheduling term matches all
objects with implicit weight 0 (i.e. it's a no-op). A null
preferred scheduling term matches no objects (i.e. is also
a no-op).
properties:
preference:
description: A node selector term, associated with the
corresponding weight.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the
operator is In or NotIn, the values array
must be non-empty. If the operator is Exists
or DoesNotExist, the values array must be
empty. If the operator is Gt or Lt, the values
array must have a single element, which will
be interpreted as an integer. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the
operator is In or NotIn, the values array
must be non-empty. If the operator is Exists
or DoesNotExist, the values array must be
empty. If the operator is Gt or Lt, the values
array must have a single element, which will
be interpreted as an integer. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
weight:
description: Weight associated with matching the corresponding
nodeSelectorTerm, in the range 1-100.
format: int32
type: integer
required:
- preference
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this
field are not met at scheduling time, the pod will not be
scheduled onto the node. If the affinity requirements specified
by this field cease to be met at some point during pod execution
(e.g. due to an update), the system may or may not try to
eventually evict the pod from its node.
properties:
nodeSelectorTerms:
description: Required. A list of node selector terms. The
terms are ORed.
items:
description: A null or empty node selector term matches
no objects. The requirements of them are ANDed. The
TopologySelectorTerm type implements a subset of the
NodeSelectorTerm.
properties:
matchExpressions:
description: A list of node selector requirements
by node's labels.
items:
description: A node selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the
operator is In or NotIn, the values array
must be non-empty. If the operator is Exists
or DoesNotExist, the values array must be
empty. If the operator is Gt or Lt, the values
array must have a single element, which will
be interpreted as an integer. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchFields:
description: A list of node selector requirements
by node's fields.
items:
description: A node selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists, DoesNotExist, Gt, and Lt.
type: string
values:
description: An array of string values. If the
operator is In or NotIn, the values array
must be non-empty. If the operator is Exists
or DoesNotExist, the values array must be
empty. If the operator is Gt or Lt, the values
array must have a single element, which will
be interpreted as an integer. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
type: array
required:
- nodeSelectorTerms
type: object
type: object
podAffinity:
description: Describes pod affinity scheduling rules (e.g. co-locate
this pod in the same node, zone, etc. as some other pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to nodes
that satisfy the affinity expressions specified by this field,
but it may choose a node that violates one or more of the
expressions. The node that is most preferred is the one with
the greatest sum of weights, i.e. for each node that meets
all of the scheduling requirements (resource request, requiredDuringScheduling
affinity expressions, etc.), compute a sum by iterating through
the elements of this field and adding "weight" to the sum
if the node has pods which match the corresponding podAffinityTerm;
the node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated
with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement is
a selector that contains values, a key, and
an operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If
the operator is Exists or DoesNotExist,
the values array must be empty. This array
is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
namespaces:
description: namespaces specifies which namespaces
the labelSelector applies to (matches against);
null or empty list means "this pod's namespace"
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods
matching the labelSelector in the specified namespaces,
where co-located is defined as running on a node
whose value of the label with key topologyKey matches
that of any node on which any of the selected pods
is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding
podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the affinity requirements specified by this
field are not met at scheduling time, the pod will not be
scheduled onto the node. If the affinity requirements specified
by this field cease to be met at some point during pod execution
(e.g. due to a pod label update), the system may or may not
try to eventually evict the pod from its node. When there
are multiple elements, the lists of nodes corresponding to
each podAffinityTerm are intersected, i.e. all terms must
be satisfied.
items:
description: Defines a set of pods (namely those matching
the labelSelector relative to the given namespace(s)) that
this pod should be co-located (affinity) or not co-located
(anti-affinity) with, where co-located is defined as running
on a node whose value of the label with key <topologyKey>
matches that of any node on which a pod of the set of pods
is running
properties:
labelSelector:
description: A label query over a set of resources, in
this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values
array must be non-empty. If the operator is
Exists or DoesNotExist, the values array must
be empty. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field
is "key", the operator is "In", and the values array
contains only "value". The requirements are ANDed.
type: object
type: object
namespaces:
description: namespaces specifies which namespaces the
labelSelector applies to (matches against); null or
empty list means "this pod's namespace"
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods matching
the labelSelector in the specified namespaces, where
co-located is defined as running on a node whose value
of the label with key topologyKey matches that of any
node on which any of the selected pods is running. Empty
topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
podAntiAffinity:
description: Describes pod anti-affinity scheduling rules (e.g.
avoid putting this pod in the same node, zone, etc. as some other
pod(s)).
properties:
preferredDuringSchedulingIgnoredDuringExecution:
description: The scheduler will prefer to schedule pods to nodes
that satisfy the anti-affinity expressions specified by this
field, but it may choose a node that violates one or more
of the expressions. The node that is most preferred is the
one with the greatest sum of weights, i.e. for each node that
meets all of the scheduling requirements (resource request,
requiredDuringScheduling anti-affinity expressions, etc.),
compute a sum by iterating through the elements of this field
and adding "weight" to the sum if the node has pods which
matches the corresponding podAffinityTerm; the node(s) with
the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm
fields are added per-node to find the most preferred node(s)
properties:
podAffinityTerm:
description: Required. A pod affinity term, associated
with the corresponding weight.
properties:
labelSelector:
description: A label query over a set of resources,
in this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are
ANDed.
items:
description: A label selector requirement is
a selector that contains values, a key, and
an operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's
relationship to a set of values. Valid
operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If
the operator is Exists or DoesNotExist,
the values array must be empty. This array
is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value".
The requirements are ANDed.
type: object
type: object
namespaces:
description: namespaces specifies which namespaces
the labelSelector applies to (matches against);
null or empty list means "this pod's namespace"
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods
matching the labelSelector in the specified namespaces,
where co-located is defined as running on a node
whose value of the label with key topologyKey matches
that of any node on which any of the selected pods
is running. Empty topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
weight:
description: weight associated with matching the corresponding
podAffinityTerm, in the range 1-100.
format: int32
type: integer
required:
- podAffinityTerm
- weight
type: object
type: array
requiredDuringSchedulingIgnoredDuringExecution:
description: If the anti-affinity requirements specified by
this field are not met at scheduling time, the pod will not
be scheduled onto the node. If the anti-affinity requirements
specified by this field cease to be met at some point during
pod execution (e.g. due to a pod label update), the system
may or may not try to eventually evict the pod from its node.
When there are multiple elements, the lists of nodes corresponding
to each podAffinityTerm are intersected, i.e. all terms must
be satisfied.
items:
description: Defines a set of pods (namely those matching
the labelSelector relative to the given namespace(s)) that
this pod should be co-located (affinity) or not co-located
(anti-affinity) with, where co-located is defined as running
on a node whose value of the label with key <topologyKey>
matches that of any node on which a pod of the set of pods
is running
properties:
labelSelector:
description: A label query over a set of resources, in
this case pods.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In,
NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values
array must be non-empty. If the operator is
Exists or DoesNotExist, the values array must
be empty. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field
is "key", the operator is "In", and the values array
contains only "value". The requirements are ANDed.
type: object
type: object
namespaces:
description: namespaces specifies which namespaces the
labelSelector applies to (matches against); null or
empty list means "this pod's namespace"
items:
type: string
type: array
topologyKey:
description: This pod should be co-located (affinity)
or not co-located (anti-affinity) with the pods matching
the labelSelector in the specified namespaces, where
co-located is defined as running on a node whose value
of the label with key topologyKey matches that of any
node on which any of the selected pods is running. Empty
topologyKey is not allowed.
type: string
required:
- topologyKey
type: object
type: array
type: object
type: object
elasticSecretName:
type: string
elasticsearch:
description: KibanaElasticsearch is the struct for elasticsearch configuration
for Kibana
properties:
host:
type: string
password:
type: string
tlsEnabled:
type: boolean
username:
type: string
type: object
image:
type: string
imagePullPolicy:
description: PullPolicy describes a policy for if/when to pull a container
image
type: string
replicas:
format: int32
type: integer
resources:
description: Resources describes requests and limits for the cluster
resources.
properties:
limits:
description: ResourceDescription describes CPU and memory resources
defined for a cluster.
properties:
cpu:
type: string
memory:
type: string
required:
- cpu
- memory
type: object
requests:
description: ResourceDescription describes CPU and memory resources
defined for a cluster.
properties:
cpu:
type: string
memory:
type: string
required:
- cpu
- memory
type: object
type: object
required:
- image
- replicas
type: object
status:
description: KibanaStatus defines the observed state of Kibana
properties:
elasticsearch:
description: 'INSERT ADDITIONAL STATUS FIELD - define observed state
of cluster Important: Run "make" to regenerate code after modifying
this file'
type: string
type: object
type: object
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
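Read end to end, the schema above admits a compact spec: image and replicas are required, everything else is optional. A minimal sketch of a conforming Kibana resource, assuming the logging.opstreelabs.in/v1alpha1 group/version implied by the versions list above (host, credentials, and sizing values are purely illustrative):

apiVersion: logging.opstreelabs.in/v1alpha1
kind: Kibana
metadata:
  name: kibana-example
spec:
  replicas: 2                                      # required, int32
  image: docker.elastic.co/kibana/kibana:7.8.0     # required
  imagePullPolicy: IfNotPresent
  elasticsearch:                                   # backing cluster connection
    host: "https://elasticsearch-cluster-client:9200"
    username: elastic
    password: "changeme"                           # illustrative only
    tlsEnabled: true
  elasticSecretName: elasticsearch-cluster-tls
  resources:
    requests:
      cpu: 500m                                    # cpu and memory are both required
      memory: 1Gi
    limits:
      cpu: 500m
      memory: 1Gi
  affinity:                                        # standard Kubernetes affinity block
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: app
                operator: In
                values: ["kibana"]
          topologyKey: kubernetes.io/hostname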

View File

@ -2,31 +2,31 @@
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/logging.opstreelabs.in_elasticsearches.yaml
- bases/logging.opstreelabs.in_kibanas.yaml
- bases/logging.opstreelabs.in_fluentds.yaml
- bases/logging.opstreelabs.in_indexlifecycles.yaml
- bases/logging.opstreelabs.in_indextemplates.yaml
# +kubebuilder:scaffold:crdkustomizeresource
- bases/logging.logging.opstreelabs.in_elasticsearches.yaml
- bases/logging.logging.opstreelabs.in_fluentds.yaml
- bases/logging.logging.opstreelabs.in_kibanas.yaml
- bases/logging.logging.opstreelabs.in_indexlifecycles.yaml
- bases/logging.logging.opstreelabs.in_indextemplates.yaml
#+kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_elasticsearches.yaml
#- patches/webhook_in_kibanas.yaml
#- patches/webhook_in_fluentds.yaml
#- patches/webhook_in_kibanas.yaml
#- patches/webhook_in_indexlifecycles.yaml
#- patches/webhook_in_indextemplates.yaml
# +kubebuilder:scaffold:crdkustomizewebhookpatch
#+kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_elasticsearches.yaml
#- patches/cainjection_in_kibanas.yaml
#- patches/cainjection_in_fluentds.yaml
#- patches/cainjection_in_kibanas.yaml
#- patches/cainjection_in_indexlifecycles.yaml
#- patches/cainjection_in_indextemplates.yaml
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:

View File

@ -4,13 +4,15 @@ nameReference:
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhookClientConfig/service/name
path: spec/conversion/webhook/clientConfig/service/name
namespace:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhookClientConfig/service/namespace
path: spec/conversion/webhook/clientConfig/service/namespace
create: false
varReference:

View File

@ -1,8 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: elasticsearches.logging.opstreelabs.in
name: elasticsearches.logging.logging.opstreelabs.in

View File

@ -1,8 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: fluentds.logging.opstreelabs.in
name: fluentds.logging.logging.opstreelabs.in

View File

@ -1,8 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: indexlifecycles.logging.opstreelabs.in
name: indexlifecycles.logging.logging.opstreelabs.in

View File

@ -1,8 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: indextemplates.logging.opstreelabs.in
name: indextemplates.logging.logging.opstreelabs.in

View File

@ -1,8 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: kibanas.logging.opstreelabs.in
name: kibanas.logging.logging.opstreelabs.in

View File

@ -1,17 +1,16 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: elasticsearches.logging.opstreelabs.in
name: elasticsearches.logging.logging.opstreelabs.in
spec:
conversion:
strategy: Webhook
webhookClientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@ -1,17 +1,16 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: fluentds.logging.opstreelabs.in
name: fluentds.logging.logging.opstreelabs.in
spec:
conversion:
strategy: Webhook
webhookClientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@ -1,17 +1,16 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: indexlifecycles.logging.opstreelabs.in
name: indexlifecycles.logging.logging.opstreelabs.in
spec:
conversion:
strategy: Webhook
webhookClientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@ -1,17 +1,16 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: indextemplates.logging.opstreelabs.in
name: indextemplates.logging.logging.opstreelabs.in
spec:
conversion:
strategy: Webhook
webhookClientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@ -1,17 +1,16 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: kibanas.logging.opstreelabs.in
name: kibanas.logging.logging.opstreelabs.in
spec:
conversion:
strategy: Webhook
webhookClientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

View File

@ -0,0 +1,74 @@
# Adds namespace to all resources.
namespace: logging-operator-system
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: logging-operator-
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
#- manager_config_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service

View File

@ -0,0 +1,34 @@
# This patch injects a sidecar container which is an HTTP proxy for the
# controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=0"
ports:
- containerPort: 8443
protocol: TCP
name: https
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
- name: manager
args:
- "--health-probe-bind-address=:8081"
- "--metrics-bind-address=127.0.0.1:8080"
- "--leader-elect"

View File

@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
args:
- "--config=controller_manager_config.yaml"
volumeMounts:
- name: manager-config
mountPath: /controller_manager_config.yaml
subPath: controller_manager_config.yaml
volumes:
- name: manager-config
configMap:
name: manager-config

View File

@ -0,0 +1,11 @@
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
kind: ControllerManagerConfig
health:
healthProbeBindAddress: :8081
metrics:
bindAddress: 127.0.0.1:8080
webhook:
port: 9443
leaderElection:
leaderElect: true
resourceName: c5f74071.logging.opstreelabs.in

View File

@ -0,0 +1,10 @@
resources:
- manager.yaml
generatorOptions:
disableNameSuffixHash: true
configMapGenerator:
- name: manager-config
files:
- controller_manager_config.yaml
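Because disableNameSuffixHash is set, the generator emits a ConfigMap with a stable name instead of a content-hashed one; after the namePrefix from config/default it lands in the cluster as logging-operator-manager-config. A sketch of the generated object, assuming standard kustomize configMapGenerator behavior:

apiVersion: v1
kind: ConfigMap
metadata:
  name: manager-config            # logging-operator-manager-config once namePrefix is applied
data:
  controller_manager_config.yaml: |
    # verbatim contents of the controller_manager_config.yaml shown above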

View File

@ -1,8 +1,13 @@
apiVersion: v1
kind: Namespace
metadata:
name: ot-operators
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logging-operator
namespace: ot-operators
labels:
control-plane: logging-operator
spec:
@ -12,24 +17,40 @@ spec:
replicas: 1
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: manager
labels:
control-plane: logging-operator
spec:
securityContext:
runAsNonRoot: true
containers:
- command:
- /manager
args:
- --enable-leader-election
image: quay.io/opstree/logging-operator:v0.2.0
imagePullPolicy: Always
name: logging-operator
- --leader-elect
image: quay.io/opstree/logging-operator:v0.3.0
name: manager
securityContext:
allowPrivilegeEscalation: false
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 100m
memory: 100Mi
cpu: 500m
memory: 128Mi
requests:
cpu: 100m
memory: 100Mi
terminationGracePeriodSeconds: 10
serviceAccount: logging-operator
cpu: 10m
memory: 64Mi
serviceAccountName: logging-operator
terminationGracePeriodSeconds: 10

View File

@ -0,0 +1,27 @@
# These resources constitute the fully configured set of manifests
# used to generate the 'manifests/' directory in a bundle.
resources:
- bases/logging-operator.clusterserviceversion.yaml
- ../default
- ../samples
- ../scorecard
# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix.
# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager.
# These patches remove the unnecessary "cert" volume and its manager container volumeMount.
#patchesJson6902:
#- target:
# group: apps
# version: v1
# kind: Deployment
# name: controller-manager
# namespace: system
# patch: |-
# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
# - op: remove
# path: /spec/template/spec/containers/1/volumeMounts/0
# # Remove the "cert" volume, since OLM will create and mount a set of certs.
# # Update the indices in this path if adding or removing volumes in the manager's Deployment.
# - op: remove
# path: /spec/template/spec/volumes/0

View File

@ -11,6 +11,10 @@ spec:
endpoints:
- path: /metrics
port: https
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true
selector:
matchLabels:
control-plane: controller-manager

View File

@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-reader
rules:
- nonResourceURLs:
- "/metrics"
verbs:
- get
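The metrics-reader ClusterRole above is not bound to any subject by itself; whatever scrapes the protected /metrics endpoint needs a binding along these lines. A sketch, where the prometheus-k8s ServiceAccount and the monitoring namespace are assumptions to be replaced with the actual scraper identity:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-reader-binding    # hypothetical name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metrics-reader
subjects:
  - kind: ServiceAccount
    name: prometheus-k8s          # assumed scraper service account
    namespace: monitoring         # assumed namespace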

View File

@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create

View File

@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxy-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: controller-manager-metrics-service
namespace: system
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
selector:
control-plane: controller-manager

View File

@ -0,0 +1,24 @@
# permissions for end users to edit elasticsearches.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elasticsearch-editor-role
rules:
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- elasticsearches
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- elasticsearches/status
verbs:
- get

View File

@ -0,0 +1,20 @@
# permissions for end users to view elasticsearches.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elasticsearch-viewer-role
rules:
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- elasticsearches
verbs:
- get
- list
- watch
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- elasticsearches/status
verbs:
- get
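Neither the editor nor the viewer role ships with a binding, so granting them is left to the cluster admin. A sketch of a namespace-scoped grant — binding a ClusterRole through a RoleBinding confines it to that namespace — with the subject and namespace as placeholder assumptions:

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: elasticsearch-editors     # hypothetical name
  namespace: logging              # assumed team namespace
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: elasticsearch-editor-role
subjects:
  - kind: User
    name: jane@example.com        # assumed subject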

View File

@ -0,0 +1,24 @@
# permissions for end users to edit fluentds.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fluentd-editor-role
rules:
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- fluentds
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- fluentds/status
verbs:
- get

View File

@ -0,0 +1,20 @@
# permissions for end users to view fluentds.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fluentd-viewer-role
rules:
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- fluentds
verbs:
- get
- list
- watch
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- fluentds/status
verbs:
- get

View File

@ -5,7 +5,7 @@ metadata:
name: indexlifecycle-editor-role
rules:
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indexlifecycles
verbs:
@ -17,7 +17,7 @@ rules:
- update
- watch
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indexlifecycles/status
verbs:

View File

@ -5,7 +5,7 @@ metadata:
name: indexlifecycle-viewer-role
rules:
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indexlifecycles
verbs:
@ -13,7 +13,7 @@ rules:
- list
- watch
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indexlifecycles/status
verbs:

View File

@ -5,7 +5,7 @@ metadata:
name: indextemplate-editor-role
rules:
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indextemplates
verbs:
@ -17,7 +17,7 @@ rules:
- update
- watch
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indextemplates/status
verbs:

View File

@ -5,7 +5,7 @@ metadata:
name: indextemplate-viewer-role
rules:
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indextemplates
verbs:
@ -13,7 +13,7 @@ rules:
- list
- watch
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indextemplates/status
verbs:

View File

@ -0,0 +1,24 @@
# permissions for end users to edit kibanas.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kibana-editor-role
rules:
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- kibanas
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- kibanas/status
verbs:
- get

View File

@ -0,0 +1,20 @@
# permissions for end users to view kibanas.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kibana-viewer-role
rules:
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- kibanas
verbs:
- get
- list
- watch
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- kibanas/status
verbs:
- get

View File

@ -0,0 +1,18 @@
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# Comment the following 4 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
- auth_proxy_service.yaml
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml

View File

@ -0,0 +1,37 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: leader-election-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
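Nothing in this Role is applied by hand: controller-runtime uses these permissions to maintain its leader-election record. With the resourceName from controller_manager_config.yaml above, the Lease held by the elected manager looks roughly like this (holderIdentity is illustrative; the controller writes and renews it automatically):

apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
  name: c5f74071.logging.opstreelabs.in   # resourceName from the manager config
  namespace: ot-operators                 # namespace the manager runs in
spec:
  holderIdentity: logging-operator-7d9f-abc123   # illustrative pod identity
  leaseDurationSeconds: 15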

View File

@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: leader-election-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

View File

@ -1,13 +1,89 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
name: logging-operator
rules:
- apiGroups:
- logging.opstreelabs.in
- ""
resources:
- configmaps
- events
- secrets
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- namespaces
- pods
- serviceaccounts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- elasticsearches
verbs:
@ -19,7 +95,13 @@ rules:
- update
- watch
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- elasticsearches/finalizers
verbs:
- update
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- elasticsearches/status
verbs:
@ -27,7 +109,7 @@ rules:
- patch
- update
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- fluentds
verbs:
@ -39,7 +121,13 @@ rules:
- update
- watch
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- fluentds/finalizers
verbs:
- update
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- fluentds/status
verbs:
@ -47,7 +135,7 @@ rules:
- patch
- update
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indexlifecycles
verbs:
@ -59,7 +147,13 @@ rules:
- update
- watch
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indexlifecycles/finalizers
verbs:
- update
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- indexlifecycles/status
verbs:
@ -67,7 +161,7 @@ rules:
- patch
- update
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indextemplates
verbs:
@ -79,7 +173,13 @@ rules:
- update
- watch
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- indextemplates/finalizers
verbs:
- update
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- indextemplates/status
verbs:
@ -87,7 +187,7 @@ rules:
- patch
- update
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- kibanas
verbs:
@ -99,10 +199,29 @@ rules:
- update
- watch
- apiGroups:
- logging.opstreelabs.in
- logging.logging.opstreelabs.in
resources:
- kibanas/finalizers
verbs:
- update
- apiGroups:
- logging.logging.opstreelabs.in
resources:
- kibanas/status
verbs:
- get
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
verbs:
- create
- delete
- get
- list
- patch
- update
- watch

View File

@ -1,12 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: manager-rolebinding
name: logging-operator-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
name: logging-operator
subjects:
- kind: ServiceAccount
name: logging-operator
namespace: logging-operator
namespace: ot-operators

View File

@ -1,5 +1,5 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: logging-operator
namespace: ot-operators

View File

@ -1,141 +0,0 @@
---
apiVersion: logging.opstreelabs.in/v1alpha1
kind: Elasticsearch
metadata:
name: elasticsearch-cluster
labels:
organization: opstree
owned-by: Abhishek
namespace: logging-operator
spec:
clusterName: "production"
image: "docker.elastic.co/elasticsearch/elasticsearch:7.8.0"
imagePullPolicy: "IfNotPresent"
security:
tlsEnabled: true
password: "Opstree@1234"
plugins: ["repository-s3"]
master:
enabled: true
count: 3
resources:
requests:
cpu: 500m
memory: 2048Mi
limits:
cpu: 500m
memory: 2048Mi
storage:
volumeClaimTemplate:
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
selector: {}
jvmOptions:
Xmx: "1g"
Xms: "1g"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- "elasticsearch-cluster-master"
topologyKey: kubernetes.io/hostname
data:
enabled: true
count: 2
resources:
requests:
cpu: 500m
memory: 2048Mi
limits:
cpu: 500m
memory: 2048Mi
storage:
volumeClaimTemplate:
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
selector: {}
jvmOptions:
Xmx: "1g"
Xms: "1g"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- "elasticsearch-cluster-data"
topologyKey: kubernetes.io/hostname
ingestion:
enabled: true
count: 1
resources:
requests:
cpu: 500m
memory: 2048Mi
limits:
cpu: 500m
memory: 2048Mi
storage:
volumeClaimTemplate:
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
selector: {}
jvmOptions:
Xmx: "1g"
Xms: "1g"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- "elasticsearch-cluster-ingestion"
topologyKey: kubernetes.io/hostname
client:
enabled: true
count: 1
resources:
requests:
cpu: 500m
memory: 2048Mi
limits:
cpu: 500m
memory: 2048Mi
storage:
volumeClaimTemplate:
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
selector: {}
jvmOptions:
Xmx: "1g"
Xms: "1g"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- "elasticsearch-cluster-ingestion"
topologyKey: kubernetes.io/hostname

View File

@ -1,42 +0,0 @@
---
apiVersion: logging.opstreelabs.in/v1alpha1
kind: Fluentd
metadata:
name: fluentd-oos
labels:
organization: opstree
owned-by: Abhishek
namespace: logging-operator
spec:
image: fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch
imagePullPolicy: "IfNotPresent"
elasticsearch:
host: "elasticsearch-cluster-ingestion"
username: elastic
password: "Opstree@1234"
tlsEnabled: true
logPrefix: namespace # namespace or podName
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: 100m
memory: 200Mi
customConfiguration:
kubelet.conf: |-
<source>
@type systemd
@id in_systemd_kubelet
matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
path /var/log/fluentd-journald-kubelet-cursor.json
</storage>
<entry>
fields_strip_underscores true
</entry>
read_from_head true
tag kubelet
</source>

View File

@ -1,17 +0,0 @@
---
apiVersion: logging.opstreelabs.in/v1alpha1
kind: IndexTemplate
metadata:
name: indextemplate-sample
spec:
enabled: true
indexPatterns: ["kubernetes-default-*", "kubernetes-istio-system-*"]
settings:
shards: 1
replicas: 1
indexLifeCycleName: indexlifecycle-example
rolloverAlias: "kubernetes"
elasticsearch:
host: "https://elasticsearch-cluster-ingestion:9200"
username: elastic
password: "Opstree@1234"

View File

@ -1,36 +0,0 @@
---
apiVersion: logging.opstreelabs.in/v1alpha1
kind: Kibana
metadata:
name: kibana
labels:
organization: opstree
owned-by: Abhishek
namespace: logging-operator
spec:
replicas: 1
image: docker.elastic.co/kibana/kibana:7.8.0
imagePullPolicy: "IfNotPresent"
elasticsearch:
host: "https://elasticsearch-cluster-client:9200"
username: elastic
password: "Opstree@1234"
tlsEnabled: true
elasticSecretName: elasticsearch-cluster-tls
resources:
requests:
cpu: 1000m
memory: 1Gi
limits:
cpu: 1000m
memory: 1Gi
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- "kibana"
topologyKey: kubernetes.io/hostname

View File

@ -1,7 +1,8 @@
## This file is auto-generated, do not modify ##
## Append samples you want in your CSV to this file as resources ##
resources:
- logging_v1alpha1_elasticsearch.yaml
- logging_v1alpha1_kibana.yaml
- logging_v1alpha1_fluentd.yaml
- logging_v1alpha1_indexlifecycle.yaml
- logging_v1alpha1_indextemplate.yaml
- logging_v1beta1_elasticsearch.yaml
- logging_v1beta1_fluentd.yaml
- logging_v1beta1_kibana.yaml
- logging_v1beta1_indexlifecycle.yaml
- logging_v1beta1_indextemplate.yaml
#+kubebuilder:scaffold:manifestskustomizesamples

View File

@ -1,16 +0,0 @@
---
apiVersion: logging.opstreelabs.in/v1alpha1
kind: IndexLifecycle
metadata:
name: indexlifecycle-example
spec:
enabled: true
rollover:
maxSize: 2GB
maxAge: 30d
delete:
minAge: 15d
elasticsearch:
host: "https://elasticsearch-cluster-ingestion:9200"
username: elastic
password: "Opstree@1234"

View File

@ -0,0 +1,6 @@
apiVersion: logging.logging.opstreelabs.in/v1beta1
kind: Elasticsearch
metadata:
name: elasticsearch-sample
spec:
# TODO(user): Add fields here
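The scaffold leaves the spec empty. Judging from the fields the reconciler further down reads — esVersion, a security block with tlsEnabled and autoGeneratePassword, and per-role blocks for master, data, ingestion, and client nodes, each carrying replicas — a filled-in version might look like the sketch below. The YAML field names are inferred from the Go struct fields in the controller diff, not confirmed against the API types:

apiVersion: logging.logging.opstreelabs.in/v1beta1
kind: Elasticsearch
metadata:
  name: elasticsearch-sample
spec:
  esVersion: "7.17.0"        # inferred from Spec.ESVersion
  esSecurity:                # field name assumed; struct is Spec.Security
    tlsEnabled: true
    autoGeneratePassword: true
  esMaster:                  # inferred from Spec.ESMaster
    replicas: 3
  esData:                    # inferred from Spec.ESData
    replicas: 2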

View File

@ -0,0 +1,6 @@
apiVersion: logging.logging.opstreelabs.in/v1beta1
kind: Fluentd
metadata:
name: fluentd-sample
spec:
# TODO(user): Add fields here

View File

@ -0,0 +1,6 @@
apiVersion: logging.logging.opstreelabs.in/v1beta1
kind: IndexLifeCycle
metadata:
name: indexlifecycle-sample
spec:
# TODO(user): Add fields here

View File

@ -0,0 +1,6 @@
apiVersion: logging.logging.opstreelabs.in/v1beta1
kind: IndexTemplate
metadata:
name: indextemplate-sample
spec:
# TODO(user): Add fields here

View File

@ -0,0 +1,6 @@
apiVersion: logging.logging.opstreelabs.in/v1beta1
kind: Kibana
metadata:
name: kibana-sample
spec:
# TODO(user): Add fields here

View File

@ -0,0 +1,7 @@
apiVersion: scorecard.operatorframework.io/v1alpha3
kind: Configuration
metadata:
name: config
stages:
- parallel: true
tests: []

View File

@ -0,0 +1,16 @@
resources:
- bases/config.yaml
patchesJson6902:
- path: patches/basic.config.yaml
target:
group: scorecard.operatorframework.io
version: v1alpha3
kind: Configuration
name: config
- path: patches/olm.config.yaml
target:
group: scorecard.operatorframework.io
version: v1alpha3
kind: Configuration
name: config
#+kubebuilder:scaffold:patchesJson6902

View File

@ -0,0 +1,10 @@
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- basic-check-spec
image: quay.io/operator-framework/scorecard-test:v1.20.0
labels:
suite: basic
test: basic-check-spec-test

View File

@ -0,0 +1,50 @@
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-bundle-validation
image: quay.io/operator-framework/scorecard-test:v1.20.0
labels:
suite: olm
test: olm-bundle-validation-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-crds-have-validation
image: quay.io/operator-framework/scorecard-test:v1.20.0
labels:
suite: olm
test: olm-crds-have-validation-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-crds-have-resources
image: quay.io/operator-framework/scorecard-test:v1.20.0
labels:
suite: olm
test: olm-crds-have-resources-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-spec-descriptors
image: quay.io/operator-framework/scorecard-test:v1.20.0
labels:
suite: olm
test: olm-spec-descriptors-test
- op: add
path: /stages/0/tests/-
value:
entrypoint:
- scorecard-test
- olm-status-descriptors
image: quay.io/operator-framework/scorecard-test:v1.20.0
labels:
suite: olm
test: olm-status-descriptors-test

View File

@ -1,6 +0,0 @@
resources:
- manifests.yaml
- service.yaml
configurations:
- kustomizeconfig.yaml

View File

@ -1,25 +0,0 @@
# the following config is for teaching kustomize where to look at when substituting vars.
# It requires kustomize v2.1.0 or newer to work properly.
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: MutatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/name
- kind: ValidatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/name
namespace:
- kind: MutatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/namespace
create: true
- kind: ValidatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/namespace
create: true
varReference:
- path: metadata/annotations

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: webhook-service
namespace: system
spec:
ports:
- port: 443
targetPort: 9443
selector:
control-plane: controller-manager

View File

@ -1,5 +1,5 @@
/*
Copyright 2020 Opstree Solutions.
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -18,46 +18,41 @@ package controllers
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"logging-operator/k8sutils/secret"
clientservice "logging-operator/k8sutils/service/client"
dataservice "logging-operator/k8sutils/service/data"
ingestionservice "logging-operator/k8sutils/service/ingestion"
masterservice "logging-operator/k8sutils/service/master"
clientnode "logging-operator/k8sutils/statefulset/client"
"logging-operator/k8sutils/statefulset/data"
"logging-operator/k8sutils/statefulset/ingestion"
"logging-operator/k8sutils/statefulset/master"
elasticutils "logging-operator/utils/elasticsearch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
// "sigs.k8s.io/controller-runtime/pkg/log"
loggingv1alpha1 "logging-operator/api/v1alpha1"
loggingv1beta1 "logging-operator/api/v1beta1"
"logging-operator/elasticgo"
"logging-operator/k8sgo"
"logging-operator/k8sgo/elasticsearch"
)
// ElasticsearchReconciler reconciles a Elasticsearch object
type ElasticsearchReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=logging.opstreelabs.in,resources=elasticsearches,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=logging.opstreelabs.in,resources=elasticsearches/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=elasticsearches,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=elasticsearches/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=elasticsearches/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=configmaps;events;services;secrets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// Reconcile will reconcile for elasticsearch
func (r *ElasticsearchReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
// _ = context.Background()
var defaultCountForNode int32 = 0
reqLogger := r.Log.WithValues("elasticsearch", req.NamespacedName)
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
func (r *ElasticsearchReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
instance := &loggingv1beta1.Elasticsearch{}
err := r.Client.Get(context.TODO(), req.NamespacedName, instance)
instance := &loggingv1alpha1.Elasticsearch{}
err := r.Get(context.TODO(), req.NamespacedName, instance)
if err != nil {
if errors.IsNotFound(err) {
return ctrl.Result{RequeueAfter: time.Second * 10}, nil
@ -65,74 +60,144 @@ func (r *ElasticsearchReconciler) Reconcile(req ctrl.Request) (ctrl.Result, erro
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = secretManager(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = k8selastic.CreateElasticSearchService(instance, "master")
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = k8selastic.SetupElasticSearchMaster(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
if instance.Spec.ESData != nil {
err = k8selastic.SetupElasticSearchData(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = k8selastic.CreateElasticSearchService(instance, "data")
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
}
if instance.Spec.ESIngestion != nil {
err = k8selastic.SetupElasticSearchIngestion(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = k8selastic.CreateElasticSearchService(instance, "ingestion")
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
}
if instance.Spec.ESClient != nil {
err = k8selastic.SetupElasticSearchClient(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = k8selastic.CreateElasticSearchService(instance, "client")
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
}
instance.Status.ESVersion = instance.Spec.ESVersion
instance.Status.ESMaster = instance.Spec.ESMaster.Replicas
if instance.Spec.ESData != nil {
instance.Status.ESData = instance.Spec.ESData.Replicas
}
if instance.Spec.ESIngestion != nil {
instance.Status.ESIngestion = instance.Spec.ESIngestion.Replicas
}
if instance.Spec.ESClient != nil {
instance.Status.ESClient = instance.Spec.ESClient.Replicas
}
instance.Status.ClusterName = instance.Spec.ClusterName
clusterInfo, err := elasticgo.GetElasticClusterDetails(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
instance.Status.ClusterState = clusterInfo.ClusterState
instance.Status.ActiveShards = &clusterInfo.Shards
instance.Status.Indices = &clusterInfo.Indices
if clusterInfo.ClusterState == "green" {
err = serviceAccountSecretManager(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
}
if err := r.Status().Update(context.TODO(), instance); err != nil {
if errors.IsConflict(err) {
reqLogger.Error(err, "Conflict updating Elasticsearch status, requeueing")
return ctrl.Result{Requeue: true}, nil
}
return ctrl.Result{}, err
}
reqLogger.Info("Will reconcile after 10 seconds", "Elasticsearch.Namespace", instance.Namespace, "Elasticsearch.Name", instance.Name)
return ctrl.Result{RequeueAfter: time.Second * 10}, nil
}
// secretManager is a method to create and manage secrets
func secretManager(instance *loggingv1beta1.Elasticsearch) error {
if instance.Spec.Security != nil {
if instance.Spec.Security.AutoGeneratePassword != nil && *instance.Spec.Security.AutoGeneratePassword {
secretName := fmt.Sprintf("%s-%s", instance.ObjectMeta.Name, "password")
_, err := k8sgo.GetSecret(secretName, instance.Namespace)
if err != nil {
err = k8selastic.CreateElasticAutoSecret(instance)
if err != nil {
return err
}
}
}
}
if instance.Spec.Security != nil {
if instance.Spec.Security.TLSEnabled != nil && *instance.Spec.Security.TLSEnabled {
tlsSecretName := fmt.Sprintf("%s-%s", instance.ObjectMeta.Name, "tls-cert")
_, err := k8sgo.GetSecret(tlsSecretName, instance.Namespace)
if err != nil {
err = k8selastic.CreateElasticTLSSecret(instance)
if err != nil {
return err
}
}
}
}
return nil
}
// serviceAccountSecretManager is a method to manage the Elasticsearch service account token secret
func serviceAccountSecretManager(instance *loggingv1beta1.Elasticsearch) error {
if instance.Spec.Security != nil {
tokenSecretName := fmt.Sprintf("%s-sa-token", instance.ObjectMeta.Name)
_, err := k8sgo.GetSecret(tokenSecretName, instance.Namespace)
if err != nil {
err = k8selastic.CreateServiceAccountToken(instance)
if err != nil {
return err
}
}
}
return nil
}
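
Neither CreateElasticAutoSecret nor CreateElasticTLSSecret appears in this diff; both live in the k8selastic package. As a rough illustration of the auto-generated password path, here is a self-contained sketch using client-go. The explicit clientset parameter, the helper's signature, and the password key layout are assumptions for the sketch; only the <name>-password naming follows secretManager above.

package k8selastic

import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createElasticAutoSecret sketches what generating the <name>-password
// secret could look like; the real helper's body is not shown in this diff.
func createElasticAutoSecret(clientset kubernetes.Interface, namespace, crName string) error {
	buf := make([]byte, 24) // 24 random bytes -> 32-character base64 password
	if _, err := rand.Read(buf); err != nil {
		return err
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-%s", crName, "password"),
			Namespace: namespace,
		},
		StringData: map[string]string{"password": base64.StdEncoding.EncodeToString(buf)},
	}
	_, err := clientset.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
	return err
}
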
// SetupWithManager sets up the controller with the Manager.
func (r *ElasticsearchReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&loggingv1beta1.Elasticsearch{}).
Complete(r)
}
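
For orientation, a reconciler like this is registered with a controller-runtime manager in the project's main.go. A minimal sketch of that wiring, assuming the standard kubebuilder layout and the same Client/Log/Scheme fields the FluentdReconciler below declares (package paths are illustrative, and error handling is trimmed to os.Exit for brevity):

package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	loggingv1beta1 "logging-operator/api/v1beta1"
	"logging-operator/controllers"
)

func main() {
	// The scheme must know both core Kubernetes types and the operator's own API group.
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	_ = loggingv1beta1.AddToScheme(scheme)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}

	// SetupWithManager hooks the reconciler into the manager's watch loop,
	// so every Elasticsearch event lands in the Reconcile method above.
	if err := (&controllers.ElasticsearchReconciler{
		Client: mgr.GetClient(),
		Log:    ctrl.Log.WithName("controllers").WithName("Elasticsearch"),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}
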

controllers/fluentd_controller.go

@@ -1,5 +1,5 @@
/*
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,65 +20,96 @@ import (
"context"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"logging-operator/k8sutils/clusterrole"
"logging-operator/k8sutils/clusterrolebindings"
"logging-operator/k8sutils/configmap"
"logging-operator/k8sutils/daemonset"
"logging-operator/k8sutils/serviceaccount"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
loggingv1alpha1 "logging-operator/api/v1alpha1"
loggingv1beta1 "logging-operator/api/v1beta1"
"logging-operator/k8sgo"
"logging-operator/k8sgo/fluentd"
)
// FluentdReconciler reconciles a Fluentd object
type FluentdReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=fluentds,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=fluentds/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=fluentds/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=serviceaccounts;pods;namespaces,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=clusterroles;clusterrolebindings,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps,resources=daemonsets,verbs=get;list;watch;create;update;patch;delete
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
func (r *FluentdReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
reqLogger := r.Log.WithValues("fluentd", req.NamespacedName)
instance := &loggingv1beta1.Fluentd{}
err := r.Client.Get(context.TODO(), req.NamespacedName, instance)
if err != nil {
if errors.IsNotFound(err) {
return ctrl.Result{RequeueAfter: time.Second * 10}, nil
}
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = setupFluentdRBAC(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = k8sfluentd.CreateFluentdConfigMap(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = k8sfluentd.CreateFluentdDaemonSet(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
daemonSetCount, err := k8sgo.GetDaemonSetCount(instance.Namespace, instance.ObjectMeta.Name)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
instance.Status.TotalAgents = daemonSetCount
if err := r.Status().Update(context.TODO(), instance); err != nil {
if errors.IsConflict(err) {
return ctrl.Result{Requeue: true}, nil
}
return ctrl.Result{}, err
}
reqLogger.Info("Will reconcile after 10 seconds", "Fluentd.Namespace", instance.Namespace, "Fluentd.Name", instance.Name)
return ctrl.Result{RequeueAfter: time.Second * 10}, nil
}
// setupFluentdRBAC is a method to set up RBAC access for Fluentd
func setupFluentdRBAC(instance *loggingv1beta1.Fluentd) error {
_, err := k8sgo.GetServiceAccount(instance.ObjectMeta.Name, instance.Namespace)
if err != nil {
err = k8sfluentd.CreateFluentdServiceAccount(instance)
if err != nil {
return err
}
}
_, err = k8sgo.GetClusterRole(instance.ObjectMeta.Name)
if err != nil {
err = k8sfluentd.CreateFluentdClusterRole(instance)
if err != nil {
return err
}
}
_, err = k8sgo.GetClusterRoleBinding(instance.ObjectMeta.Name)
if err != nil {
err = k8sfluentd.CreateFluentdClusterRoleBinding(instance)
if err != nil {
return err
}
}
return nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *FluentdReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&loggingv1beta1.Fluentd{}).
Complete(r)
}
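
k8sgo.GetDaemonSetCount is called above to fill Status.TotalAgents, but its body is not part of this diff. A plausible sketch with client-go follows; the real helper presumably reads a package-level clientset, so the explicit parameter here is an assumption made to keep the example self-contained:

package k8sgo

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// GetDaemonSetCount could report how many fluentd pods the DaemonSet
// schedules, i.e. one agent per eligible node.
func GetDaemonSetCount(clientset kubernetes.Interface, namespace, name string) (*int32, error) {
	ds, err := clientset.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	count := ds.Status.DesiredNumberScheduled
	return &count, nil
}
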

controllers/indexlifecycle_controller.go

@@ -1,5 +1,5 @@
/*
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -18,60 +18,45 @@ package controllers
import (
"context"
"k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
elasticutils "logging-operator/utils/elasticsearch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
loggingv1alpha1 "logging-operator/api/v1alpha1"
loggingv1beta1 "logging-operator/api/v1beta1"
)
// IndexLifeCycleReconciler reconciles a IndexLifeCycle object
type IndexLifeCycleReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=indexlifecycles,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=indexlifecycles/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=indexlifecycles/finalizers,verbs=update
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the IndexLifeCycle object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile
func (r *IndexLifeCycleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
_ = log.FromContext(ctx)
// TODO(user): your logic here
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *IndexLifeCycleReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&loggingv1beta1.IndexLifeCycle{}).
Complete(r)
}
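
The v1alpha1 logic deleted above pushed lifecycle policies through elasticutils; the regenerated v1beta1 scaffold still carries the TODO. For reference, Elasticsearch manages these policies via the _ilm/policy/<name> REST endpoint (index templates work the same way via _index_template/<name>), so a reimplementation might boil down to a PUT like the following sketch. The package name, URL, credentials, and insecure TLS setting are illustrative assumptions:

package esutils

import (
	"bytes"
	"crypto/tls"
	"fmt"
	"net/http"
)

// putILMPolicy sends a lifecycle policy document to Elasticsearch's ILM API.
func putILMPolicy(esURL, name, user, password string, policy []byte) error {
	req, err := http.NewRequest(http.MethodPut, fmt.Sprintf("%s/_ilm/policy/%s", esURL, name), bytes.NewReader(policy))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(user, password)

	// Self-signed certificates are common for in-cluster Elasticsearch,
	// so certificate verification is skipped in this sketch.
	client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("ILM policy update failed: %s", resp.Status)
	}
	return nil
}
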

controllers/indextemplate_controller.go

@@ -1,5 +1,5 @@
/*
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -18,60 +18,45 @@ package controllers
import (
"context"
"k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
elasticutils "logging-operator/utils/elasticsearch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
loggingv1alpha1 "logging-operator/api/v1alpha1"
loggingv1beta1 "logging-operator/api/v1beta1"
)
// IndexTemplateReconciler reconciles a IndexTemplate object
type IndexTemplateReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=indextemplates,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=indextemplates/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=indextemplates/finalizers,verbs=update
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the IndexTemplate object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile
func (r *IndexTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
_ = log.FromContext(ctx)
// TODO(user): your logic here
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *IndexTemplateReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&loggingv1beta1.IndexTemplate{}).
Complete(r)
}

controllers/kibana_controller.go

@@ -1,5 +1,5 @@
/*
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,57 +20,52 @@ import (
"context"
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"logging-operator/k8sutils/configmap"
"logging-operator/k8sutils/deployment"
kibanaservice "logging-operator/k8sutils/service/kibana"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
loggingv1alpha1 "logging-operator/api/v1alpha1"
loggingv1beta1 "logging-operator/api/v1beta1"
"logging-operator/k8sgo/kibana"
)
// KibanaReconciler reconciles a Kibana object
type KibanaReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=kibanas,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=kibanas/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=logging.logging.opstreelabs.in,resources=kibanas/finalizers,verbs=update
//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
func (r *KibanaReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
reqLogger := r.Log.WithValues("kibana", req.NamespacedName)
instance := &loggingv1beta1.Kibana{}
err := r.Client.Get(context.TODO(), req.NamespacedName, instance)
if err != nil {
if errors.IsNotFound(err) {
return ctrl.Result{RequeueAfter: time.Second * 10}, nil
}
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = k8skibana.CreateKibanaSetup(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
err = k8skibana.CreateKibanaService(instance)
if err != nil {
return ctrl.Result{RequeueAfter: time.Second * 10}, err
}
reqLogger.Info("Will reconcile after 10 seconds", "Kibana.Namespace", instance.Namespace, "Kibana.Name", instance.Name)
return ctrl.Result{RequeueAfter: time.Second * 10}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *KibanaReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&loggingv1beta1.Kibana{}).
Complete(r)
}
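
CreateKibanaService is only referenced here; its body lives in k8sgo/kibana. A plausible client-go sketch is below, assuming Kibana's default port 5601 and an app label that matches the deployment's pod labels (both assumptions for illustration):

package k8skibana

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
)

// createKibanaService sketches creating a ClusterIP Service that fronts
// the Kibana deployment on its default HTTP port.
func createKibanaService(clientset kubernetes.Interface, namespace, name string) error {
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{"app": name},
			Ports: []corev1.ServicePort{{
				Name:       "http",
				Port:       5601, // Kibana's default HTTP port
				TargetPort: intstr.FromInt(5601),
			}},
		},
	}
	_, err := clientset.CoreV1().Services(namespace).Create(context.TODO(), svc, metav1.CreateOptions{})
	return err
}
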

controllers/suite_test.go

@@ -1,5 +1,5 @@
/*
Copyright 2022 Opstree Solutions.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -23,21 +23,19 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
loggingv1alpha1 "logging-operator/api/v1alpha1"
// +kubebuilder:scaffold:imports
loggingv1beta1 "logging-operator/api/v1beta1"
//+kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
@@ -49,45 +47,32 @@ func TestAPIs(t *testing.T) {
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
}
cfg, err := testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
err = loggingv1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
//+kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})

docs/.github/dependabot.yml

@@ -0,0 +1,14 @@
version: 2
updates:
  - package-ecosystem: npm
    directory: "/"
    schedule:
      interval: daily
      time: '20:00'
    open-pull-requests-limit: 10
  - package-ecosystem: bundler
    directory: "/"
    schedule:
      interval: daily
      time: '20:00'
    open-pull-requests-limit: 10

docs/.gitignore

@@ -0,0 +1,5 @@
/public
resources/
node_modules/
package-lock.json
.hugo_build.lock

Some files were not shown because too many files have changed in this diff.