Compare commits


No commits in common. "main" and "v0.10.0" have entirely different histories.

198 changed files with 2502 additions and 9267 deletions

.codespellrc Normal file

@@ -0,0 +1,15 @@
# https://github.com/codespell-project/codespell#using-a-config-file
[codespell]
# Comma-separated list of files to skip.
skip = build,ramalama.egg-info,logos,.git #,bin,vendor,.git,go.sum,changelog.txt,.cirrus.yml,"RELEASE_NOTES.md,*.xz,*.gz,*.tar,*.tgz,bin2img,*ico,*.png,*.1,*.5,*.7,copyimg,*.orig,apidoc.go"
# Comma separated list of words to be ignored. Words must be lowercased.
ignore-words-list = clos,creat,ro,hastable,shouldnot,mountns,passt,assertin
# Custom dictionary file that contains spelling corrections.
# Run with option '--dictionary=-' to include also default dictionary.
dictionary = .codespelldict
# Check file names as well.
check-filenames = true

@@ -2,4 +2,3 @@
 max-line-length = 120
 # E203,E221,E231 conflict with black formatting
 extend-ignore = E203,E221,E231,E702,F824
-extend-exclude = .venv,venv

@@ -10,7 +10,7 @@ jobs:
   lint:
     name: Lint Code
     runs-on: ubuntu-24.04
-    timeout-minutes: 20
+    timeout-minutes: 10
     steps:
     - uses: actions/checkout@v4
@@ -77,7 +77,7 @@ jobs:
   unit-test:
     name: Unit Tests
     runs-on: ubuntu-24.04
-    timeout-minutes: 20
+    timeout-minutes: 10
     steps:
     - uses: actions/checkout@v4
     - name: Install the latest version of uv and activate the environment
@@ -265,11 +265,6 @@ jobs:
       with:
         activate-environment: true
-    - name: install mlx-lm
-      shell: bash
-      run: |
-        uv pip install mlx-lm
     - name: install golang
       shell: bash
       run: |

@@ -21,7 +21,7 @@ jobs:
       uses: actions/checkout@v4
     - name: Set Up Dependencies (Ubuntu)
-      timeout-minutes: 20
+      timeout-minutes: 10
       if: matrix.os == 'ubuntu-latest'
       run: |
         sudo apt-get install -y lshw curl

.gitignore vendored

@@ -19,5 +19,3 @@ __pycache__/
 coverage.*
 htmlcov/
 .idea/
-.hypothesis/
-uv.lock

@@ -9,7 +9,7 @@ set -exo pipefail
 # Extract version from pyproject.toml instead of setup.py
 VERSION=$(awk -F'[""]' ' /^\s*version\s*/ {print $(NF-1)}' pyproject.toml )
-SPEC_FILE=rpm/ramalama.spec
+SPEC_FILE=rpm/python-ramalama.spec
 # RPM Spec modifications

@@ -7,11 +7,11 @@ upstream_tag_template: v{version}
 packages:
   ramalama-fedora:
     pkg_tool: fedpkg
-    downstream_package_name: ramalama
-    specfile_path: rpm/ramalama.spec
+    downstream_package_name: python-ramalama
+    specfile_path: rpm/python-ramalama.spec
   ramalama-centos:
-    downstream_package_name: ramalama
-    specfile_path: rpm/ramalama.spec
+    downstream_package_name: python-ramalama
+    specfile_path: rpm/python-ramalama.spec
 srpm_build_deps:
 - make
@@ -79,7 +79,7 @@ jobs:
     dist_git_branches: &fedora_targets
     - fedora-all
     - epel10
-    - epel10.0
+    - epel9
 - job: koji_build
   trigger: commit
@@ -92,4 +92,4 @@ jobs:
     dist_git_branches:
     - fedora-branched # rawhide updates are created automatically
     - epel10
-    - epel10.0
+    - epel9

@@ -1,37 +0,0 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-json
- id: check-toml
- id: check-added-large-files
- id: check-merge-conflict
- id: check-executables-have-shebangs
- id: check-shebang-scripts-are-executable
- repo: https://github.com/pycqa/flake8
rev: 7.3.0
hooks:
- id: flake8
- repo: https://github.com/codespell-project/codespell
rev: v2.4.1
hooks:
- id: codespell
args: ["-w"]
- repo: https://github.com/psf/black
rev: 25.1.0
hooks:
- id: black
- repo: https://github.com/pycqa/isort
rev: 6.0.1
hooks:
- id: isort
- repo: local
hooks:
- id: run-unit-tests
name: run unit tests
entry: make unit-tests
language: system
pass_filenames: false

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-llama-server
pipelines.appstudio.openshift.io/type: build
name: asahi-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-llama-server
pipelines.appstudio.openshift.io/type: build
name: asahi-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-rag
pipelines.appstudio.openshift.io/type: build
name: asahi-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- GPU=cpu
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-rag
pipelines.appstudio.openshift.io/type: build
name: asahi-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- GPU=cpu
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-whisper-server
pipelines.appstudio.openshift.io/type: build
name: asahi-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-whisper-server
pipelines.appstudio.openshift.io/type: build
name: asahi-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi
pipelines.appstudio.openshift.io/type: build
name: asahi-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/asahi/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi
pipelines.appstudio.openshift.io/type: build
name: asahi-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/asahi/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -8,8 +8,8 @@ metadata:
     build.appstudio.redhat.com/target_branch: '{{target_branch}}'
     pipelinesascode.tekton.dev/cancel-in-progress: "true"
     pipelinesascode.tekton.dev/max-keep-runs: "3"
-    pipelinesascode.tekton.dev/on-cel-expression: >-
-      event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
+    pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
+      == "main"
   labels:
     appstudio.openshift.io/application: ramalama
     appstudio.openshift.io/component: bats
@@ -28,14 +28,12 @@ spec:
     value: 5d
   - name: build-platforms
     value:
-    - linux-c4xlarge/amd64
-    - linux-c4xlarge/arm64
+    - linux/x86_64
+    - linux/arm64
   - name: dockerfile
     value: container-images/bats/Containerfile
   pipelineRef:
     name: pull-request-pipeline
-  timeouts:
-    pipeline: 6h
   workspaces:
   - name: git-auth
     secret:

@@ -7,8 +7,8 @@ metadata:
     build.appstudio.redhat.com/target_branch: '{{target_branch}}'
     pipelinesascode.tekton.dev/cancel-in-progress: "false"
     pipelinesascode.tekton.dev/max-keep-runs: "3"
-    pipelinesascode.tekton.dev/on-cel-expression: >-
-      event == "push" && target_branch == "main"
+    pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
+      == "main"
   labels:
     appstudio.openshift.io/application: ramalama
     appstudio.openshift.io/component: bats
@@ -25,14 +25,12 @@ spec:
     value: quay.io/redhat-user-workloads/ramalama-tenant/bats:{{revision}}
   - name: build-platforms
     value:
-    - linux-c4xlarge/amd64
-    - linux-c4xlarge/arm64
+    - linux/x86_64
+    - linux/arm64
   - name: dockerfile
     value: container-images/bats/Containerfile
   pipelineRef:
     name: push-pipeline
-  timeouts:
-    pipeline: 6h
   workspaces:
   - name: git-auth
     secret:

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-llama-server
pipelines.appstudio.openshift.io/type: build
name: cann-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-llama-server
pipelines.appstudio.openshift.io/type: build
name: cann-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-rag
pipelines.appstudio.openshift.io/type: build
name: cann-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- GPU=cpu
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-rag
pipelines.appstudio.openshift.io/type: build
name: cann-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- GPU=cpu
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-whisper-server
pipelines.appstudio.openshift.io/type: build
name: cann-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-whisper-server
pipelines.appstudio.openshift.io/type: build
name: cann-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann
pipelines.appstudio.openshift.io/type: build
name: cann-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/cann/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann
pipelines.appstudio.openshift.io/type: build
name: cann-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/cann/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-llama-server
pipelines.appstudio.openshift.io/type: build
name: cuda-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cuda:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-llama-server
pipelines.appstudio.openshift.io/type: build
name: cuda-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cuda:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-rag
pipelines.appstudio.openshift.io/type: build
name: cuda-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cuda:on-pr-{{revision}}
- GPU=cuda
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-rag
pipelines.appstudio.openshift.io/type: build
name: cuda-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cuda:{{revision}}
- GPU=cuda
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-whisper-server
pipelines.appstudio.openshift.io/type: build
name: cuda-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cuda:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-whisper-server
pipelines.appstudio.openshift.io/type: build
name: cuda-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cuda:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -8,8 +8,8 @@ metadata:
     build.appstudio.redhat.com/target_branch: '{{target_branch}}'
     pipelinesascode.tekton.dev/cancel-in-progress: "true"
    pipelinesascode.tekton.dev/max-keep-runs: "3"
-    pipelinesascode.tekton.dev/on-cel-expression: >-
-      event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
+    pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
+      == "main"
   labels:
     appstudio.openshift.io/application: ramalama
     appstudio.openshift.io/component: cuda
@@ -28,14 +28,11 @@ spec:
     value: 5d
   - name: build-platforms
     value:
-    - linux-c4xlarge/amd64
-    - linux-c4xlarge/arm64
+    - linux/x86_64
   - name: dockerfile
     value: container-images/cuda/Containerfile
   pipelineRef:
     name: pull-request-pipeline
-  timeouts:
-    pipeline: 6h
   workspaces:
   - name: git-auth
     secret:

@@ -7,8 +7,8 @@ metadata:
     build.appstudio.redhat.com/target_branch: '{{target_branch}}'
     pipelinesascode.tekton.dev/cancel-in-progress: "false"
     pipelinesascode.tekton.dev/max-keep-runs: "3"
-    pipelinesascode.tekton.dev/on-cel-expression: >-
-      event == "push" && target_branch == "main"
+    pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
+      == "main"
   labels:
     appstudio.openshift.io/application: ramalama
     appstudio.openshift.io/component: cuda
@@ -25,14 +25,11 @@ spec:
     value: quay.io/redhat-user-workloads/ramalama-tenant/cuda:{{revision}}
   - name: build-platforms
     value:
-    - linux-c4xlarge/amd64
-    - linux-c4xlarge/arm64
+    - linux/x86_64
   - name: dockerfile
     value: container-images/cuda/Containerfile
   pipelineRef:
     name: push-pipeline
-  timeouts:
-    pipeline: 6h
   workspaces:
   - name: git-auth
     secret:

@@ -1,66 +0,0 @@
kind: Pipeline
apiVersion: tekton.dev/v1
metadata:
name: bats-integration
spec:
description: |
Test the newly-built ramalama image and layered images on all supported architectures.
params:
- name: SNAPSHOT
description: >-
Information about the components included in the current snapshot under test.
- name: platforms
description: VM platforms on which to run test commands
type: array
default:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: commands
description: Test commands to run
type: array
default:
- make bats
- name: git-url
description: URL of the Git repository containing pipeline and task definitions
default: https://github.com/containers/ramalama.git
- name: git-revision
description: Revision of the Git repository containing pipeline and task definitions
default: main
tasks:
- name: init
params:
- name: SNAPSHOT
value: $(params.SNAPSHOT)
taskRef:
resolver: git
params:
- name: url
value: $(params.git-url)
- name: revision
value: $(params.git-revision)
- name: pathInRepo
value: .tekton/integration/tasks/init-snapshot.yaml
- name: test
matrix:
params:
- name: PLATFORM
value:
- $(params.platforms)
- name: cmd
value:
- $(params.commands)
params:
- name: image
value: $(tasks.init.results.bats-image)
- name: envs
value:
- RAMALAMA_IMAGE=$(tasks.init.results.ramalama-image)
taskRef:
resolver: git
params:
- name: url
value: $(params.git-url)
- name: revision
value: $(params.git-revision)
- name: pathInRepo
value: .tekton/integration/tasks/test-vm-cmd.yaml

@@ -1,55 +0,0 @@
apiVersion: tekton.dev/v1
kind: Task
metadata:
name: init-snapshot
spec:
description: Extract information from the SNAPSHOT and make it available as Tekton results
params:
- name: SNAPSHOT
description: >-
Information about the components included in the current snapshot under test.
results:
- name: event-type
description: The type of event that triggered the pipeline
- name: bats-image
description: URI of the bats image included in the snapshot
- name: ramalama-image
description: URI of the ramalama image included in the snapshot
- name: TEST_OUTPUT
description: Test result in json format
steps:
- name: process
image: registry.access.redhat.com/ubi10/ubi:latest
env:
- name: SNAPSHOT
value: $(params.SNAPSHOT)
- name: EVENT_TYPE
valueFrom:
fieldRef:
fieldPath: metadata.labels['pac.test.appstudio.openshift.io/event-type']
- name: RESULTS_EVENT_TYPE_PATH
value: $(results.event-type.path)
- name: RESULTS_BATS_IMAGE_PATH
value: $(results.bats-image.path)
- name: RESULTS_RAMALAMA_IMAGE_PATH
value: $(results.ramalama-image.path)
- name: RESULTS_TEST_OUTPUT_PATH
value: $(results.TEST_OUTPUT.path)
script: |
#!/bin/bash -ex
dnf -y install jq
echo -n "$EVENT_TYPE" | tee "$RESULTS_EVENT_TYPE_PATH"
echo
component_image() {
TAGSEP=":"
if [ "$EVENT_TYPE" == "pull_request" ]; then
TAGSEP+="on-pr-"
fi
jq -j --arg name "$1" --arg tagsep "$TAGSEP" '.components[] | select(.name == $name) | [(.containerImage | split("@")[0]), .source.git.revision] | join($tagsep)' <<< "$SNAPSHOT"
}
component_image bats | tee "$RESULTS_BATS_IMAGE_PATH"
echo
component_image ramalama | tee "$RESULTS_RAMALAMA_IMAGE_PATH"
echo
jq -jnc '{result: "SUCCESS", timestamp: now | todateiso8601, failures: 0, successes: 1, warnings: 0}' | tee "$RESULTS_TEST_OUTPUT_PATH"
echo

@@ -1,118 +0,0 @@
apiVersion: tekton.dev/v1
kind: Task
metadata:
name: test-vm-cmd
spec:
description: Run a command in a test VM
params:
- name: PLATFORM
description: The platform of the VM to provision
- name: image
description: The image to use when setting up the test environment
- name: cmd
description: The command to run
- name: envs
description: List of environment variables (NAME=VALUE) to be set in the test environment
type: array
default: []
results:
- name: TEST_OUTPUT
description: Test result in json format
volumes:
- name: workdir
emptyDir: {}
- name: ssh
secret:
secretName: multi-platform-ssh-$(context.taskRun.name)
steps:
- name: run-in-vm
image: registry.access.redhat.com/ubi10/ubi:latest
volumeMounts:
- mountPath: /var/workdir
name: workdir
- mountPath: /ssh
name: ssh
workingDir: /var/workdir
env:
- name: TEST_IMAGE
value: $(params.image)
- name: TEST_CMD
value: $(params.cmd)
- name: RESULTS_TEST_OUTPUT_PATH
value: $(results.TEST_OUTPUT.path)
args:
- $(params.envs[*])
script: |
#!/bin/bash -ex
log() {
echo "[$(date -uIns)]" $*
}
log Install packages
dnf -y install openssh-clients rsync jq
log Prepare connection
if [ -e "/ssh/error" ]; then
log Error provisioning VM
cat /ssh/error
exit 1
fi
export SSH_HOST=$(cat /ssh/host)
mkdir -p ~/.ssh
if [ "$SSH_HOST" == "localhost" ] ; then
IS_LOCALHOST=true
log Localhost detected, running build in cluster
elif [ -s "/ssh/otp" ]; then
log Fetching OTP token
curl --cacert /ssh/otp-ca -d @/ssh/otp $(cat /ssh/otp-server) > ~/.ssh/id_rsa
echo >> ~/.ssh/id_rsa
chmod 0400 ~/.ssh/id_rsa
elif [ -s "/ssh/id_rsa" ]; then
log Copying ssh key
cp /ssh/id_rsa ~/.ssh
chmod 0400 ~/.ssh/id_rsa
else
log No authentication mechanism found
exit 1
fi
mkdir -p scripts
PODMAN_ENV=()
while [ $# -ne 0 ]; do
PODMAN_ENV+=("-e" "$1")
shift
done
cat > scripts/test.sh <<SCRIPTEOF
#!/bin/bash -ex
podman run \
--userns=keep-id \
--security-opt label=disable \
--security-opt unmask=/proc/* \
--device /dev/net/tun \
--device /dev/fuse \
${PODMAN_ENV[*]} \
$TEST_IMAGE $TEST_CMD
SCRIPTEOF
chmod +x scripts/test.sh
if ! [[ $IS_LOCALHOST ]]; then
log VM exec
export BUILD_DIR=$(cat /ssh/user-dir)
export SSH_ARGS="-o StrictHostKeyChecking=no -o ServerAliveInterval=60 -o ServerAliveCountMax=10"
# ssh once before rsync to retrieve the host key
ssh $SSH_ARGS "$SSH_HOST" "uname -a"
rsync -ra scripts "$SSH_HOST:$BUILD_DIR"
ssh $SSH_ARGS "$SSH_HOST" "$BUILD_DIR/scripts/test.sh"
log End VM exec
else
log Local exec
scripts/test.sh
log End local exec
fi
jq -jnc '{result: "SUCCESS", timestamp: now | todateiso8601, failures: 0, successes: 1, warnings: 0}' | tee "$RESULTS_TEST_OUTPUT_PATH"
echo

@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-llama-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-llama-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-rag
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- GPU=cpu
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-rag
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- GPU=cpu
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-whisper-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-whisper-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

@@ -1,41 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/intel-gpu/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,38 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/intel-gpu/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: llama-stack
pipelines.appstudio.openshift.io/type: build
name: llama-stack-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/llama-stack:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/llama-stack/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: llama-stack
pipelines.appstudio.openshift.io/type: build
name: llama-stack-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/llama-stack:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/llama-stack/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-llama-server
pipelines.appstudio.openshift.io/type: build
name: musa-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-llama-server
pipelines.appstudio.openshift.io/type: build
name: musa-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-rag
pipelines.appstudio.openshift.io/type: build
name: musa-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- GPU=musa
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-rag
pipelines.appstudio.openshift.io/type: build
name: musa-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- GPU=musa
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-whisper-server
pipelines.appstudio.openshift.io/type: build
name: musa-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-whisper-server
pipelines.appstudio.openshift.io/type: build
name: musa-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,41 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa
pipelines.appstudio.openshift.io/type: build
name: musa-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/musa/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,38 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa
pipelines.appstudio.openshift.io/type: build
name: musa-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/musa/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,41 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: openvino
pipelines.appstudio.openshift.io/type: build
name: openvino-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/openvino:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/openvino/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,38 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: openvino
pipelines.appstudio.openshift.io/type: build
name: openvino-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/openvino:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/openvino/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -20,18 +20,20 @@ spec:
  name: output-image
  type: string
  - default: .
- description: Path to the source code of an application's component from where to build image.
+ description: Path to the source code of an application's component from where
+ to build image.
  name: path-context
  type: string
  - default: Dockerfile
- description: Path to the Dockerfile inside the context specified by parameter path-context
+ description: Path to the Dockerfile inside the context specified by parameter
+ path-context
  name: dockerfile
  type: string
  - default: "false"
  description: Force rebuild image
  name: rebuild
  type: string
- - default: "true"
+ - default: "false"
  description: Skip checks against built image
  name: skip-checks
  type: string
@ -44,7 +46,8 @@ spec:
  name: prefetch-input
  type: string
  - default: ""
- description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.
+ description: Image tag expiration time, time values could be something like
+ 1h, 2d, 3w for hours, days, and weeks, respectively.
  name: image-expires-after
  - default: "false"
  description: Build a source image.
@ -63,29 +66,16 @@ spec:
  name: build-args-file
  type: string
  - default: "false"
- description: Whether to enable privileged mode, should be used only with remote VMs
+ description: Whether to enable privileged mode, should be used only with remote
+ VMs
  name: privileged-nested
  type: string
  - default:
- - linux-c4xlarge/amd64
+ - linux/x86_64
- description: List of platforms to build the container images on. The available set of values is determined by the configuration of the multi-platform-controller.
+ description: List of platforms to build the container images on. The available
+ set of values is determined by the configuration of the multi-platform-controller.
  name: build-platforms
  type: array
- - default: ""
- description: The parent image of the image being built.
- name: parent-image
- - default: ""
- description: The image to use for running tests.
- name: test-image
- - default: []
- description: List of environment variables (NAME=VALUE) to be set in the test environment.
- name: test-envs
- type: array
- - default:
- - echo "No tests defined"
- description: List of test commands to run after the image is built.
- name: test-commands
- type: array
  results:
  - description: ""
  name: IMAGE_URL
@ -113,7 +103,7 @@ spec:
  - name: name
  value: init
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:1d8221c84f91b923d89de50bf16481ea729e3b68ea04a9a7cbe8485ddbb27ee6
+ value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:66e90d31e1386bf516fb548cd3e3f0082b5d0234b8b90dbf9e0d4684b70dbe1a
  - name: kind
  value: task
  resolver: bundles
@ -163,7 +153,7 @@ spec:
  - name: name
  value: prefetch-dependencies-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:092491ac0f6e1009d10c58a1319d1029371bf637cc1293cceba53c6da5314ed1
+ value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:5e15408f997557153b13d492aeccb51c01923bfbe4fbdf6f1e8695ce1b82f826
  - name: kind
  value: task
  resolver: bundles
@ -172,17 +162,6 @@ spec:
  workspace: git-auth
  - name: netrc
  workspace: netrc
- - name: wait-for-parent-image
- params:
- - name: ref
- value: $(params.parent-image)
- taskRef:
- name: wait-for-image
- when:
- - input: $(params.parent-image)
- operator: notin
- values:
- - ""
  - matrix:
  params:
  - name: PLATFORM
@ -218,14 +197,13 @@ spec:
  - name: IMAGE_APPEND_PLATFORM
  value: "true"
  runAfter:
- - wait-for-parent-image
  - prefetch-dependencies
  taskRef:
  params:
  - name: name
  value: buildah-remote-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:9e866d4d0489a6ab84ae263db416c9f86d2d6117ef4444f495a0e97388ae3ac0
+ value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:ae87472f60dbbf71e4980cd478c92740c145fd9e44acbb9b164a21f1bcd61aa3
  - name: kind
  value: task
  resolver: bundles
@ -254,7 +232,7 @@ spec:
  - name: name
  value: build-image-index
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:3499772af90aad0d3935629be6d37dd9292195fb629e6f43ec839c7f545a0faa
+ value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:846dc9975914f31380ec2712fdbac9df3b06c00a9cc7df678315a7f97145efc2
  - name: kind
  value: task
  resolver: bundles
@ -263,50 +241,14 @@ spec:
  operator: in
  values:
  - "true"
- - name: wait-for-test-image
- params:
- - name: ref
- value: $(params.test-image)
- runAfter:
- - build-image-index
- taskRef:
- name: wait-for-image
- when:
- - input: $(params.test-image)
- operator: notin
- values:
- - ""
- - name: run-tests
- matrix:
- params:
- - name: cmd
- value:
- - $(params.test-commands)
- params:
- - name: image
- value: $(params.test-image)@$(tasks.wait-for-test-image.results.digest)
- - name: envs
- value:
- - $(params.test-envs[*])
- runAfter:
- - wait-for-test-image
- taskRef:
- name: test-cmd
- when:
- - input: $(params.test-image)
- operator: notin
- values:
- - ""
  - name: build-source-image
  params:
  - name: BINARY_IMAGE
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(params.output-image)
  - name: SOURCE_ARTIFACT
  value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
  - name: CACHI2_ARTIFACT
  value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT)
- - name: BINARY_IMAGE_DIGEST
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
  runAfter:
  - build-image-index
  taskRef:
@ -314,7 +256,7 @@ spec:
  - name: name
  value: source-build-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:b1eb49583b41872b27356fee20d5f0eb6ff7f5cdeacde7ffb39655f031104728
+ value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.2@sha256:b424894fc8e806c12658daa565b835fd2d66e7f7608afc47529eb7b410f030d7
  - name: kind
  value: task
  resolver: bundles
@ -362,7 +304,7 @@ spec:
  - name: name
  value: clair-scan
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:417f44117f8d87a4a62fea6589b5746612ac61640b454dbd88f74892380411f2
+ value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:d354939892f3a904223ec080cc3771bd11931085a5d202323ea491ee8e8c5e43
  - name: kind
  value: task
  resolver: bundles
@ -382,7 +324,7 @@ spec:
  - name: name
  value: ecosystem-cert-preflight-checks
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:f99d2bdb02f13223d494077a2cde31418d09369f33c02134a8e7e5fad2f61eda
+ value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:b550ff4f0b634512ce5200074be7afd7a5a6c05b783620c626e2a3035cd56448
  - name: kind
  value: task
  resolver: bundles
@ -408,7 +350,7 @@ spec:
  - name: name
  value: sast-snyk-check-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:fe5e5ba3a72632cd505910de2eacd62c9d11ed570c325173188f8d568ac60771
+ value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:e61f541189b30d14292ef8df36ccaf13f7feb2378fed5f74cb6293b3e79eb687
  - name: kind
  value: task
  resolver: bundles
@ -430,7 +372,7 @@ spec:
  - name: name
  value: clamav-scan
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:7749146f7e4fe530846f1b15c9366178ec9f44776ef1922a60d3e7e2b8c6426b
+ value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:9cab95ac9e833d77a63c079893258b73b8d5a298d93aaf9bdd6722471bc2f338
  - name: kind
  value: task
  resolver: bundles
@ -475,7 +417,7 @@ spec:
  - name: name
  value: sast-coverity-check-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f9ca942208dc2e63b479384ccc56a611cc793397ecc837637b5b9f89c2ecbefe
+ value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f81ade665c725616b918356c8c2fb2d4ed972e822a1a3181933cd0ada728a231
  - name: kind
  value: task
  resolver: bundles
@ -522,7 +464,7 @@ spec:
  - name: name
  value: sast-shell-check-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
+ value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:808bcaf75271db6a999f53fdefb973a385add94a277d37fbd3df68f8ac7dfaa3
  - name: kind
  value: task
  resolver: bundles
@ -548,7 +490,7 @@ spec:
  - name: name
  value: sast-unicode-check-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651
+ value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.2@sha256:24ad71fde435fc25abba2c4c550beb088b1530f738d3c377e2f635b5f320d57b
  - name: kind
  value: task
  resolver: bundles
@ -593,7 +535,7 @@ spec:
  - name: name
  value: push-dockerfile-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:8c75c4a747e635e5f3e12266a3bb6e5d3132bf54e37eaa53d505f89897dd8eca
+ value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:5d8013b6a27bbc5e4ff261144616268f28417ed0950d583ef36349fcd59d3d3d
  - name: kind
  value: task
  resolver: bundles
@ -629,7 +571,7 @@ spec:
  - name: name
  value: show-sbom
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:86c069cac0a669797e8049faa8aa4088e70ff7fcd579d5bdc37626a9e0488a05
+ value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:1b1df4da95966d08ac6a5b8198710e09e68b5c2cdc707c37d9d19769e65884b2
  - name: kind
  value: task
  resolver: bundles


@ -20,11 +20,13 @@ spec:
  name: output-image
  type: string
  - default: .
- description: Path to the source code of an application's component from where to build image.
+ description: Path to the source code of an application's component from where
+ to build image.
  name: path-context
  type: string
  - default: Dockerfile
- description: Path to the Dockerfile inside the context specified by parameter path-context
+ description: Path to the Dockerfile inside the context specified by parameter
+ path-context
  name: dockerfile
  type: string
  - default: "false"
@ -44,7 +46,8 @@ spec:
  name: prefetch-input
  type: string
  - default: ""
- description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.
+ description: Image tag expiration time, time values could be something like
+ 1h, 2d, 3w for hours, days, and weeks, respectively.
  name: image-expires-after
  - default: "false"
  description: Build a source image.
@ -63,29 +66,16 @@ spec:
  name: build-args-file
  type: string
  - default: "false"
- description: Whether to enable privileged mode, should be used only with remote VMs
+ description: Whether to enable privileged mode, should be used only with remote
+ VMs
  name: privileged-nested
  type: string
  - default:
- - linux-c4xlarge/amd64
+ - linux/x86_64
- description: List of platforms to build the container images on. The available set of values is determined by the configuration of the multi-platform-controller.
+ description: List of platforms to build the container images on. The available
+ set of values is determined by the configuration of the multi-platform-controller.
  name: build-platforms
  type: array
- - default: ""
- description: The parent image of the image being built.
- name: parent-image
- - default: ""
- description: The image to use for running tests.
- name: test-image
- - default: []
- description: List of environment variables (NAME=VALUE) to be set in the test environment.
- name: test-envs
- type: array
- - default:
- - echo "No tests defined"
- description: List of test commands to run after the image is built.
- name: test-commands
- type: array
  results:
  - description: ""
  name: IMAGE_URL
@ -113,7 +103,7 @@ spec:
  - name: name
  value: init
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:1d8221c84f91b923d89de50bf16481ea729e3b68ea04a9a7cbe8485ddbb27ee6
+ value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:66e90d31e1386bf516fb548cd3e3f0082b5d0234b8b90dbf9e0d4684b70dbe1a
  - name: kind
  value: task
  resolver: bundles
@ -163,7 +153,7 @@ spec:
  - name: name
  value: prefetch-dependencies-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:092491ac0f6e1009d10c58a1319d1029371bf637cc1293cceba53c6da5314ed1
+ value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:5e15408f997557153b13d492aeccb51c01923bfbe4fbdf6f1e8695ce1b82f826
  - name: kind
  value: task
  resolver: bundles
@ -172,17 +162,6 @@ spec:
  workspace: git-auth
  - name: netrc
  workspace: netrc
- - name: wait-for-parent-image
- params:
- - name: ref
- value: $(params.parent-image)
- taskRef:
- name: wait-for-image
- when:
- - input: $(params.parent-image)
- operator: notin
- values:
- - ""
  - matrix:
  params:
  - name: PLATFORM
@ -218,14 +197,13 @@ spec:
  - name: IMAGE_APPEND_PLATFORM
  value: "true"
  runAfter:
- - wait-for-parent-image
  - prefetch-dependencies
  taskRef:
  params:
  - name: name
  value: buildah-remote-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:9e866d4d0489a6ab84ae263db416c9f86d2d6117ef4444f495a0e97388ae3ac0
+ value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:ae87472f60dbbf71e4980cd478c92740c145fd9e44acbb9b164a21f1bcd61aa3
  - name: kind
  value: task
  resolver: bundles
@ -254,7 +232,7 @@ spec:
  - name: name
  value: build-image-index
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:3499772af90aad0d3935629be6d37dd9292195fb629e6f43ec839c7f545a0faa
+ value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:846dc9975914f31380ec2712fdbac9df3b06c00a9cc7df678315a7f97145efc2
  - name: kind
  value: task
  resolver: bundles
@ -263,50 +241,14 @@ spec:
  operator: in
  values:
  - "true"
- - name: wait-for-test-image
- params:
- - name: ref
- value: $(params.test-image)
- runAfter:
- - build-image-index
- taskRef:
- name: wait-for-image
- when:
- - input: $(params.test-image)
- operator: notin
- values:
- - ""
- - name: run-tests
- matrix:
- params:
- - name: cmd
- value:
- - $(params.test-commands)
- params:
- - name: image
- value: $(params.test-image)@$(tasks.wait-for-test-image.results.digest)
- - name: envs
- value:
- - $(params.test-envs[*])
- runAfter:
- - wait-for-test-image
- taskRef:
- name: test-cmd
- when:
- - input: $(params.test-image)
- operator: notin
- values:
- - ""
  - name: build-source-image
  params:
  - name: BINARY_IMAGE
- value: $(tasks.build-image-index.results.IMAGE_URL)
+ value: $(params.output-image)
  - name: SOURCE_ARTIFACT
  value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
  - name: CACHI2_ARTIFACT
  value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT)
- - name: BINARY_IMAGE_DIGEST
- value: $(tasks.build-image-index.results.IMAGE_DIGEST)
  runAfter:
  - build-image-index
  taskRef:
@ -314,7 +256,7 @@ spec:
  - name: name
  value: source-build-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:b1eb49583b41872b27356fee20d5f0eb6ff7f5cdeacde7ffb39655f031104728
+ value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.2@sha256:b424894fc8e806c12658daa565b835fd2d66e7f7608afc47529eb7b410f030d7
  - name: kind
  value: task
  resolver: bundles
@ -362,7 +304,7 @@ spec:
  - name: name
  value: clair-scan
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:417f44117f8d87a4a62fea6589b5746612ac61640b454dbd88f74892380411f2
+ value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:d354939892f3a904223ec080cc3771bd11931085a5d202323ea491ee8e8c5e43
  - name: kind
  value: task
  resolver: bundles
@ -382,7 +324,7 @@ spec:
  - name: name
  value: ecosystem-cert-preflight-checks
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:f99d2bdb02f13223d494077a2cde31418d09369f33c02134a8e7e5fad2f61eda
+ value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:b550ff4f0b634512ce5200074be7afd7a5a6c05b783620c626e2a3035cd56448
  - name: kind
  value: task
  resolver: bundles
@ -408,7 +350,7 @@ spec:
  - name: name
  value: sast-snyk-check-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:fe5e5ba3a72632cd505910de2eacd62c9d11ed570c325173188f8d568ac60771
+ value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:e61f541189b30d14292ef8df36ccaf13f7feb2378fed5f74cb6293b3e79eb687
  - name: kind
  value: task
  resolver: bundles
@ -430,7 +372,7 @@ spec:
  - name: name
  value: clamav-scan
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:7749146f7e4fe530846f1b15c9366178ec9f44776ef1922a60d3e7e2b8c6426b
+ value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:9cab95ac9e833d77a63c079893258b73b8d5a298d93aaf9bdd6722471bc2f338
  - name: kind
  value: task
  resolver: bundles
@ -475,7 +417,7 @@ spec:
  - name: name
  value: sast-coverity-check-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f9ca942208dc2e63b479384ccc56a611cc793397ecc837637b5b9f89c2ecbefe
+ value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f81ade665c725616b918356c8c2fb2d4ed972e822a1a3181933cd0ada728a231
  - name: kind
  value: task
  resolver: bundles
@ -522,7 +464,7 @@ spec:
  - name: name
  value: sast-shell-check-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
+ value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:808bcaf75271db6a999f53fdefb973a385add94a277d37fbd3df68f8ac7dfaa3
  - name: kind
  value: task
  resolver: bundles
@ -548,7 +490,7 @@ spec:
  - name: name
  value: sast-unicode-check-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651
+ value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.2@sha256:24ad71fde435fc25abba2c4c550beb088b1530f738d3c377e2f635b5f320d57b
  - name: kind
  value: task
  resolver: bundles
@ -593,7 +535,7 @@ spec:
  - name: name
  value: push-dockerfile-oci-ta
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:8c75c4a747e635e5f3e12266a3bb6e5d3132bf54e37eaa53d505f89897dd8eca
+ value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:5d8013b6a27bbc5e4ff261144616268f28417ed0950d583ef36349fcd59d3d3d
  - name: kind
  value: task
  resolver: bundles
@ -629,7 +571,7 @@ spec:
  - name: name
  value: show-sbom
  - name: bundle
- value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:86c069cac0a669797e8049faa8aa4088e70ff7fcd579d5bdc37626a9e0488a05
+ value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:1b1df4da95966d08ac6a5b8198710e09e68b5c2cdc707c37d9d19769e65884b2
  - name: kind
  value: task
  resolver: bundles


@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-cli
pipelines.appstudio.openshift.io/type: build
name: ramalama-cli-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-cli:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/ramalama-cli/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-cli
pipelines.appstudio.openshift.io/type: build
name: ramalama-cli-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-cli:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/ramalama-cli/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-llama-server
pipelines.appstudio.openshift.io/type: build
name: ramalama-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/ramalama:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-llama-server
pipelines.appstudio.openshift.io/type: build
name: ramalama-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/ramalama:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-rag
pipelines.appstudio.openshift.io/type: build
name: ramalama-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/ramalama:on-pr-{{revision}}
- GPU=cpu
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-rag
pipelines.appstudio.openshift.io/type: build
name: ramalama-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/ramalama:{{revision}}
- GPU=cpu
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-whisper-server
pipelines.appstudio.openshift.io/type: build
name: ramalama-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/ramalama:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-whisper-server
pipelines.appstudio.openshift.io/type: build
name: ramalama-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/ramalama:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@ -8,8 +8,8 @@ metadata:
  build.appstudio.redhat.com/target_branch: '{{target_branch}}'
  pipelinesascode.tekton.dev/cancel-in-progress: "true"
  pipelinesascode.tekton.dev/max-keep-runs: "3"
- pipelinesascode.tekton.dev/on-cel-expression: >-
- event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
+ pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
+ == "main"
  labels:
  appstudio.openshift.io/application: ramalama
  appstudio.openshift.io/component: ramalama
@ -28,24 +28,12 @@ spec:
  value: 5d
  - name: build-platforms
  value:
- - linux-c4xlarge/amd64
+ - linux/x86_64
- - linux-c4xlarge/arm64
+ - linux/arm64
  - name: dockerfile
  value: container-images/ramalama/Containerfile
- - name: test-image
- value: quay.io/redhat-user-workloads/ramalama-tenant/bats:on-pr-{{revision}}
- - name: test-envs
- value:
- - RAMALAMA_IMAGE=quay.io/redhat-user-workloads/ramalama-tenant/ramalama:on-pr-{{revision}}
- - name: test-commands
- value:
- - make validate
- - make unit-tests
- - make cov-tests
  pipelineRef:
  name: pull-request-pipeline
- timeouts:
- pipeline: 6h
  workspaces:
  - name: git-auth
  secret:


@ -7,8 +7,8 @@ metadata:
  build.appstudio.redhat.com/target_branch: '{{target_branch}}'
  pipelinesascode.tekton.dev/cancel-in-progress: "false"
  pipelinesascode.tekton.dev/max-keep-runs: "3"
- pipelinesascode.tekton.dev/on-cel-expression: >-
- event == "push" && target_branch == "main"
+ pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
+ == "main"
  labels:
  appstudio.openshift.io/application: ramalama
  appstudio.openshift.io/component: ramalama
@ -25,24 +25,12 @@ spec:
  value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama:{{revision}}
  - name: build-platforms
  value:
- - linux-c4xlarge/amd64
+ - linux/x86_64
- - linux-c4xlarge/arm64
+ - linux/arm64
  - name: dockerfile
  value: container-images/ramalama/Containerfile
- - name: test-image
- value: quay.io/redhat-user-workloads/ramalama-tenant/bats:{{revision}}
- - name: test-envs
- value:
- - RAMALAMA_IMAGE=quay.io/redhat-user-workloads/ramalama-tenant/ramalama:{{revision}}
- - name: test-commands
- value:
- - make validate
- - make unit-tests
- - make cov-tests
  pipelineRef:
  name: push-pipeline
- timeouts:
- pipeline: 6h
  workspaces:
  - name: git-auth
  secret:


@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-llama-server
pipelines.appstudio.openshift.io/type: build
name: rocm-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-llama-server
pipelines.appstudio.openshift.io/type: build
name: rocm-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-rag
pipelines.appstudio.openshift.io/type: build
name: rocm-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm:on-pr-{{revision}}
- GPU=rocm
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-rag
pipelines.appstudio.openshift.io/type: build
name: rocm-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}}
- GPU=rocm
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-llama-server
pipelines.appstudio.openshift.io/type: build
name: rocm-ubi-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-llama-server
pipelines.appstudio.openshift.io/type: build
name: rocm-ubi-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-rag
pipelines.appstudio.openshift.io/type: build
name: rocm-ubi-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:on-pr-{{revision}}
- GPU=rocm
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-rag
pipelines.appstudio.openshift.io/type: build
name: rocm-ubi-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:{{revision}}
- GPU=rocm
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-whisper-server
pipelines.appstudio.openshift.io/type: build
name: rocm-ubi-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-whisper-server
pipelines.appstudio.openshift.io/type: build
name: rocm-ubi-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true" pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3" pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >- pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review" == "main"
labels: labels:
appstudio.openshift.io/application: ramalama appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi appstudio.openshift.io/component: rocm-ubi
@ -28,13 +28,11 @@ spec:
value: 5d value: 5d
- name: build-platforms - name: build-platforms
value: value:
- linux-fast/amd64 - linux/x86_64
- name: dockerfile - name: dockerfile
value: container-images/rocm-ubi/Containerfile value: container-images/rocm-ubi/Containerfile
pipelineRef: pipelineRef:
name: pull-request-pipeline name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces: workspaces:
- name: git-auth - name: git-auth
secret: secret:

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false" pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3" pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >- pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
event == "push" && target_branch == "main" == "main"
labels: labels:
appstudio.openshift.io/application: ramalama appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi appstudio.openshift.io/component: rocm-ubi
@ -25,13 +25,11 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:{{revision}} value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:{{revision}}
- name: build-platforms - name: build-platforms
value: value:
- linux-fast/amd64 - linux/x86_64
- name: dockerfile - name: dockerfile
value: container-images/rocm-ubi/Containerfile value: container-images/rocm-ubi/Containerfile
pipelineRef: pipelineRef:
name: push-pipeline name: push-pipeline
timeouts:
pipeline: 6h
workspaces: workspaces:
- name: git-auth - name: git-auth
secret: secret:

View File

@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-whisper-server
pipelines.appstudio.openshift.io/type: build
name: rocm-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-whisper-server
pipelines.appstudio.openshift.io/type: build
name: rocm-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true" pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3" pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >- pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review" == "main"
labels: labels:
appstudio.openshift.io/application: ramalama appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm appstudio.openshift.io/component: rocm
@ -28,13 +28,11 @@ spec:
value: 5d value: 5d
- name: build-platforms - name: build-platforms
value: value:
- linux-fast/amd64 - linux/x86_64
- name: dockerfile - name: dockerfile
value: container-images/rocm/Containerfile value: container-images/rocm/Containerfile
pipelineRef: pipelineRef:
name: pull-request-pipeline name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces: workspaces:
- name: git-auth - name: git-auth
secret: secret:

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false" pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3" pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >- pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
event == "push" && target_branch == "main" == "main"
labels: labels:
appstudio.openshift.io/application: ramalama appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm appstudio.openshift.io/component: rocm
@ -25,13 +25,11 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}} value: quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}}
- name: build-platforms - name: build-platforms
value: value:
- linux-fast/amd64 - linux/x86_64
- name: dockerfile - name: dockerfile
value: container-images/rocm/Containerfile value: container-images/rocm/Containerfile
pipelineRef: pipelineRef:
name: push-pipeline name: push-pipeline
timeouts:
pipeline: 6h
workspaces: workspaces:
- name: git-auth - name: git-auth
secret: secret:

View File

@ -1,36 +0,0 @@
apiVersion: tekton.dev/v1
kind: Task
metadata:
name: test-cmd
spec:
description: Run a command in a test environment.
params:
- name: image
description: The image to use when setting up the test environment.
- name: cmd
description: The command to run.
- name: envs
description: List of environment variables (NAME=VALUE) to be set in the test environment.
type: array
default: []
steps:
- name: run
image: $(params.image)
computeResources:
limits:
memory: 4Gi
requests:
cpu: "1"
memory: 1Gi
securityContext:
capabilities:
add:
- SETFCAP
command:
- /usr/bin/entrypoint.sh
args:
- $(params.envs[*])
- /bin/bash
- -ex
- -c
- $(params.cmd)
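The step hands its envs and cmd params to the bats entrypoint script (shown further down), which exports any leading NAME=VALUE arguments before exec'ing the remainder. A minimal sketch of the expanded invocation, using the env value the pull-request pipeline passes:

```bash
# Effective step command once the Task params are expanded
/usr/bin/entrypoint.sh \
  RAMALAMA_IMAGE=quay.io/redhat-user-workloads/ramalama-tenant/ramalama:on-pr-{{revision}} \
  /bin/bash -ex -c "make validate"
```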

View File

@ -1,32 +0,0 @@
apiVersion: tekton.dev/v1
kind: Task
metadata:
name: wait-for-image
spec:
description: Wait for an image to become available.
params:
- name: ref
description: Location of image to wait for.
results:
- name: digest
description: The digest that the image ref resolves to.
steps:
- name: check-and-wait
image: registry.redhat.io/rhel10/skopeo:latest
env:
- name: REF
value: $(params.ref)
- name: RESULTS_DIGEST_PATH
value: $(results.digest.path)
script: |
#!/bin/bash -e
echo "Fetching digest of $REF"
while true; do
DIGEST="$(skopeo inspect -n -f {{.Digest}} "docker://$REF" || :)"
if [ "${#DIGEST}" -gt 0 ]; then
echo -n "$DIGEST" | tee "$RESULTS_DIGEST_PATH"
exit
fi
echo "$(date -uIseconds): digest unavailable, waiting..."
sleep 60
done
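The loop keeps polling skopeo until the manifest digest resolves, then writes it to the Tekton result. The same check can be run by hand as a one-shot command; the image reference below is illustrative:

```bash
# One-shot version of the polling check above
skopeo inspect -n -f '{{.Digest}}' docker://quay.io/ramalama/ramalama:latest
```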

View File

@ -8,8 +8,7 @@ PYTHON ?= $(shell command -v python3 python|head -n1)
DESTDIR ?= / DESTDIR ?= /
PATH := $(PATH):$(HOME)/.local/bin PATH := $(PATH):$(HOME)/.local/bin
IMAGE ?= ramalama IMAGE ?= ramalama
PROJECT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) PYTHON_FILES := $(shell find . -path "./.venv" -prune -o -name "*.py" -print) $(shell find . -name ".venv" -prune -o -type f -perm +111 -exec grep -l "^\#!/usr/bin/env python3" {} \; 2>/dev/null || true)
PYTHON_SCRIPTS := $(shell grep -lEr "^\#\!\s*/usr/bin/(env +)?python(3)?(\s|$$)" --exclude-dir={.venv,venv} $(PROJECT_DIR) || true)
PYTEST_COMMON_CMD ?= PYTHONPATH=. pytest test/unit/ -vv PYTEST_COMMON_CMD ?= PYTHONPATH=. pytest test/unit/ -vv
BATS_IMAGE ?= localhost/bats:latest BATS_IMAGE ?= localhost/bats:latest
@ -115,22 +114,22 @@ ifneq (,$(wildcard /usr/bin/python3))
endif endif
! grep -ri --exclude-dir ".venv" --exclude-dir "*/.venv" "#\!/usr/bin/python3" . ! grep -ri --exclude-dir ".venv" --exclude-dir "*/.venv" "#\!/usr/bin/python3" .
flake8 $(PROJECT_DIR) $(PYTHON_SCRIPTS) flake8 $(PYTHON_FILES)
shellcheck *.sh */*.sh */*/*.sh shellcheck *.sh */*.sh */*/*.sh
.PHONY: check-format .PHONY: check-format
check-format: check-format:
black --check --diff $(PROJECT_DIR) $(PYTHON_SCRIPTS) black --check --diff $(PYTHON_FILES)
isort --check --diff $(PROJECT_DIR) $(PYTHON_SCRIPTS) isort --check --diff $(PYTHON_FILES)
.PHONY: format .PHONY: format
format: format:
black $(PROJECT_DIR) $(PYTHON_SCRIPTS) black $(PYTHON_FILES)
isort $(PROJECT_DIR) $(PYTHON_SCRIPTS) isort $(PYTHON_FILES)
.PHONY: codespell .PHONY: codespell
codespell: codespell:
codespell -w $(PROJECT_DIR) $(PYTHON_SCRIPTS) codespell --dictionary=- --ignore-words-list "cann" -w --skip="*/venv*"
.PHONY: test-run .PHONY: test-run
test-run: test-run:
@ -168,7 +167,7 @@ bats-image:
podman inspect $(BATS_IMAGE) &> /dev/null || \ podman inspect $(BATS_IMAGE) &> /dev/null || \
podman build -t $(BATS_IMAGE) -f container-images/bats/Containerfile . podman build -t $(BATS_IMAGE) -f container-images/bats/Containerfile .
bats-in-container: extra-opts = --security-opt unmask=/proc/* --device /dev/net/tun --device /dev/fuse bats-in-container: extra-opts = --security-opt unmask=/proc/* --device /dev/net/tun
%-in-container: bats-image %-in-container: bats-image
podman run -it --rm \ podman run -it --rm \

View File

@ -2,12 +2,14 @@
<img src="https://github.com/user-attachments/assets/1a338ecf-dc84-4495-8c70-16882955da47" width=50%> <img src="https://github.com/user-attachments/assets/1a338ecf-dc84-4495-8c70-16882955da47" width=50%>
</p> </p>
[RamaLama](https://ramalama.ai) strives to make working with AI simple, straightforward, and familiar by using OCI containers. [RamaLama](https://ramalama.ai) is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers.
<br> <br>
<br> <br>
## Description ## Description
RamaLama is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers. It allows engineers to use container-centric development patterns and benefits to extend to AI use cases. RamaLama strives to make working with AI simple, straightforward, and familiar by using OCI containers.
RamaLama is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers. Using a container engine like Podman, engineers can use container-centric development patterns and benefits to extend to AI use cases.
RamaLama eliminates the need to configure the host system by instead pulling a container image specific to the GPUs discovered on the host system, and allowing you to work with various models and platforms. RamaLama eliminates the need to configure the host system by instead pulling a container image specific to the GPUs discovered on the host system, and allowing you to work with various models and platforms.
@ -21,25 +23,6 @@ RamaLama eliminates the need to configure the host system by instead pulling a c
- Interact with models via REST API or as a chatbot. - Interact with models via REST API or as a chatbot.
<br> <br>
## Install
### Install on Fedora
RamaLama is available in [Fedora](https://fedoraproject.org/) 40 and later. To install it, run:
```
sudo dnf install python3-ramalama
```
### Install via PyPI
RamaLama is available via PyPI at [https://pypi.org/project/ramalama](https://pypi.org/project/ramalama)
```
pip install ramalama
```
### Install script (Linux and macOS)
Install RamaLama by running:
```
curl -fsSL https://ramalama.ai/install.sh | bash
```
## Accelerated images ## Accelerated images
| Accelerator | Image | | Accelerator | Image |
@ -74,7 +57,7 @@ RamaLama then pulls AI Models from model registries, starting a chatbot or REST
| :--------------------------------- | :-------------------------: | | :--------------------------------- | :-------------------------: |
| CPU | &check; | | CPU | &check; |
| Apple Silicon GPU (Linux / Asahi) | &check; | | Apple Silicon GPU (Linux / Asahi) | &check; |
| Apple Silicon GPU (macOS) | &check; llama.cpp or MLX | | Apple Silicon GPU (macOS) | &check; |
| Apple Silicon GPU (podman-machine) | &check; | | Apple Silicon GPU (podman-machine) | &check; |
| Nvidia GPU (cuda) | &check; See note below | | Nvidia GPU (cuda) | &check; See note below |
| AMD GPU (rocm, vulkan) | &check; | | AMD GPU (rocm, vulkan) | &check; |
@ -104,20 +87,28 @@ See the [Intel hardware table](https://dgpu-docs.intel.com/devices/hardware-tabl
### Moore Threads GPUs ### Moore Threads GPUs
On systems with Moore Threads GPUs, see [ramalama-musa](docs/ramalama-musa.7.md) documentation for the correct host system configuration. On systems with Moore Threads GPUs, see [ramalama-musa](docs/ramalama-musa.7.md) documentation for the correct host system configuration.
### MLX Runtime (macOS only) ## Install
The MLX runtime provides optimized inference for Apple Silicon Macs. MLX requires: ### Install on Fedora
- macOS operating system RamaLama is available in [Fedora 40](https://fedoraproject.org/) and later. To install it, run:
- Apple Silicon hardware (M1, M2, M3, or later) ```
- Usage with `--nocontainer` option (containers are not supported) sudo dnf install python3-ramalama
- The `mlx-lm` Python package installed on the host system ```
To install and run Phi-4 on MLX, use either `uv` or `pip`: ### Install via PyPi
```bash RamaLama is available via PyPi at [https://pypi.org/project/ramalama](https://pypi.org/project/ramalama)
uv pip install mlx-lm ```
# or pip: pip install ramalama
pip install mlx-lm ```
ramalama --runtime=mlx serve hf://mlx-community/Unsloth-Phi-4-4bit ### Install via Homebrew
```
brew install ramalama
```
### Install script (Linux and macOS)
Install RamaLama by running:
```
curl -fsSL https://ramalama.ai/install.sh | bash
``` ```
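Once `ramalama --runtime=mlx serve ...` is running, the model can be exercised over the REST API mentioned above; a minimal sketch, assuming the server listens on the default port 8080 and exposes an OpenAI-compatible chat endpoint:

```bash
# Illustrative request against the locally served model
curl -s http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"messages": [{"role": "user", "content": "Say hello in one sentence."}]}'
```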
#### Default Container Engine #### Default Container Engine
@ -222,7 +213,7 @@ $ cat /usr/share/ramalama/shortnames.conf
<br> <br>
``` ```
$ ramalama bench granite3-moe $ ramalama bench granite-moe3
``` ```
</details> </details>
@ -829,7 +820,7 @@ $ cat /usr/share/ramalama/shortnames.conf
Perplexity measures how well the model can predict the next token with lower values being better Perplexity measures how well the model can predict the next token with lower values being better
``` ```
$ ramalama perplexity granite3-moe $ ramalama perplexity granite-moe3
``` ```
</details> </details>
@ -1134,7 +1125,6 @@ This project wouldn't be possible without the help of other projects like:
- [llama.cpp](https://github.com/ggml-org/llama.cpp) - [llama.cpp](https://github.com/ggml-org/llama.cpp)
- [whisper.cpp](https://github.com/ggml-org/whisper.cpp) - [whisper.cpp](https://github.com/ggml-org/whisper.cpp)
- [vllm](https://github.com/vllm-project/vllm) - [vllm](https://github.com/vllm-project/vllm)
- [mlx-lm](https://github.com/ml-explore/mlx-examples)
- [podman](https://github.com/containers/podman) - [podman](https://github.com/containers/podman)
- [huggingface](https://github.com/huggingface) - [huggingface](https://github.com/huggingface)

View File

@ -1,11 +1,12 @@
FROM quay.io/fedora/fedora:42 FROM quay.io/fedora/fedora:42
ENV HOME=/tmp \ ENV HOME=/tmp \
XDG_RUNTIME_DIR=/tmp XDG_RUNTIME_DIR=/tmp \
STORAGE_DRIVER=vfs
WORKDIR /src WORKDIR /src
ENTRYPOINT ["/usr/bin/entrypoint.sh"] ENTRYPOINT ["/usr/bin/entrypoint.sh"]
RUN dnf -y install make bats jq iproute podman openssl httpd-tools diffutils \ RUN dnf -y install make bats jq iproute podman openssl httpd-tools \
python3-huggingface-hub \ python3-huggingface-hub \
$([ $(uname -m) == "x86_64" ] && echo ollama) \ $([ $(uname -m) == "x86_64" ] && echo ollama) \
# for validate and unit-tests # for validate and unit-tests
@ -25,6 +26,4 @@ RUN git clone --depth=1 https://github.com/ggml-org/llama.cpp && \
COPY container-images/bats/entrypoint.sh /usr/bin COPY container-images/bats/entrypoint.sh /usr/bin
COPY container-images/bats/containers.conf /etc/containers COPY container-images/bats/containers.conf /etc/containers
COPY . /src
RUN chmod -R a+rw /src
RUN chmod a+rw /etc/subuid /etc/subgid RUN chmod a+rw /etc/subuid /etc/subgid

View File

@ -1,17 +1,7 @@
#!/bin/bash #!/bin/bash
echo "$(id -un):10000:2000" > /etc/subuid echo "$(id -u):10000:100000" > /etc/subuid
echo "$(id -un):10000:2000" > /etc/subgid echo "$(id -g):10000:100000" > /etc/subgid
while [ $# -gt 0 ]; do
if [[ "$1" =~ = ]]; then
# shellcheck disable=SC2163
export "$1"
shift
else
break
fi
done
if [ $# -gt 0 ]; then if [ $# -gt 0 ]; then
exec "$@" exec "$@"

View File

@ -1,7 +0,0 @@
ARG PARENT
FROM $PARENT
ARG ENTRYPOINT
ENV ENTRYPOINT="$ENTRYPOINT"
ENTRYPOINT exec $ENTRYPOINT
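This file only re-tags a parent image with a fixed entrypoint; the *-llama-server and *-whisper-server pipelines above feed it through the PARENT and ENTRYPOINT build args. A minimal local equivalent, with an illustrative tag:

```bash
# Build a llama-server entrypoint image on top of an already built parent
podman build \
  -f container-images/common/Containerfile.entrypoint \
  --build-arg PARENT=quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}} \
  --build-arg ENTRYPOINT=/usr/bin/llama-server.sh \
  -t rocm-llama-server:local .
```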

View File

@ -1,8 +0,0 @@
ARG PARENT
FROM $PARENT
ARG GPU
USER root
RUN /usr/bin/build_rag.sh "$GPU"
ENTRYPOINT []

View File

@ -1,18 +1,16 @@
FROM quay.io/fedora/fedora:42 FROM quay.io/fedora/fedora:42
ARG RAMALAMA_STACK_VERSION=0.2.5
# hack that should be removed when the following bug is addressed # hack that should be removed when the following bug is addressed
# https://github.com/containers/ramalama-stack/issues/53 # https://github.com/containers/ramalama-stack/issues/53
RUN curl --create-dirs --output ~/.llama/providers.d/remote/inference/ramalama.yaml https://raw.githubusercontent.com/containers/ramalama-stack/refs/tags/v${RAMALAMA_STACK_VERSION}/src/ramalama_stack/providers.d/remote/inference/ramalama.yaml && \ RUN curl --create-dirs --output ~/.llama/providers.d/remote/inference/ramalama.yaml https://raw.githubusercontent.com/containers/ramalama-stack/refs/tags/v0.2.3/src/ramalama_stack/providers.d/remote/inference/ramalama.yaml && \
curl --create-dirs --output /etc/ramalama/ramalama-run.yaml https://raw.githubusercontent.com/containers/ramalama-stack/refs/tags/v${RAMALAMA_STACK_VERSION}/src/ramalama_stack/ramalama-run.yaml curl --create-dirs --output /etc/ramalama/ramalama-run.yaml https://raw.githubusercontent.com/containers/ramalama-stack/refs/tags/v0.2.3/src/ramalama_stack/ramalama-run.yaml
RUN dnf -y update && \ RUN dnf -y update && \
dnf -y install uv cmake gcc gcc-c++ python3-devel pkg-config sentencepiece-devel && \ dnf -y install uv cmake gcc gcc-c++ python3-devel pkg-config sentencepiece-devel && \
dnf -y clean all dnf -y clean all
RUN uv venv && \ RUN uv venv && \
uv pip install ramalama-stack==${RAMALAMA_STACK_VERSION} uv pip install ramalama-stack==0.2.3
COPY --chmod=755 container-images/llama-stack/entrypoint.sh /usr/bin/entrypoint.sh COPY --chmod=755 container-images/llama-stack/entrypoint.sh /usr/bin/entrypoint.sh
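On main the ramalama-stack version is pinned once through the RAMALAMA_STACK_VERSION build arg instead of being repeated in each URL, so it can be overridden at build time. A minimal sketch, assuming the Containerfile sits at container-images/llama-stack/Containerfile:

```bash
# Illustrative build with an overridden stack version
podman build \
  -f container-images/llama-stack/Containerfile \
  --build-arg RAMALAMA_STACK_VERSION=0.2.5 \
  -t llama-stack:local .
```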

View File

@ -1,4 +1,4 @@
FROM registry.access.redhat.com/ubi9/ubi:9.6-1752625787 FROM registry.access.redhat.com/ubi9/ubi:9.6-1750786174
# Install Python development dependencies # Install Python development dependencies
RUN dnf install -y python3-devel wget compat-openssl11 python3-jinja2 python3-markupsafe RUN dnf install -y python3-devel wget compat-openssl11 python3-jinja2 python3-markupsafe

View File

@ -1,17 +0,0 @@
FROM quay.io/ramalama/ramalama
ENV PATH="/root/.local/bin:$PATH"
ENV VIRTUAL_ENV="/opt/venv"
ENV UV_PYTHON_INSTALL_DIR="/opt/uv/python"
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
ENV UV_LINK_MODE="copy"
COPY . /src/ramalama
WORKDIR /src/ramalama
RUN container-images/scripts/build-vllm.sh
WORKDIR /

View File

@ -1,4 +1,4 @@
FROM registry.access.redhat.com/ubi9/ubi:9.6-1752625787 FROM registry.access.redhat.com/ubi9/ubi:9.6-1750786174
COPY container-images/rocm-ubi/amdgpu.repo /etc/yum.repos.d/ COPY container-images/rocm-ubi/amdgpu.repo /etc/yum.repos.d/
COPY container-images/rocm-ubi/rocm.repo /etc/yum.repos.d/ COPY container-images/rocm-ubi/rocm.repo /etc/yum.repos.d/

View File

@ -1,90 +0,0 @@
#!/bin/bash
available() {
command -v "$1" >/dev/null
}
install_deps() {
set -eux -o pipefail
if available dnf; then
dnf install -y git curl wget ca-certificates gcc gcc-c++ \
gperftools-libs numactl-devel ffmpeg libSM libXext mesa-libGL jq lsof \
vim numactl
dnf -y clean all
rm -rf /var/cache/*dnf*
elif available apt-get; then
apt-get update -y
apt-get install -y --no-install-recommends git curl wget ca-certificates \
gcc g++ libtcmalloc-minimal4 libnuma-dev ffmpeg libsm6 libxext6 libgl1 \
jq lsof vim numactl
rm -rf /var/lib/apt/lists/*
fi
curl -LsSf https://astral.sh/uv/0.7.21/install.sh | bash
}
preload_and_ulimit() {
local ld_preload_file="libtcmalloc_minimal.so.4"
local ld_preload_file_1="/usr/lib/$arch-linux-gnu/$ld_preload_file"
local ld_preload_file_2="/usr/lib64/$ld_preload_file"
if [ -e "$ld_preload_file_1" ]; then
ld_preload_file="$ld_preload_file_1"
elif [ -e "$ld_preload_file_2" ]; then
ld_preload_file="$ld_preload_file_2"
fi
if [ -e "$ld_preload_file" ]; then
echo "LD_PRELOAD=$ld_preload_file" >> /etc/environment
fi
echo 'ulimit -c 0' >> ~/.bashrc
}
pip_install() {
local url="https://download.pytorch.org/whl/cpu"
uv pip install -v -r "$1" --extra-index-url $url
}
git_clone_specific_commit() {
local repo="${vllm_url##*/}"
git init "$repo"
cd "$repo"
git remote add origin "$vllm_url"
git fetch --depth 1 origin $commit
git reset --hard $commit
}
main() {
set -eux -o pipefail
install_deps
local arch
arch=$(uname -m)
preload_and_ulimit
uv venv --python 3.12 --seed "$VIRTUAL_ENV"
uv pip install --upgrade pip
local vllm_url="https://github.com/vllm-project/vllm"
local commit="ac9fb732a5c0b8e671f8c91be8b40148282bb14a"
git_clone_specific_commit
if [ "$arch" == "x86_64" ]; then
export VLLM_CPU_DISABLE_AVX512="0"
export VLLM_CPU_AVX512BF16="0"
export VLLM_CPU_AVX512VNNI="0"
elif [ "$arch" == "aarch64" ]; then
export VLLM_CPU_DISABLE_AVX512="true"
fi
pip_install requirements/cpu-build.txt
pip_install requirements/cpu.txt
MAX_JOBS=2 VLLM_TARGET_DEVICE=cpu python3 setup.py install
cd -
rm -rf vllm /root/.cache
}
main "$@"

View File

@ -99,10 +99,10 @@ is_rhel_based() { # doesn't include openEuler
dnf_install_mesa() { dnf_install_mesa() {
if [ "${ID}" = "fedora" ]; then if [ "${ID}" = "fedora" ]; then
dnf copr enable -y slp/mesa-libkrun-vulkan dnf copr enable -y slp/mesa-libkrun-vulkan
dnf install -y mesa-vulkan-drivers-25.0.7-100.fc42 virglrenderer "${vulkan_rpms[@]}" dnf install -y mesa-vulkan-drivers-25.0.7-100.fc42 "${vulkan_rpms[@]}"
dnf versionlock add mesa-vulkan-drivers-25.0.7-100.fc42 dnf versionlock add mesa-vulkan-drivers-25.0.7-100.fc42
else else
dnf install -y mesa-vulkan-drivers virglrenderer "${vulkan_rpms[@]}" dnf install -y mesa-vulkan-drivers "${vulkan_rpms[@]}"
fi fi
rm_non_ubi_repos rm_non_ubi_repos
@ -282,13 +282,10 @@ clone_and_build_llama_cpp() {
} }
install_ramalama() { install_ramalama() {
if [ -e "pyproject.toml" ]; then
$PYTHON -m pip install . --prefix="$1" $PYTHON -m pip install . --prefix="$1"
fi
} }
install_entrypoints() { install_entrypoints() {
if [ -e "container-images" ]; then
install -d "$install_prefix"/bin install -d "$install_prefix"/bin
install -m 755 \ install -m 755 \
container-images/scripts/llama-server.sh \ container-images/scripts/llama-server.sh \
@ -297,7 +294,6 @@ install_entrypoints() {
container-images/scripts/doc2rag \ container-images/scripts/doc2rag \
container-images/scripts/rag_framework \ container-images/scripts/rag_framework \
"$install_prefix"/bin "$install_prefix"/bin
fi
} }
main() { main() {

View File

@ -40,18 +40,7 @@ update_python() {
} }
docling() { docling() {
case $1 in ${python} -m pip install --prefix=/usr docling docling-core accelerate --extra-index-url https://download.pytorch.org/whl/"$1"
cuda)
PYTORCH_DIR="cu128"
;;
rocm)
PYTORCH_DIR="rocm6.3"
;;
*)
PYTORCH_DIR="cpu"
;;
esac
${python} -m pip install --prefix=/usr docling docling-core accelerate --extra-index-url "https://download.pytorch.org/whl/$PYTORCH_DIR"
# Preloads models (assumes it's installed from container_build.sh) # Preloads models (assumes it's installed from container_build.sh)
doc2rag load doc2rag load
} }
@ -62,8 +51,6 @@ rag() {
} }
to_gguf() { to_gguf() {
# required to build under GCC 15 until a new release is available, see https://github.com/google/sentencepiece/issues/1108 for details
export CXXFLAGS="-include cstdint"
${python} -m pip install --prefix=/usr "numpy~=1.26.4" "sentencepiece~=0.2.0" "transformers>=4.45.1,<5.0.0" git+https://github.com/ggml-org/llama.cpp#subdirectory=gguf-py "protobuf>=4.21.0,<5.0.0" ${python} -m pip install --prefix=/usr "numpy~=1.26.4" "sentencepiece~=0.2.0" "transformers>=4.45.1,<5.0.0" git+https://github.com/ggml-org/llama.cpp#subdirectory=gguf-py "protobuf>=4.21.0,<5.0.0"
} }
@ -73,9 +60,6 @@ main() {
# shellcheck disable=SC1091 # shellcheck disable=SC1091
source /etc/os-release source /etc/os-release
# caching in a container build is unhelpful, and can cause errors
export PIP_NO_CACHE_DIR=1
local arch local arch
arch="$(uname -m)" arch="$(uname -m)"
local gpu="${1-cpu}" local gpu="${1-cpu}"
@ -83,14 +67,18 @@ main() {
python=$(python_version) python=$(python_version)
local pkgs local pkgs
if available dnf; then if available dnf; then
pkgs=("git-core" "gcc" "gcc-c++" "cmake") pkgs=("git-core" "gcc" "gcc-c++")
else else
pkgs=("git" "gcc" "g++" "cmake") pkgs=("git" "gcc" "g++")
fi fi
if [ "${gpu}" = "cuda" ]; then if [ "${gpu}" = "cuda" ]; then
pkgs+=("libcudnn9-devel-cuda-12" "libcusparselt0" "cuda-cupti-12-*") pkgs+=("libcudnn9-devel-cuda-12" "libcusparselt0" "cuda-cupti-12-*")
fi fi
if [[ "$ID" = "fedora" && "$VERSION_ID" -ge 42 ]] ; then
pkgs+=("python3-sentencepiece-0.2.0")
fi
update_python update_python
to_gguf to_gguf

Some files were not shown because too many files have changed in this diff.