Compare commits


No commits in common. "main" and "v0.10.1" have entirely different histories.

175 changed files with 1896 additions and 5199 deletions

.gitignore (vendored)

@@ -19,5 +19,3 @@ __pycache__/
coverage.*
htmlcov/
.idea/
.hypothesis/
uv.lock


@@ -9,7 +9,8 @@ set -exo pipefail
# Extract version from pyproject.toml instead of setup.py
VERSION=$(awk -F'[""]' ' /^\s*version\s*/ {print $(NF-1)}' pyproject.toml )
SPEC_FILE=rpm/ramalama.spec
#SPEC_FILE=rpm-next/ramalama.spec
SPEC_FILE=rpm/python-ramalama.spec
# RPM Spec modifications
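
For reference, the awk invocation above splits each line on double quotes (-F'[""]' is a character class containing only the double-quote), so on a PEP 621 style version line the next-to-last field, $(NF-1), is the bare version string. A minimal sketch of that extraction, with an illustrative pyproject.toml (the file path and version value below are made up):

# Sample input; contents are illustrative only.
cat > /tmp/pyproject.toml <<'EOF'
[project]
name = "ramalama"
version = "0.10.1"
EOF
# Split on double quotes and print the next-to-last field of the version line.
VERSION=$(awk -F'[""]' '/^\s*version\s*/ {print $(NF-1)}' /tmp/pyproject.toml)
echo "$VERSION"   # prints: 0.10.1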


@@ -7,11 +7,21 @@ upstream_tag_template: v{version}
packages:
ramalama-fedora:
pkg_tool: fedpkg
downstream_package_name: ramalama
specfile_path: rpm/ramalama.spec
downstream_package_name: python-ramalama
specfile_path: rpm/python-ramalama.spec
ramalama-centos:
downstream_package_name: ramalama
specfile_path: rpm/ramalama.spec
downstream_package_name: python-ramalama
specfile_path: rpm/python-ramalama.spec
# Uncomment when we get this approved.
# packages:
# ramalama-fedora:
# pkg_tool: fedpkg
# downstream_package_name: ramalama
# specfile_path: rpm-next/ramalama.spec
# ramalama-centos:
# downstream_package_name: ramalama
# specfile_path: rpm-next/ramalama.spec
srpm_build_deps:
- make
@@ -79,7 +89,7 @@ jobs:
dist_git_branches: &fedora_targets
- fedora-all
- epel10
- epel10.0
- epel9
- job: koji_build
trigger: commit
@@ -92,4 +102,4 @@ jobs:
dist_git_branches:
- fedora-branched # rawhide updates are created automatically
- epel10
- epel10.0
- epel9


@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-llama-server
pipelines.appstudio.openshift.io/type: build
name: asahi-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-llama-server
pipelines.appstudio.openshift.io/type: build
name: asahi-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-rag
pipelines.appstudio.openshift.io/type: build
name: asahi-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- GPU=cpu
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-rag
pipelines.appstudio.openshift.io/type: build
name: asahi-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- GPU=cpu
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-whisper-server
pipelines.appstudio.openshift.io/type: build
name: asahi-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-whisper-server
pipelines.appstudio.openshift.io/type: build
name: asahi-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi
pipelines.appstudio.openshift.io/type: build
name: asahi-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/asahi/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi
pipelines.appstudio.openshift.io/type: build
name: asahi-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/asahi/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: bats
@@ -28,8 +28,8 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- linux/x86_64
- linux/arm64
- name: dockerfile
value: container-images/bats/Containerfile
pipelineRef:


@@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: bats
@@ -25,8 +25,8 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/bats:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- linux/x86_64
- linux/arm64
- name: dockerfile
value: container-images/bats/Containerfile
pipelineRef:


@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-llama-server
pipelines.appstudio.openshift.io/type: build
name: cann-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-llama-server
pipelines.appstudio.openshift.io/type: build
name: cann-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-rag
pipelines.appstudio.openshift.io/type: build
name: cann-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- GPU=cpu
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-rag
pipelines.appstudio.openshift.io/type: build
name: cann-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- GPU=cpu
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-whisper-server
pipelines.appstudio.openshift.io/type: build
name: cann-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-whisper-server
pipelines.appstudio.openshift.io/type: build
name: cann-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann
pipelines.appstudio.openshift.io/type: build
name: cann-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/cann/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann
pipelines.appstudio.openshift.io/type: build
name: cann-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/cann/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-llama-server
@@ -28,8 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image


@@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-llama-server
@@ -25,8 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image


@@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-rag
@@ -28,8 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image


@@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-rag
@@ -25,8 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image


@@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-whisper-server
@@ -28,8 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image


@@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-whisper-server
@@ -25,8 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image


@@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda
@@ -28,8 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/cuda/Containerfile
pipelineRef:


@@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda
@@ -25,8 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/cuda/Containerfile
pipelineRef:


@@ -1,66 +0,0 @@
kind: Pipeline
apiVersion: tekton.dev/v1
metadata:
name: bats-integration
spec:
description: |
Test the newly-built ramalama image and layered images on all supported architectures.
params:
- name: SNAPSHOT
description: >-
Information about the components included in the current snapshot under test.
- name: platforms
description: VM platforms on which to run test commands
type: array
default:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: commands
description: Test commands to run
type: array
default:
- make bats
- name: git-url
description: URL of the Git repository containing pipeline and task definitions
default: https://github.com/containers/ramalama.git
- name: git-revision
description: Revision of the Git repository containing pipeline and task definitions
default: main
tasks:
- name: init
params:
- name: SNAPSHOT
value: $(params.SNAPSHOT)
taskRef:
resolver: git
params:
- name: url
value: $(params.git-url)
- name: revision
value: $(params.git-revision)
- name: pathInRepo
value: .tekton/integration/tasks/init-snapshot.yaml
- name: test
matrix:
params:
- name: PLATFORM
value:
- $(params.platforms)
- name: cmd
value:
- $(params.commands)
params:
- name: image
value: $(tasks.init.results.bats-image)
- name: envs
value:
- RAMALAMA_IMAGE=$(tasks.init.results.ramalama-image)
taskRef:
resolver: git
params:
- name: url
value: $(params.git-url)
- name: revision
value: $(params.git-revision)
- name: pathInRepo
value: .tekton/integration/tasks/test-vm-cmd.yaml


@@ -1,55 +0,0 @@
apiVersion: tekton.dev/v1
kind: Task
metadata:
name: init-snapshot
spec:
description: Extract information from the SNAPSHOT and make it available as Tekton results
params:
- name: SNAPSHOT
description: >-
Information about the components included in the current snapshot under test.
results:
- name: event-type
description: The type of event that triggered the pipeline
- name: bats-image
description: URI of the bats image included in the snapshot
- name: ramalama-image
description: URI of the ramalama image included in the snapshot
- name: TEST_OUTPUT
description: Test result in json format
steps:
- name: process
image: registry.access.redhat.com/ubi10/ubi:latest
env:
- name: SNAPSHOT
value: $(params.SNAPSHOT)
- name: EVENT_TYPE
valueFrom:
fieldRef:
fieldPath: metadata.labels['pac.test.appstudio.openshift.io/event-type']
- name: RESULTS_EVENT_TYPE_PATH
value: $(results.event-type.path)
- name: RESULTS_BATS_IMAGE_PATH
value: $(results.bats-image.path)
- name: RESULTS_RAMALAMA_IMAGE_PATH
value: $(results.ramalama-image.path)
- name: RESULTS_TEST_OUTPUT_PATH
value: $(results.TEST_OUTPUT.path)
script: |
#!/bin/bash -ex
dnf -y install jq
echo -n "$EVENT_TYPE" | tee "$RESULTS_EVENT_TYPE_PATH"
echo
component_image() {
TAGSEP=":"
if [ "$EVENT_TYPE" == "pull_request" ]; then
TAGSEP+="on-pr-"
fi
jq -j --arg name "$1" --arg tagsep "$TAGSEP" '.components[] | select(.name == $name) | [(.containerImage | split("@")[0]), .source.git.revision] | join($tagsep)' <<< "$SNAPSHOT"
}
component_image bats | tee "$RESULTS_BATS_IMAGE_PATH"
echo
component_image ramalama | tee "$RESULTS_RAMALAMA_IMAGE_PATH"
echo
jq -jnc '{result: "SUCCESS", timestamp: now | todateiso8601, failures: 0, successes: 1, warnings: 0}' | tee "$RESULTS_TEST_OUTPUT_PATH"
echo
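
For context, a minimal sketch of what the component_image helper above resolves to; the SNAPSHOT payload, image digest, and revision below are invented for illustration (the real payload is supplied by the build service at runtime):

# Illustrative SNAPSHOT; values are made up.
SNAPSHOT='{"components":[{"name":"bats","containerImage":"quay.io/redhat-user-workloads/ramalama-tenant/bats@sha256:abc123","source":{"git":{"revision":"0123abcd"}}}]}'
EVENT_TYPE=pull_request
TAGSEP=":"
if [ "$EVENT_TYPE" == "pull_request" ]; then
  TAGSEP+="on-pr-"
fi
# Strip the digest from containerImage, then re-tag with the git revision.
jq -j --arg name bats --arg tagsep "$TAGSEP" \
  '.components[] | select(.name == $name) | [(.containerImage | split("@")[0]), .source.git.revision] | join($tagsep)' \
  <<< "$SNAPSHOT"
# Output: quay.io/redhat-user-workloads/ramalama-tenant/bats:on-pr-0123abcd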


@@ -1,118 +0,0 @@
apiVersion: tekton.dev/v1
kind: Task
metadata:
name: test-vm-cmd
spec:
description: Run a command in a test VM
params:
- name: PLATFORM
description: The platform of the VM to provision
- name: image
description: The image to use when setting up the test environment
- name: cmd
description: The command to run
- name: envs
description: List of environment variables (NAME=VALUE) to be set in the test environment
type: array
default: []
results:
- name: TEST_OUTPUT
description: Test result in json format
volumes:
- name: workdir
emptyDir: {}
- name: ssh
secret:
secretName: multi-platform-ssh-$(context.taskRun.name)
steps:
- name: run-in-vm
image: registry.access.redhat.com/ubi10/ubi:latest
volumeMounts:
- mountPath: /var/workdir
name: workdir
- mountPath: /ssh
name: ssh
workingDir: /var/workdir
env:
- name: TEST_IMAGE
value: $(params.image)
- name: TEST_CMD
value: $(params.cmd)
- name: RESULTS_TEST_OUTPUT_PATH
value: $(results.TEST_OUTPUT.path)
args:
- $(params.envs[*])
script: |
#!/bin/bash -ex
log() {
echo "[$(date -uIns)]" $*
}
log Install packages
dnf -y install openssh-clients rsync jq
log Prepare connection
if [ -e "/ssh/error" ]; then
log Error provisioning VM
cat /ssh/error
exit 1
fi
export SSH_HOST=$(cat /ssh/host)
mkdir -p ~/.ssh
if [ "$SSH_HOST" == "localhost" ] ; then
IS_LOCALHOST=true
log Localhost detected, running build in cluster
elif [ -s "/ssh/otp" ]; then
log Fetching OTP token
curl --cacert /ssh/otp-ca -d @/ssh/otp $(cat /ssh/otp-server) > ~/.ssh/id_rsa
echo >> ~/.ssh/id_rsa
chmod 0400 ~/.ssh/id_rsa
elif [ -s "/ssh/id_rsa" ]; then
log Copying ssh key
cp /ssh/id_rsa ~/.ssh
chmod 0400 ~/.ssh/id_rsa
else
log No authentication mechanism found
exit 1
fi
mkdir -p scripts
PODMAN_ENV=()
while [ $# -ne 0 ]; do
PODMAN_ENV+=("-e" "$1")
shift
done
cat > scripts/test.sh <<SCRIPTEOF
#!/bin/bash -ex
podman run \
--userns=keep-id \
--security-opt label=disable \
--security-opt unmask=/proc/* \
--device /dev/net/tun \
--device /dev/fuse \
${PODMAN_ENV[*]} \
$TEST_IMAGE $TEST_CMD
SCRIPTEOF
chmod +x scripts/test.sh
if ! [[ $IS_LOCALHOST ]]; then
log VM exec
export BUILD_DIR=$(cat /ssh/user-dir)
export SSH_ARGS="-o StrictHostKeyChecking=no -o ServerAliveInterval=60 -o ServerAliveCountMax=10"
# ssh once before rsync to retrieve the host key
ssh $SSH_ARGS "$SSH_HOST" "uname -a"
rsync -ra scripts "$SSH_HOST:$BUILD_DIR"
ssh $SSH_ARGS "$SSH_HOST" "$BUILD_DIR/scripts/test.sh"
log End VM exec
else
log Local exec
scripts/test.sh
log End local exec
fi
jq -jnc '{result: "SUCCESS", timestamp: now | todateiso8601, failures: 0, successes: 1, warnings: 0}' | tee "$RESULTS_TEST_OUTPUT_PATH"
echo
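
As a side note, the envs parameter reaches this step as positional arguments (args: - $(params.envs[*])), and the while loop above folds them into repeated -e flags for podman. A minimal sketch of that loop in isolation, with made-up values:

# Stand-ins for $(params.envs[*]); values are illustrative only.
set -- "RAMALAMA_IMAGE=quay.io/example/ramalama:latest" "DEBUG=1"
PODMAN_ENV=()
while [ $# -ne 0 ]; do
  PODMAN_ENV+=("-e" "$1")   # each NAME=VALUE becomes "-e NAME=VALUE"
  shift
done
echo "${PODMAN_ENV[@]}"   # -> -e RAMALAMA_IMAGE=quay.io/example/ramalama:latest -e DEBUG=1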


@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-llama-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-llama-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-rag
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- GPU=cpu
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-rag
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- GPU=cpu
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-whisper-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-whisper-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,41 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/intel-gpu/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,38 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/intel-gpu/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: llama-stack
pipelines.appstudio.openshift.io/type: build
name: llama-stack-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/llama-stack:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/llama-stack/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: llama-stack
pipelines.appstudio.openshift.io/type: build
name: llama-stack-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/llama-stack:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/llama-stack/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-llama-server
pipelines.appstudio.openshift.io/type: build
name: musa-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-llama-server
pipelines.appstudio.openshift.io/type: build
name: musa-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-rag
pipelines.appstudio.openshift.io/type: build
name: musa-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- GPU=musa
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'


@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-rag
pipelines.appstudio.openshift.io/type: build
name: musa-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- GPU=musa
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-whisper-server
pipelines.appstudio.openshift.io/type: build
name: musa-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-whisper-server
pipelines.appstudio.openshift.io/type: build
name: musa-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,41 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa
pipelines.appstudio.openshift.io/type: build
name: musa-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/musa/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,38 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa
pipelines.appstudio.openshift.io/type: build
name: musa-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/musa/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,41 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: openvino
pipelines.appstudio.openshift.io/type: build
name: openvino-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/openvino:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/openvino/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,38 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: openvino
pipelines.appstudio.openshift.io/type: build
name: openvino-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/openvino:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/openvino/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -20,11 +20,13 @@ spec:
name: output-image
type: string
- default: .
description: Path to the source code of an application's component from where to build image.
description: Path to the source code of an application's component from where
to build image.
name: path-context
type: string
- default: Dockerfile
description: Path to the Dockerfile inside the context specified by parameter path-context
description: Path to the Dockerfile inside the context specified by parameter
path-context
name: dockerfile
type: string
- default: "false"
@ -44,7 +46,8 @@ spec:
name: prefetch-input
type: string
- default: ""
description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.
description: Image tag expiration time, time values could be something like
1h, 2d, 3w for hours, days, and weeks, respectively.
name: image-expires-after
- default: "false"
description: Build a source image.
@ -63,12 +66,14 @@ spec:
name: build-args-file
type: string
- default: "false"
description: Whether to enable privileged mode, should be used only with remote VMs
description: Whether to enable privileged mode, should be used only with remote
VMs
name: privileged-nested
type: string
- default:
- linux-c4xlarge/amd64
description: List of platforms to build the container images on. The available set of values is determined by the configuration of the multi-platform-controller.
- linux/x86_64
description: List of platforms to build the container images on. The available
set of values is determined by the configuration of the multi-platform-controller.
name: build-platforms
type: array
- default: ""
@ -113,7 +118,7 @@ spec:
- name: name
value: init
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:1d8221c84f91b923d89de50bf16481ea729e3b68ea04a9a7cbe8485ddbb27ee6
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:66e90d31e1386bf516fb548cd3e3f0082b5d0234b8b90dbf9e0d4684b70dbe1a
- name: kind
value: task
resolver: bundles
@ -163,7 +168,7 @@ spec:
- name: name
value: prefetch-dependencies-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:092491ac0f6e1009d10c58a1319d1029371bf637cc1293cceba53c6da5314ed1
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:5e15408f997557153b13d492aeccb51c01923bfbe4fbdf6f1e8695ce1b82f826
- name: kind
value: task
resolver: bundles
@ -225,7 +230,7 @@ spec:
- name: name
value: buildah-remote-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:9e866d4d0489a6ab84ae263db416c9f86d2d6117ef4444f495a0e97388ae3ac0
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:ae87472f60dbbf71e4980cd478c92740c145fd9e44acbb9b164a21f1bcd61aa3
- name: kind
value: task
resolver: bundles
@ -254,7 +259,7 @@ spec:
- name: name
value: build-image-index
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:3499772af90aad0d3935629be6d37dd9292195fb629e6f43ec839c7f545a0faa
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:846dc9975914f31380ec2712fdbac9df3b06c00a9cc7df678315a7f97145efc2
- name: kind
value: task
resolver: bundles
@ -285,6 +290,8 @@ spec:
params:
- name: image
value: $(params.test-image)@$(tasks.wait-for-test-image.results.digest)
- name: source-artifact
value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
- name: envs
value:
- $(params.test-envs[*])
@ -300,13 +307,11 @@ spec:
- name: build-source-image
params:
- name: BINARY_IMAGE
value: $(tasks.build-image-index.results.IMAGE_URL)
value: $(params.output-image)
- name: SOURCE_ARTIFACT
value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
- name: CACHI2_ARTIFACT
value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT)
- name: BINARY_IMAGE_DIGEST
value: $(tasks.build-image-index.results.IMAGE_DIGEST)
runAfter:
- build-image-index
taskRef:
@ -314,7 +319,7 @@ spec:
- name: name
value: source-build-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:b1eb49583b41872b27356fee20d5f0eb6ff7f5cdeacde7ffb39655f031104728
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.2@sha256:b424894fc8e806c12658daa565b835fd2d66e7f7608afc47529eb7b410f030d7
- name: kind
value: task
resolver: bundles
@ -362,7 +367,7 @@ spec:
- name: name
value: clair-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:417f44117f8d87a4a62fea6589b5746612ac61640b454dbd88f74892380411f2
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:d354939892f3a904223ec080cc3771bd11931085a5d202323ea491ee8e8c5e43
- name: kind
value: task
resolver: bundles
@ -382,7 +387,7 @@ spec:
- name: name
value: ecosystem-cert-preflight-checks
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:f99d2bdb02f13223d494077a2cde31418d09369f33c02134a8e7e5fad2f61eda
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:b550ff4f0b634512ce5200074be7afd7a5a6c05b783620c626e2a3035cd56448
- name: kind
value: task
resolver: bundles
@ -408,7 +413,7 @@ spec:
- name: name
value: sast-snyk-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:fe5e5ba3a72632cd505910de2eacd62c9d11ed570c325173188f8d568ac60771
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:e61f541189b30d14292ef8df36ccaf13f7feb2378fed5f74cb6293b3e79eb687
- name: kind
value: task
resolver: bundles
@ -430,7 +435,7 @@ spec:
- name: name
value: clamav-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:7749146f7e4fe530846f1b15c9366178ec9f44776ef1922a60d3e7e2b8c6426b
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:9cab95ac9e833d77a63c079893258b73b8d5a298d93aaf9bdd6722471bc2f338
- name: kind
value: task
resolver: bundles
@ -475,7 +480,7 @@ spec:
- name: name
value: sast-coverity-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f9ca942208dc2e63b479384ccc56a611cc793397ecc837637b5b9f89c2ecbefe
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f81ade665c725616b918356c8c2fb2d4ed972e822a1a3181933cd0ada728a231
- name: kind
value: task
resolver: bundles
@ -522,7 +527,7 @@ spec:
- name: name
value: sast-shell-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:808bcaf75271db6a999f53fdefb973a385add94a277d37fbd3df68f8ac7dfaa3
- name: kind
value: task
resolver: bundles
@ -548,7 +553,7 @@ spec:
- name: name
value: sast-unicode-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.2@sha256:24ad71fde435fc25abba2c4c550beb088b1530f738d3c377e2f635b5f320d57b
- name: kind
value: task
resolver: bundles
@ -593,7 +598,7 @@ spec:
- name: name
value: push-dockerfile-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:8c75c4a747e635e5f3e12266a3bb6e5d3132bf54e37eaa53d505f89897dd8eca
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:5d8013b6a27bbc5e4ff261144616268f28417ed0950d583ef36349fcd59d3d3d
- name: kind
value: task
resolver: bundles
@ -629,7 +634,7 @@ spec:
- name: name
value: show-sbom
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:86c069cac0a669797e8049faa8aa4088e70ff7fcd579d5bdc37626a9e0488a05
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:1b1df4da95966d08ac6a5b8198710e09e68b5c2cdc707c37d9d19769e65884b2
- name: kind
value: task
resolver: bundles

View File

@ -20,11 +20,13 @@ spec:
name: output-image
type: string
- default: .
description: Path to the source code of an application's component from where to build image.
description: Path to the source code of an application's component from where
to build image.
name: path-context
type: string
- default: Dockerfile
description: Path to the Dockerfile inside the context specified by parameter path-context
description: Path to the Dockerfile inside the context specified by parameter
path-context
name: dockerfile
type: string
- default: "false"
@ -44,7 +46,8 @@ spec:
name: prefetch-input
type: string
- default: ""
description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.
description: Image tag expiration time, time values could be something like
1h, 2d, 3w for hours, days, and weeks, respectively.
name: image-expires-after
- default: "false"
description: Build a source image.
@ -63,12 +66,14 @@ spec:
name: build-args-file
type: string
- default: "false"
description: Whether to enable privileged mode, should be used only with remote VMs
description: Whether to enable privileged mode, should be used only with remote
VMs
name: privileged-nested
type: string
- default:
- linux-c4xlarge/amd64
description: List of platforms to build the container images on. The available set of values is determined by the configuration of the multi-platform-controller.
- linux/x86_64
description: List of platforms to build the container images on. The available
set of values is determined by the configuration of the multi-platform-controller.
name: build-platforms
type: array
- default: ""
@ -113,7 +118,7 @@ spec:
- name: name
value: init
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:1d8221c84f91b923d89de50bf16481ea729e3b68ea04a9a7cbe8485ddbb27ee6
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:66e90d31e1386bf516fb548cd3e3f0082b5d0234b8b90dbf9e0d4684b70dbe1a
- name: kind
value: task
resolver: bundles
@ -163,7 +168,7 @@ spec:
- name: name
value: prefetch-dependencies-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:092491ac0f6e1009d10c58a1319d1029371bf637cc1293cceba53c6da5314ed1
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:5e15408f997557153b13d492aeccb51c01923bfbe4fbdf6f1e8695ce1b82f826
- name: kind
value: task
resolver: bundles
@ -225,7 +230,7 @@ spec:
- name: name
value: buildah-remote-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:9e866d4d0489a6ab84ae263db416c9f86d2d6117ef4444f495a0e97388ae3ac0
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:ae87472f60dbbf71e4980cd478c92740c145fd9e44acbb9b164a21f1bcd61aa3
- name: kind
value: task
resolver: bundles
@ -254,7 +259,7 @@ spec:
- name: name
value: build-image-index
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:3499772af90aad0d3935629be6d37dd9292195fb629e6f43ec839c7f545a0faa
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:846dc9975914f31380ec2712fdbac9df3b06c00a9cc7df678315a7f97145efc2
- name: kind
value: task
resolver: bundles
@ -285,6 +290,8 @@ spec:
params:
- name: image
value: $(params.test-image)@$(tasks.wait-for-test-image.results.digest)
- name: source-artifact
value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
- name: envs
value:
- $(params.test-envs[*])
@ -300,13 +307,11 @@ spec:
- name: build-source-image
params:
- name: BINARY_IMAGE
value: $(tasks.build-image-index.results.IMAGE_URL)
value: $(params.output-image)
- name: SOURCE_ARTIFACT
value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
- name: CACHI2_ARTIFACT
value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT)
- name: BINARY_IMAGE_DIGEST
value: $(tasks.build-image-index.results.IMAGE_DIGEST)
runAfter:
- build-image-index
taskRef:
@ -314,7 +319,7 @@ spec:
- name: name
value: source-build-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:b1eb49583b41872b27356fee20d5f0eb6ff7f5cdeacde7ffb39655f031104728
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.2@sha256:b424894fc8e806c12658daa565b835fd2d66e7f7608afc47529eb7b410f030d7
- name: kind
value: task
resolver: bundles
@ -362,7 +367,7 @@ spec:
- name: name
value: clair-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:417f44117f8d87a4a62fea6589b5746612ac61640b454dbd88f74892380411f2
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:d354939892f3a904223ec080cc3771bd11931085a5d202323ea491ee8e8c5e43
- name: kind
value: task
resolver: bundles
@ -382,7 +387,7 @@ spec:
- name: name
value: ecosystem-cert-preflight-checks
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:f99d2bdb02f13223d494077a2cde31418d09369f33c02134a8e7e5fad2f61eda
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:b550ff4f0b634512ce5200074be7afd7a5a6c05b783620c626e2a3035cd56448
- name: kind
value: task
resolver: bundles
@ -408,7 +413,7 @@ spec:
- name: name
value: sast-snyk-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:fe5e5ba3a72632cd505910de2eacd62c9d11ed570c325173188f8d568ac60771
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:e61f541189b30d14292ef8df36ccaf13f7feb2378fed5f74cb6293b3e79eb687
- name: kind
value: task
resolver: bundles
@ -430,7 +435,7 @@ spec:
- name: name
value: clamav-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:7749146f7e4fe530846f1b15c9366178ec9f44776ef1922a60d3e7e2b8c6426b
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:9cab95ac9e833d77a63c079893258b73b8d5a298d93aaf9bdd6722471bc2f338
- name: kind
value: task
resolver: bundles
@ -475,7 +480,7 @@ spec:
- name: name
value: sast-coverity-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f9ca942208dc2e63b479384ccc56a611cc793397ecc837637b5b9f89c2ecbefe
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f81ade665c725616b918356c8c2fb2d4ed972e822a1a3181933cd0ada728a231
- name: kind
value: task
resolver: bundles
@ -522,7 +527,7 @@ spec:
- name: name
value: sast-shell-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:808bcaf75271db6a999f53fdefb973a385add94a277d37fbd3df68f8ac7dfaa3
- name: kind
value: task
resolver: bundles
@ -548,7 +553,7 @@ spec:
- name: name
value: sast-unicode-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.2@sha256:24ad71fde435fc25abba2c4c550beb088b1530f738d3c377e2f635b5f320d57b
- name: kind
value: task
resolver: bundles
@ -593,7 +598,7 @@ spec:
- name: name
value: push-dockerfile-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:8c75c4a747e635e5f3e12266a3bb6e5d3132bf54e37eaa53d505f89897dd8eca
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:5d8013b6a27bbc5e4ff261144616268f28417ed0950d583ef36349fcd59d3d3d
- name: kind
value: task
resolver: bundles
@ -629,7 +634,7 @@ spec:
- name: name
value: show-sbom
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:86c069cac0a669797e8049faa8aa4088e70ff7fcd579d5bdc37626a9e0488a05
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:1b1df4da95966d08ac6a5b8198710e09e68b5c2cdc707c37d9d19769e65884b2
- name: kind
value: task
resolver: bundles

View File

@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-cli
pipelines.appstudio.openshift.io/type: build
name: ramalama-cli-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-cli:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/ramalama-cli/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-cli
pipelines.appstudio.openshift.io/type: build
name: ramalama-cli-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-cli:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/ramalama-cli/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-llama-server
@ -28,8 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-llama-server
@ -25,8 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-rag
@ -28,8 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-rag
@ -25,8 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-whisper-server
@ -28,8 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-whisper-server
@ -25,8 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama
@ -28,8 +28,8 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- linux/x86_64
- linux/arm64
- name: dockerfile
value: container-images/ramalama/Containerfile
- name: test-image
@ -40,6 +40,7 @@ spec:
- name: test-commands
value:
- make validate
- make bats-nocontainer
- make unit-tests
- make cov-tests
pipelineRef:

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama
@ -25,8 +25,8 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- linux/x86_64
- linux/arm64
- name: dockerfile
value: container-images/ramalama/Containerfile
- name: test-image
@ -37,6 +37,7 @@ spec:
- name: test-commands
value:
- make validate
- make bats-nocontainer
- make unit-tests
- make cov-tests
pipelineRef:

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-llama-server
@ -28,7 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-llama-server
@ -25,7 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-rag
@ -28,7 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-rag
@ -25,7 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-llama-server
@ -28,7 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-llama-server
@ -25,7 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-rag
@ -28,7 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-rag
@ -25,7 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-whisper-server
@ -28,7 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-whisper-server
@ -25,7 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi
@ -28,7 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-fast/amd64
- linux/x86_64
- name: dockerfile
value: container-images/rocm-ubi/Containerfile
pipelineRef:

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi
@ -25,7 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:{{revision}}
- name: build-platforms
value:
- linux-fast/amd64
- linux/x86_64
- name: dockerfile
value: container-images/rocm-ubi/Containerfile
pipelineRef:

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-whisper-server
@ -28,7 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-whisper-server
@ -25,7 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux/x86_64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm
@ -28,7 +28,7 @@ spec:
value: 5d
- name: build-platforms
value:
- linux-fast/amd64
- linux/x86_64
- name: dockerfile
value: container-images/rocm/Containerfile
pipelineRef:

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm
@ -25,7 +25,7 @@ spec:
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}}
- name: build-platforms
value:
- linux-fast/amd64
- linux/x86_64
- name: dockerfile
value: container-images/rocm/Containerfile
pipelineRef:

View File

@ -7,29 +7,58 @@ spec:
params:
- name: image
description: The image to use when setting up the test environment.
- name: source-artifact
description: The Trusted Artifact URI pointing to the artifact with the application source code.
- name: cmd
description: The command to run.
- name: envs
description: List of environment variables (NAME=VALUE) to be set in the test environment.
type: array
default: []
steps:
- name: run
image: $(params.image)
volumes:
- name: workdir
emptyDir: {}
stepTemplate:
volumeMounts:
- mountPath: /var/workdir
name: workdir
computeResources:
limits:
memory: 4Gi
requests:
cpu: "1"
memory: 1Gi
steps:
- name: use-trusted-artifact
image: quay.io/konflux-ci/build-trusted-artifacts:latest@sha256:4689f88dd253bd1feebf57f1a76a5a751880f739000719cd662bbdc76990a7fd
args:
- use
- $(params.source-artifact)=/var/workdir/source
- name: set-env
image: $(params.image)
workingDir: /var/workdir/source
args:
- $(params.envs[*])
script: |
#!/bin/bash -e
rm -f .bashenv
while [ $# -ne 0 ]; do
echo "$1" >> .bashenv
shift
done
- name: run
image: $(params.image)
securityContext:
capabilities:
add:
- SETFCAP
workingDir: /var/workdir/source
env:
- name: BASH_ENV
value: .bashenv
command:
- /usr/bin/entrypoint.sh
args:
- $(params.envs[*])
- /bin/bash
- -ex
- -c

View File

@ -168,7 +168,7 @@ bats-image:
podman inspect $(BATS_IMAGE) &> /dev/null || \
podman build -t $(BATS_IMAGE) -f container-images/bats/Containerfile .
bats-in-container: extra-opts = --security-opt unmask=/proc/* --device /dev/net/tun --device /dev/fuse
bats-in-container: extra-opts = --security-opt unmask=/proc/* --device /dev/net/tun
%-in-container: bats-image
podman run -it --rm \

View File

@ -2,12 +2,14 @@
<img src="https://github.com/user-attachments/assets/1a338ecf-dc84-4495-8c70-16882955da47" width=50%>
</p>
[RamaLama](https://ramalama.ai) strives to make working with AI simple, straightforward, and familiar by using OCI containers.
[RamaLama](https://ramalama.ai) is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers.
<br>
<br>
## Description
RamaLama is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers. It allows engineers to apply container-centric development patterns and their benefits to AI use cases.
RamaLama strives to make working with AI simple, straightforward, and familiar by using OCI containers.
RamaLama is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers. Using a container engine like Podman, engineers can apply container-centric development patterns and their benefits to AI use cases.
RamaLama eliminates the need to configure the host system by pulling a container image specific to the GPUs discovered on the host system, allowing you to work with various models and platforms.
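As a minimal usage sketch (the model shortname below is only an illustrative example, not taken from this change), running or serving a model typically looks like:
```
# Pull the matching accelerated image and start an interactive chat with the model
ramalama run granite3-moe

# Serve the same model over a REST API instead
ramalama serve granite3-moe
```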
@ -21,25 +23,6 @@ RamaLama eliminates the need to configure the host system by instead pulling a c
- Interact with models via REST API or as a chatbot.
<br>
## Install
### Install on Fedora
RamaLama is available in [Fedora](https://fedoraproject.org/) and later releases. To install it, run:
```
sudo dnf install python3-ramalama
```
### Install via PyPI
RamaLama is available via PyPI at [https://pypi.org/project/ramalama](https://pypi.org/project/ramalama)
```
pip install ramalama
```
### Install script (Linux and macOS)
Install RamaLama by running:
```
curl -fsSL https://ramalama.ai/install.sh | bash
```
## Accelerated images
| Accelerator | Image |
@ -120,6 +103,30 @@ pip install mlx-lm
ramalama --runtime=mlx serve hf://mlx-community/Unsloth-Phi-4-4bit
```
## Install
### Install on Fedora
RamaLama is available in [Fedora 40](https://fedoraproject.org/) and later. To install it, run:
```
sudo dnf install python3-ramalama
```
### Install via PyPI
RamaLama is available via PyPI at [https://pypi.org/project/ramalama](https://pypi.org/project/ramalama)
```
pip install ramalama
```
### Install via Homebrew
```
brew install ramalama
```
### Install script (Linux and macOS)
Install RamaLama by running:
```
curl -fsSL https://ramalama.ai/install.sh | bash
```
#### Default Container Engine
When both Podman and Docker are installed, RamaLama defaults to Podman. The `RAMALAMA_CONTAINER_ENGINE=docker` environment variable can override this behaviour. When neither is installed, RamaLama will attempt to run the model with software on the local system.
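For instance, the override can be applied per invocation (a sketch assuming both engines are installed; the model shortname is illustrative):
```
# Force RamaLama to use Docker instead of the default Podman
RAMALAMA_CONTAINER_ENGINE=docker ramalama run granite3-moe
```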
<br>
@ -222,7 +229,7 @@ $ cat /usr/share/ramalama/shortnames.conf
<br>
```
$ ramalama bench granite3-moe
$ ramalama bench granite-moe3
```
</details>
@ -829,7 +836,7 @@ $ cat /usr/share/ramalama/shortnames.conf
Perplexity measures how well the model can predict the next token, with lower values being better.
```
$ ramalama perplexity granite3-moe
$ ramalama perplexity granite-moe3
```
</details>

View File

@ -1,11 +1,12 @@
FROM quay.io/fedora/fedora:42
ENV HOME=/tmp \
XDG_RUNTIME_DIR=/tmp
XDG_RUNTIME_DIR=/tmp \
STORAGE_DRIVER=vfs
WORKDIR /src
ENTRYPOINT ["/usr/bin/entrypoint.sh"]
RUN dnf -y install make bats jq iproute podman openssl httpd-tools diffutils \
RUN dnf -y install make bats jq iproute podman openssl httpd-tools \
python3-huggingface-hub \
$([ $(uname -m) == "x86_64" ] && echo ollama) \
# for validate and unit-tests
@ -25,6 +26,4 @@ RUN git clone --depth=1 https://github.com/ggml-org/llama.cpp && \
COPY container-images/bats/entrypoint.sh /usr/bin
COPY container-images/bats/containers.conf /etc/containers
COPY . /src
RUN chmod -R a+rw /src
RUN chmod a+rw /etc/subuid /etc/subgid

View File

@ -3,16 +3,6 @@
echo "$(id -un):10000:2000" > /etc/subuid
echo "$(id -un):10000:2000" > /etc/subgid
while [ $# -gt 0 ]; do
if [[ "$1" =~ = ]]; then
# shellcheck disable=SC2163
export "$1"
shift
else
break
fi
done
if [ $# -gt 0 ]; then
exec "$@"
else

View File

@ -1,6 +1,6 @@
FROM quay.io/fedora/fedora:42
ARG RAMALAMA_STACK_VERSION=0.2.5
ARG RAMALAMA_STACK_VERSION=0.2.4
# hack that should be removed when the following bug is addressed
# https://github.com/containers/ramalama-stack/issues/53

View File

@ -1,4 +1,4 @@
FROM registry.access.redhat.com/ubi9/ubi:9.6-1752625787
FROM registry.access.redhat.com/ubi9/ubi:9.6-1751445649
# Install Python development dependencies
RUN dnf install -y python3-devel wget compat-openssl11 python3-jinja2 python3-markupsafe

View File

@ -1,17 +0,0 @@
FROM quay.io/ramalama/ramalama
ENV PATH="/root/.local/bin:$PATH"
ENV VIRTUAL_ENV="/opt/venv"
ENV UV_PYTHON_INSTALL_DIR="/opt/uv/python"
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
ENV UV_LINK_MODE="copy"
COPY . /src/ramalama
WORKDIR /src/ramalama
RUN container-images/scripts/build-vllm.sh
WORKDIR /

View File

@ -1,4 +1,4 @@
FROM registry.access.redhat.com/ubi9/ubi:9.6-1752625787
FROM registry.access.redhat.com/ubi9/ubi:9.6-1751445649
COPY container-images/rocm-ubi/amdgpu.repo /etc/yum.repos.d/
COPY container-images/rocm-ubi/rocm.repo /etc/yum.repos.d/

View File

@ -1,90 +0,0 @@
#!/bin/bash
available() {
command -v "$1" >/dev/null
}
install_deps() {
set -eux -o pipefail
if available dnf; then
dnf install -y git curl wget ca-certificates gcc gcc-c++ \
gperftools-libs numactl-devel ffmpeg libSM libXext mesa-libGL jq lsof \
vim numactl
dnf -y clean all
rm -rf /var/cache/*dnf*
elif available apt-get; then
apt-get update -y
apt-get install -y --no-install-recommends git curl wget ca-certificates \
gcc g++ libtcmalloc-minimal4 libnuma-dev ffmpeg libsm6 libxext6 libgl1 \
jq lsof vim numactl
rm -rf /var/lib/apt/lists/*
fi
curl -LsSf https://astral.sh/uv/0.7.21/install.sh | bash
}
preload_and_ulimit() {
local ld_preload_file="libtcmalloc_minimal.so.4"
local ld_preload_file_1="/usr/lib/$arch-linux-gnu/$ld_preload_file"
local ld_preload_file_2="/usr/lib64/$ld_preload_file"
if [ -e "$ld_preload_file_1" ]; then
ld_preload_file="$ld_preload_file_1"
elif [ -e "$ld_preload_file_2" ]; then
ld_preload_file="$ld_preload_file_2"
fi
if [ -e "$ld_preload_file" ]; then
echo "LD_PRELOAD=$ld_preload_file" >> /etc/environment
fi
echo 'ulimit -c 0' >> ~/.bashrc
}
pip_install() {
local url="https://download.pytorch.org/whl/cpu"
uv pip install -v -r "$1" --extra-index-url $url
}
git_clone_specific_commit() {
local repo="${vllm_url##*/}"
git init "$repo"
cd "$repo"
git remote add origin "$vllm_url"
git fetch --depth 1 origin $commit
git reset --hard $commit
}
main() {
set -eux -o pipefail
install_deps
local arch
arch=$(uname -m)
preload_and_ulimit
uv venv --python 3.12 --seed "$VIRTUAL_ENV"
uv pip install --upgrade pip
local vllm_url="https://github.com/vllm-project/vllm"
local commit="ac9fb732a5c0b8e671f8c91be8b40148282bb14a"
git_clone_specific_commit
if [ "$arch" == "x86_64" ]; then
export VLLM_CPU_DISABLE_AVX512="0"
export VLLM_CPU_AVX512BF16="0"
export VLLM_CPU_AVX512VNNI="0"
elif [ "$arch" == "aarch64" ]; then
export VLLM_CPU_DISABLE_AVX512="true"
fi
pip_install requirements/cpu-build.txt
pip_install requirements/cpu.txt
MAX_JOBS=2 VLLM_TARGET_DEVICE=cpu python3 setup.py install
cd -
rm -rf vllm /root/.cache
}
main "$@"

View File

@ -99,10 +99,10 @@ is_rhel_based() { # doesn't include openEuler
dnf_install_mesa() {
if [ "${ID}" = "fedora" ]; then
dnf copr enable -y slp/mesa-libkrun-vulkan
dnf install -y mesa-vulkan-drivers-25.0.7-100.fc42 virglrenderer "${vulkan_rpms[@]}"
dnf install -y mesa-vulkan-drivers-25.0.7-100.fc42 "${vulkan_rpms[@]}"
dnf versionlock add mesa-vulkan-drivers-25.0.7-100.fc42
else
dnf install -y mesa-vulkan-drivers virglrenderer "${vulkan_rpms[@]}"
dnf install -y mesa-vulkan-drivers "${vulkan_rpms[@]}"
fi
rm_non_ubi_repos
@ -282,13 +282,10 @@ clone_and_build_llama_cpp() {
}
install_ramalama() {
if [ -e "pyproject.toml" ]; then
$PYTHON -m pip install . --prefix="$1"
fi
}
install_entrypoints() {
if [ -e "container-images" ]; then
install -d "$install_prefix"/bin
install -m 755 \
container-images/scripts/llama-server.sh \
@ -297,7 +294,6 @@ install_entrypoints() {
container-images/scripts/doc2rag \
container-images/scripts/rag_framework \
"$install_prefix"/bin
fi
}
main() {

View File

@ -40,18 +40,7 @@ update_python() {
}
docling() {
case $1 in
cuda)
PYTORCH_DIR="cu128"
;;
rocm)
PYTORCH_DIR="rocm6.3"
;;
*)
PYTORCH_DIR="cpu"
;;
esac
${python} -m pip install --prefix=/usr docling docling-core accelerate --extra-index-url "https://download.pytorch.org/whl/$PYTORCH_DIR"
${python} -m pip install --prefix=/usr docling docling-core accelerate --extra-index-url https://download.pytorch.org/whl/"$1"
# Preloads models (assumes it's installed from container_build.sh)
doc2rag load
}
@ -62,8 +51,6 @@ rag() {
}
to_gguf() {
# required to build under GCC 15 until a new release is available, see https://github.com/google/sentencepiece/issues/1108 for details
export CXXFLAGS="-include cstdint"
${python} -m pip install --prefix=/usr "numpy~=1.26.4" "sentencepiece~=0.2.0" "transformers>=4.45.1,<5.0.0" git+https://github.com/ggml-org/llama.cpp#subdirectory=gguf-py "protobuf>=4.21.0,<5.0.0"
}
@ -73,9 +60,6 @@ main() {
# shellcheck disable=SC1091
source /etc/os-release
# caching in a container build is unhelpful, and can cause errors
export PIP_NO_CACHE_DIR=1
local arch
arch="$(uname -m)"
local gpu="${1-cpu}"
@ -83,14 +67,18 @@ main() {
python=$(python_version)
local pkgs
if available dnf; then
pkgs=("git-core" "gcc" "gcc-c++" "cmake")
pkgs=("git-core" "gcc" "gcc-c++")
else
pkgs=("git" "gcc" "g++" "cmake")
pkgs=("git" "gcc" "g++")
fi
if [ "${gpu}" = "cuda" ]; then
pkgs+=("libcudnn9-devel-cuda-12" "libcusparselt0" "cuda-cupti-12-*")
fi
if [[ "$ID" = "fedora" && "$VERSION_ID" -ge 42 ]] ; then
pkgs+=("python3-sentencepiece-0.2.0")
fi
update_python
to_gguf

View File

@ -1,10 +1,8 @@
#!/usr/bin/env python3
import argparse
import errno
import hashlib
import os
import os.path
import sys
import uuid
@ -25,20 +23,18 @@ COLLECTION_NAME = "rag"
class Converter:
"""A Class designed to handle all document conversions using Docling"""
def __init__(self, args):
def __init__(self, output, targets, ocr):
# Docling Setup (Turn off OCR (image processing) for drastically reduced RAM usage and big speed increase)
pipeline_options = PdfPipelineOptions()
pipeline_options.do_ocr = args.ocr
self.sources = []
for source in args.sources:
self.add(source)
self.output = args.output
self.format = args.format
pipeline_options.do_ocr = ocr
self.doc_converter = DocumentConverter(
format_options={InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)}
)
if self.format == "qdrant":
self.client = qdrant_client.QdrantClient(path=self.output)
self.targets = []
for target in targets:
self.add(target)
self.output = output
self.client = qdrant_client.QdrantClient(path=output)
self.client.set_model(EMBED_MODEL)
self.client.set_sparse_model(SPARSE_MODEL)
# optimizations to reduce ram
@ -58,13 +54,14 @@ class Converter:
if os.path.isdir(file_path):
self.walk(file_path) # Walk directory and process all files
else:
self.sources.append(file_path) # Process the single file
self.targets.append(file_path) # Process the single file
def convert_qdrant(self, results):
def convert(self):
result = self.doc_converter.convert_all(self.targets)
documents, ids = [], []
chunker = HybridChunker(tokenizer=EMBED_MODEL, overlap=100, merge_peers=True)
for result in results:
chunk_iter = chunker.chunk(dl_doc=result.document)
for file in result:
chunk_iter = chunker.chunk(dl_doc=file.document)
for chunk in chunk_iter:
# Extract the enriched text from the chunk
doc_text = chunker.contextualize(chunk=chunk)
@ -75,33 +72,6 @@ class Converter:
ids.append(doc_id)
return self.client.add(COLLECTION_NAME, documents=documents, ids=ids, batch_size=1)
def convert(self):
results = self.doc_converter.convert_all(self.sources)
if self.format == "qdrant":
return self.convert_qdrant(results)
if self.format == "markdown":
# Export the converted document to Markdown
return self.convert_markdown(results)
if self.format == "json":
# Export the converted document to JSON
return self.convert_json(results)
def convert_markdown(self, results):
ctr = 0
# Process the conversion results
for ctr, result in enumerate(results):
dirname = self.output + os.path.dirname(self.sources[ctr])
os.makedirs(dirname, exist_ok=True)
document = result.document
document.save_as_markdown(os.path.join(dirname, f"{document.name}.md"))
def convert_json(self, results):
for ctr, result in enumerate(results):
dirname = self.output + os.path.dirname(self.sources[ctr])
os.makedirs(dirname, exist_ok=True)
document = result.document
document.save_as_json(os.path.join(dirname, f"{document.name}.json"))
def walk(self, path):
for root, dirs, files in os.walk(path, topdown=True):
if len(files) == 0:
@ -109,7 +79,7 @@ class Converter:
for f in files:
file = os.path.join(root, f)
if os.path.isfile(file):
self.sources.append(file)
self.targets.append(file)
def generate_hash(self, document: str) -> str:
"""Generate a unique hash for a document."""
@ -133,14 +103,8 @@ parser = argparse.ArgumentParser(
description="process source files into RAG vector database",
)
parser.add_argument("output", nargs="?", help="Output directory")
parser.add_argument("sources", nargs="*", help="Source files")
parser.add_argument(
"--format",
default="qdrant",
help="Output format for RAG Data",
choices=["qdrant", "json", "markdown"],
)
parser.add_argument("target", nargs="?", help="Target database")
parser.add_argument("source", nargs="*", help="Source files")
parser.add_argument(
"--ocr",
action='store_true',
@ -159,16 +123,16 @@ def eprint(e, exit_code):
try:
args = parser.parse_args()
if args.output == "load":
if args.target == "load":
load()
else:
converter = Converter(args)
converter = Converter(args.target, args.source, args.ocr)
converter.convert()
except docling.exceptions.ConversionError as e:
eprint(e, 1)
except FileNotFoundError as e:
eprint(e, errno.ENOENT)
eprint(e, 1)
except ValueError as e:
eprint(e, errno.EINVAL)
eprint(e, 1)
except KeyboardInterrupt:
pass

View File

@ -62,7 +62,7 @@ add_rag() {
tag=$tag-rag
containerfile="container-images/common/Containerfile.rag"
GPU=cpu
case "${2##*/}" in
case $2 in
cuda)
GPU=cuda
;;
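
A minimal sketch of what the `${2##*/}` parameter expansion buys here, assuming the second argument is a full image reference rather than a bare accelerator name:

```bash
image="quay.io/ramalama/cuda"
echo "${image##*/}"   # prints "cuda": the registry path is stripped, so the case arms can match plain accelerator names
```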

View File

@ -48,33 +48,6 @@ for a value and set the variable only if it is set on the host.
#### **--help**, **-h**
show this help message and exit
#### **--image**=IMAGE
OCI container image to run with the specified AI model. RamaLama defaults to using
images based on the accelerator it discovers. For example:
`quay.io/ramalama/ramalama`. See the table below for all default images.
The default image tag is based on the minor version of the RamaLama package.
Version 0.11.0 of RamaLama pulls an image with a `:0.11` tag from the quay.io/ramalama OCI repository. The --image option overrides this default.
The default can be overridden in the ramalama.conf file or via the
RAMALAMA_IMAGE environment variable. `export RAMALAMA_IMAGE=quay.io/ramalama/aiimage:1.2` tells
RamaLama to use the `quay.io/ramalama/aiimage:1.2` image.
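
As an illustration (the image tag below is hypothetical; the model name is taken from the examples section), the override can also be passed per invocation:

```
# Per-invocation override
ramalama bench --image quay.io/ramalama/rocm:0.11 granite3-moe

# Session-wide override via the environment
export RAMALAMA_IMAGE=quay.io/ramalama/rocm:0.11
ramalama bench granite3-moe
```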
Accelerated images:
| Accelerator | Image |
| ------------------------| -------------------------- |
| CPU, Apple | quay.io/ramalama/ramalama |
| HIP_VISIBLE_DEVICES | quay.io/ramalama/rocm |
| CUDA_VISIBLE_DEVICES | quay.io/ramalama/cuda |
| ASAHI_VISIBLE_DEVICES | quay.io/ramalama/asahi |
| INTEL_VISIBLE_DEVICES | quay.io/ramalama/intel-gpu |
| ASCEND_VISIBLE_DEVICES | quay.io/ramalama/cann |
| MUSA_VISIBLE_DEVICES | quay.io/ramalama/musa |
#### **--keep-groups**
pass --group-add keep-groups to podman (default: False)
If the GPU device on the host system is accessible to the user via group access, this option leaks those groups into the container.
#### **--name**, **-n**
name of the container to run the Model in
@ -122,9 +95,6 @@ not have more privileges than the user that launched them.
#### **--seed**=
Specify seed rather than using random seed model interaction
#### **--selinux**=*true*
Enable SELinux container separation
#### **--temp**="0.8"
Temperature of the response from the AI Model
llama.cpp explains this as:
@ -148,7 +118,7 @@ Benchmark specified AI Model.
## EXAMPLES
```
ramalama bench granite3-moe
ramalama bench granite-moe3
```
## SEE ALSO

View File

@ -137,19 +137,6 @@ ramalama run granite
This is particularly useful in multi-GPU systems where you want to dedicate specific GPUs to different workloads.
If the `CUDA_VISIBLE_DEVICES` environment variable is set to an empty string, RamaLama will default to using the CPU.
```bash
export CUDA_VISIBLE_DEVICES="" # Defaults to CPU
ramalama run granite
```
To revert to using all available GPUs, unset the environment variable:
```bash
unset CUDA_VISIBLE_DEVICES
```
## Troubleshooting
### CUDA Updates

View File

@ -53,33 +53,6 @@ for a value and set the variable only if it is set on the host.
#### **--help**, **-h**
show this help message and exit
#### **--image**=IMAGE
OCI container image to run with the specified AI model. RamaLama defaults to using
images based on the accelerator it discovers. For example:
`quay.io/ramalama/ramalama`. See the table below for all default images.
The default image tag is based on the minor version of the RamaLama package.
Version 0.11.0 of RamaLama pulls an image with a `:0.11` tag from the quay.io/ramalama OCI repository. The --image option overrides this default.
The default can be overridden in the ramalama.conf file or via the
RAMALAMA_IMAGE environment variable. `export RAMALAMA_IMAGE=quay.io/ramalama/aiimage:1.2` tells
RamaLama to use the `quay.io/ramalama/aiimage:1.2` image.
Accelerated images:
| Accelerator | Image |
| ------------------------| -------------------------- |
| CPU, Apple | quay.io/ramalama/ramalama |
| HIP_VISIBLE_DEVICES | quay.io/ramalama/rocm |
| CUDA_VISIBLE_DEVICES | quay.io/ramalama/cuda |
| ASAHI_VISIBLE_DEVICES | quay.io/ramalama/asahi |
| INTEL_VISIBLE_DEVICES | quay.io/ramalama/intel-gpu |
| ASCEND_VISIBLE_DEVICES | quay.io/ramalama/cann |
| MUSA_VISIBLE_DEVICES | quay.io/ramalama/musa |
#### **--keep-groups**
pass --group-add keep-groups to podman (default: False)
If the GPU device on the host system is accessible to the user via group access, this option leaks those groups into the container.
#### **--name**, **-n**
name of the container to run the Model in
@ -130,9 +103,6 @@ Add *args* to the runtime (llama.cpp or vllm) invocation.
#### **--seed**=
Specify seed rather than using random seed model interaction
#### **--selinux**=*true*
Enable SELinux container separation
#### **--temp**="0.8"
Temperature of the response from the AI Model
llama.cpp explains this as:
@ -156,7 +126,7 @@ Calculate the perplexity of an AI Model. Perplexity measures how well the model
## EXAMPLES
```
ramalama perplexity granite3-moe
ramalama perplexity granite-moe3
```
## SEE ALSO

View File

@ -19,7 +19,7 @@ positional arguments:
AsciiDoc & Markdown formatted files to be processed.
Can be specified multiple times.
*PATH|IMAGE* Path or OCI Image name to contain processed rag data
*IMAGE* OCI Image name to contain processed rag data
## OPTIONS
process to be launched inside the container. If an environment variable is
specified without a value, the container engine checks the host environment
for a value and sets the variable only if it is set on the host.
#### **--format**=*json* | *markdown* | *qdrant* |
Convert documents into the following formats
| Type | Description |
| ------- | ---------------------------------------------------- |
| json | JavaScript Object Notation, a lightweight format for exchanging data |
| markdown | Lightweight markup language using plain-text editing |
| qdrant | Retrieval-Augmented Generation (RAG) Vector database |
#### **--help**, **-h**
Print usage message
#### **--image**=IMAGE
OCI container image to run with the specified AI model. RamaLama defaults to using
images based on the accelerator it discovers. For example:
`quay.io/ramalama/ramalama-rag`. See the table below for all default images.
The default image tag is based on the minor version of the RamaLama package.
Version 0.11.0 of RamaLama pulls an image with a `:0.11` tag from the quay.io/ramalama OCI repository. The --image option overrides this default.
The default can be overridden in the ramalama.conf file or via the
RAMALAMA_IMAGE environment variable. `export RAMALAMA_IMAGE=quay.io/ramalama/aiimage:1.2` tells
RamaLama to use the `quay.io/ramalama/aiimage:1.2` image.
Accelerated images:
| Accelerator | Image |
| ------------------------| ------------------------------ |
| CPU, Apple | quay.io/ramalama/ramalama-rag |
| HIP_VISIBLE_DEVICES | quay.io/ramalama/rocm-rag |
| CUDA_VISIBLE_DEVICES | quay.io/ramalama/cuda-rag |
| ASAHI_VISIBLE_DEVICES | quay.io/ramalama/asahi-rag |
| INTEL_VISIBLE_DEVICES | quay.io/ramalama/intel-gpu-rag |
| ASCEND_VISIBLE_DEVICES | quay.io/ramalama/cann-rag |
| MUSA_VISIBLE_DEVICES | quay.io/ramalama/musa-rag |
#### **--keep-groups**
pass --group-add keep-groups to podman (default: False)
If the GPU device on the host system is accessible to the user via group access, this option leaks those groups into the container.
#### **--network**=*none*
sets the configuration for network namespaces when handling RUN instructions
@ -85,13 +49,10 @@ Pull image policy. The default is **missing**.
- **never**: Never pull the image but use the one from the local containers storage. Throw an error when no image is found.
- **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found.
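
For instance (paths and image name taken from the examples below), a sketch of forcing reuse of an image already in local containers storage:

```
# Fails instead of pulling if the image is not already present locally
ramalama rag --pull=never ./README.md quay.io/rhatdan/myrag
```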
#### **--selinux**=*true*
Enable SELinux container separation
## EXAMPLES
```
$ ramalama rag ./README.md https://github.com/containers/podman/blob/main/README.md quay.io/rhatdan/myrag
./bin/ramalama rag ./README.md https://github.com/containers/podman/blob/main/README.md quay.io/rhatdan/myrag
100% |███████████████████████████████████████████████████████| 114.00 KB/ 0.00 B 922.89 KB/s 59m 59s
Building quay.io/ramalama/myrag...
adding vectordb...
@ -99,17 +60,7 @@ c857ebc65c641084b34e39b740fdb6a2d9d2d97be320e6aa9439ed0ab8780fe0
```
```
$ ramalama rag --ocr README.md https://mysight.edu/document quay.io/rhatdan/myrag
```
```
$ ramalama rag --format markdown /tmp/internet.pdf /tmp/output
$ ls /tmp/output/docs/tmp/
/tmp/output/docs/tmp/internet.md
$ ramalama rag --format json /tmp/internet.pdf /tmp/output
$ ls /tmp/output/docs/tmp/
/tmp/output/docs/tmp/internet.md
/tmp/output/docs/tmp/internet.json
ramalama rag --ocr README.md https://mysight.edu/document quay.io/rhatdan/myrag
```
## SEE ALSO

View File

@ -61,33 +61,6 @@ for a value and set the variable only if it is set on the host.
#### **--help**, **-h**
Show this help message and exit
#### **--image**=IMAGE
OCI container image to run with the specified AI model. RamaLama defaults to using
images based on the accelerator it discovers. For example:
`quay.io/ramalama/ramalama`. See the table below for all default images.
The default image tag is based on the minor version of the RamaLama package.
Version 0.11.0 of RamaLama pulls an image with a `:0.11` tag from the quay.io/ramalama OCI repository. The --image option overrides this default.
The default can be overridden in the ramalama.conf file or via the
RAMALAMA_IMAGE environment variable. `export RAMALAMA_IMAGE=quay.io/ramalama/aiimage:1.2` tells
RamaLama to use the `quay.io/ramalama/aiimage:1.2` image.
Accelerated images:
| Accelerator | Image |
| ------------------------| -------------------------- |
| CPU, Apple | quay.io/ramalama/ramalama |
| HIP_VISIBLE_DEVICES | quay.io/ramalama/rocm |
| CUDA_VISIBLE_DEVICES | quay.io/ramalama/cuda |
| ASAHI_VISIBLE_DEVICES | quay.io/ramalama/asahi |
| INTEL_VISIBLE_DEVICES | quay.io/ramalama/intel-gpu |
| ASCEND_VISIBLE_DEVICES | quay.io/ramalama/cann |
| MUSA_VISIBLE_DEVICES | quay.io/ramalama/musa |
#### **--keep-groups**
pass --group-add keep-groups to podman (default: False)
If the GPU device on the host system is accessible to the user via group access, this option leaks those groups into the container.
#### **--keepalive**
duration to keep a model loaded (e.g. 5m)
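
A hypothetical invocation (model name illustrative):

```
# Keep the model loaded for five minutes after the last request
ramalama run --keepalive 5m granite
```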
@ -148,9 +121,6 @@ Add *args* to the runtime (llama.cpp or vllm) invocation.
#### **--seed**=
Specify seed rather than using random seed model interaction
#### **--selinux**=*true*
Enable SELinux container separation
#### **--temp**="0.8"
Temperature of the response from the AI Model
llama.cpp explains this as:

View File

@ -93,33 +93,6 @@ show this help message and exit
#### **--host**="0.0.0.0"
IP address for llama.cpp to listen on.
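
For example (model name illustrative), restricting the server to the loopback interface:

```
# Listen on localhost only instead of all interfaces
ramalama serve --host 127.0.0.1 granite
```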
#### **--image**=IMAGE
OCI container image to run with the specified AI model. RamaLama defaults to using
images based on the accelerator it discovers. For example:
`quay.io/ramalama/ramalama`. See the table above for all default images.
The default image tag is based on the minor version of the RamaLama package.
Version 0.11.0 of RamaLama pulls an image with a `:0.11` tag from the quay.io/ramalama OCI repository. The --image option overrides this default.
The default can be overridden in the ramalama.conf file or via the
RAMALAMA_IMAGE environment variable. `export RAMALAMA_IMAGE=quay.io/ramalama/aiimage:1.2` tells
RamaLama to use the `quay.io/ramalama/aiimage:1.2` image.
Accelerated images:
| Accelerator | Image |
| ------------------------| -------------------------- |
| CPU, Apple | quay.io/ramalama/ramalama |
| HIP_VISIBLE_DEVICES | quay.io/ramalama/rocm |
| CUDA_VISIBLE_DEVICES | quay.io/ramalama/cuda |
| ASAHI_VISIBLE_DEVICES | quay.io/ramalama/asahi |
| INTEL_VISIBLE_DEVICES | quay.io/ramalama/intel-gpu |
| ASCEND_VISIBLE_DEVICES | quay.io/ramalama/cann |
| MUSA_VISIBLE_DEVICES | quay.io/ramalama/musa |
#### **--keep-groups**
pass --group-add keep-groups to podman (default: False)
If the GPU device on the host system is accessible to the user via group access, this option leaks those groups into the container.
#### **--model-draft**
A draft model is a smaller, faster model that helps accelerate the decoding
@ -191,9 +164,6 @@ Add *args* to the runtime (llama.cpp or vllm) invocation.
#### **--seed**=
Specify seed rather than using random seed model interaction
#### **--selinux**=*true*
Enable SELinux container separation
#### **--temp**="0.8"
Temperature of the response from the AI Model.
llama.cpp explains this as:

Some files were not shown because too many files have changed in this diff.