Compare commits
106 Commits
Author | SHA1 | Date |
---|---|---|
|
3f8e31a073 | |
|
08722738cf | |
|
ab7adbb430 | |
|
72504179fc | |
|
dcfeee8538 | |
|
1d903e746c | |
|
13a22f6671 | |
|
1d6aa51cd7 | |
|
50d01f177b | |
|
234134b5cc | |
|
64ca9cfb4a | |
|
e3dda75ec6 | |
|
075df4bb87 | |
|
5b46b23f2e | |
|
1fe1b20c8c | |
|
f5512c8f65 | |
|
7132d5a7f8 | |
|
2d3f8dfe28 | |
|
1d8a2e5b6c | |
|
42ac787686 | |
|
18c560fff6 | |
|
ce35ccb4c3 | |
|
b97177b408 | |
|
14c4aaca39 | |
|
bf4fd56106 | |
|
1373a8e7ba | |
|
74584d0b5e | |
|
4dea2ee02f | |
|
069e98c095 | |
|
f57b8eb284 | |
|
299d3b9b75 | |
|
683b8fb8a0 | |
|
64e22ee0aa | |
|
651fc503bd | |
|
384cad7161 | |
|
3dec0d7487 | |
|
d7763ad1c5 | |
|
b550cc97d2 | |
|
927d2f992a | |
|
f176bb3926 | |
|
f38c736d23 | |
|
fa2f485175 | |
|
f8c41b38c1 | |
|
b7323f7972 | |
|
53e38dea8f | |
|
bf68cfddd3 | |
|
8ab242f820 | |
|
eba46c8df6 | |
|
b5826c96e9 | |
|
066b659f3a | |
|
6d7effadc2 | |
|
1d2e1a1e01 | |
|
a54e2b78c4 | |
|
f4cec203ac | |
|
a616005695 | |
|
c7c0f7d2e5 | |
|
b630fcdea2 | |
|
027f88cf31 | |
|
d7ed2216dd | |
|
6d9a7eea9e | |
|
5ebc48f453 | |
|
b6cb2fdbe2 | |
|
f75599097e | |
|
80317bffbc | |
|
124afc14bb | |
|
79b23e1237 | |
|
5fd301532c | |
|
64d53180fd | |
|
c0278c1b8c | |
|
e402a456cf | |
|
3da38bc7b8 | |
|
980179d5ca | |
|
657bacb52e | |
|
09c6ccb2f0 | |
|
7f09d4bf5b | |
|
7a6c9977f7 | |
|
def6116f15 | |
|
5e39e11678 | |
|
21e42fc837 | |
|
eefafe24fd | |
|
6cbaf692aa | |
|
129ee175d6 | |
|
8e98c77f54 | |
|
e398941913 | |
|
d95bd13ca0 | |
|
496439ea02 | |
|
bf0af8034a | |
|
99f56a7684 | |
|
5b20aa4e2c | |
|
957dfd52e7 | |
|
ebb8ea93fd | |
|
7dc3d9da8e | |
|
72aa795b17 | |
|
2fea5f86f6 | |
|
412d5616d3 | |
|
3b880923c0 | |
|
b7c15ce86a | |
|
87287ae574 | |
|
eeaab7276c | |
|
8104b697dd | |
|
eacaffe03d | |
|
21957b22c2 | |
|
cd7220a3ea | |
|
ee8d7a3a04 | |
|
fe2d22c848 | |
|
cba091b265 |
|
@ -19,3 +19,5 @@ __pycache__/
|
|||
coverage.*
|
||||
htmlcov/
|
||||
.idea/
|
||||
.hypothesis/
|
||||
uv.lock
|
||||
|
|
|
@ -9,8 +9,7 @@ set -exo pipefail
|
|||
# Extract version from pyproject.toml instead of setup.py
|
||||
VERSION=$(awk -F'[""]' ' /^\s*version\s*/ {print $(NF-1)}' pyproject.toml )
|
||||
|
||||
#SPEC_FILE=rpm-next/ramalama.spec
|
||||
SPEC_FILE=rpm/python-ramalama.spec
|
||||
SPEC_FILE=rpm/ramalama.spec
|
||||
|
||||
# RPM Spec modifications
|
||||
|
||||
|
|
22
.packit.yaml
22
.packit.yaml
|
@ -7,21 +7,11 @@ upstream_tag_template: v{version}
|
|||
packages:
|
||||
ramalama-fedora:
|
||||
pkg_tool: fedpkg
|
||||
downstream_package_name: python-ramalama
|
||||
specfile_path: rpm/python-ramalama.spec
|
||||
downstream_package_name: ramalama
|
||||
specfile_path: rpm/ramalama.spec
|
||||
ramalama-centos:
|
||||
downstream_package_name: python-ramalama
|
||||
specfile_path: rpm/python-ramalama.spec
|
||||
|
||||
# Uncomment when we get this approved.
|
||||
# packages:
|
||||
# ramalama-fedora:
|
||||
# pkg_tool: fedpkg
|
||||
# downstream_package_name: ramalama
|
||||
# specfile_path: rpm-next/ramalama.spec
|
||||
# ramalama-centos:
|
||||
# downstream_package_name: ramalama
|
||||
# specfile_path: rpm-next/ramalama.spec
|
||||
downstream_package_name: ramalama
|
||||
specfile_path: rpm/ramalama.spec
|
||||
|
||||
srpm_build_deps:
|
||||
- make
|
||||
|
@ -89,7 +79,7 @@ jobs:
|
|||
dist_git_branches: &fedora_targets
|
||||
- fedora-all
|
||||
- epel10
|
||||
- epel9
|
||||
- epel10.0
|
||||
|
||||
- job: koji_build
|
||||
trigger: commit
|
||||
|
@ -102,4 +92,4 @@ jobs:
|
|||
dist_git_branches:
|
||||
- fedora-branched # rawhide updates are created automatically
|
||||
- epel10
|
||||
- epel9
|
||||
- epel10.0
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: asahi-llama-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: asahi-llama-server-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-llama-server:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/llama-server.sh
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,45 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: asahi-llama-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: asahi-llama-server-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-llama-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/llama-server.sh
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,48 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: asahi-rag
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: asahi-rag-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-rag:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- linux-d160-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
|
||||
- GPU=cpu
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,45 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: asahi-rag
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: asahi-rag-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-rag:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- linux-d160-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
|
||||
- GPU=cpu
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,48 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: asahi-whisper-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: asahi-whisper-server-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-whisper-server:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/whisper-server.sh
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,45 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: asahi-whisper-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: asahi-whisper-server-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-whisper-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/whisper-server.sh
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,42 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: asahi
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: asahi-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/asahi/Containerfile
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,39 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: asahi
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: asahi-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/asahi/Containerfile
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: bats
|
||||
|
@ -28,8 +28,8 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux/arm64
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/bats/Containerfile
|
||||
pipelineRef:
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: bats
|
||||
|
@ -25,8 +25,8 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/bats:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux/arm64
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/bats/Containerfile
|
||||
pipelineRef:
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cann-llama-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: cann-llama-server-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-llama-server:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/llama-server.sh
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,45 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cann-llama-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: cann-llama-server-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-llama-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/llama-server.sh
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,48 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cann-rag
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: cann-rag-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-rag:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- linux-d160-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
|
||||
- GPU=cpu
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,45 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cann-rag
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: cann-rag-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-rag:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- linux-d160-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
|
||||
- GPU=cpu
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,48 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cann-whisper-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: cann-whisper-server-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-whisper-server:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/whisper-server.sh
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,45 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cann-whisper-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: cann-whisper-server-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-whisper-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/whisper-server.sh
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,42 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cann
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: cann-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/cann/Containerfile
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,39 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cann
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: cann-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/cann/Containerfile
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cuda-llama-server
|
||||
|
@ -28,7 +28,8 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cuda-llama-server
|
||||
|
@ -25,7 +25,8 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-llama-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cuda-rag
|
||||
|
@ -28,7 +28,8 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- linux-d160-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cuda-rag
|
||||
|
@ -25,7 +25,8 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-rag:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- linux-d160-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cuda-whisper-server
|
||||
|
@ -28,7 +28,8 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cuda-whisper-server
|
||||
|
@ -25,7 +25,8 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda-whisper-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cuda
|
||||
|
@ -28,7 +28,8 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/cuda/Containerfile
|
||||
pipelineRef:
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: cuda
|
||||
|
@ -25,7 +25,8 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/cuda:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/cuda/Containerfile
|
||||
pipelineRef:
|
||||
|
|
|
@ -0,0 +1,66 @@
|
|||
kind: Pipeline
|
||||
apiVersion: tekton.dev/v1
|
||||
metadata:
|
||||
name: bats-integration
|
||||
spec:
|
||||
description: |
|
||||
Test the newly-built ramalama image and layered images on all supported architectures.
|
||||
params:
|
||||
- name: SNAPSHOT
|
||||
description: >-
|
||||
Information about the components included in the current snapshot under test.
|
||||
- name: platforms
|
||||
description: VM platforms on which to run test commands
|
||||
type: array
|
||||
default:
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: commands
|
||||
description: Test commands to run
|
||||
type: array
|
||||
default:
|
||||
- make bats
|
||||
- name: git-url
|
||||
description: URL of the Git repository containing pipeline and task definitions
|
||||
default: https://github.com/containers/ramalama.git
|
||||
- name: git-revision
|
||||
description: Revision of the Git repository containing pipeline and task definitions
|
||||
default: main
|
||||
tasks:
|
||||
- name: init
|
||||
params:
|
||||
- name: SNAPSHOT
|
||||
value: $(params.SNAPSHOT)
|
||||
taskRef:
|
||||
resolver: git
|
||||
params:
|
||||
- name: url
|
||||
value: $(params.git-url)
|
||||
- name: revision
|
||||
value: $(params.git-revision)
|
||||
- name: pathInRepo
|
||||
value: .tekton/integration/tasks/init-snapshot.yaml
|
||||
- name: test
|
||||
matrix:
|
||||
params:
|
||||
- name: PLATFORM
|
||||
value:
|
||||
- $(params.platforms)
|
||||
- name: cmd
|
||||
value:
|
||||
- $(params.commands)
|
||||
params:
|
||||
- name: image
|
||||
value: $(tasks.init.results.bats-image)
|
||||
- name: envs
|
||||
value:
|
||||
- RAMALAMA_IMAGE=$(tasks.init.results.ramalama-image)
|
||||
taskRef:
|
||||
resolver: git
|
||||
params:
|
||||
- name: url
|
||||
value: $(params.git-url)
|
||||
- name: revision
|
||||
value: $(params.git-revision)
|
||||
- name: pathInRepo
|
||||
value: .tekton/integration/tasks/test-vm-cmd.yaml
|
|
@ -0,0 +1,55 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: Task
|
||||
metadata:
|
||||
name: init-snapshot
|
||||
spec:
|
||||
description: Extract information from the SNAPSHOT and make it available as Tekton results
|
||||
params:
|
||||
- name: SNAPSHOT
|
||||
description: >-
|
||||
Information about the components included in the current snapshot under test.
|
||||
results:
|
||||
- name: event-type
|
||||
description: The type of event that triggered the pipeline
|
||||
- name: bats-image
|
||||
description: URI of the bats image included in the snapshot
|
||||
- name: ramalama-image
|
||||
description: URI of the ramalama image included in the snapshot
|
||||
- name: TEST_OUTPUT
|
||||
description: Test result in json format
|
||||
steps:
|
||||
- name: process
|
||||
image: registry.access.redhat.com/ubi10/ubi:latest
|
||||
env:
|
||||
- name: SNAPSHOT
|
||||
value: $(params.SNAPSHOT)
|
||||
- name: EVENT_TYPE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.labels['pac.test.appstudio.openshift.io/event-type']
|
||||
- name: RESULTS_EVENT_TYPE_PATH
|
||||
value: $(results.event-type.path)
|
||||
- name: RESULTS_BATS_IMAGE_PATH
|
||||
value: $(results.bats-image.path)
|
||||
- name: RESULTS_RAMALAMA_IMAGE_PATH
|
||||
value: $(results.ramalama-image.path)
|
||||
- name: RESULTS_TEST_OUTPUT_PATH
|
||||
value: $(results.TEST_OUTPUT.path)
|
||||
script: |
|
||||
#!/bin/bash -ex
|
||||
dnf -y install jq
|
||||
echo -n "$EVENT_TYPE" | tee "$RESULTS_EVENT_TYPE_PATH"
|
||||
echo
|
||||
component_image() {
|
||||
TAGSEP=":"
|
||||
if [ "$EVENT_TYPE" == "pull_request" ]; then
|
||||
TAGSEP+="on-pr-"
|
||||
fi
|
||||
jq -j --arg name "$1" --arg tagsep "$TAGSEP" '.components[] | select(.name == $name) | [(.containerImage | split("@")[0]), .source.git.revision] | join($tagsep)' <<< "$SNAPSHOT"
|
||||
}
|
||||
component_image bats | tee "$RESULTS_BATS_IMAGE_PATH"
|
||||
echo
|
||||
component_image ramalama | tee "$RESULTS_RAMALAMA_IMAGE_PATH"
|
||||
echo
|
||||
jq -jnc '{result: "SUCCESS", timestamp: now | todateiso8601, failures: 0, successes: 1, warnings: 0}' | tee "$RESULTS_TEST_OUTPUT_PATH"
|
||||
echo
|
|
@ -0,0 +1,118 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: Task
|
||||
metadata:
|
||||
name: test-vm-cmd
|
||||
spec:
|
||||
description: Run a command in a test VM
|
||||
params:
|
||||
- name: PLATFORM
|
||||
description: The platform of the VM to provision
|
||||
- name: image
|
||||
description: The image to use when setting up the test environment
|
||||
- name: cmd
|
||||
description: The command to run
|
||||
- name: envs
|
||||
description: List of environment variables (NAME=VALUE) to be set in the test environment
|
||||
type: array
|
||||
default: []
|
||||
results:
|
||||
- name: TEST_OUTPUT
|
||||
description: Test result in json format
|
||||
volumes:
|
||||
- name: workdir
|
||||
emptyDir: {}
|
||||
- name: ssh
|
||||
secret:
|
||||
secretName: multi-platform-ssh-$(context.taskRun.name)
|
||||
steps:
|
||||
- name: run-in-vm
|
||||
image: registry.access.redhat.com/ubi10/ubi:latest
|
||||
volumeMounts:
|
||||
- mountPath: /var/workdir
|
||||
name: workdir
|
||||
- mountPath: /ssh
|
||||
name: ssh
|
||||
workingDir: /var/workdir
|
||||
env:
|
||||
- name: TEST_IMAGE
|
||||
value: $(params.image)
|
||||
- name: TEST_CMD
|
||||
value: $(params.cmd)
|
||||
- name: RESULTS_TEST_OUTPUT_PATH
|
||||
value: $(results.TEST_OUTPUT.path)
|
||||
args:
|
||||
- $(params.envs[*])
|
||||
script: |
|
||||
#!/bin/bash -ex
|
||||
log() {
|
||||
echo "[$(date -uIns)]" $*
|
||||
}
|
||||
|
||||
log Install packages
|
||||
dnf -y install openssh-clients rsync jq
|
||||
|
||||
log Prepare connection
|
||||
|
||||
if [ -e "/ssh/error" ]; then
|
||||
log Error provisioning VM
|
||||
cat /ssh/error
|
||||
exit 1
|
||||
fi
|
||||
export SSH_HOST=$(cat /ssh/host)
|
||||
|
||||
mkdir -p ~/.ssh
|
||||
if [ "$SSH_HOST" == "localhost" ] ; then
|
||||
IS_LOCALHOST=true
|
||||
log Localhost detected, running build in cluster
|
||||
elif [ -s "/ssh/otp" ]; then
|
||||
log Fetching OTP token
|
||||
curl --cacert /ssh/otp-ca -d @/ssh/otp $(cat /ssh/otp-server) > ~/.ssh/id_rsa
|
||||
echo >> ~/.ssh/id_rsa
|
||||
chmod 0400 ~/.ssh/id_rsa
|
||||
elif [ -s "/ssh/id_rsa" ]; then
|
||||
log Copying ssh key
|
||||
cp /ssh/id_rsa ~/.ssh
|
||||
chmod 0400 ~/.ssh/id_rsa
|
||||
else
|
||||
log No authentication mechanism found
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir -p scripts
|
||||
|
||||
PODMAN_ENV=()
|
||||
while [ $# -ne 0 ]; do
|
||||
PODMAN_ENV+=("-e" "$1")
|
||||
shift
|
||||
done
|
||||
|
||||
cat > scripts/test.sh <<SCRIPTEOF
|
||||
#!/bin/bash -ex
|
||||
podman run \
|
||||
--userns=keep-id \
|
||||
--security-opt label=disable \
|
||||
--security-opt unmask=/proc/* \
|
||||
--device /dev/net/tun \
|
||||
--device /dev/fuse \
|
||||
${PODMAN_ENV[*]} \
|
||||
$TEST_IMAGE $TEST_CMD
|
||||
SCRIPTEOF
|
||||
chmod +x scripts/test.sh
|
||||
|
||||
if ! [[ $IS_LOCALHOST ]]; then
|
||||
log VM exec
|
||||
export BUILD_DIR=$(cat /ssh/user-dir)
|
||||
export SSH_ARGS="-o StrictHostKeyChecking=no -o ServerAliveInterval=60 -o ServerAliveCountMax=10"
|
||||
# ssh once before rsync to retrieve the host key
|
||||
ssh $SSH_ARGS "$SSH_HOST" "uname -a"
|
||||
rsync -ra scripts "$SSH_HOST:$BUILD_DIR"
|
||||
ssh $SSH_ARGS "$SSH_HOST" "$BUILD_DIR/scripts/test.sh"
|
||||
log End VM exec
|
||||
else
|
||||
log Local exec
|
||||
scripts/test.sh
|
||||
log End local exec
|
||||
fi
|
||||
|
||||
jq -jnc '{result: "SUCCESS", timestamp: now | todateiso8601, failures: 0, successes: 1, warnings: 0}' | tee "$RESULTS_TEST_OUTPUT_PATH"
|
||||
echo
|
|
@ -0,0 +1,47 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: intel-gpu-llama-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: intel-gpu-llama-server-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-llama-server:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/llama-server.sh
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,44 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: intel-gpu-llama-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: intel-gpu-llama-server-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-llama-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/llama-server.sh
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,47 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: intel-gpu-rag
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: intel-gpu-rag-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-rag:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
|
||||
- GPU=cpu
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,44 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: intel-gpu-rag
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: intel-gpu-rag-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-rag:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
|
||||
- GPU=cpu
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,47 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: intel-gpu-whisper-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: intel-gpu-whisper-server-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-whisper-server:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/whisper-server.sh
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,44 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: intel-gpu-whisper-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: intel-gpu-whisper-server-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-whisper-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/whisper-server.sh
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,41 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: intel-gpu
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: intel-gpu-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/intel-gpu/Containerfile
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,38 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: intel-gpu
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: intel-gpu-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/intel-gpu/Containerfile
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,42 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: llama-stack
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: llama-stack-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/llama-stack:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/llama-stack/Containerfile
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,39 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: llama-stack
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: llama-stack-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/llama-stack:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/llama-stack/Containerfile
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,47 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: musa-llama-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: musa-llama-server-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-llama-server:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/llama-server.sh
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,44 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: musa-llama-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: musa-llama-server-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-llama-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/llama-server.sh
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,47 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: musa-rag
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: musa-rag-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-rag:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
|
||||
- GPU=musa
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,44 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: musa-rag
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: musa-rag-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-rag:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
|
||||
- GPU=musa
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,47 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: musa-whisper-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: musa-whisper-server-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-whisper-server:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/whisper-server.sh
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,44 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: musa-whisper-server
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: musa-whisper-server-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-whisper-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
|
||||
- name: build-args
|
||||
value:
|
||||
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
|
||||
- ENTRYPOINT=/usr/bin/whisper-server.sh
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,41 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: musa
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: musa-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/musa/Containerfile
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,38 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: musa
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: musa-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/musa/Containerfile
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,41 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: openvino
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: openvino-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/openvino:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/openvino/Containerfile
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,38 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: openvino
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: openvino-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/openvino:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/openvino/Containerfile
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -20,13 +20,11 @@ spec:
|
|||
name: output-image
|
||||
type: string
|
||||
- default: .
|
||||
description: Path to the source code of an application's component from where
|
||||
to build image.
|
||||
description: Path to the source code of an application's component from where to build image.
|
||||
name: path-context
|
||||
type: string
|
||||
- default: Dockerfile
|
||||
description: Path to the Dockerfile inside the context specified by parameter
|
||||
path-context
|
||||
description: Path to the Dockerfile inside the context specified by parameter path-context
|
||||
name: dockerfile
|
||||
type: string
|
||||
- default: "false"
|
||||
|
@ -46,8 +44,7 @@ spec:
|
|||
name: prefetch-input
|
||||
type: string
|
||||
- default: ""
|
||||
description: Image tag expiration time, time values could be something like
|
||||
1h, 2d, 3w for hours, days, and weeks, respectively.
|
||||
description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.
|
||||
name: image-expires-after
|
||||
- default: "false"
|
||||
description: Build a source image.
|
||||
|
@ -66,14 +63,12 @@ spec:
|
|||
name: build-args-file
|
||||
type: string
|
||||
- default: "false"
|
||||
description: Whether to enable privileged mode, should be used only with remote
|
||||
VMs
|
||||
description: Whether to enable privileged mode, should be used only with remote VMs
|
||||
name: privileged-nested
|
||||
type: string
|
||||
- default:
|
||||
- linux/x86_64
|
||||
description: List of platforms to build the container images on. The available
|
||||
set of values is determined by the configuration of the multi-platform-controller.
|
||||
- linux-c4xlarge/amd64
|
||||
description: List of platforms to build the container images on. The available set of values is determined by the configuration of the multi-platform-controller.
|
||||
name: build-platforms
|
||||
type: array
|
||||
- default: ""
|
||||
|
@ -118,7 +113,7 @@ spec:
|
|||
- name: name
|
||||
value: init
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:66e90d31e1386bf516fb548cd3e3f0082b5d0234b8b90dbf9e0d4684b70dbe1a
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:1d8221c84f91b923d89de50bf16481ea729e3b68ea04a9a7cbe8485ddbb27ee6
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -168,7 +163,7 @@ spec:
|
|||
- name: name
|
||||
value: prefetch-dependencies-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:5e15408f997557153b13d492aeccb51c01923bfbe4fbdf6f1e8695ce1b82f826
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:092491ac0f6e1009d10c58a1319d1029371bf637cc1293cceba53c6da5314ed1
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -230,7 +225,7 @@ spec:
|
|||
- name: name
|
||||
value: buildah-remote-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:ae87472f60dbbf71e4980cd478c92740c145fd9e44acbb9b164a21f1bcd61aa3
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:9e866d4d0489a6ab84ae263db416c9f86d2d6117ef4444f495a0e97388ae3ac0
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -259,7 +254,7 @@ spec:
|
|||
- name: name
|
||||
value: build-image-index
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:846dc9975914f31380ec2712fdbac9df3b06c00a9cc7df678315a7f97145efc2
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:3499772af90aad0d3935629be6d37dd9292195fb629e6f43ec839c7f545a0faa
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -290,8 +285,6 @@ spec:
|
|||
params:
|
||||
- name: image
|
||||
value: $(params.test-image)@$(tasks.wait-for-test-image.results.digest)
|
||||
- name: source-artifact
|
||||
value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
|
||||
- name: envs
|
||||
value:
|
||||
- $(params.test-envs[*])
|
||||
|
@ -307,11 +300,13 @@ spec:
|
|||
- name: build-source-image
|
||||
params:
|
||||
- name: BINARY_IMAGE
|
||||
value: $(params.output-image)
|
||||
value: $(tasks.build-image-index.results.IMAGE_URL)
|
||||
- name: SOURCE_ARTIFACT
|
||||
value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
|
||||
- name: CACHI2_ARTIFACT
|
||||
value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT)
|
||||
- name: BINARY_IMAGE_DIGEST
|
||||
value: $(tasks.build-image-index.results.IMAGE_DIGEST)
|
||||
runAfter:
|
||||
- build-image-index
|
||||
taskRef:
|
||||
|
@ -319,7 +314,7 @@ spec:
|
|||
- name: name
|
||||
value: source-build-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.2@sha256:b424894fc8e806c12658daa565b835fd2d66e7f7608afc47529eb7b410f030d7
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:b1eb49583b41872b27356fee20d5f0eb6ff7f5cdeacde7ffb39655f031104728
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -367,7 +362,7 @@ spec:
|
|||
- name: name
|
||||
value: clair-scan
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:d354939892f3a904223ec080cc3771bd11931085a5d202323ea491ee8e8c5e43
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:417f44117f8d87a4a62fea6589b5746612ac61640b454dbd88f74892380411f2
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -387,7 +382,7 @@ spec:
|
|||
- name: name
|
||||
value: ecosystem-cert-preflight-checks
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:b550ff4f0b634512ce5200074be7afd7a5a6c05b783620c626e2a3035cd56448
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:f99d2bdb02f13223d494077a2cde31418d09369f33c02134a8e7e5fad2f61eda
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -413,7 +408,7 @@ spec:
|
|||
- name: name
|
||||
value: sast-snyk-check-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:e61f541189b30d14292ef8df36ccaf13f7feb2378fed5f74cb6293b3e79eb687
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:fe5e5ba3a72632cd505910de2eacd62c9d11ed570c325173188f8d568ac60771
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -435,7 +430,7 @@ spec:
|
|||
- name: name
|
||||
value: clamav-scan
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:9cab95ac9e833d77a63c079893258b73b8d5a298d93aaf9bdd6722471bc2f338
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:7749146f7e4fe530846f1b15c9366178ec9f44776ef1922a60d3e7e2b8c6426b
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -480,7 +475,7 @@ spec:
|
|||
- name: name
|
||||
value: sast-coverity-check-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f81ade665c725616b918356c8c2fb2d4ed972e822a1a3181933cd0ada728a231
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f9ca942208dc2e63b479384ccc56a611cc793397ecc837637b5b9f89c2ecbefe
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -527,7 +522,7 @@ spec:
|
|||
- name: name
|
||||
value: sast-shell-check-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:808bcaf75271db6a999f53fdefb973a385add94a277d37fbd3df68f8ac7dfaa3
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -553,7 +548,7 @@ spec:
|
|||
- name: name
|
||||
value: sast-unicode-check-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.2@sha256:24ad71fde435fc25abba2c4c550beb088b1530f738d3c377e2f635b5f320d57b
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -598,7 +593,7 @@ spec:
|
|||
- name: name
|
||||
value: push-dockerfile-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:5d8013b6a27bbc5e4ff261144616268f28417ed0950d583ef36349fcd59d3d3d
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:8c75c4a747e635e5f3e12266a3bb6e5d3132bf54e37eaa53d505f89897dd8eca
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -634,7 +629,7 @@ spec:
|
|||
- name: name
|
||||
value: show-sbom
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:1b1df4da95966d08ac6a5b8198710e09e68b5c2cdc707c37d9d19769e65884b2
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:86c069cac0a669797e8049faa8aa4088e70ff7fcd579d5bdc37626a9e0488a05
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
|
|
@ -20,13 +20,11 @@ spec:
|
|||
name: output-image
|
||||
type: string
|
||||
- default: .
|
||||
description: Path to the source code of an application's component from where
|
||||
to build image.
|
||||
description: Path to the source code of an application's component from where to build image.
|
||||
name: path-context
|
||||
type: string
|
||||
- default: Dockerfile
|
||||
description: Path to the Dockerfile inside the context specified by parameter
|
||||
path-context
|
||||
description: Path to the Dockerfile inside the context specified by parameter path-context
|
||||
name: dockerfile
|
||||
type: string
|
||||
- default: "false"
|
||||
|
@ -46,8 +44,7 @@ spec:
|
|||
name: prefetch-input
|
||||
type: string
|
||||
- default: ""
|
||||
description: Image tag expiration time, time values could be something like
|
||||
1h, 2d, 3w for hours, days, and weeks, respectively.
|
||||
description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.
|
||||
name: image-expires-after
|
||||
- default: "false"
|
||||
description: Build a source image.
|
||||
|
@ -66,14 +63,12 @@ spec:
|
|||
name: build-args-file
|
||||
type: string
|
||||
- default: "false"
|
||||
description: Whether to enable privileged mode, should be used only with remote
|
||||
VMs
|
||||
description: Whether to enable privileged mode, should be used only with remote VMs
|
||||
name: privileged-nested
|
||||
type: string
|
||||
- default:
|
||||
- linux/x86_64
|
||||
description: List of platforms to build the container images on. The available
|
||||
set of values is determined by the configuration of the multi-platform-controller.
|
||||
- linux-c4xlarge/amd64
|
||||
description: List of platforms to build the container images on. The available set of values is determined by the configuration of the multi-platform-controller.
|
||||
name: build-platforms
|
||||
type: array
|
||||
- default: ""
|
||||
|
@ -118,7 +113,7 @@ spec:
|
|||
- name: name
|
||||
value: init
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:66e90d31e1386bf516fb548cd3e3f0082b5d0234b8b90dbf9e0d4684b70dbe1a
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:1d8221c84f91b923d89de50bf16481ea729e3b68ea04a9a7cbe8485ddbb27ee6
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -168,7 +163,7 @@ spec:
|
|||
- name: name
|
||||
value: prefetch-dependencies-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:5e15408f997557153b13d492aeccb51c01923bfbe4fbdf6f1e8695ce1b82f826
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:092491ac0f6e1009d10c58a1319d1029371bf637cc1293cceba53c6da5314ed1
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -230,7 +225,7 @@ spec:
|
|||
- name: name
|
||||
value: buildah-remote-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:ae87472f60dbbf71e4980cd478c92740c145fd9e44acbb9b164a21f1bcd61aa3
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:9e866d4d0489a6ab84ae263db416c9f86d2d6117ef4444f495a0e97388ae3ac0
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -259,7 +254,7 @@ spec:
|
|||
- name: name
|
||||
value: build-image-index
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:846dc9975914f31380ec2712fdbac9df3b06c00a9cc7df678315a7f97145efc2
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:3499772af90aad0d3935629be6d37dd9292195fb629e6f43ec839c7f545a0faa
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -290,8 +285,6 @@ spec:
|
|||
params:
|
||||
- name: image
|
||||
value: $(params.test-image)@$(tasks.wait-for-test-image.results.digest)
|
||||
- name: source-artifact
|
||||
value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
|
||||
- name: envs
|
||||
value:
|
||||
- $(params.test-envs[*])
|
||||
|
@ -307,11 +300,13 @@ spec:
|
|||
- name: build-source-image
|
||||
params:
|
||||
- name: BINARY_IMAGE
|
||||
value: $(params.output-image)
|
||||
value: $(tasks.build-image-index.results.IMAGE_URL)
|
||||
- name: SOURCE_ARTIFACT
|
||||
value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
|
||||
- name: CACHI2_ARTIFACT
|
||||
value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT)
|
||||
- name: BINARY_IMAGE_DIGEST
|
||||
value: $(tasks.build-image-index.results.IMAGE_DIGEST)
|
||||
runAfter:
|
||||
- build-image-index
|
||||
taskRef:
|
||||
|
@ -319,7 +314,7 @@ spec:
|
|||
- name: name
|
||||
value: source-build-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.2@sha256:b424894fc8e806c12658daa565b835fd2d66e7f7608afc47529eb7b410f030d7
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:b1eb49583b41872b27356fee20d5f0eb6ff7f5cdeacde7ffb39655f031104728
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -367,7 +362,7 @@ spec:
|
|||
- name: name
|
||||
value: clair-scan
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:d354939892f3a904223ec080cc3771bd11931085a5d202323ea491ee8e8c5e43
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:417f44117f8d87a4a62fea6589b5746612ac61640b454dbd88f74892380411f2
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -387,7 +382,7 @@ spec:
|
|||
- name: name
|
||||
value: ecosystem-cert-preflight-checks
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:b550ff4f0b634512ce5200074be7afd7a5a6c05b783620c626e2a3035cd56448
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:f99d2bdb02f13223d494077a2cde31418d09369f33c02134a8e7e5fad2f61eda
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -413,7 +408,7 @@ spec:
|
|||
- name: name
|
||||
value: sast-snyk-check-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:e61f541189b30d14292ef8df36ccaf13f7feb2378fed5f74cb6293b3e79eb687
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:fe5e5ba3a72632cd505910de2eacd62c9d11ed570c325173188f8d568ac60771
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -435,7 +430,7 @@ spec:
|
|||
- name: name
|
||||
value: clamav-scan
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:9cab95ac9e833d77a63c079893258b73b8d5a298d93aaf9bdd6722471bc2f338
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:7749146f7e4fe530846f1b15c9366178ec9f44776ef1922a60d3e7e2b8c6426b
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -480,7 +475,7 @@ spec:
|
|||
- name: name
|
||||
value: sast-coverity-check-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f81ade665c725616b918356c8c2fb2d4ed972e822a1a3181933cd0ada728a231
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f9ca942208dc2e63b479384ccc56a611cc793397ecc837637b5b9f89c2ecbefe
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -527,7 +522,7 @@ spec:
|
|||
- name: name
|
||||
value: sast-shell-check-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:808bcaf75271db6a999f53fdefb973a385add94a277d37fbd3df68f8ac7dfaa3
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -553,7 +548,7 @@ spec:
|
|||
- name: name
|
||||
value: sast-unicode-check-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.2@sha256:24ad71fde435fc25abba2c4c550beb088b1530f738d3c377e2f635b5f320d57b
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -598,7 +593,7 @@ spec:
|
|||
- name: name
|
||||
value: push-dockerfile-oci-ta
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:5d8013b6a27bbc5e4ff261144616268f28417ed0950d583ef36349fcd59d3d3d
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:8c75c4a747e635e5f3e12266a3bb6e5d3132bf54e37eaa53d505f89897dd8eca
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
@ -634,7 +629,7 @@ spec:
|
|||
- name: name
|
||||
value: show-sbom
|
||||
- name: bundle
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:1b1df4da95966d08ac6a5b8198710e09e68b5c2cdc707c37d9d19769e65884b2
|
||||
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:86c069cac0a669797e8049faa8aa4088e70ff7fcd579d5bdc37626a9e0488a05
|
||||
- name: kind
|
||||
value: task
|
||||
resolver: bundles
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: ramalama-cli
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: ramalama-cli-on-pull-request
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-cli:on-pr-{{revision}}
|
||||
- name: image-expires-after
|
||||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/ramalama-cli/Containerfile
|
||||
pipelineRef:
|
||||
name: pull-request-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -0,0 +1,39 @@
|
|||
apiVersion: tekton.dev/v1
|
||||
kind: PipelineRun
|
||||
metadata:
|
||||
annotations:
|
||||
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
|
||||
build.appstudio.redhat.com/commit_sha: '{{revision}}'
|
||||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: ramalama-cli
|
||||
pipelines.appstudio.openshift.io/type: build
|
||||
name: ramalama-cli-on-push
|
||||
namespace: ramalama-tenant
|
||||
spec:
|
||||
params:
|
||||
- name: git-url
|
||||
value: '{{source_url}}'
|
||||
- name: revision
|
||||
value: '{{revision}}'
|
||||
- name: output-image
|
||||
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-cli:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/ramalama-cli/Containerfile
|
||||
pipelineRef:
|
||||
name: push-pipeline
|
||||
timeouts:
|
||||
pipeline: 6h
|
||||
workspaces:
|
||||
- name: git-auth
|
||||
secret:
|
||||
secretName: '{{ git_auth_secret }}'
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: ramalama-llama-server
|
||||
|
@ -28,7 +28,8 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: ramalama-llama-server
|
||||
|
@ -25,7 +25,8 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-llama-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: ramalama-rag
|
||||
|
@ -28,7 +28,8 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- linux-d160-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: ramalama-rag
|
||||
|
@ -25,7 +25,8 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-rag:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- linux-d160-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: ramalama-whisper-server
|
||||
|
@ -28,7 +28,8 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: ramalama-whisper-server
|
||||
|
@ -25,7 +25,8 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-whisper-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- linux-m2xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: ramalama
|
||||
|
@ -28,8 +28,8 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux/arm64
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/ramalama/Containerfile
|
||||
- name: test-image
|
||||
|
@ -40,7 +40,6 @@ spec:
|
|||
- name: test-commands
|
||||
value:
|
||||
- make validate
|
||||
- make bats-nocontainer
|
||||
- make unit-tests
|
||||
- make cov-tests
|
||||
pipelineRef:
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: ramalama
|
||||
|
@ -25,8 +25,8 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux/arm64
|
||||
- linux-c4xlarge/amd64
|
||||
- linux-c4xlarge/arm64
|
||||
- name: dockerfile
|
||||
value: container-images/ramalama/Containerfile
|
||||
- name: test-image
|
||||
|
@ -37,7 +37,6 @@ spec:
|
|||
- name: test-commands
|
||||
value:
|
||||
- make validate
|
||||
- make bats-nocontainer
|
||||
- make unit-tests
|
||||
- make cov-tests
|
||||
pipelineRef:
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-llama-server
|
||||
|
@ -28,7 +28,7 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-llama-server
|
||||
|
@ -25,7 +25,7 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-llama-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-rag
|
||||
|
@ -28,7 +28,7 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-rag
|
||||
|
@ -25,7 +25,7 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-rag:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-ubi-llama-server
|
||||
|
@ -28,7 +28,7 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-ubi-llama-server
|
||||
|
@ -25,7 +25,7 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-llama-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-ubi-rag
|
||||
|
@ -28,7 +28,7 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-ubi-rag
|
||||
|
@ -25,7 +25,7 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-rag:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-d160-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.rag
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-ubi-whisper-server
|
||||
|
@ -28,7 +28,7 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-ubi-whisper-server
|
||||
|
@ -25,7 +25,7 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi-whisper-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-ubi
|
||||
|
@ -28,7 +28,7 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-fast/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/rocm-ubi/Containerfile
|
||||
pipelineRef:
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-ubi
|
||||
|
@ -25,7 +25,7 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-ubi:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-fast/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/rocm-ubi/Containerfile
|
||||
pipelineRef:
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-whisper-server
|
||||
|
@ -28,7 +28,7 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm-whisper-server
|
||||
|
@ -25,7 +25,7 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm-whisper-server:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-m2xlarge/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/common/Containerfile.entrypoint
|
||||
- name: parent-image
|
||||
|
|
|
@ -8,8 +8,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "true"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm
|
||||
|
@ -28,7 +28,7 @@ spec:
|
|||
value: 5d
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-fast/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/rocm/Containerfile
|
||||
pipelineRef:
|
||||
|
|
|
@ -7,8 +7,8 @@ metadata:
|
|||
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
|
||||
pipelinesascode.tekton.dev/cancel-in-progress: "false"
|
||||
pipelinesascode.tekton.dev/max-keep-runs: "3"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
|
||||
== "main"
|
||||
pipelinesascode.tekton.dev/on-cel-expression: >-
|
||||
event == "push" && target_branch == "main"
|
||||
labels:
|
||||
appstudio.openshift.io/application: ramalama
|
||||
appstudio.openshift.io/component: rocm
|
||||
|
@ -25,7 +25,7 @@ spec:
|
|||
value: quay.io/redhat-user-workloads/ramalama-tenant/rocm:{{revision}}
|
||||
- name: build-platforms
|
||||
value:
|
||||
- linux/x86_64
|
||||
- linux-fast/amd64
|
||||
- name: dockerfile
|
||||
value: container-images/rocm/Containerfile
|
||||
pipelineRef:
|
||||
|
|
|
@ -7,58 +7,29 @@ spec:
|
|||
params:
|
||||
- name: image
|
||||
description: The image to use when setting up the test environment.
|
||||
- name: source-artifact
|
||||
description: The Trusted Artifact URI pointing to the artifact with the application source code.
|
||||
- name: cmd
|
||||
description: The command to run.
|
||||
- name: envs
|
||||
description: List of environment variables (NAME=VALUE) to be set in the test environment.
|
||||
type: array
|
||||
default: []
|
||||
volumes:
|
||||
- name: workdir
|
||||
emptyDir: {}
|
||||
stepTemplate:
|
||||
volumeMounts:
|
||||
- mountPath: /var/workdir
|
||||
name: workdir
|
||||
steps:
|
||||
- name: run
|
||||
image: $(params.image)
|
||||
computeResources:
|
||||
limits:
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: "1"
|
||||
memory: 1Gi
|
||||
steps:
|
||||
- name: use-trusted-artifact
|
||||
image: quay.io/konflux-ci/build-trusted-artifacts:latest@sha256:4689f88dd253bd1feebf57f1a76a5a751880f739000719cd662bbdc76990a7fd
|
||||
args:
|
||||
- use
|
||||
- $(params.source-artifact)=/var/workdir/source
|
||||
- name: set-env
|
||||
image: $(params.image)
|
||||
workingDir: /var/workdir/source
|
||||
args:
|
||||
- $(params.envs[*])
|
||||
script: |
|
||||
#!/bin/bash -e
|
||||
rm -f .bashenv
|
||||
while [ $# -ne 0 ]; do
|
||||
echo "$1" >> .bashenv
|
||||
shift
|
||||
done
|
||||
- name: run
|
||||
image: $(params.image)
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- SETFCAP
|
||||
workingDir: /var/workdir/source
|
||||
env:
|
||||
- name: BASH_ENV
|
||||
value: .bashenv
|
||||
command:
|
||||
- /usr/bin/entrypoint.sh
|
||||
args:
|
||||
- $(params.envs[*])
|
||||
- /bin/bash
|
||||
- -ex
|
||||
- -c
|
||||
|
|
2
Makefile
2
Makefile
|
@ -168,7 +168,7 @@ bats-image:
|
|||
podman inspect $(BATS_IMAGE) &> /dev/null || \
|
||||
podman build -t $(BATS_IMAGE) -f container-images/bats/Containerfile .
|
||||
|
||||
bats-in-container: extra-opts = --security-opt unmask=/proc/* --device /dev/net/tun
|
||||
bats-in-container: extra-opts = --security-opt unmask=/proc/* --device /dev/net/tun --device /dev/fuse
|
||||
|
||||
%-in-container: bats-image
|
||||
podman run -it --rm \
|
||||
|
|
53
README.md
53
README.md
|
@ -2,14 +2,12 @@
|
|||
<img src="https://github.com/user-attachments/assets/1a338ecf-dc84-4495-8c70-16882955da47" width=50%>
|
||||
</p>
|
||||
|
||||
[RamaLama](https://ramalama.ai) is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers.
|
||||
[RamaLama](https://ramalama.ai) strives to make working with AI simple, straightforward, and familiar by using OCI containers.
|
||||
<br>
|
||||
<br>
|
||||
|
||||
## Description
|
||||
RamaLama strives to make working with AI simple, straightforward, and familiar by using OCI containers.
|
||||
|
||||
RamaLama is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers. Using a container engine like Podman, engineers can use container-centric development patterns and benefits to extend to AI use cases.
|
||||
RamaLama is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers. It allows engineers to use container-centric development patterns and benefits to extend to AI use cases.
|
||||
|
||||
RamaLama eliminates the need to configure the host system by instead pulling a container image specific to the GPUs discovered on the host system, and allowing you to work with various models and platforms.
|
||||
|
||||
|
@ -23,6 +21,25 @@ RamaLama eliminates the need to configure the host system by instead pulling a c
|
|||
- Interact with models via REST API or as a chatbot.
|
||||
<br>
|
||||
|
||||
## Install
|
||||
### Install on Fedora
|
||||
RamaLama is available in [Fedora](https://fedoraproject.org/) and later. To install it, run:
|
||||
```
|
||||
sudo dnf install python3-ramalama
|
||||
```
|
||||
|
||||
### Install via PyPI
|
||||
RamaLama is available via PyPI at [https://pypi.org/project/ramalama](https://pypi.org/project/ramalama)
|
||||
```
|
||||
pip install ramalama
|
||||
```
|
||||
|
||||
### Install script (Linux and macOS)
|
||||
Install RamaLama by running:
|
||||
```
|
||||
curl -fsSL https://ramalama.ai/install.sh | bash
|
||||
```
|
||||
|
||||
## Accelerated images
|
||||
|
||||
| Accelerator | Image |
|
||||
|
@ -103,30 +120,6 @@ pip install mlx-lm
|
|||
ramalama --runtime=mlx serve hf://mlx-community/Unsloth-Phi-4-4bit
|
||||
```
|
||||
|
||||
## Install
|
||||
### Install on Fedora
|
||||
RamaLama is available in [Fedora 40](https://fedoraproject.org/) and later. To install it, run:
|
||||
```
|
||||
sudo dnf install python3-ramalama
|
||||
```
|
||||
|
||||
### Install via PyPi
|
||||
RamaLama is available via PyPi at [https://pypi.org/project/ramalama](https://pypi.org/project/ramalama)
|
||||
```
|
||||
pip install ramalama
|
||||
```
|
||||
|
||||
### Install via Homebrew
|
||||
```
|
||||
brew install ramalama
|
||||
```
|
||||
|
||||
### Install script (Linux and macOS)
|
||||
Install RamaLama by running:
|
||||
```
|
||||
curl -fsSL https://ramalama.ai/install.sh | bash
|
||||
```
|
||||
|
||||
#### Default Container Engine
|
||||
When both Podman and Docker are installed, RamaLama defaults to Podman. The `RAMALAMA_CONTAINER_ENGINE=docker` environment variable can override this behaviour. When neither are installed, RamaLama will attempt to run the model with software on the local system.
|
||||
<br>
|
||||
|
@ -229,7 +222,7 @@ $ cat /usr/share/ramalama/shortnames.conf
|
|||
<br>
|
||||
|
||||
```
|
||||
$ ramalama bench granite-moe3
|
||||
$ ramalama bench granite3-moe
|
||||
```
|
||||
</details>
|
||||
|
||||
|
@ -836,7 +829,7 @@ $ cat /usr/share/ramalama/shortnames.conf
|
|||
|
||||
Perplexity measures how well the model can predict the next token with lower values being better
|
||||
```
|
||||
$ ramalama perplexity granite-moe3
|
||||
$ ramalama perplexity granite3-moe
|
||||
```
|
||||
</details>
|
||||
|
||||
|
|
|
@ -1,12 +1,11 @@
|
|||
FROM quay.io/fedora/fedora:42
|
||||
|
||||
ENV HOME=/tmp \
|
||||
XDG_RUNTIME_DIR=/tmp \
|
||||
STORAGE_DRIVER=vfs
|
||||
XDG_RUNTIME_DIR=/tmp
|
||||
WORKDIR /src
|
||||
ENTRYPOINT ["/usr/bin/entrypoint.sh"]
|
||||
|
||||
RUN dnf -y install make bats jq iproute podman openssl httpd-tools \
|
||||
RUN dnf -y install make bats jq iproute podman openssl httpd-tools diffutils \
|
||||
python3-huggingface-hub \
|
||||
$([ $(uname -m) == "x86_64" ] && echo ollama) \
|
||||
# for validate and unit-tests
|
||||
|
@ -26,4 +25,6 @@ RUN git clone --depth=1 https://github.com/ggml-org/llama.cpp && \
|
|||
|
||||
COPY container-images/bats/entrypoint.sh /usr/bin
|
||||
COPY container-images/bats/containers.conf /etc/containers
|
||||
COPY . /src
|
||||
RUN chmod -R a+rw /src
|
||||
RUN chmod a+rw /etc/subuid /etc/subgid
|
||||
|
|
|
@ -3,6 +3,16 @@
|
|||
echo "$(id -un):10000:2000" > /etc/subuid
|
||||
echo "$(id -un):10000:2000" > /etc/subgid
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
if [[ "$1" =~ = ]]; then
|
||||
# shellcheck disable=SC2163
|
||||
export "$1"
|
||||
shift
|
||||
else
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ $# -gt 0 ]; then
|
||||
exec "$@"
|
||||
else
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
FROM quay.io/fedora/fedora:42
|
||||
|
||||
ARG RAMALAMA_STACK_VERSION=0.2.4
|
||||
ARG RAMALAMA_STACK_VERSION=0.2.5
|
||||
|
||||
# hack that should be removed when the following bug is addressed
|
||||
# https://github.com/containers/ramalama-stack/issues/53
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
FROM registry.access.redhat.com/ubi9/ubi:9.6-1751445649
|
||||
FROM registry.access.redhat.com/ubi9/ubi:9.6-1752625787
|
||||
|
||||
# Install Python development dependencies
|
||||
RUN dnf install -y python3-devel wget compat-openssl11 python3-jinja2 python3-markupsafe
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
FROM quay.io/ramalama/ramalama
|
||||
|
||||
ENV PATH="/root/.local/bin:$PATH"
|
||||
ENV VIRTUAL_ENV="/opt/venv"
|
||||
ENV UV_PYTHON_INSTALL_DIR="/opt/uv/python"
|
||||
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||
|
||||
ENV UV_HTTP_TIMEOUT=500
|
||||
|
||||
ENV UV_INDEX_STRATEGY="unsafe-best-match"
|
||||
ENV UV_LINK_MODE="copy"
|
||||
|
||||
COPY . /src/ramalama
|
||||
WORKDIR /src/ramalama
|
||||
RUN container-images/scripts/build-vllm.sh
|
||||
WORKDIR /
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
FROM registry.access.redhat.com/ubi9/ubi:9.6-1751445649
|
||||
FROM registry.access.redhat.com/ubi9/ubi:9.6-1752625787
|
||||
|
||||
COPY container-images/rocm-ubi/amdgpu.repo /etc/yum.repos.d/
|
||||
COPY container-images/rocm-ubi/rocm.repo /etc/yum.repos.d/
|
||||
|
|
|
@ -0,0 +1,90 @@
|
|||
#!/bin/bash
|
||||
|
||||
available() {
|
||||
command -v "$1" >/dev/null
|
||||
}
|
||||
|
||||
install_deps() {
|
||||
set -eux -o pipefail
|
||||
|
||||
if available dnf; then
|
||||
dnf install -y git curl wget ca-certificates gcc gcc-c++ \
|
||||
gperftools-libs numactl-devel ffmpeg libSM libXext mesa-libGL jq lsof \
|
||||
vim numactl
|
||||
dnf -y clean all
|
||||
rm -rf /var/cache/*dnf*
|
||||
elif available apt-get; then
|
||||
apt-get update -y
|
||||
apt-get install -y --no-install-recommends git curl wget ca-certificates \
|
||||
gcc g++ libtcmalloc-minimal4 libnuma-dev ffmpeg libsm6 libxext6 libgl1 \
|
||||
jq lsof vim numactl
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
fi
|
||||
|
||||
curl -LsSf https://astral.sh/uv/0.7.21/install.sh | bash
|
||||
}
|
||||
|
||||
preload_and_ulimit() {
|
||||
local ld_preload_file="libtcmalloc_minimal.so.4"
|
||||
local ld_preload_file_1="/usr/lib/$arch-linux-gnu/$ld_preload_file"
|
||||
local ld_preload_file_2="/usr/lib64/$ld_preload_file"
|
||||
if [ -e "$ld_preload_file_1" ]; then
|
||||
ld_preload_file="$ld_preload_file_1"
|
||||
elif [ -e "$ld_preload_file_2" ]; then
|
||||
ld_preload_file="$ld_preload_file_2"
|
||||
fi
|
||||
|
||||
if [ -e "$ld_preload_file" ]; then
|
||||
echo "LD_PRELOAD=$ld_preload_file" >> /etc/environment
|
||||
fi
|
||||
|
||||
echo 'ulimit -c 0' >> ~/.bashrc
|
||||
}
|
||||
|
||||
pip_install() {
|
||||
local url="https://download.pytorch.org/whl/cpu"
|
||||
uv pip install -v -r "$1" --extra-index-url $url
|
||||
}
|
||||
|
||||
git_clone_specific_commit() {
|
||||
local repo="${vllm_url##*/}"
|
||||
git init "$repo"
|
||||
cd "$repo"
|
||||
git remote add origin "$vllm_url"
|
||||
git fetch --depth 1 origin $commit
|
||||
git reset --hard $commit
|
||||
}
|
||||
|
||||
main() {
|
||||
set -eux -o pipefail
|
||||
|
||||
install_deps
|
||||
|
||||
local arch
|
||||
arch=$(uname -m)
|
||||
preload_and_ulimit
|
||||
|
||||
uv venv --python 3.12 --seed "$VIRTUAL_ENV"
|
||||
uv pip install --upgrade pip
|
||||
|
||||
local vllm_url="https://github.com/vllm-project/vllm"
|
||||
local commit="ac9fb732a5c0b8e671f8c91be8b40148282bb14a"
|
||||
git_clone_specific_commit
|
||||
if [ "$arch" == "x86_64" ]; then
|
||||
export VLLM_CPU_DISABLE_AVX512="0"
|
||||
export VLLM_CPU_AVX512BF16="0"
|
||||
export VLLM_CPU_AVX512VNNI="0"
|
||||
elif [ "$arch" == "aarch64" ]; then
|
||||
export VLLM_CPU_DISABLE_AVX512="true"
|
||||
fi
|
||||
|
||||
pip_install requirements/cpu-build.txt
|
||||
pip_install requirements/cpu.txt
|
||||
|
||||
MAX_JOBS=2 VLLM_TARGET_DEVICE=cpu python3 setup.py install
|
||||
cd -
|
||||
rm -rf vllm /root/.cache
|
||||
}
|
||||
|
||||
main "$@"
|
||||
|
|
@ -99,10 +99,10 @@ is_rhel_based() { # doesn't include openEuler
|
|||
dnf_install_mesa() {
|
||||
if [ "${ID}" = "fedora" ]; then
|
||||
dnf copr enable -y slp/mesa-libkrun-vulkan
|
||||
dnf install -y mesa-vulkan-drivers-25.0.7-100.fc42 "${vulkan_rpms[@]}"
|
||||
dnf install -y mesa-vulkan-drivers-25.0.7-100.fc42 virglrenderer "${vulkan_rpms[@]}"
|
||||
dnf versionlock add mesa-vulkan-drivers-25.0.7-100.fc42
|
||||
else
|
||||
dnf install -y mesa-vulkan-drivers "${vulkan_rpms[@]}"
|
||||
dnf install -y mesa-vulkan-drivers virglrenderer "${vulkan_rpms[@]}"
|
||||
fi
|
||||
|
||||
rm_non_ubi_repos
|
||||
|
@ -282,18 +282,22 @@ clone_and_build_llama_cpp() {
|
|||
}
|
||||
|
||||
install_ramalama() {
|
||||
$PYTHON -m pip install . --prefix="$1"
|
||||
if [ -e "pyproject.toml" ]; then
|
||||
$PYTHON -m pip install . --prefix="$1"
|
||||
fi
|
||||
}
|
||||
|
||||
install_entrypoints() {
|
||||
install -d "$install_prefix"/bin
|
||||
install -m 755 \
|
||||
container-images/scripts/llama-server.sh \
|
||||
container-images/scripts/whisper-server.sh \
|
||||
container-images/scripts/build_rag.sh \
|
||||
container-images/scripts/doc2rag \
|
||||
container-images/scripts/rag_framework \
|
||||
"$install_prefix"/bin
|
||||
if [ -e "container-images" ]; then
|
||||
install -d "$install_prefix"/bin
|
||||
install -m 755 \
|
||||
container-images/scripts/llama-server.sh \
|
||||
container-images/scripts/whisper-server.sh \
|
||||
container-images/scripts/build_rag.sh \
|
||||
container-images/scripts/doc2rag \
|
||||
container-images/scripts/rag_framework \
|
||||
"$install_prefix"/bin
|
||||
fi
|
||||
}
|
||||
|
||||
main() {
|
||||
|
|
|
@ -40,7 +40,18 @@ update_python() {
|
|||
}
|
||||
|
||||
docling() {
|
||||
${python} -m pip install --prefix=/usr docling docling-core accelerate --extra-index-url https://download.pytorch.org/whl/"$1"
|
||||
case $1 in
|
||||
cuda)
|
||||
PYTORCH_DIR="cu128"
|
||||
;;
|
||||
rocm)
|
||||
PYTORCH_DIR="rocm6.3"
|
||||
;;
|
||||
*)
|
||||
PYTORCH_DIR="cpu"
|
||||
;;
|
||||
esac
|
||||
${python} -m pip install --prefix=/usr docling docling-core accelerate --extra-index-url "https://download.pytorch.org/whl/$PYTORCH_DIR"
|
||||
# Preloads models (assumes its installed from container_build.sh)
|
||||
doc2rag load
|
||||
}
|
||||
|
@ -51,6 +62,8 @@ rag() {
|
|||
}
|
||||
|
||||
to_gguf() {
|
||||
# required to build under GCC 15 until a new release is available, see https://github.com/google/sentencepiece/issues/1108 for details
|
||||
export CXXFLAGS="-include cstdint"
|
||||
${python} -m pip install --prefix=/usr "numpy~=1.26.4" "sentencepiece~=0.2.0" "transformers>=4.45.1,<5.0.0" git+https://github.com/ggml-org/llama.cpp#subdirectory=gguf-py "protobuf>=4.21.0,<5.0.0"
|
||||
}
|
||||
|
||||
|
@ -60,6 +73,9 @@ main() {
|
|||
# shellcheck disable=SC1091
|
||||
source /etc/os-release
|
||||
|
||||
# caching in a container build is unhelpful, and can cause errors
|
||||
export PIP_NO_CACHE_DIR=1
|
||||
|
||||
local arch
|
||||
arch="$(uname -m)"
|
||||
local gpu="${1-cpu}"
|
||||
|
@ -67,18 +83,14 @@ main() {
|
|||
python=$(python_version)
|
||||
local pkgs
|
||||
if available dnf; then
|
||||
pkgs=("git-core" "gcc" "gcc-c++")
|
||||
pkgs=("git-core" "gcc" "gcc-c++" "cmake")
|
||||
else
|
||||
pkgs=("git" "gcc" "g++")
|
||||
pkgs=("git" "gcc" "g++" "cmake")
|
||||
fi
|
||||
if [ "${gpu}" = "cuda" ]; then
|
||||
pkgs+=("libcudnn9-devel-cuda-12" "libcusparselt0" "cuda-cupti-12-*")
|
||||
fi
|
||||
|
||||
if [[ "$ID" = "fedora" && "$VERSION_ID" -ge 42 ]] ; then
|
||||
pkgs+=("python3-sentencepiece-0.2.0")
|
||||
fi
|
||||
|
||||
update_python
|
||||
to_gguf
|
||||
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import errno
|
||||
import hashlib
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
import uuid
|
||||
|
||||
|
@ -23,45 +25,46 @@ COLLECTION_NAME = "rag"
|
|||
class Converter:
|
||||
"""A Class designed to handle all document conversions using Docling"""
|
||||
|
||||
def __init__(self, output, targets, ocr):
|
||||
def __init__(self, args):
|
||||
# Docling Setup (Turn off OCR (image processing) for drastically reduced RAM usage and big speed increase)
|
||||
pipeline_options = PdfPipelineOptions()
|
||||
pipeline_options.do_ocr = ocr
|
||||
pipeline_options.do_ocr = args.ocr
|
||||
self.sources = []
|
||||
for source in args.sources:
|
||||
self.add(source)
|
||||
self.output = args.output
|
||||
self.format = args.format
|
||||
self.doc_converter = DocumentConverter(
|
||||
format_options={InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options)}
|
||||
)
|
||||
self.targets = []
|
||||
for target in targets:
|
||||
self.add(target)
|
||||
self.output = output
|
||||
self.client = qdrant_client.QdrantClient(path=output)
|
||||
self.client.set_model(EMBED_MODEL)
|
||||
self.client.set_sparse_model(SPARSE_MODEL)
|
||||
# optimizations to reduce ram
|
||||
self.client.create_collection(
|
||||
collection_name=COLLECTION_NAME,
|
||||
vectors_config=self.client.get_fastembed_vector_params(on_disk=True),
|
||||
sparse_vectors_config=self.client.get_fastembed_sparse_vector_params(on_disk=True),
|
||||
quantization_config=models.ScalarQuantization(
|
||||
scalar=models.ScalarQuantizationConfig(
|
||||
type=models.ScalarType.INT8,
|
||||
always_ram=True,
|
||||
if self.format == "qdrant":
|
||||
self.client = qdrant_client.QdrantClient(path=self.output)
|
||||
self.client.set_model(EMBED_MODEL)
|
||||
self.client.set_sparse_model(SPARSE_MODEL)
|
||||
# optimizations to reduce ram
|
||||
self.client.create_collection(
|
||||
collection_name=COLLECTION_NAME,
|
||||
vectors_config=self.client.get_fastembed_vector_params(on_disk=True),
|
||||
sparse_vectors_config=self.client.get_fastembed_sparse_vector_params(on_disk=True),
|
||||
quantization_config=models.ScalarQuantization(
|
||||
scalar=models.ScalarQuantizationConfig(
|
||||
type=models.ScalarType.INT8,
|
||||
always_ram=True,
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
def add(self, file_path):
|
||||
if os.path.isdir(file_path):
|
||||
self.walk(file_path) # Walk directory and process all files
|
||||
else:
|
||||
self.targets.append(file_path) # Process the single file
|
||||
self.sources.append(file_path) # Process the single file
|
||||
|
||||
def convert(self):
|
||||
result = self.doc_converter.convert_all(self.targets)
|
||||
def convert_qdrant(self, results):
|
||||
documents, ids = [], []
|
||||
chunker = HybridChunker(tokenizer=EMBED_MODEL, overlap=100, merge_peers=True)
|
||||
for file in result:
|
||||
chunk_iter = chunker.chunk(dl_doc=file.document)
|
||||
for result in results:
|
||||
chunk_iter = chunker.chunk(dl_doc=result.document)
|
||||
for chunk in chunk_iter:
|
||||
# Extract the enriched text from the chunk
|
||||
doc_text = chunker.contextualize(chunk=chunk)
|
||||
|
@ -72,6 +75,33 @@ class Converter:
|
|||
ids.append(doc_id)
|
||||
return self.client.add(COLLECTION_NAME, documents=documents, ids=ids, batch_size=1)
|
||||
|
||||
def convert(self):
|
||||
results = self.doc_converter.convert_all(self.sources)
|
||||
if self.format == "qdrant":
|
||||
return self.convert_qdrant(results)
|
||||
if self.format == "markdown":
|
||||
# Export the converted document to Markdown
|
||||
return self.convert_markdown(results)
|
||||
if self.format == "json":
|
||||
# Export the converted document to JSON
|
||||
return self.convert_json(results)
|
||||
|
||||
def convert_markdown(self, results):
|
||||
ctr = 0
|
||||
# Process the conversion results
|
||||
for ctr, result in enumerate(results):
|
||||
dirname = self.output + os.path.dirname(self.sources[ctr])
|
||||
os.makedirs(dirname, exist_ok=True)
|
||||
document = result.document
|
||||
document.save_as_markdown(os.path.join(dirname, f"{document.name}.md"))
|
||||
|
||||
def convert_json(self, results):
|
||||
for ctr, result in enumerate(results):
|
||||
dirname = self.output + os.path.dirname(self.sources[ctr])
|
||||
os.makedirs(dirname, exist_ok=True)
|
||||
document = result.document
|
||||
document.save_as_json(os.path.join(dirname, f"{document.name}.json"))
|
||||
|
||||
def walk(self, path):
|
||||
for root, dirs, files in os.walk(path, topdown=True):
|
||||
if len(files) == 0:
|
||||
|
@ -79,7 +109,7 @@ class Converter:
|
|||
for f in files:
|
||||
file = os.path.join(root, f)
|
||||
if os.path.isfile(file):
|
||||
self.targets.append(file)
|
||||
self.sources.append(file)
|
||||
|
||||
def generate_hash(self, document: str) -> str:
|
||||
"""Generate a unique hash for a document."""
|
||||
|
@ -103,8 +133,14 @@ parser = argparse.ArgumentParser(
|
|||
description="process source files into RAG vector database",
|
||||
)
|
||||
|
||||
parser.add_argument("target", nargs="?", help="Target database")
|
||||
parser.add_argument("source", nargs="*", help="Source files")
|
||||
parser.add_argument("output", nargs="?", help="Output directory")
|
||||
parser.add_argument("sources", nargs="*", help="Source files")
|
||||
parser.add_argument(
|
||||
"--format",
|
||||
default="qdrant",
|
||||
help="Output format for RAG Data",
|
||||
choices=["qdrant", "json", "markdown"],
|
||||
)
|
||||
parser.add_argument(
|
||||
"--ocr",
|
||||
action='store_true',
|
||||
|
@ -123,16 +159,16 @@ def eprint(e, exit_code):
|
|||
|
||||
try:
|
||||
args = parser.parse_args()
|
||||
if args.target == "load":
|
||||
if args.output == "load":
|
||||
load()
|
||||
else:
|
||||
converter = Converter(args.target, args.source, args.ocr)
|
||||
converter = Converter(args)
|
||||
converter.convert()
|
||||
except docling.exceptions.ConversionError as e:
|
||||
eprint(e, 1)
|
||||
except FileNotFoundError as e:
|
||||
eprint(e, 1)
|
||||
eprint(e, errno.ENOENT)
|
||||
except ValueError as e:
|
||||
eprint(e, 1)
|
||||
eprint(e, errno.EINVAL)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
|
|
@ -62,7 +62,7 @@ add_rag() {
|
|||
tag=$tag-rag
|
||||
containerfile="container-images/common/Containerfile.rag"
|
||||
GPU=cpu
|
||||
case $2 in
|
||||
case "${2##*/}" in
|
||||
cuda)
|
||||
GPU=cuda
|
||||
;;
|
||||
|
|
|
@ -48,6 +48,33 @@ for a value and set the variable only if it is set on the host.
|
|||
#### **--help**, **-h**
|
||||
show this help message and exit
|
||||
|
||||
#### **--image**=IMAGE
|
||||
OCI container image to run with specified AI model. RamaLama defaults to using
|
||||
images based on the accelerator it discovers. For example:
|
||||
`quay.io/ramalama/ramalama`. See the table below for all default images.
|
||||
The default image tag is based on the minor version of the RamaLama package.
|
||||
Version 0.11.0 of RamaLama pulls an image with a `:0.11` tag from the quay.io/ramalama OCI repository. The --image option overrides this default.
|
||||
|
||||
The default can be overridden in the ramalama.conf file or via the
|
||||
RAMALAMA_IMAGE environment variable. `export RAMALAMA_IMAGE=quay.io/ramalama/aiimage:1.2` tells
|
||||
RamaLama to use the `quay.io/ramalama/aiimage:1.2` image.
|
||||
|
||||
Accelerated images:
|
||||
|
||||
| Accelerator | Image |
|
||||
| ------------------------| -------------------------- |
|
||||
| CPU, Apple | quay.io/ramalama/ramalama |
|
||||
| HIP_VISIBLE_DEVICES | quay.io/ramalama/rocm |
|
||||
| CUDA_VISIBLE_DEVICES | quay.io/ramalama/cuda |
|
||||
| ASAHI_VISIBLE_DEVICES | quay.io/ramalama/asahi |
|
||||
| INTEL_VISIBLE_DEVICES | quay.io/ramalama/intel-gpu |
|
||||
| ASCEND_VISIBLE_DEVICES | quay.io/ramalama/cann |
|
||||
| MUSA_VISIBLE_DEVICES | quay.io/ramalama/musa |
|
||||
|
||||
#### **--keep-groups**
|
||||
pass --group-add keep-groups to podman (default: False)
|
||||
If GPU device on host system is accessible to user via group access, this option leaks the groups into the container.
|
||||
|
||||
#### **--name**, **-n**
|
||||
name of the container to run the Model in
|
||||
|
||||
|
@ -95,6 +122,9 @@ not have more privileges than the user that launched them.
|
|||
#### **--seed**=
|
||||
Specify seed rather than using random seed model interaction
|
||||
|
||||
#### **--selinux**=*true*
|
||||
Enable SELinux container separation
|
||||
|
||||
#### **--temp**="0.8"
|
||||
Temperature of the response from the AI Model
|
||||
llama.cpp explains this as:
|
||||
|
@ -118,7 +148,7 @@ Benchmark specified AI Model.
|
|||
## EXAMPLES
|
||||
|
||||
```
|
||||
ramalama bench granite-moe3
|
||||
ramalama bench granite3-moe
|
||||
```
|
||||
|
||||
## SEE ALSO
|
||||
|
|
|
@ -137,6 +137,19 @@ ramalama run granite
|
|||
|
||||
This is particularly useful in multi-GPU systems where you want to dedicate specific GPUs to different workloads.
|
||||
|
||||
If the `CUDA_VISIBLE_DEVICES` environment variable is set to an empty string, RamaLama will default to using the CPU.
|
||||
|
||||
```bash
|
||||
export CUDA_VISIBLE_DEVICES="" # Defaults to CPU
|
||||
ramalama run granite
|
||||
```
|
||||
|
||||
To revert to using all available GPUs, unset the environment variable:
|
||||
|
||||
```bash
|
||||
unset CUDA_VISIBLE_DEVICES
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### CUDA Updates
|
||||
|
|
|
@ -53,6 +53,33 @@ for a value and set the variable only if it is set on the host.
|
|||
#### **--help**, **-h**
|
||||
show this help message and exit
|
||||
|
||||
#### **--image**=IMAGE
|
||||
OCI container image to run with specified AI model. RamaLama defaults to using
|
||||
images based on the accelerator it discovers. For example:
|
||||
`quay.io/ramalama/ramalama`. See the table below for all default images.
|
||||
The default image tag is based on the minor version of the RamaLama package.
|
||||
Version 0.11.0 of RamaLama pulls an image with a `:0.11` tag from the quay.io/ramalama OCI repository. The --image option overrides this default.
|
||||
|
||||
The default can be overridden in the ramalama.conf file or via the
|
||||
RAMALAMA_IMAGE environment variable. `export RAMALAMA_IMAGE=quay.io/ramalama/aiimage:1.2` tells
|
||||
RamaLama to use the `quay.io/ramalama/aiimage:1.2` image.
|
||||
|
||||
Accelerated images:
|
||||
|
||||
| Accelerator | Image |
|
||||
| ------------------------| -------------------------- |
|
||||
| CPU, Apple | quay.io/ramalama/ramalama |
|
||||
| HIP_VISIBLE_DEVICES | quay.io/ramalama/rocm |
|
||||
| CUDA_VISIBLE_DEVICES | quay.io/ramalama/cuda |
|
||||
| ASAHI_VISIBLE_DEVICES | quay.io/ramalama/asahi |
|
||||
| INTEL_VISIBLE_DEVICES | quay.io/ramalama/intel-gpu |
|
||||
| ASCEND_VISIBLE_DEVICES | quay.io/ramalama/cann |
|
||||
| MUSA_VISIBLE_DEVICES | quay.io/ramalama/musa |
|
||||
|
||||
#### **--keep-groups**
|
||||
pass --group-add keep-groups to podman (default: False)
|
||||
If GPU device on host system is accessible to user via group access, this option leaks the groups into the container.
|
||||
|
||||
#### **--name**, **-n**
|
||||
name of the container to run the Model in
|
||||
|
||||
|
@ -103,6 +130,9 @@ Add *args* to the runtime (llama.cpp or vllm) invocation.
|
|||
#### **--seed**=
|
||||
Specify seed rather than using random seed model interaction
|
||||
|
||||
#### **--selinux**=*true*
|
||||
Enable SELinux container separation
|
||||
|
||||
#### **--temp**="0.8"
|
||||
Temperature of the response from the AI Model
|
||||
llama.cpp explains this as:
|
||||
|
@ -126,7 +156,7 @@ Calculate the perplexity of an AI Model. Perplexity measures how well the model
|
|||
## EXAMPLES
|
||||
|
||||
```
|
||||
ramalama perplexity granite-moe3
|
||||
ramalama perplexity granite3-moe
|
||||
```
|
||||
|
||||
## SEE ALSO
|
||||
|
|
|
@ -19,7 +19,7 @@ positional arguments:
|
|||
AsciiDoc & Markdown formatted files to be processed.
|
||||
Can be specified multiple times.
|
||||
|
||||
*IMAGE* OCI Image name to contain processed rag data
|
||||
*PATH|IMAGE* Path or OCI Image name to contain processed rag data
|
||||
|
||||
## OPTIONS
|
||||
|
||||
|
@ -32,9 +32,45 @@ process to be launched inside of the container. If an environment variable is
|
|||
specified without a value, the container engine checks the host environment
|
||||
for a value and set the variable only if it is set on the host.
|
||||
|
||||
#### **--format**=*json* | *markdown* | *qdrant* |
|
||||
Convert documents into the following formats
|
||||
|
||||
| Type | Description |
|
||||
| ------- | ---------------------------------------------------- |
|
||||
| json | JavaScript Object Notation. lightweight format for exchanging data |
|
||||
| markdown| Lightweight markup language using plain text editing |
|
||||
| qdrant | Retrieval-Augmented Generation (RAG) Vector database |
|
||||
|
||||
#### **--help**, **-h**
|
||||
Print usage message
|
||||
|
||||
#### **--image**=IMAGE
|
||||
OCI container image to run with specified AI model. RamaLama defaults to using
|
||||
images based on the accelerator it discovers. For example:
|
||||
`quay.io/ramalama/ramalama-rag`. See the table below for all default images.
|
||||
The default image tag is based on the minor version of the RamaLama package.
|
||||
Version 0.11.0 of RamaLama pulls an image with a `:0.11` tag from the quay.io/ramalama OCI repository. The --image option overrides this default.
|
||||
|
||||
The default can be overridden in the ramalama.conf file or via the
|
||||
RAMALAMA_IMAGE environment variable. `export RAMALAMA_IMAGE=quay.io/ramalama/aiimage:1.2` tells
|
||||
RamaLama to use the `quay.io/ramalama/aiimage:1.2` image.
|
||||
|
||||
Accelerated images:
|
||||
|
||||
| Accelerator | Image |
|
||||
| ------------------------| ------------------------------ |
|
||||
| CPU, Apple | quay.io/ramalama/ramalama-rag |
|
||||
| HIP_VISIBLE_DEVICES | quay.io/ramalama/rocm-rag |
|
||||
| CUDA_VISIBLE_DEVICES | quay.io/ramalama/cuda-rag |
|
||||
| ASAHI_VISIBLE_DEVICES | quay.io/ramalama/asahi-rag |
|
||||
| INTEL_VISIBLE_DEVICES | quay.io/ramalama/intel-gpu-rag |
|
||||
| ASCEND_VISIBLE_DEVICES | quay.io/ramalama/cann-rag |
|
||||
| MUSA_VISIBLE_DEVICES | quay.io/ramalama/musa-rag |
|
||||
|
||||
#### **--keep-groups**
|
||||
pass --group-add keep-groups to podman (default: False)
|
||||
If GPU device on host system is accessible to user via group access, this option leaks the groups into the container.
|
||||
|
||||
#### **--network**=*none*
|
||||
sets the configuration for network namespaces when handling RUN instructions
|
||||
|
||||
|
@ -49,10 +85,13 @@ Pull image policy. The default is **missing**.
|
|||
- **never**: Never pull the image but use the one from the local containers storage. Throw an error when no image is found.
|
||||
- **newer**: Pull if the image on the registry is newer than the one in the local containers storage. An image is considered to be newer when the digests are different. Comparing the time stamps is prone to errors. Pull errors are suppressed if a local image was found.
|
||||
|
||||
#### **--selinux**=*true*
|
||||
Enable SELinux container separation
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
```
|
||||
./bin/ramalama rag ./README.md https://github.com/containers/podman/blob/main/README.md quay.io/rhatdan/myrag
|
||||
$ ramalama rag ./README.md https://github.com/containers/podman/blob/main/README.md quay.io/rhatdan/myrag
|
||||
100% |███████████████████████████████████████████████████████| 114.00 KB/ 0.00 B 922.89 KB/s 59m 59s
|
||||
Building quay.io/ramalama/myrag...
|
||||
adding vectordb...
|
||||
|
@ -60,7 +99,17 @@ c857ebc65c641084b34e39b740fdb6a2d9d2d97be320e6aa9439ed0ab8780fe0
|
|||
```
|
||||
|
||||
```
|
||||
ramalama rag --ocr README.md https://mysight.edu/document quay.io/rhatdan/myrag
|
||||
$ ramalama rag --ocr README.md https://mysight.edu/document quay.io/rhatdan/myrag
|
||||
```
|
||||
|
||||
```
|
||||
$ ramalama rag --format markdown /tmp/internet.pdf /tmp/output
|
||||
$ ls /tmp/output/docs/tmp/
|
||||
/tmp/output/docs/tmp/internet.md
|
||||
$ ramalama rag --format json /tmp/internet.pdf /tmp/output
|
||||
$ ls /tmp/output/docs/tmp/
|
||||
/tmp/output/docs/tmp/internet.md
|
||||
/tmp/output/docs/tmp/internet.json
|
||||
```
|
||||
|
||||
## SEE ALSO
|
||||
|
|
|
@ -61,6 +61,33 @@ for a value and set the variable only if it is set on the host.
|
|||
#### **--help**, **-h**
|
||||
Show this help message and exit
|
||||
|
||||
#### **--image**=IMAGE
|
||||
OCI container image to run with specified AI model. RamaLama defaults to using
|
||||
images based on the accelerator it discovers. For example:
|
||||
`quay.io/ramalama/ramalama`. See the table below for all default images.
|
||||
The default image tag is based on the minor version of the RamaLama package.
|
||||
Version 0.11.0 of RamaLama pulls an image with a `:0.11` tag from the quay.io/ramalama OCI repository. The --image option overrides this default.
|
||||
|
||||
The default can be overridden in the ramalama.conf file or via the
|
||||
RAMALAMA_IMAGE environment variable. `export RAMALAMA_IMAGE=quay.io/ramalama/aiimage:1.2` tells
|
||||
RamaLama to use the `quay.io/ramalama/aiimage:1.2` image.
|
||||
|
||||
Accelerated images:
|
||||
|
||||
| Accelerator | Image |
|
||||
| ------------------------| -------------------------- |
|
||||
| CPU, Apple | quay.io/ramalama/ramalama |
|
||||
| HIP_VISIBLE_DEVICES | quay.io/ramalama/rocm |
|
||||
| CUDA_VISIBLE_DEVICES | quay.io/ramalama/cuda |
|
||||
| ASAHI_VISIBLE_DEVICES | quay.io/ramalama/asahi |
|
||||
| INTEL_VISIBLE_DEVICES | quay.io/ramalama/intel-gpu |
|
||||
| ASCEND_VISIBLE_DEVICES | quay.io/ramalama/cann |
|
||||
| MUSA_VISIBLE_DEVICES | quay.io/ramalama/musa |
|
||||
|
||||
#### **--keep-groups**
|
||||
pass --group-add keep-groups to podman (default: False)
|
||||
If GPU device on host system is accessible to user via group access, this option leaks the groups into the container.
|
||||
|
||||
#### **--keepalive**
|
||||
duration to keep a model loaded (e.g. 5m)
|
||||
|
||||
|
@ -121,6 +148,9 @@ Add *args* to the runtime (llama.cpp or vllm) invocation.
|
|||
#### **--seed**=
|
||||
Specify seed rather than using random seed model interaction
|
||||
|
||||
#### **--selinux**=*true*
|
||||
Enable SELinux container separation
|
||||
|
||||
#### **--temp**="0.8"
|
||||
Temperature of the response from the AI Model
|
||||
llama.cpp explains this as:
|
||||
|
|
|
@ -93,6 +93,33 @@ show this help message and exit
|
|||
#### **--host**="0.0.0.0"
|
||||
IP address for llama.cpp to listen on.
|
||||
|
||||
#### **--image**=IMAGE
|
||||
OCI container image to run with specified AI model. RamaLama defaults to using
|
||||
images based on the accelerator it discovers. For example:
|
||||
`quay.io/ramalama/ramalama`. See the table below for all default images.
|
||||
The default image tag is based on the minor version of the RamaLama package.
|
||||
Version 0.11.0 of RamaLama pulls an image with a `:0.11` tag from the quay.io/ramalama OCI repository. The --image option overrides this default.
|
||||
|
||||
The default can be overridden in the ramalama.conf file or via the
|
||||
RAMALAMA_IMAGE environment variable. `export RAMALAMA_IMAGE=quay.io/ramalama/aiimage:1.2` tells
|
||||
RamaLama to use the `quay.io/ramalama/aiimage:1.2` image.
|
||||
|
||||
Accelerated images:
|
||||
|
||||
| Accelerator | Image |
|
||||
| ------------------------| -------------------------- |
|
||||
| CPU, Apple | quay.io/ramalama/ramalama |
|
||||
| HIP_VISIBLE_DEVICES | quay.io/ramalama/rocm |
|
||||
| CUDA_VISIBLE_DEVICES | quay.io/ramalama/cuda |
|
||||
| ASAHI_VISIBLE_DEVICES | quay.io/ramalama/asahi |
|
||||
| INTEL_VISIBLE_DEVICES | quay.io/ramalama/intel-gpu |
|
||||
| ASCEND_VISIBLE_DEVICES | quay.io/ramalama/cann |
|
||||
| MUSA_VISIBLE_DEVICES | quay.io/ramalama/musa |
|
||||
|
||||
#### **--keep-groups**
|
||||
pass --group-add keep-groups to podman (default: False)
|
||||
If GPU device on host system is accessible to user via group access, this option leaks the groups into the container.
|
||||
|
||||
#### **--model-draft**
|
||||
|
||||
A draft model is a smaller, faster model that helps accelerate the decoding
|
||||
|
@ -164,6 +191,9 @@ Add *args* to the runtime (llama.cpp or vllm) invocation.
|
|||
#### **--seed**=
|
||||
Specify a seed rather than using a random seed for model interaction
|
||||
|
||||
#### **--selinux**=*true*
|
||||
Enable SELinux container separation
|
||||
|
||||
#### **--temp**="0.8"
|
||||
Temperature of the response from the AI Model.
|
||||
llama.cpp explains this as:
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue