Compare commits

main..v0.11.0

No commits in common. "main" and "v0.11.0" have entirely different histories.

102 changed files with 271 additions and 2079 deletions

View File

@@ -79,7 +79,7 @@ jobs:
dist_git_branches: &fedora_targets
- fedora-all
- epel10
- epel10.0
- epel9
- job: koji_build
trigger: commit
@@ -92,4 +92,4 @@ jobs:
dist_git_branches:
- fedora-branched # rawhide updates are created automatically
- epel10
- epel10.0
- epel9

View File

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-llama-server
pipelines.appstudio.openshift.io/type: build
name: asahi-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-llama-server
pipelines.appstudio.openshift.io/type: build
name: asahi-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-rag
pipelines.appstudio.openshift.io/type: build
name: asahi-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- GPU=cpu
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-rag
pipelines.appstudio.openshift.io/type: build
name: asahi-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- GPU=cpu
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-whisper-server
pipelines.appstudio.openshift.io/type: build
name: asahi-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi-whisper-server
pipelines.appstudio.openshift.io/type: build
name: asahi-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi
pipelines.appstudio.openshift.io/type: build
name: asahi-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/asahi/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: asahi
pipelines.appstudio.openshift.io/type: build
name: asahi-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/asahi:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/asahi/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: bats

View File

@@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: bats

View File

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-llama-server
pipelines.appstudio.openshift.io/type: build
name: cann-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-llama-server
pipelines.appstudio.openshift.io/type: build
name: cann-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-rag
pipelines.appstudio.openshift.io/type: build
name: cann-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- GPU=cpu
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-rag
pipelines.appstudio.openshift.io/type: build
name: cann-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- GPU=cpu
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,48 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-whisper-server
pipelines.appstudio.openshift.io/type: build
name: cann-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann-whisper-server
pipelines.appstudio.openshift.io/type: build
name: cann-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann
pipelines.appstudio.openshift.io/type: build
name: cann-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/cann/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cann
pipelines.appstudio.openshift.io/type: build
name: cann-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/cann:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/cann/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-llama-server
@@ -29,7 +29,6 @@ spec:
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-llama-server
@@ -26,7 +26,6 @@ spec:
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-rag
@@ -29,7 +29,6 @@ spec:
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image

View File

@@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-rag
@@ -26,7 +26,6 @@ spec:
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- linux-d160-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image

View File

@@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-whisper-server
@@ -29,7 +29,6 @@ spec:
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda-whisper-server
@@ -26,7 +26,6 @@ spec:
- name: build-platforms
value:
- linux-m2xlarge/amd64
- linux-m2xlarge/arm64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image

View File

@@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda
@@ -29,7 +29,6 @@ spec:
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/cuda/Containerfile
pipelineRef:

View File

@@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: cuda
@@ -26,7 +26,6 @@ spec:
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/cuda/Containerfile
pipelineRef:

View File

@@ -52,6 +52,10 @@ spec:
params:
- name: image
value: $(tasks.init.results.bats-image)
- name: git-url
value: $(params.git-url)
- name: git-revision
value: $(params.git-revision)
- name: envs
value:
- RAMALAMA_IMAGE=$(tasks.init.results.ramalama-image)

View File

@@ -9,6 +9,10 @@ spec:
description: The platform of the VM to provision
- name: image
description: The image to use when setting up the test environment
- name: git-url
description: The URL of the source code repository
- name: git-revision
description: The revision of the source code to test
- name: cmd
description: The command to run
- name: envs
@@ -34,6 +38,10 @@ spec:
name: ssh
workingDir: /var/workdir
env:
- name: GIT_URL
value: $(params.git-url)
- name: GIT_REVISION
value: $(params.git-revision)
- name: TEST_IMAGE
value: $(params.image)
- name: TEST_CMD
@@ -49,7 +57,13 @@ spec:
}
log Install packages
dnf -y install openssh-clients rsync jq
dnf -y install openssh-clients rsync git-core jq
log Clone source
git clone -n "$GIT_URL" source
pushd source
git checkout "$GIT_REVISION"
popd
log Prepare connection
@@ -93,7 +107,7 @@ spec:
--security-opt label=disable \
--security-opt unmask=/proc/* \
--device /dev/net/tun \
--device /dev/fuse \
-v \$PWD/source:/src \
${PODMAN_ENV[*]} \
$TEST_IMAGE $TEST_CMD
SCRIPTEOF
@@ -105,7 +119,7 @@ spec:
export SSH_ARGS="-o StrictHostKeyChecking=no -o ServerAliveInterval=60 -o ServerAliveCountMax=10"
# ssh once before rsync to retrieve the host key
ssh $SSH_ARGS "$SSH_HOST" "uname -a"
rsync -ra scripts "$SSH_HOST:$BUILD_DIR"
rsync -ra scripts source "$SSH_HOST:$BUILD_DIR"
ssh $SSH_ARGS "$SSH_HOST" "$BUILD_DIR/scripts/test.sh"
log End VM exec
else
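
Read together, the pipeline and task hunks above thread the source checkout through the provisioned test VM: the pipeline forwards its git-url and git-revision parameters into the provision task, which installs git-core, clones the repository at the revision under test, rsyncs the checkout alongside its helper scripts to the VM, and bind-mounts it into the test container as /src. A condensed sketch of the resulting flow, assuming GIT_URL, GIT_REVISION, TEST_IMAGE, TEST_CMD, SSH_HOST and BUILD_DIR are set as in the task environment above (this is not the verbatim task script):

# Condensed sketch of the provision task's test flow -- not verbatim.
dnf -y install openssh-clients rsync git-core jq

# Fetch the repository at the revision being validated.
git clone -n "$GIT_URL" source
pushd source
git checkout "$GIT_REVISION"
popd

# Copy helper scripts and the checkout to the VM, then run the test
# image with the source bind-mounted at /src.
rsync -ra scripts source "$SSH_HOST:$BUILD_DIR"
podman run --security-opt label=disable \
  -v "$PWD/source:/src" \
  $TEST_IMAGE $TEST_CMD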

View File

@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-llama-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-llama-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-rag
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- GPU=cpu
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-rag
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- GPU=cpu
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-whisper-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu-whisper-server
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,41 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/intel-gpu/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,38 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: intel-gpu
pipelines.appstudio.openshift.io/type: build
name: intel-gpu-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/intel-gpu:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/intel-gpu/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: llama-stack
pipelines.appstudio.openshift.io/type: build
name: llama-stack-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/llama-stack:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/llama-stack/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: llama-stack
pipelines.appstudio.openshift.io/type: build
name: llama-stack-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/llama-stack:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/llama-stack/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-llama-server
pipelines.appstudio.openshift.io/type: build
name: musa-llama-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-llama-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-llama-server
pipelines.appstudio.openshift.io/type: build
name: musa-llama-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-llama-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- ENTRYPOINT=/usr/bin/llama-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-rag
pipelines.appstudio.openshift.io/type: build
name: musa-rag-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-rag:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- GPU=musa
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-rag
pipelines.appstudio.openshift.io/type: build
name: musa-rag-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-rag:{{revision}}
- name: build-platforms
value:
- linux-d160-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.rag
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- GPU=musa
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,47 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-whisper-server
pipelines.appstudio.openshift.io/type: build
name: musa-whisper-server-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-whisper-server:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,44 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa-whisper-server
pipelines.appstudio.openshift.io/type: build
name: musa-whisper-server-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa-whisper-server:{{revision}}
- name: build-platforms
value:
- linux-m2xlarge/amd64
- name: dockerfile
value: container-images/common/Containerfile.entrypoint
- name: parent-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-args
value:
- PARENT=quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- ENTRYPOINT=/usr/bin/whisper-server.sh
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,41 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa
pipelines.appstudio.openshift.io/type: build
name: musa-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/musa/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,38 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: musa
pipelines.appstudio.openshift.io/type: build
name: musa-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/musa:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/musa/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@@ -1,41 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: openvino
pipelines.appstudio.openshift.io/type: build
name: openvino-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/openvino:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/openvino/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,38 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: openvino
pipelines.appstudio.openshift.io/type: build
name: openvino-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/openvino:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- name: dockerfile
value: container-images/openvino/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -113,7 +113,7 @@ spec:
- name: name
value: init
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:1d8221c84f91b923d89de50bf16481ea729e3b68ea04a9a7cbe8485ddbb27ee6
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:66e90d31e1386bf516fb548cd3e3f0082b5d0234b8b90dbf9e0d4684b70dbe1a
- name: kind
value: task
resolver: bundles
@ -163,7 +163,7 @@ spec:
- name: name
value: prefetch-dependencies-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:092491ac0f6e1009d10c58a1319d1029371bf637cc1293cceba53c6da5314ed1
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:f10a4841e6f75fbb314b1d8cbf14f652499c1fe7f59e59aed59f7431c680aa17
- name: kind
value: task
resolver: bundles
@ -225,7 +225,7 @@ spec:
- name: name
value: buildah-remote-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:9e866d4d0489a6ab84ae263db416c9f86d2d6117ef4444f495a0e97388ae3ac0
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:5b8d51fa889cdac873750904c3fccc0cca1c4f65af16902ebb2b573151f80657
- name: kind
value: task
resolver: bundles
@ -254,7 +254,7 @@ spec:
- name: name
value: build-image-index
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:3499772af90aad0d3935629be6d37dd9292195fb629e6f43ec839c7f545a0faa
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:846dc9975914f31380ec2712fdbac9df3b06c00a9cc7df678315a7f97145efc2
- name: kind
value: task
resolver: bundles
@ -285,6 +285,8 @@ spec:
params:
- name: image
value: $(params.test-image)@$(tasks.wait-for-test-image.results.digest)
- name: source-artifact
value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
- name: envs
value:
- $(params.test-envs[*])
@ -362,7 +364,7 @@ spec:
- name: name
value: clair-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:417f44117f8d87a4a62fea6589b5746612ac61640b454dbd88f74892380411f2
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:d354939892f3a904223ec080cc3771bd11931085a5d202323ea491ee8e8c5e43
- name: kind
value: task
resolver: bundles
@ -382,7 +384,7 @@ spec:
- name: name
value: ecosystem-cert-preflight-checks
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:f99d2bdb02f13223d494077a2cde31418d09369f33c02134a8e7e5fad2f61eda
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:abbe195626eec925288df6425679559025d1be4af5ae70ca6dbbcb49ad3bf08b
- name: kind
value: task
resolver: bundles
@ -408,7 +410,7 @@ spec:
- name: name
value: sast-snyk-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:fe5e5ba3a72632cd505910de2eacd62c9d11ed570c325173188f8d568ac60771
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:e61f541189b30d14292ef8df36ccaf13f7feb2378fed5f74cb6293b3e79eb687
- name: kind
value: task
resolver: bundles
@ -430,7 +432,7 @@ spec:
- name: name
value: clamav-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:7749146f7e4fe530846f1b15c9366178ec9f44776ef1922a60d3e7e2b8c6426b
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:9cab95ac9e833d77a63c079893258b73b8d5a298d93aaf9bdd6722471bc2f338
- name: kind
value: task
resolver: bundles
@ -475,7 +477,7 @@ spec:
- name: name
value: sast-coverity-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f9ca942208dc2e63b479384ccc56a611cc793397ecc837637b5b9f89c2ecbefe
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:c926568ce63e4f63e18bb6a4178caca2e8192f6e3b830bbcd354e6485d29458c
- name: kind
value: task
resolver: bundles
@ -522,7 +524,7 @@ spec:
- name: name
value: sast-shell-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:808bcaf75271db6a999f53fdefb973a385add94a277d37fbd3df68f8ac7dfaa3
- name: kind
value: task
resolver: bundles
@ -593,7 +595,7 @@ spec:
- name: name
value: push-dockerfile-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:8c75c4a747e635e5f3e12266a3bb6e5d3132bf54e37eaa53d505f89897dd8eca
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:5d8013b6a27bbc5e4ff261144616268f28417ed0950d583ef36349fcd59d3d3d
- name: kind
value: task
resolver: bundles
@ -629,7 +631,7 @@ spec:
- name: name
value: show-sbom
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:86c069cac0a669797e8049faa8aa4088e70ff7fcd579d5bdc37626a9e0488a05
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:1b1df4da95966d08ac6a5b8198710e09e68b5c2cdc707c37d9d19769e65884b2
- name: kind
value: task
resolver: bundles

View File

@ -113,7 +113,7 @@ spec:
- name: name
value: init
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:1d8221c84f91b923d89de50bf16481ea729e3b68ea04a9a7cbe8485ddbb27ee6
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:66e90d31e1386bf516fb548cd3e3f0082b5d0234b8b90dbf9e0d4684b70dbe1a
- name: kind
value: task
resolver: bundles
@ -163,7 +163,7 @@ spec:
- name: name
value: prefetch-dependencies-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:092491ac0f6e1009d10c58a1319d1029371bf637cc1293cceba53c6da5314ed1
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:f10a4841e6f75fbb314b1d8cbf14f652499c1fe7f59e59aed59f7431c680aa17
- name: kind
value: task
resolver: bundles
@ -225,7 +225,7 @@ spec:
- name: name
value: buildah-remote-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:9e866d4d0489a6ab84ae263db416c9f86d2d6117ef4444f495a0e97388ae3ac0
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:5b8d51fa889cdac873750904c3fccc0cca1c4f65af16902ebb2b573151f80657
- name: kind
value: task
resolver: bundles
@ -254,7 +254,7 @@ spec:
- name: name
value: build-image-index
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:3499772af90aad0d3935629be6d37dd9292195fb629e6f43ec839c7f545a0faa
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:846dc9975914f31380ec2712fdbac9df3b06c00a9cc7df678315a7f97145efc2
- name: kind
value: task
resolver: bundles
@ -285,6 +285,8 @@ spec:
params:
- name: image
value: $(params.test-image)@$(tasks.wait-for-test-image.results.digest)
- name: source-artifact
value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT)
- name: envs
value:
- $(params.test-envs[*])
@ -362,7 +364,7 @@ spec:
- name: name
value: clair-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:417f44117f8d87a4a62fea6589b5746612ac61640b454dbd88f74892380411f2
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:d354939892f3a904223ec080cc3771bd11931085a5d202323ea491ee8e8c5e43
- name: kind
value: task
resolver: bundles
@ -382,7 +384,7 @@ spec:
- name: name
value: ecosystem-cert-preflight-checks
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:f99d2bdb02f13223d494077a2cde31418d09369f33c02134a8e7e5fad2f61eda
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:abbe195626eec925288df6425679559025d1be4af5ae70ca6dbbcb49ad3bf08b
- name: kind
value: task
resolver: bundles
@ -408,7 +410,7 @@ spec:
- name: name
value: sast-snyk-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:fe5e5ba3a72632cd505910de2eacd62c9d11ed570c325173188f8d568ac60771
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:e61f541189b30d14292ef8df36ccaf13f7feb2378fed5f74cb6293b3e79eb687
- name: kind
value: task
resolver: bundles
@ -430,7 +432,7 @@ spec:
- name: name
value: clamav-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:7749146f7e4fe530846f1b15c9366178ec9f44776ef1922a60d3e7e2b8c6426b
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:9cab95ac9e833d77a63c079893258b73b8d5a298d93aaf9bdd6722471bc2f338
- name: kind
value: task
resolver: bundles
@ -475,7 +477,7 @@ spec:
- name: name
value: sast-coverity-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:f9ca942208dc2e63b479384ccc56a611cc793397ecc837637b5b9f89c2ecbefe
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:c926568ce63e4f63e18bb6a4178caca2e8192f6e3b830bbcd354e6485d29458c
- name: kind
value: task
resolver: bundles
@ -522,7 +524,7 @@ spec:
- name: name
value: sast-shell-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:808bcaf75271db6a999f53fdefb973a385add94a277d37fbd3df68f8ac7dfaa3
- name: kind
value: task
resolver: bundles
@ -593,7 +595,7 @@ spec:
- name: name
value: push-dockerfile-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:8c75c4a747e635e5f3e12266a3bb6e5d3132bf54e37eaa53d505f89897dd8eca
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:5d8013b6a27bbc5e4ff261144616268f28417ed0950d583ef36349fcd59d3d3d
- name: kind
value: task
resolver: bundles
@ -629,7 +631,7 @@ spec:
- name: name
value: show-sbom
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:86c069cac0a669797e8049faa8aa4088e70ff7fcd579d5bdc37626a9e0488a05
value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:1b1df4da95966d08ac6a5b8198710e09e68b5c2cdc707c37d9d19769e65884b2
- name: kind
value: task
resolver: bundles

View File

@ -1,42 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-cli
pipelines.appstudio.openshift.io/type: build
name: ramalama-cli-on-pull-request
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-cli:on-pr-{{revision}}
- name: image-expires-after
value: 5d
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/ramalama-cli/Containerfile
pipelineRef:
name: pull-request-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -1,39 +0,0 @@
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
annotations:
build.appstudio.openshift.io/repo: https://github.com/containers/ramalama?rev={{revision}}
build.appstudio.redhat.com/commit_sha: '{{revision}}'
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-cli
pipelines.appstudio.openshift.io/type: build
name: ramalama-cli-on-push
namespace: ramalama-tenant
spec:
params:
- name: git-url
value: '{{source_url}}'
- name: revision
value: '{{revision}}'
- name: output-image
value: quay.io/redhat-user-workloads/ramalama-tenant/ramalama-cli:{{revision}}
- name: build-platforms
value:
- linux-c4xlarge/amd64
- linux-c4xlarge/arm64
- name: dockerfile
value: container-images/ramalama-cli/Containerfile
pipelineRef:
name: push-pipeline
timeouts:
pipeline: 6h
workspaces:
- name: git-auth
secret:
secretName: '{{ git_auth_secret }}'

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-llama-server

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-llama-server

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-rag

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-rag

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-whisper-server

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama-whisper-server

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: ramalama

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-llama-server

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-llama-server

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-rag

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-rag

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-llama-server

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-llama-server

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-rag

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-rag

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-whisper-server

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi-whisper-server

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-ubi

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-whisper-server

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm-whisper-server

View File

@ -8,8 +8,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "true"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "pull_request" && target_branch == "main" && body.action != "ready_for_review"
pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm

View File

@ -7,8 +7,8 @@ metadata:
build.appstudio.redhat.com/target_branch: '{{target_branch}}'
pipelinesascode.tekton.dev/cancel-in-progress: "false"
pipelinesascode.tekton.dev/max-keep-runs: "3"
pipelinesascode.tekton.dev/on-cel-expression: >-
event == "push" && target_branch == "main"
pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch
== "main"
labels:
appstudio.openshift.io/application: ramalama
appstudio.openshift.io/component: rocm

View File

@ -7,29 +7,58 @@ spec:
params:
- name: image
description: The image to use when setting up the test environment.
- name: source-artifact
description: The Trusted Artifact URI pointing to the artifact with the application source code.
- name: cmd
description: The command to run.
- name: envs
description: List of environment variables (NAME=VALUE) to be set in the test environment.
type: array
default: []
steps:
- name: run
image: $(params.image)
volumes:
- name: workdir
emptyDir: {}
stepTemplate:
volumeMounts:
- mountPath: /var/workdir
name: workdir
computeResources:
limits:
memory: 4Gi
requests:
cpu: "1"
memory: 1Gi
steps:
- name: use-trusted-artifact
image: quay.io/konflux-ci/build-trusted-artifacts:latest@sha256:4689f88dd253bd1feebf57f1a76a5a751880f739000719cd662bbdc76990a7fd
args:
- use
- $(params.source-artifact)=/var/workdir/source
- name: set-env
image: $(params.image)
workingDir: /var/workdir/source
args:
- $(params.envs[*])
script: |
#!/bin/bash -e
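# Persist each NAME=VALUE argument to .bashenv; the run step sources it via BASH_ENV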
rm -f .bashenv
while [ $# -ne 0 ]; do
echo "$1" >> .bashenv
shift
done
- name: run
image: $(params.image)
securityContext:
capabilities:
add:
- SETFCAP
workingDir: /var/workdir/source
env:
- name: BASH_ENV
value: .bashenv
command:
- /usr/bin/entrypoint.sh
args:
- $(params.envs[*])
- /bin/bash
- -ex
- -c

View File

@ -168,7 +168,7 @@ bats-image:
podman inspect $(BATS_IMAGE) &> /dev/null || \
podman build -t $(BATS_IMAGE) -f container-images/bats/Containerfile .
bats-in-container: extra-opts = --security-opt unmask=/proc/* --device /dev/net/tun --device /dev/fuse
bats-in-container: extra-opts = --security-opt unmask=/proc/* --device /dev/net/tun
%-in-container: bats-image
podman run -it --rm \

View File

@ -2,12 +2,14 @@
<img src="https://github.com/user-attachments/assets/1a338ecf-dc84-4495-8c70-16882955da47" width=50%>
</p>
[RamaLama](https://ramalama.ai) strives to make working with AI simple, straightforward, and familiar by using OCI containers.
[RamaLama](https://ramalama.ai) is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers.
<br>
<br>
## Description
RamaLama is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers. It allows engineers to apply container-centric development patterns and extend their benefits to AI use cases.
RamaLama strives to make working with AI simple, straightforward, and familiar by using OCI containers.
RamaLama is an open-source tool that simplifies the local use and serving of AI models for inference from any source through the familiar approach of containers. Using a container engine like Podman, engineers can apply container-centric development patterns and extend their benefits to AI use cases.
RamaLama eliminates the need to configure the host system: instead, it pulls a container image specific to the GPUs discovered on the host system, letting you work with various models and platforms.
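For example, serving a model is a single command; RamaLama pulls the matching accelerated image on first use (a minimal sketch, assuming the `tiny` shortname from the bundled shortnames.conf):
```
# Pulls an image matched to the detected GPU, then serves the model over a REST API
ramalama serve tiny
```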
@ -21,25 +23,6 @@ RamaLama eliminates the need to configure the host system by instead pulling a c
- Interact with models via REST API or as a chatbot.
<br>
## Install
### Install on Fedora
RamaLama is available in [Fedora](https://fedoraproject.org/). To install it, run:
```
sudo dnf install python3-ramalama
```
### Install via PyPI
RamaLama is available via PyPI at [https://pypi.org/project/ramalama](https://pypi.org/project/ramalama)
```
pip install ramalama
```
### Install script (Linux and macOS)
Install RamaLama by running:
```
curl -fsSL https://ramalama.ai/install.sh | bash
```
## Accelerated images
| Accelerator | Image |
@ -120,6 +103,25 @@ pip install mlx-lm
ramalama --runtime=mlx serve hf://mlx-community/Unsloth-Phi-4-4bit
```
## Install
### Install on Fedora
RamaLama is available in [Fedora 40](https://fedoraproject.org/) and later. To install it, run:
```
sudo dnf install ramalama
```
### Install via PyPi
RamaLama is available via PyPi at [https://pypi.org/project/ramalama](https://pypi.org/project/ramalama)
```
pip install ramalama
```
### Install script (Linux and macOS)
Install RamaLama by running:
```
curl -fsSL https://ramalama.ai/install.sh | bash
```
#### Default Container Engine
When both Podman and Docker are installed, RamaLama defaults to Podman. The `RAMALAMA_CONTAINER_ENGINE=docker` environment variable can override this behaviour. When neither is installed, RamaLama will attempt to run the model with software on the local system.
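For example (a minimal sketch, again using the `tiny` shortname):
```
# Force Docker even when Podman is also installed
RAMALAMA_CONTAINER_ENGINE=docker ramalama run tiny
```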
<br>

View File

@ -1,11 +1,12 @@
FROM quay.io/fedora/fedora:42
ENV HOME=/tmp \
XDG_RUNTIME_DIR=/tmp
XDG_RUNTIME_DIR=/tmp \
STORAGE_DRIVER=vfs
WORKDIR /src
ENTRYPOINT ["/usr/bin/entrypoint.sh"]
RUN dnf -y install make bats jq iproute podman openssl httpd-tools diffutils \
RUN dnf -y install make bats jq iproute podman openssl httpd-tools \
python3-huggingface-hub \
$([ $(uname -m) == "x86_64" ] && echo ollama) \
# for validate and unit-tests
@ -25,6 +26,4 @@ RUN git clone --depth=1 https://github.com/ggml-org/llama.cpp && \
COPY container-images/bats/entrypoint.sh /usr/bin
COPY container-images/bats/containers.conf /etc/containers
COPY . /src
RUN chmod -R a+rw /src
RUN chmod a+rw /etc/subuid /etc/subgid

View File

@ -3,16 +3,6 @@
echo "$(id -un):10000:2000" > /etc/subuid
echo "$(id -un):10000:2000" > /etc/subgid
while [ $# -gt 0 ]; do
if [[ "$1" =~ = ]]; then
# shellcheck disable=SC2163
export "$1"
shift
else
break
fi
done
if [ $# -gt 0 ]; then
exec "$@"
else

View File

@ -1,4 +1,4 @@
FROM registry.access.redhat.com/ubi9/ubi:9.6-1752625787
FROM registry.access.redhat.com/ubi9/ubi:9.6-1752069608
# Install Python development dependencies
RUN dnf install -y python3-devel wget compat-openssl11 python3-jinja2 python3-markupsafe

View File

@ -1,17 +0,0 @@
FROM quay.io/ramalama/ramalama
ENV PATH="/root/.local/bin:$PATH"
ENV VIRTUAL_ENV="/opt/venv"
ENV UV_PYTHON_INSTALL_DIR="/opt/uv/python"
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
ENV UV_LINK_MODE="copy"
COPY . /src/ramalama
WORKDIR /src/ramalama
RUN container-images/scripts/build-vllm.sh
WORKDIR /

View File

@ -1,4 +1,4 @@
FROM registry.access.redhat.com/ubi9/ubi:9.6-1752625787
FROM registry.access.redhat.com/ubi9/ubi:9.6-1752069608
COPY container-images/rocm-ubi/amdgpu.repo /etc/yum.repos.d/
COPY container-images/rocm-ubi/rocm.repo /etc/yum.repos.d/

View File

@ -1,90 +0,0 @@
#!/bin/bash
available() {
command -v "$1" >/dev/null
}
install_deps() {
set -eux -o pipefail
if available dnf; then
dnf install -y git curl wget ca-certificates gcc gcc-c++ \
gperftools-libs numactl-devel ffmpeg libSM libXext mesa-libGL jq lsof \
vim numactl
dnf -y clean all
rm -rf /var/cache/*dnf*
elif available apt-get; then
apt-get update -y
apt-get install -y --no-install-recommends git curl wget ca-certificates \
gcc g++ libtcmalloc-minimal4 libnuma-dev ffmpeg libsm6 libxext6 libgl1 \
jq lsof vim numactl
rm -rf /var/lib/apt/lists/*
fi
curl -LsSf https://astral.sh/uv/0.7.21/install.sh | bash
}
preload_and_ulimit() {
local ld_preload_file="libtcmalloc_minimal.so.4"
local ld_preload_file_1="/usr/lib/$arch-linux-gnu/$ld_preload_file"
local ld_preload_file_2="/usr/lib64/$ld_preload_file"
if [ -e "$ld_preload_file_1" ]; then
ld_preload_file="$ld_preload_file_1"
elif [ -e "$ld_preload_file_2" ]; then
ld_preload_file="$ld_preload_file_2"
fi
if [ -e "$ld_preload_file" ]; then
echo "LD_PRELOAD=$ld_preload_file" >> /etc/environment
fi
echo 'ulimit -c 0' >> ~/.bashrc
}
pip_install() {
local url="https://download.pytorch.org/whl/cpu"
uv pip install -v -r "$1" --extra-index-url $url
}
git_clone_specific_commit() {
local repo="${vllm_url##*/}"
git init "$repo"
cd "$repo"
git remote add origin "$vllm_url"
git fetch --depth 1 origin $commit
git reset --hard $commit
}
main() {
set -eux -o pipefail
install_deps
local arch
arch=$(uname -m)
preload_and_ulimit
uv venv --python 3.12 --seed "$VIRTUAL_ENV"
uv pip install --upgrade pip
local vllm_url="https://github.com/vllm-project/vllm"
local commit="ac9fb732a5c0b8e671f8c91be8b40148282bb14a"
git_clone_specific_commit
if [ "$arch" == "x86_64" ]; then
export VLLM_CPU_DISABLE_AVX512="0"
export VLLM_CPU_AVX512BF16="0"
export VLLM_CPU_AVX512VNNI="0"
elif [ "$arch" == "aarch64" ]; then
export VLLM_CPU_DISABLE_AVX512="true"
fi
pip_install requirements/cpu-build.txt
pip_install requirements/cpu.txt
MAX_JOBS=2 VLLM_TARGET_DEVICE=cpu python3 setup.py install
cd -
rm -rf vllm /root/.cache
}
main "$@"

View File

@ -99,10 +99,10 @@ is_rhel_based() { # doesn't include openEuler
dnf_install_mesa() {
if [ "${ID}" = "fedora" ]; then
dnf copr enable -y slp/mesa-libkrun-vulkan
dnf install -y mesa-vulkan-drivers-25.0.7-100.fc42 virglrenderer "${vulkan_rpms[@]}"
dnf install -y mesa-vulkan-drivers-25.0.7-100.fc42 "${vulkan_rpms[@]}"
dnf versionlock add mesa-vulkan-drivers-25.0.7-100.fc42
else
dnf install -y mesa-vulkan-drivers virglrenderer "${vulkan_rpms[@]}"
dnf install -y mesa-vulkan-drivers "${vulkan_rpms[@]}"
fi
rm_non_ubi_repos

View File

@ -40,18 +40,7 @@ update_python() {
}
docling() {
case $1 in
cuda)
PYTORCH_DIR="cu128"
;;
rocm)
PYTORCH_DIR="rocm6.3"
;;
*)
PYTORCH_DIR="cpu"
;;
esac
${python} -m pip install --prefix=/usr docling docling-core accelerate --extra-index-url "https://download.pytorch.org/whl/$PYTORCH_DIR"
${python} -m pip install --prefix=/usr docling docling-core accelerate --extra-index-url https://download.pytorch.org/whl/"$1"
# Preloads models (assumes its installed from container_build.sh)
doc2rag load
}
@ -62,8 +51,6 @@ rag() {
}
to_gguf() {
# required to build under GCC 15 until a new release is available, see https://github.com/google/sentencepiece/issues/1108 for details
export CXXFLAGS="-include cstdint"
${python} -m pip install --prefix=/usr "numpy~=1.26.4" "sentencepiece~=0.2.0" "transformers>=4.45.1,<5.0.0" git+https://github.com/ggml-org/llama.cpp#subdirectory=gguf-py "protobuf>=4.21.0,<5.0.0"
}
@ -73,9 +60,6 @@ main() {
# shellcheck disable=SC1091
source /etc/os-release
# caching in a container build is unhelpful, and can cause errors
export PIP_NO_CACHE_DIR=1
local arch
arch="$(uname -m)"
local gpu="${1-cpu}"
@ -83,14 +67,18 @@ main() {
python=$(python_version)
local pkgs
if available dnf; then
pkgs=("git-core" "gcc" "gcc-c++" "cmake")
pkgs=("git-core" "gcc" "gcc-c++")
else
pkgs=("git" "gcc" "g++" "cmake")
pkgs=("git" "gcc" "g++")
fi
if [ "${gpu}" = "cuda" ]; then
pkgs+=("libcudnn9-devel-cuda-12" "libcusparselt0" "cuda-cupti-12-*")
fi
if [[ "$ID" = "fedora" && "$VERSION_ID" -ge 42 ]] ; then
pkgs+=("python3-sentencepiece-0.2.0")
fi
update_python
to_gguf

View File

@ -62,7 +62,7 @@ add_rag() {
tag=$tag-rag
containerfile="container-images/common/Containerfile.rag"
GPU=cpu
case "${2##*/}" in
case $2 in
cuda)
GPU=cuda
;;

View File

@ -110,7 +110,7 @@ log_cli_date_format = "%Y-%m-%d %H:%M:%S"
include = ["ramalama", "ramalama.*"]
[tool.setuptools.data-files]
"share/ramalama" = ["shortnames/shortnames.conf", "docs/ramalama.conf"]
"share/ramalama" = ["shortnames/shortnames.conf"]
"share/man/man1" = ["docs/*.1"]
"share/man/man5" = ["docs/*.5"]
"share/man/man7" = ["docs/*.7"]

View File

@ -133,10 +133,6 @@ def get_parser():
def init_cli():
"""Initialize the RamaLama CLI and parse command line arguments."""
# Need to know if we're running with --dryrun or --generate before adding the subcommands,
# otherwise calls to accel_image() when setting option defaults will cause unnecessary image pulls.
if any(arg in ("--dryrun", "--dry-run", "--generate") or arg.startswith("--generate=") for arg in sys.argv[1:]):
CONFIG.dryrun = True
parser = get_parser()
args = parse_arguments(parser)
post_parse_setup(args)
@ -707,7 +703,7 @@ def _get_source_model(args):
smodel = New(src, args)
if smodel.type == "OCI":
raise ValueError(f"converting from an OCI based image {src} is not supported")
if not smodel.exists() and not args.dryrun:
if not smodel.exists():
smodel.pull(args)
return smodel
@ -1008,8 +1004,6 @@ def serve_parser(subparsers):
def _get_rag(args):
if os.path.exists(args.rag):
return
if args.pull == "never" or args.dryrun:
return
model = New(args.rag, args=args, transport="oci")
if not model.exists():
model.pull(args)

View File

@ -127,7 +127,7 @@ def exec_cmd(args, stdout2null: bool = False, stderr2null: bool = False):
raise
def run_cmd(args, cwd=None, stdout=subprocess.PIPE, ignore_stderr=False, ignore_all=False, encoding=None):
def run_cmd(args, cwd=None, stdout=subprocess.PIPE, ignore_stderr=False, ignore_all=False):
"""
Run the given command arguments.
@ -137,7 +137,6 @@ def run_cmd(args, cwd=None, stdout=subprocess.PIPE, ignore_stderr=False, ignore_
stdout: standard output configuration
ignore_stderr: if True, ignore standard error
ignore_all: if True, ignore both standard output and standard error
encoding: encoding to apply to the result text
"""
logger.debug(f"run_cmd: {quoted(args)}")
logger.debug(f"Working directory: {cwd}")
@ -152,7 +151,7 @@ def run_cmd(args, cwd=None, stdout=subprocess.PIPE, ignore_stderr=False, ignore_
if ignore_all:
sout = subprocess.DEVNULL
result = subprocess.run(args, check=True, cwd=cwd, stdout=sout, stderr=serr, encoding=encoding)
result = subprocess.run(args, check=True, cwd=cwd, stdout=sout, stderr=serr)
logger.debug(f"Command finished with return code: {result.returncode}")
return result
@ -226,72 +225,34 @@ def engine_version(engine: SUPPORTED_ENGINES) -> str:
return run_cmd(cmd_args).stdout.decode("utf-8").strip()
def load_cdi_yaml(stream) -> dict:
# Returns a dict containing just the "devices" key, whose value is
# a list of dicts, each mapping the key "name" to a device name.
# For example: {'devices': [{'name': 'all'}]}
# This depends on the key "name" being unique to the list of dicts
# under "devices" and the value of the "name" key being on the
# same line following a colon.
data = {"devices": []}
for line in stream:
if ':' in line:
key, value = line.split(':', 1)
if key.strip() == "name":
data['devices'].append({'name': value.strip().strip('"')})
return data
def load_cdi_config(spec_dirs: List[str]) -> dict | None:
# Loads the first YAML or JSON CDI configuration file found in the
# given directories."""
def resolve_cdi(spec_dirs: List[str]):
"""Loads all CDI specs from the given directories."""
for spec_dir in spec_dirs:
for root, _, files in os.walk(spec_dir):
for file in files:
_, ext = os.path.splitext(file)
file_path = os.path.join(root, file)
if ext in [".yaml", ".yml"]:
try:
with open(file_path, "r") as stream:
return load_cdi_yaml(stream)
except OSError:
continue
elif ext == ".json":
try:
with open(file_path, "r") as stream:
return json.load(stream)
except json.JSONDecodeError:
continue
except UnicodeDecodeError:
continue
except OSError:
continue
return None
if file.endswith('.json') or file.endswith('.yaml'):
if load_spec(os.path.join(root, file)):
return True
return False
def find_in_cdi(devices: List[str]) -> tuple[List[str], List[str]]:
# Attempts to find a CDI configuration for each device in devices
# and returns a list of configured devices and a list of
# unconfigured devices.
cdi = load_cdi_config(['/etc/cdi', '/var/run/cdi'])
cdi_devices = cdi.get("devices", []) if cdi else []
cdi_device_names = [name for cdi_device in cdi_devices if (name := cdi_device.get("name"))]
def yaml_safe_load(stream) -> dict:
data = {}
for line in stream:
if ':' in line:
key, value = line.split(':', 1)
data[key.strip()] = value.strip()
configured = []
unconfigured = []
for device in devices:
if device in cdi_device_names:
configured.append(device)
# A device can be specified by a prefix of the uuid
elif device.startswith("GPU") and any(name.startswith(device) for name in cdi_device_names):
configured.append(device)
else:
perror(f"Device {device} does not have a CDI configuration")
unconfigured.append(device)
return data
return configured, unconfigured
def load_spec(path: str):
"""Loads a single CDI spec file."""
with open(path, 'r') as f:
spec = json.load(f) if path.endswith('.json') else yaml_safe_load(f)
return spec.get('kind')
def check_asahi() -> Literal["asahi"] | None:
@ -317,41 +278,27 @@ def check_metal(args: ContainerArgType) -> bool:
@lru_cache(maxsize=1)
def check_nvidia() -> Literal["cuda"] | None:
try:
command = ['nvidia-smi', '--query-gpu=index,uuid', '--format=csv,noheader']
result = run_cmd(command, encoding="utf-8")
except OSError:
return None
command = ['nvidia-smi']
run_cmd(command).stdout.decode("utf-8")
smi_lines = result.stdout.splitlines()
parsed_lines = [[item.strip() for item in line.split(',')] for line in smi_lines if line]
if not parsed_lines:
return None
# ensure at least one CDI device resolves
if resolve_cdi(['/etc/cdi', '/var/run/cdi']):
if "CUDA_VISIBLE_DEVICES" not in os.environ:
dev_command = ['nvidia-smi', '--query-gpu=index', '--format=csv,noheader']
try:
result = run_cmd(dev_command)
output = result.stdout.decode("utf-8").strip()
if not output:
raise ValueError("nvidia-smi returned empty GPU indices")
devices = ','.join(output.split('\n'))
except Exception:
devices = "0"
indices, uuids = zip(*parsed_lines) if parsed_lines else (tuple(), tuple())
# Get the list of devices specified by CUDA_VISIBLE_DEVICES, if any
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
visible_devices = cuda_visible_devices.split(',') if cuda_visible_devices else []
for device in visible_devices:
if device not in indices and not any(uuid.startswith(device) for uuid in uuids):
perror(f"{device} not found")
return None
os.environ["CUDA_VISIBLE_DEVICES"] = devices
configured, unconfigured = find_in_cdi(visible_devices + ["all"])
if unconfigured and "all" not in configured:
perror(f"No CDI configuration found for {','.join(unconfigured)}")
perror("You can use the \"nvidia-ctk cdi generate\" command from the ")
perror("nvidia-container-toolkit to generate a CDI configuration.")
perror("See ramalama-cuda(7).")
return None
elif configured:
if "all" in configured:
configured.remove("all")
if not configured:
configured = indices
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(configured)
return "cuda"
except Exception:
pass
return None
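For reference, a sketch of the output that the `--query-gpu=index,uuid` query above parses (the UUIDs below are illustrative placeholders, not real devices):
```
nvidia-smi --query-gpu=index,uuid --format=csv,noheader
# 0, GPU-11111111-2222-3333-4444-555555555555
# 1, GPU-22222222-3333-4444-5555-666666666666
```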
@ -403,9 +350,6 @@ def check_intel() -> Literal["intel"] | None:
intel_gpus = (
b"0xe20b",
b"0xe20c",
b"0x46a6",
b"0x46a8",
b"0x46aa",
b"0x56a0",
b"0x56a1",
b"0x7d51",
@ -622,7 +566,7 @@ def accel_image(config: Config) -> str:
vers = minor_release()
should_pull = config.pull in ["always", "missing"] and not config.dryrun
should_pull = config.pull in ["always", "missing"]
if attempt_to_use_versioned(config.engine, image, vers, True, should_pull):
return f"{image}:{vers}"

View File

@ -98,7 +98,6 @@ class BaseConfig:
default_image: str = DEFAULT_IMAGE
user: UserConfig = field(default_factory=UserConfig)
selinux: bool = False
dryrun: bool = False
settings: RamalamaSettings = field(default_factory=RamalamaSettings)
def __post_init__(self):

View File

@ -216,10 +216,6 @@ def containers(args):
conman_args = [conman, "ps", "-a", "--filter", "label=ai.ramalama"]
if getattr(args, "noheading", False):
if conman == "docker" and not args.format:
# implement --noheading by using --format
conman_args += ["--format={{.ID}} {{.Image}} {{.Command}} {{.CreatedAt}} {{.Status}} {{.Ports}} {{.Names}}"]
else:
conman_args += ["--noheading"]
if getattr(args, "notrunc", False):

View File

@ -15,7 +15,7 @@ from ramalama.model_store.snapshot_file import SnapshotFileType
missing_huggingface = """
Optional: Huggingface models require the huggingface-cli module.
This module can be installed via PyPI tools like uv, pip, pip3, pipx, or via
This module can be installed via PyPi tools like uv, pip, pip3, pipx, or via
distribution package managers like dnf or apt. Example:
uv pip install huggingface_hub
"""

View File

@ -12,7 +12,7 @@ from ramalama.model_store.snapshot_file import SnapshotFileType
missing_modelscope = """
Optional: ModelScope models require the modelscope module.
This module can be installed via PyPI tools like uv, pip, pip3, pipx, or via
This module can be installed via PyPi tools like uv, pip, pip3, pipx, or via
distribution package managers like dnf or apt. Example:
uv pip install modelscope
"""

View File

@ -31,10 +31,9 @@ BuildRequires: make
BuildRequires: python3-devel
BuildRequires: podman
BuildRequires: python3-pytest
BuildRequires: mailcap
Provides: python3-ramalama = %{version}-%{release}
Obsoletes: python3-ramalama < 0.11.0-1
Obsoletes: python3-ramalama < 0.10.1-2
Requires: podman
@ -56,12 +55,14 @@ will run the AI Models within a container based on the OCI image.
%forgeautosetup -p1
%build
make docs
%pyproject_wheel
%{__make} docs
%install
%pyproject_install
%pyproject_save_files -l %{pypi_name}
%{__make} DESTDIR=%{buildroot} PREFIX=%{_prefix} install-docs install-shortnames
%{__make} DESTDIR=%{buildroot} PREFIX=%{_prefix} install-completions
%check
%pytest -v test/unit

View File

@ -34,9 +34,9 @@
"merlinite-lab:7b" = "huggingface://instructlab/merlinite-7b-lab-GGUF/merlinite-7b-lab-Q4_K_M.gguf"
"merlinite-lab-7b" = "huggingface://instructlab/merlinite-7b-lab-GGUF/merlinite-7b-lab-Q4_K_M.gguf"
"tiny" = "ollama://tinyllama"
"mistral" = "hf://lmstudio-community/Mistral-7B-Instruct-v0.3-GGUF/Mistral-7B-Instruct-v0.3-Q4_K_M.gguf"
"mistral:7b" = "hf://lmstudio-community/Mistral-7B-Instruct-v0.3-GGUF/Mistral-7B-Instruct-v0.3-Q4_K_M.gguf"
"mistral:7b-v3" = "hf://lmstudio-community/Mistral-7B-Instruct-v0.3-GGUF/Mistral-7B-Instruct-v0.3-Q4_K_M.gguf"
"mistral" = "huggingface://TheBloke/Mistral-7B-Instruct-v0.2-GGUF/mistral-7b-instruct-v0.2.Q4_K_M.gguf"
"mistral:7b" = "huggingface://TheBloke/Mistral-7B-Instruct-v0.2-GGUF/mistral-7b-instruct-v0.2.Q4_K_M.gguf"
"mistral:7b-v3" = "huggingface://MaziyarPanahi/Mistral-7B-Instruct-v0.3-GGUF/Mistral-7B-Instruct-v0.3.Q4_K_M.gguf"
"mistral:7b-v2" = "huggingface://TheBloke/Mistral-7B-Instruct-v0.2-GGUF/mistral-7b-instruct-v0.2.Q4_K_M.gguf"
"mistral:7b-v1" = "huggingface://TheBloke/Mistral-7B-Instruct-v0.1-GGUF/mistral-7b-instruct-v0.1.Q5_K_M.gguf"
"mistral-small3.1" = "hf://bartowski/mistralai_Mistral-Small-3.1-24B-Instruct-2503-GGUF/mistralai_Mistral-Small-3.1-24B-Instruct-2503-IQ2_M.gguf"

View File

@ -14,7 +14,7 @@ EOF
if is_container; then
run_ramalama info
conman=$(jq -r .Engine.Name <<< $output)
conman=$(jq .Engine.Name <<< $output | tr -d '"' )
verify_begin="${conman} run --rm"
run_ramalama -q --dryrun run ${MODEL}
@ -140,7 +140,7 @@ EOF
skip_if_docker
run_ramalama 22 run --image bogus --pull=never tiny
is "$output" ".*Error: bogus: image not known"
run_ramalama 125 run --image bogus1 --rag quay.io/ramalama/rag --pull=never tiny
run_ramalama 125 run --image bogus1 --rag quay.io/ramalama/testrag --pull=never tiny
is "$output" ".*Error: bogus1: image not known"
}
@ -148,10 +148,13 @@ EOF
skip_if_nocontainer
skip_if_darwin
skip_if_docker
run_ramalama --dryrun run --rag quay.io/ramalama/rag --pull=never tiny
run_ramalama 125 --dryrun run --rag quay.io/ramalama/rag --pull=never tiny
is "$output" "Error: quay.io/ramalama/rag: image not known.*"
run_ramalama --dryrun run --rag quay.io/ramalama/testrag --pull=never tiny
is "$output" ".*quay.io/ramalama/.*-rag:"
run_ramalama --dryrun run --image quay.io/ramalama/ramalama:1.0 --rag quay.io/ramalama/rag --pull=never tiny
run_ramalama --dryrun run --image quay.io/ramalama/ramalama:1.0 --rag quay.io/ramalama/testrag --pull=never tiny
is "$output" ".*quay.io/ramalama/ramalama:1.0"
}

View File

@ -115,6 +115,7 @@ verify_begin=".*run --rm"
}
@test "ramalama serve and stop" {
skip "Seems to cause race conditions"
skip_if_nocontainer
model=ollama://smollm:135m
@ -122,15 +123,17 @@ verify_begin=".*run --rm"
container2=c_$(safename)
run_ramalama serve --name ${container1} --detach ${model}
cid="$output"
run_ramalama info
conman=$(jq -r .Engine.Name <<< $output)
run -0 ${conman} inspect $cid
run_ramalama ps
is "$output" ".*${container1}" "list correct for container1"
run_ramalama ps --format '{{.Ports}}'
port=${output: -8:4}
run_ramalama chat --ls --url http://127.0.0.1:${port}/v1
is "$output" "smollm:135m" "list of models available correct"
run_ramalama chat --ls
is "$output" "ollama://smollm:135m" "list of models available correct"
run_ramalama containers --noheading
is "$output" ".*${container1}" "list correct for container1"
@ -148,6 +151,7 @@ verify_begin=".*run --rm"
}
@test "ramalama --detach serve multiple" {
skip "Seems to cause race conditions"
skip_if_nocontainer
model=ollama://smollm:135m
@ -345,7 +349,7 @@ verify_begin=".*run --rm"
rm /tmp/$name.yaml
}
@test "ramalama serve --api llama-stack" {
@test "ramalama serve --api llama-stack --generate=kube:/tmp" {
skip_if_docker
skip_if_nocontainer
model=tiny
@ -389,7 +393,7 @@ verify_begin=".*run --rm"
run_ramalama 125 serve --image bogus --pull=never tiny
is "$output" "Error: bogus: image not known"
run_ramalama 125 serve --image bogus1 --rag quay.io/ramalama/rag --pull=never tiny
run_ramalama 125 serve --image bogus1 --rag quay.io/ramalama/testrag --pull=never tiny
is "$output" ".*Error: bogus1: image not known"
}
@ -398,10 +402,13 @@ verify_begin=".*run --rm"
skip_if_darwin
skip_if_docker
run_ramalama ? stop ${name}
run_ramalama --dryrun serve --rag quay.io/ramalama/rag --pull=never tiny
run_ramalama ? --dryrun serve --rag quay.io/ramalama/rag --pull=never tiny
is "$output" ".*Error: quay.io/ramalama/rag: image not known"
run_ramalama --dryrun serve --rag quay.io/ramalama/testrag --pull=never tiny
is "$output" ".*quay.io/ramalama/.*-rag:"
run_ramalama --dryrun serve --image quay.io/ramalama/ramalama:1.0 --rag quay.io/ramalama/rag --pull=never tiny
run_ramalama --dryrun serve --image quay.io/ramalama/ramalama:1.0 --rag quay.io/ramalama/testrag --pull=never tiny
is "$output" ".*quay.io/ramalama/ramalama:1.0"
}

Some files were not shown because too many files have changed in this diff.