test: add the e2e test cases for download & prefetch rate limit (#3757)
Signed-off-by: chlins <chlins.zhang@gmail.com>
parent d7f9e91586
commit f41058c7fe
@@ -33,18 +33,22 @@ jobs:
             image: manager
             image-tag: v2.1.65
             chart-name: manager
+            skip: "Rate Limit"
           - module: scheduler
             image: scheduler
             image-tag: v2.1.65
             chart-name: scheduler
+            skip: "Rate Limit"
           - module: client
             image: client
             image-tag: v0.2.4
             chart-name: client
+            skip: "Rate Limit"
           - module: seed-client
             image: client
             image-tag: v0.2.4
             chart-name: seed-client
+            skip: "Rate Limit"
 
     steps:
       - name: Free Disk Space (Ubuntu)
@@ -0,0 +1,153 @@
name: E2E Test With Rate Limit(API v2 - Rust Client)

on:
  push:
    branches: [main, release-*]
    paths-ignore: ["**.md", "**.png", "**.jpg", "**.svg", "**/docs/**"]
  pull_request:
    branches: [main, release-*]
    paths-ignore: ["**.md", "**.png", "**.jpg", "**.svg", "**/docs/**"]
  schedule:
    - cron: '0 4 * * *'

permissions:
  contents: read

env:
  KIND_VERSION: v0.12.0
  CONTAINERD_VERSION: v1.5.2
  KIND_CONFIG_PATH: test/testdata/kind/config-v2.yaml
  DRAGONFLY_CHARTS_PATH: deploy/helm-charts/charts/dragonfly
  DRAGONFLY_FILE_SERVER_PATH: test/testdata/k8s/dufs.yaml

jobs:
  e2e_tests:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    strategy:
      matrix:
        module:
          - "normal"
        include:
          - module: normal
            charts-config: test/testdata/charts/config-v2-rate-limit.yaml
            focus: "Rate Limit"

    steps:
      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be
        with:
          tool-cache: false
          android: true
          dotnet: true
          haskell: true
          large-packages: true
          docker-images: true
          swap-storage: true

      - name: Checkout code
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          submodules: recursive
          fetch-depth: 0

      - name: Install Go
        uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a
        with:
          go-version-file: go.mod

      - name: Get dependencies
        run: |
          go install github.com/onsi/ginkgo/v2/ginkgo@v2.22.1
          mkdir -p /tmp/artifact

      - name: Setup buildx
        uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db
        id: buildx
        with:
          install: true

      - name: Cache Docker layers
        uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-

      - name: Pull Rust Client Image
        run: |
          cd client-rs
          CLIENT_TAG=$(git describe --tags $(git rev-parse HEAD))
          docker pull dragonflyoss/client:$CLIENT_TAG
          docker tag dragonflyoss/client:$CLIENT_TAG dragonflyoss/client:latest
          docker pull dragonflyoss/dfinit:$CLIENT_TAG
          docker tag dragonflyoss/dfinit:$CLIENT_TAG dragonflyoss/dfinit:latest

      - name: Build Scheduler Image
        uses: docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355
        with:
          context: .
          file: build/images/scheduler/Dockerfile
          push: false
          load: true
          tags: dragonflyoss/scheduler:latest
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new

      - name: Build Manager Image
        uses: docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355
        with:
          context: .
          file: build/images/manager/Dockerfile
          push: false
          load: true
          tags: dragonflyoss/manager:latest
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new

      - name: Setup Kind
        uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
        with:
          version: ${{ env.KIND_VERSION }}
          config: ${{ env.KIND_CONFIG_PATH }}
          cluster_name: kind

      - name: Kind load images
        run: |
          kind load docker-image dragonflyoss/manager:latest
          kind load docker-image dragonflyoss/scheduler:latest
          kind load docker-image dragonflyoss/client:latest
          kind load docker-image dragonflyoss/dfinit:latest

      - name: Setup dragonfly
        run: |
          helm install --wait --timeout 15m --dependency-update --create-namespace --namespace dragonfly-system -f ${{ matrix.charts-config }} dragonfly ${{ env.DRAGONFLY_CHARTS_PATH }}
          mkdir -p /tmp/artifact/dufs && chmod 777 /tmp/artifact/dufs
          kubectl apply -f ${{ env.DRAGONFLY_FILE_SERVER_PATH }}
          kubectl wait po dufs-0 --namespace dragonfly-e2e --for=condition=ready --timeout=10m

      - name: Run E2E test
        run: |
          ginkgo -v -r --race --fail-fast --cover --trace --show-node-events --focus=${{ matrix.focus }} test/e2e/v2
          cat coverprofile.out >> coverage.txt

      - name: Move cache
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@1e68e06f1dbfde0e4cefc87efeba9e4643565303
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./coverage.txt
          flags: e2etests

      - name: Upload Logs
        uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b
        if: always()
        with:
          name: ${{ matrix.module }}-e2e-tests-logs
          path: |
            /tmp/artifact/**

@@ -31,7 +31,8 @@ jobs:
         include:
           - module: normal
             charts-config: test/testdata/charts/config-v2.yaml
-            skip: ""
+            # rate limit will be tested in another job
+            skip: "Rate Limit"
 
     steps:
       - name: Free Disk Space (Ubuntu)

@@ -0,0 +1,329 @@
/*
 * Copyright 2025 The Dragonfly Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package e2e

import (
    "errors"
    "fmt"
    "time"

    . "github.com/onsi/ginkgo/v2" //nolint
    . "github.com/onsi/gomega"    //nolint

    "d7y.io/dragonfly/v2/test/e2e/v2/util"
)

/*
The rate limit configuration is located in `test/testdata/charts/config-v2-rate-limit.yaml`.
By default, the rate limit is set to `1MiB`. However, due to the nature of the download process,
where pieces are downloaded concurrently, the rate-limiting mechanism cannot achieve
100% precision. As a result, the actual download speed may exceed the theoretical
rate limit.

To ensure the stability of the following end-to-end (e2e) tests, the theoretical download
time is adjusted by multiplying it with a factor of 0.5. This adjustment accounts for
the discrepancy between the theoretical and actual speeds.

For example:
- If the rate limit is set to 1 MiB/s and the file size is 100 MiB, the theoretical
  download time is `100 MiB / 1 MiB/s = 100 seconds`.

Here are the expected download times (jitter factor: 0.5):

Minimum expected download time: 100 seconds * (1-0.5) = 50 seconds
Maximum expected download time: 100 seconds * (1+0.5) = 150 seconds

This adjustment ensures that the e2e tests remain consistent and reliable while accounting
for the inherent imprecision of the rate-limiting mechanism.
*/
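
// NOTE: Illustrative sketch only. It is not referenced by the specs below, and the
// identifiers rateLimitBytesPerSecond, jitterFactor and expectedDownloadBounds are
// hypothetical helpers rather than part of the util package. They simply restate the
// arithmetic above so the asserted thresholds (25s for 50 MiB, 50s for 100 MiB) are
// easy to re-derive.
const (
    rateLimitBytesPerSecond = 1 * 1024 * 1024 // 1 MiB/s, as set in config-v2-rate-limit.yaml.
    jitterFactor            = 0.5
)

// expectedDownloadBounds returns the minimum and maximum expected download durations for
// a file of the given size under the configured rate limit: (25s, 75s) for a 50 MiB file
// and (50s, 150s) for a 100 MiB file.
func expectedDownloadBounds(fileSizeBytes int64) (minExpected, maxExpected time.Duration) {
    theoreticalSeconds := float64(fileSizeBytes) / float64(rateLimitBytesPerSecond)
    minExpected = time.Duration(theoreticalSeconds * (1 - jitterFactor) * float64(time.Second))
    maxExpected = time.Duration(theoreticalSeconds * (1 + jitterFactor) * float64(time.Second))
    return minExpected, maxExpected
}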

var _ = Describe("Download Using Dfget With Rate Limit", func() {
    Context("50MiB file", func() {
        var (
            testFile *util.File
            err      error
        )

        BeforeEach(func() {
            testFile, err = util.GetFileServer().GenerateFile(util.FileSize10MiB * 5)
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile).NotTo(BeNil())
        })

        AfterEach(func() {
            err = util.GetFileServer().DeleteFile(testFile.GetInfo())
            Expect(err).NotTo(HaveOccurred())
        })

        It("should download successfully in over 25 seconds", func() {
            clientPod, err := util.ClientExec()
            fmt.Println(err)
            Expect(err).NotTo(HaveOccurred())

            startAt := time.Now()
            out, err := clientPod.Command("sh", "-c", fmt.Sprintf("dfget %s --disable-back-to-source --output %s", testFile.GetDownloadURL(), testFile.GetOutputPath())).CombinedOutput()
            elapsed := time.Since(startAt)
            fmt.Println(string(out), err)
            Expect(err).NotTo(HaveOccurred())
            Expect(elapsed).Should(BeNumerically(">", 25*time.Second))

            sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{clientPod}, testFile.GetTaskID())
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile.GetSha256()).To(Equal(sha256sum))

            sha256sum, err = util.CalculateSha256ByOutput([]*util.PodExec{clientPod}, testFile.GetOutputPath())
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile.GetSha256()).To(Equal(sha256sum))
        })
    })

    Context("100MiB file", func() {
        var (
            testFile *util.File
            err      error
        )

        BeforeEach(func() {
            testFile, err = util.GetFileServer().GenerateFile(util.FileSize100MiB)
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile).NotTo(BeNil())
        })

        AfterEach(func() {
            err = util.GetFileServer().DeleteFile(testFile.GetInfo())
            Expect(err).NotTo(HaveOccurred())
        })

        It("should download successfully in over 50 seconds", func() {
            clientPod, err := util.ClientExec()
            fmt.Println(err)
            Expect(err).NotTo(HaveOccurred())

            startAt := time.Now()
            out, err := clientPod.Command("sh", "-c", fmt.Sprintf("dfget %s --disable-back-to-source --output %s", testFile.GetDownloadURL(), testFile.GetOutputPath())).CombinedOutput()
            elapsed := time.Since(startAt)
            fmt.Println(string(out), err)
            Expect(err).NotTo(HaveOccurred())
            Expect(elapsed).Should(BeNumerically(">", 50*time.Second))

            sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{clientPod}, testFile.GetTaskID())
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile.GetSha256()).To(Equal(sha256sum))

            sha256sum, err = util.CalculateSha256ByOutput([]*util.PodExec{clientPod}, testFile.GetOutputPath())
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile.GetSha256()).To(Equal(sha256sum))
        })
    })
})

var _ = Describe("Download Using Proxy With Rate Limit", func() {
    Context("50MiB file", func() {
        var (
            testFile *util.File
            err      error
        )

        BeforeEach(func() {
            testFile, err = util.GetFileServer().GenerateFile(util.FileSize10MiB * 5)
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile).NotTo(BeNil())
        })

        AfterEach(func() {
            err = util.GetFileServer().DeleteFile(testFile.GetInfo())
            Expect(err).NotTo(HaveOccurred())
        })

        It("should download successfully in over 25 seconds", Label("proxy", "download"), func() {
            clientPod, err := util.ClientExec()
            fmt.Println(err)
            Expect(err).NotTo(HaveOccurred())

            startAt := time.Now()
            out, err := clientPod.Command("sh", "-c", fmt.Sprintf("curl -x 127.0.0.1:4001 -H 'X-Dragonfly-Tag: proxy' %s --output %s", testFile.GetDownloadURL(), testFile.GetOutputPath())).CombinedOutput()
            elapsed := time.Since(startAt)
            fmt.Println(string(out), err)
            Expect(err).NotTo(HaveOccurred())
            Expect(elapsed).Should(BeNumerically(">", 25*time.Second))

            sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{clientPod}, testFile.GetTaskID(util.WithTaskIDTag("proxy")))
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile.GetSha256()).To(Equal(sha256sum))

            sha256sum, err = util.CalculateSha256ByOutput([]*util.PodExec{clientPod}, testFile.GetOutputPath())
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile.GetSha256()).To(Equal(sha256sum))
        })
    })

    Context("100MiB file", func() {
        var (
            testFile *util.File
            err      error
        )

        BeforeEach(func() {
            testFile, err = util.GetFileServer().GenerateFile(util.FileSize100MiB)
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile).NotTo(BeNil())
        })

        AfterEach(func() {
            err = util.GetFileServer().DeleteFile(testFile.GetInfo())
            Expect(err).NotTo(HaveOccurred())
        })

        It("should download successfully in over 50 seconds", Label("proxy", "download"), func() {
            clientPod, err := util.ClientExec()
            fmt.Println(err)
            Expect(err).NotTo(HaveOccurred())

            startAt := time.Now()
            out, err := clientPod.Command("sh", "-c", fmt.Sprintf("curl -x 127.0.0.1:4001 -H 'X-Dragonfly-Tag: proxy' %s --output %s", testFile.GetDownloadURL(), testFile.GetOutputPath())).CombinedOutput()
            elapsed := time.Since(startAt)
            fmt.Println(string(out), err)
            Expect(err).NotTo(HaveOccurred())
            Expect(elapsed).Should(BeNumerically(">", 50*time.Second))

            sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{clientPod}, testFile.GetTaskID(util.WithTaskIDTag("proxy")))
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile.GetSha256()).To(Equal(sha256sum))

            sha256sum, err = util.CalculateSha256ByOutput([]*util.PodExec{clientPod}, testFile.GetOutputPath())
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile.GetSha256()).To(Equal(sha256sum))
        })
    })
})

var _ = Describe("Download Using Prefetch Proxy With Rate Limit", func() {
    Context("50MiB file and set range header bytes=100-200", func() {
        var (
            testFile *util.File
            err      error
        )

        BeforeEach(func() {
            testFile, err = util.GetFileServer().GenerateFile(util.FileSize10MiB * 5)
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile).NotTo(BeNil())
        })

        AfterEach(func() {
            err = util.GetFileServer().DeleteFile(testFile.GetInfo())
            Expect(err).NotTo(HaveOccurred())
        })

        It("download should be ok", Label("prefetch-proxy", "download", "range: bytes=100-200"), func() {
            seedClientPod, err := util.SeedClientExec(0)
            fmt.Println(err)
            Expect(err).NotTo(HaveOccurred())

            out, err := seedClientPod.Command("sh", "-c", fmt.Sprintf("curl -x 127.0.0.1:4001 -r 100-200 -H 'X-Dragonfly-Tag: prefetch-proxy-bytes-100-200' %s --output %s", testFile.GetDownloadURL(), testFile.GetOutputPath())).CombinedOutput()
            fmt.Println(err)
            Expect(err).NotTo(HaveOccurred())
            fmt.Println(string(out))

            sha256sum, err := util.CalculateSha256ByOutput([]*util.PodExec{seedClientPod}, testFile.GetOutputPath())
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile.GetRangeSha256("100-200", testFile.GetInfo().Size())).To(Equal(sha256sum))

            // Prefetch should not be completed within 25 seconds.
            Consistently(func() error {
                sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{seedClientPod}, testFile.GetTaskID(util.WithTaskIDTag("prefetch-proxy-bytes-100-200")))
                if err != nil {
                    return err
                }
                // Prefetch should not be completed, so the sha256sum should not be equal.
                if testFile.GetSha256() == sha256sum {
                    return errors.New("prefetch should not be completed, but it seems done as the sha256sum is equal")
                }

                return nil
            }, 25*time.Second, 5*time.Second).ShouldNot(HaveOccurred())

            // Prefetch should eventually be completed within 75 seconds (so wait for another 50 seconds).
            Eventually(func() string {
                sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{seedClientPod}, testFile.GetTaskID(util.WithTaskIDTag("prefetch-proxy-bytes-100-200")))
                if err != nil {
                    return err.Error()
                }
                // Eventually, the sha256sum should be equal.
                return sha256sum
            }, 50*time.Second, 5*time.Second).Should(Equal(testFile.GetSha256()))
        })
    })

    Context("100MiB file and set range header bytes=100-200", func() {
        var (
            testFile *util.File
            err      error
        )

        BeforeEach(func() {
            testFile, err = util.GetFileServer().GenerateFile(util.FileSize100MiB)
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile).NotTo(BeNil())
        })

        AfterEach(func() {
            err = util.GetFileServer().DeleteFile(testFile.GetInfo())
            Expect(err).NotTo(HaveOccurred())
        })

        It("download should be ok", Label("prefetch-proxy", "download", "range: bytes=100-200"), func() {
            seedClientPod, err := util.SeedClientExec(0)
            fmt.Println(err)
            Expect(err).NotTo(HaveOccurred())

            out, err := seedClientPod.Command("sh", "-c", fmt.Sprintf("curl -x 127.0.0.1:4001 -r 100-200 -H 'X-Dragonfly-Tag: prefetch-proxy-bytes-100-200' %s --output %s", testFile.GetDownloadURL(), testFile.GetOutputPath())).CombinedOutput()
            fmt.Println(err)
            Expect(err).NotTo(HaveOccurred())
            fmt.Println(string(out))

            sha256sum, err := util.CalculateSha256ByOutput([]*util.PodExec{seedClientPod}, testFile.GetOutputPath())
            Expect(err).NotTo(HaveOccurred())
            Expect(testFile.GetRangeSha256("100-200", testFile.GetInfo().Size())).To(Equal(sha256sum))

            // Prefetch should not be completed within 50 seconds.
            Consistently(func() error {
                sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{seedClientPod}, testFile.GetTaskID(util.WithTaskIDTag("prefetch-proxy-bytes-100-200")))
                if err != nil {
                    return err
                }
                // Prefetch should not be completed, so the sha256sum should not be equal.
                if testFile.GetSha256() == sha256sum {
                    return errors.New("prefetch should not be completed, but it seems done as the sha256sum is equal")
                }

                return nil
            }, 50*time.Second, 10*time.Second).ShouldNot(HaveOccurred())

            // Prefetch should eventually be completed within 150 seconds (so wait for another 100 seconds).
            Eventually(func() string {
                sha256sum, err := util.CalculateSha256ByTaskID([]*util.PodExec{seedClientPod}, testFile.GetTaskID(util.WithTaskIDTag("prefetch-proxy-bytes-100-200")))
                if err != nil {
                    return err.Error()
                }
                // Eventually, the sha256sum should be equal.
                return sha256sum
            }, 100*time.Second, 10*time.Second).Should(Equal(testFile.GetSha256()))
        })
    })
})

@@ -0,0 +1,178 @@
manager:
  image:
    repository: dragonflyoss/manager
    tag: latest
  replicas: 1
  resources:
    requests:
      cpu: "0"
      memory: "0"
    limits:
      cpu: "1"
      memory: "2Gi"
  extraVolumeMounts:
    - name: logs
      mountPath: "/var/log/"
    - name: artifact
      mountPath: /tmp/artifact
  extraVolumes:
    - name: logs
      emptyDir: { }
    - name: artifact
      hostPath:
        path: /tmp/artifact
  metrics:
    enable: true
  config:
    console: false
    verbose: true
    job:
      rateLimit:
        fillInterval: 1m
        capacity: 100
        quantum: 100

scheduler:
  image:
    repository: dragonflyoss/scheduler
    tag: latest
  replicas: 3
  resources:
    requests:
      cpu: "0"
      memory: "0"
    limits:
      cpu: "2"
      memory: "4Gi"
  service:
    type: NodePort
    nodePort: 30802
  extraVolumeMounts:
    - name: logs
      mountPath: "/var/log/"
    - name: artifact
      mountPath: /tmp/artifact
  extraVolumes:
    - name: logs
      emptyDir: { }
    - name: artifact
      hostPath:
        path: /tmp/artifact
  metrics:
    enable: true
    enableHost: true
  config:
    console: false
    verbose: true
    scheduler:
      gc:
        hostGCInterval: 2m

seedClient:
  enable: true
  replicas: 3
  image:
    repository: dragonflyoss/client
    tag: latest
  resources:
    requests:
      cpu: "0"
      memory: "0"
    limits:
      cpu: "2"
      memory: "4Gi"
  extraVolumeMounts:
    - name: logs
      mountPath: "/var/log/"
    - name: artifact
      mountPath: /tmp/artifact
  extraVolumes:
    - name: logs
      emptyDir: { }
    - name: artifact
      hostPath:
        path: /tmp/artifact
  config:
    download:
      rateLimit: 1MiB
    dynconfig:
      refreshInterval: 1s
    scheduler:
      announceInterval: 1s
    log:
      level: debug
    proxy:
      prefetch: true
      prefetchRateLimit: 1MiB
      registryMirror:
        addr: https://index.docker.io
      rules:
        - regex: blobs/sha256.*
        - regex: file-server.*

client:
  enable: true
  image:
    repository: dragonflyoss/client
    tag: latest
  resources:
    requests:
      cpu: "0"
      memory: "0"
    limits:
      cpu: "2"
      memory: "4Gi"
  # Allow client daemonSet to create a pod on master node for testing when the daemon goes offline.
  tolerations:
    - key: "node-role.kubernetes.io/master"
      operator: "Exists"
      effect: "NoSchedule"
  extraVolumeMounts:
    - name: logs
      mountPath: "/var/log/"
    - name: artifact
      mountPath: /tmp/artifact
  extraVolumes:
    - name: logs
      emptyDir: { }
    - name: artifact
      hostPath:
        path: /tmp/artifact
  dfinit:
    enable: true
    image:
      repository: dragonflyoss/dfinit
      tag: latest
    config:
      containerRuntime:
        containerd:
          configPath: /etc/containerd/config.toml
          registries:
            - hostNamespace: docker.io
              serverAddr: https://index.docker.io
              capabilities: ["pull", "resolve"]
            - hostNamespace: ghcr.io
              serverAddr: https://ghcr.io
              capabilities: ["pull", "resolve"]
  config:
    download:
      rateLimit: 1MiB
    dynconfig:
      refreshInterval: 1s
    scheduler:
      announceInterval: 1s
    log:
      level: debug
    proxy:
      prefetch: false
      registryMirror:
        addr: https://index.docker.io
      rules:
        - regex: blobs/sha256.*
        - regex: file-server.*

dfdaemon:
  enable: false

seedPeer:
  enable: false