// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package memorylimiter

import (
	"runtime"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"

	"go.opentelemetry.io/collector/internal/memorylimiter/iruntime"
)

// TestMemoryPressureResponse manipulates the results of querying memory and
// checks the expected side effects.
func TestMemoryPressureResponse(t *testing.T) {
	var currentMemAlloc uint64
	cfg := &Config{
		CheckInterval:       1 * time.Minute,
		MemoryLimitMiB:      1024,
		MemorySpikeLimitMiB: 0,
	}
	ml, err := NewMemoryLimiter(cfg, zap.NewNop())
	require.NoError(t, err)
	ml.readMemStatsFn = func(ms *runtime.MemStats) {
		ms.Alloc = currentMemAlloc * mibBytes
	}

	// Below memAllocLimit.
	currentMemAlloc = 800
	ml.CheckMemLimits()
	assert.False(t, ml.MustRefuse())

	// Above memAllocLimit.
	currentMemAlloc = 1800
	ml.CheckMemLimits()
	assert.True(t, ml.MustRefuse())

	// Check spike limit.
	ml.usageChecker.memSpikeLimit = 512 * mibBytes

	// Below memSpikeLimit.
	currentMemAlloc = 500
	ml.CheckMemLimits()
	assert.False(t, ml.MustRefuse())

	// Above memSpikeLimit.
	currentMemAlloc = 550
	ml.CheckMemLimits()
	assert.True(t, ml.MustRefuse())
}
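// TestGetDecision verifies that getMemUsageChecker derives the correct
// fixed and percentage-based limits from the configuration.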
func TestGetDecision(t *testing.T) {
	t.Run("fixed_limit", func(t *testing.T) {
		d, err := getMemUsageChecker(&Config{MemoryLimitMiB: 100, MemorySpikeLimitMiB: 20}, zap.NewNop())
		require.NoError(t, err)
		assert.Equal(t, &memUsageChecker{
			memAllocLimit: 100 * mibBytes,
			memSpikeLimit: 20 * mibBytes,
		}, d)
	})

	// Stub the total-memory lookup so that percentage-based limits resolve
	// against a known value, and restore the real implementation afterwards.
	t.Cleanup(func() {
		GetMemoryFn = iruntime.TotalMemory
	})
	GetMemoryFn = func() (uint64, error) {
		return 100 * mibBytes, nil
	}
	t.Run("percentage_limit", func(t *testing.T) {
		d, err := getMemUsageChecker(&Config{MemoryLimitPercentage: 50, MemorySpikePercentage: 10}, zap.NewNop())
		require.NoError(t, err)
		assert.Equal(t, &memUsageChecker{
			memAllocLimit: 50 * mibBytes,
			memSpikeLimit: 10 * mibBytes,
		}, d)
	})
}
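// TestRefuseDecision exercises memUsageChecker.aboveSoftLimit directly, for
// fixed and percentage-based limits, with allocations around the soft limit.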
func TestRefuseDecision(t *testing.T) {
	// Percentage-based checkers over a 1000-byte total with different limit and spike percentages.
	decision1000Limit60Spike30 := newPercentageMemUsageChecker(1000, 60, 30)
	decision1000Limit60Spike50 := newPercentageMemUsageChecker(1000, 60, 50)
	decision1000Limit40Spike20 := newPercentageMemUsageChecker(1000, 40, 20)

	tests := []struct {
		name         string
		usageChecker memUsageChecker
		ms           *runtime.MemStats
		shouldRefuse bool
	}{
		{
			name:         "should refuse over limit",
			usageChecker: *decision1000Limit60Spike30,
			ms:           &runtime.MemStats{Alloc: 600},
			shouldRefuse: true,
		},
		{
			name:         "should not refuse",
			usageChecker: *decision1000Limit60Spike30,
			ms:           &runtime.MemStats{Alloc: 100},
			shouldRefuse: false,
		},
		{
			name: "should refuse, spike, fixed usageChecker",
			usageChecker: memUsageChecker{
				memAllocLimit: 600,
				memSpikeLimit: 500,
			},
			ms:           &runtime.MemStats{Alloc: 300},
			shouldRefuse: true,
		},
		{
			name:         "should refuse, spike, percentage usageChecker",
			usageChecker: *decision1000Limit60Spike50,
			ms:           &runtime.MemStats{Alloc: 300},
			shouldRefuse: true,
		},
		{
			name:         "should refuse, spike, lower percentage usageChecker",
			usageChecker: *decision1000Limit40Spike20,
			ms:           &runtime.MemStats{Alloc: 250},
			shouldRefuse: true,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			shouldRefuse := test.usageChecker.aboveSoftLimit(test.ms)
			assert.Equal(t, test.shouldRefuse, shouldRefuse)
		})
	}
}
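// TestCallGCWhenSoftLimit verifies that a manual GC is triggered when the soft
// or hard limit is exceeded, and that repeated GCs are rate limited by the
// configured minimum intervals.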
func TestCallGCWhenSoftLimit(t *testing.T) {
	tests := []struct {
		name        string
		mlCfg       *Config
		memAllocMiB [2]uint64
		numGCs      int
	}{
		{
			name: "GC when first soft limit and not immediately",
			mlCfg: &Config{
				CheckInterval:                1 * time.Minute,
				MinGCIntervalWhenSoftLimited: 10 * time.Second,
				MemoryLimitMiB:               50,
				MemorySpikeLimitMiB:          10,
			},
			memAllocMiB: [2]uint64{45, 45},
			numGCs:      1,
		},
		{
			name: "GC always when soft limit min interval is 0",
			mlCfg: &Config{
				CheckInterval:                1 * time.Minute,
				MinGCIntervalWhenSoftLimited: 0,
				MemoryLimitMiB:               50,
				MemorySpikeLimitMiB:          10,
			},
			memAllocMiB: [2]uint64{45, 45},
			numGCs:      2,
		},
		{
			name: "GC when first hard limit and not immediately",
			mlCfg: &Config{
				CheckInterval:                1 * time.Minute,
				MinGCIntervalWhenHardLimited: 10 * time.Second,
				MemoryLimitMiB:               50,
				MemorySpikeLimitMiB:          10,
			},
			memAllocMiB: [2]uint64{55, 55},
			numGCs:      1,
		},
		{
			name: "GC always when hard limit min interval is 0",
			mlCfg: &Config{
				CheckInterval:                1 * time.Minute,
				MinGCIntervalWhenHardLimited: 0,
				MemoryLimitMiB:               50,
				MemorySpikeLimitMiB:          10,
			},
			memAllocMiB: [2]uint64{55, 55},
			numGCs:      2,
		},
		{
			name: "GC based on soft then based on hard limit",
			mlCfg: &Config{
				CheckInterval:                1 * time.Minute,
				MinGCIntervalWhenSoftLimited: 10 * time.Second,
				MinGCIntervalWhenHardLimited: 0,
				MemoryLimitMiB:               50,
				MemorySpikeLimitMiB:          10,
			},
			memAllocMiB: [2]uint64{45, 55},
			numGCs:      2,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ml, err := NewMemoryLimiter(tt.mlCfg, zap.NewNop())
			require.NoError(t, err)
			memAllocMiB := uint64(0)
			ml.readMemStatsFn = func(ms *runtime.MemStats) {
				ms.Alloc = memAllocMiB * mibBytes
			}
			// Move the last GC time into the past so that even the first check can
			// trigger a GC. The initialization code is left untouched because no GC
			// is needed when the collector starts.
			ml.lastGCDone = ml.lastGCDone.Add(-time.Minute)
			numGCs := 0
			ml.runGCFn = func() {
				numGCs++
			}

			memAllocMiB = tt.memAllocMiB[0]
			ml.CheckMemLimits()
			assert.True(t, ml.MustRefuse())

			// On Windows the clock has coarser resolution, so checking again here may
			// observe the same time as lastGCDone, which would not trigger another GC
			// when the minimum interval is 0; move lastGCDone back by 1ms.
			ml.lastGCDone = ml.lastGCDone.Add(-1 * time.Millisecond)
			memAllocMiB = tt.memAllocMiB[1]
			ml.CheckMemLimits()
			assert.True(t, ml.MustRefuse())

			assert.Equal(t, tt.numGCs, numGCs)
		})
	}
}