Replacing ID generation with WeakReference reuse

First pass at replacing ID generation with WeakReference reuse

In this first version, the Cache<ClassLoader, ID> was replaced with Cache<ClassLoader, WeakReference<ClassLoader>>.

The core cache is still of Cache<TypeCacheKey, TypePool.Resolution> and TypeCacheKey logically remains a composite key of ClassLoader, class name.

The removal of ID assignment means ID exhaustion is no longer an issue, so there's never a need to rebuild the cache.  For that reason, CacheInstance has been removed and the core caching logic has been moved into DDCachingPoolStrategy.

While TypeCacheKey remains conceptually the same, the internals have changed somewhat.  The TypeCacheKey now has 3 core fields...
- loaderHash
- loaderRef
- class name

Since loader refs are recycled, the fast path for key equivalence can use reference equivalence of the reference objects.

This change ripples through the CacheProvider-s which also have to store loaderHash and loaderRef.

It may be worth going a step further and switching to a Cache&lt;Loader, TypePool&gt; as well.  That would still avoid the creation of many WeakReference-s, since the underlying CacheProvider will hold a canonical WeakReference per ClassLoader.
This commit is contained in:
dougqh 2020-01-27 12:03:44 -05:00
parent cf877f67e5
commit fb871611b5
2 changed files with 228 additions and 262 deletions

View File

@ -4,9 +4,9 @@ import static net.bytebuddy.agent.builder.AgentBuilder.PoolStrategy;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicLong;
import java.lang.ref.WeakReference;
import lombok.extern.slf4j.Slf4j;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.dynamic.ClassFileLocator;
@ -43,167 +43,94 @@ import net.bytebuddy.pool.TypePool;
*/
@Slf4j
public class DDCachingPoolStrategy implements PoolStrategy {
// Many things are package visible for testing purposes --
// others to avoid creation of synthetic accessors
static final int CONCURRENCY_LEVEL = 8;
static final int LOADER_CAPACITY = 64;
static final int TYPE_CAPACITY = 64;
static final int BOOTSTRAP_HASH = 0;
/**
* Most of the logic exists in CacheInstance This volatile + exhaustion checking is defense
* against loader ID exhaustion
* Cache of recent ClassLoader WeakReferences; used to...
* <ul>
 * <li>Reduce the number of WeakReferences created</li>
* <li>Allow for quick fast path equivalence check of composite keys</li>
* </ul>
*/
volatile CacheInstance cacheInstance = new CacheInstance();
final Cache<ClassLoader, WeakReference<ClassLoader>> loaderRefCache =
CacheBuilder.newBuilder()
.weakKeys()
.concurrencyLevel(CONCURRENCY_LEVEL)
.initialCapacity(LOADER_CAPACITY / 2)
.maximumSize(LOADER_CAPACITY)
.build();
@Override
public TypePool typePool(final ClassFileLocator classFileLocator, final ClassLoader classLoader) {
CacheInstance cacheInstance = this.cacheInstance;
TypePool typePool = cacheInstance.typePool(classFileLocator, classLoader);
if (cacheInstance.exhaustedLoaderIdSeq()) {
// If the loader ID sequence is exhausted, drop the prior cache & start over
// The ID space is so large that this shouldn't occur
log.error("cacheInstance exhausted - rebuilding cache");
this.cacheInstance = new CacheInstance();
}
return typePool;
}
/*
* CacheInstance embodies the core of the cache. In general, we only
* expect a single CacheInstance object to ever be created.
*
* However, CacheInstance does provide an extra layer of protection
* against loaderIdSeq exhaustion. If ever the loaderIdSeq of
* CacheInstance is exhausted, then DDCachingPoolStrategy.typePool
* will detect that and discard the CacheInstance.
*
* At that time, a new CacheInstance with a fresh sequence will
* be created in its place.
/**
* Single shared Type.Resolution cache -- uses a composite key --
* conceptually of loader & name
*/
private static final class CacheInstance {
static final int CONCURRENCY_LEVEL = 8;
static final int LOADER_CAPACITY = 64;
static final int TYPE_CAPACITY = 64;
static final long BOOTSTRAP_ID = Long.MIN_VALUE;
static final long START_ID = BOOTSTRAP_ID + 1;
static final long LIMIT_ID = Long.MAX_VALUE - 10;
static final long EXHAUSTED_ID = LIMIT_ID;
// Many things are package visible for testing purposes --
// others to avoid creation of synthetic accessors
/**
* Cache of recent loaderIds: guarantee is that no two loaders are given the same ID; however, a
* loader may be given more than one ID if it falls out the cache.
*/
final Cache<ClassLoader, Long> loaderIdCache =
CacheBuilder.newBuilder()
.weakKeys()
.concurrencyLevel(CONCURRENCY_LEVEL)
.initialCapacity(LOADER_CAPACITY / 2)
.maximumSize(LOADER_CAPACITY)
.build();
/**
* Single shared Type.Resolution cache -- uses a composite key of loader ID & class name The
* initial capacity is set to the maximum capacity to avoid expansion overhead.
*/
final Cache<TypeCacheKey, TypePool.Resolution> sharedResolutionCache =
CacheBuilder.newBuilder()
.softValues()
.concurrencyLevel(CONCURRENCY_LEVEL)
.initialCapacity(TYPE_CAPACITY)
.maximumSize(TYPE_CAPACITY)
.build();
/**
* ID sequence for loaders -- BOOTSTRAP_ID is reserved -- starts higher at START_ID Sequence
* proceeds up until LIMIT_ID at which the sequence and this cacheInstance are considered to be
* exhausted
*/
final AtomicLong loaderIdSeq = new AtomicLong(START_ID);
final Cache<TypeCacheKey, TypePool.Resolution> sharedResolutionCache =
CacheBuilder.newBuilder()
.softValues()
.concurrencyLevel(CONCURRENCY_LEVEL)
.initialCapacity(TYPE_CAPACITY)
.maximumSize(TYPE_CAPACITY)
.build();
/** Fast path for bootstrap */
final SharedResolutionCacheAdapter bootstrapCacheProvider =
new SharedResolutionCacheAdapter(BOOTSTRAP_ID, sharedResolutionCache);
new SharedResolutionCacheAdapter(BOOTSTRAP_HASH, null, sharedResolutionCache);
private final Callable<Long> provisionIdCallable =
new Callable<Long>() {
@Override
public final Long call() throws Exception {
return provisionId();
}
};
final TypePool typePool(
final ClassFileLocator classFileLocator, final ClassLoader classLoader) {
if (classLoader == null) {
return createCachingTypePool(bootstrapCacheProvider, classFileLocator);
}
Long existingId = loaderIdCache.getIfPresent(classLoader);
if (existingId != null) {
return createCachingTypePool(existingId, classFileLocator);
}
if (exhaustedLoaderIdSeq()) {
return createNonCachingTypePool(classFileLocator);
}
long provisionedId = 0;
try {
provisionedId = loaderIdCache.get(classLoader, this.provisionIdCallable);
} catch (ExecutionException e) {
log.error("unexpected exception", e);
return createNonCachingTypePool(classFileLocator);
}
if (provisionedId == EXHAUSTED_ID) {
return createNonCachingTypePool(classFileLocator);
} else {
return createCachingTypePool(provisionedId, classFileLocator);
}
@Override
public final TypePool typePool(
final ClassFileLocator classFileLocator, final ClassLoader classLoader) {
if (classLoader == null) {
return createCachingTypePool(bootstrapCacheProvider, classFileLocator);
}
final boolean exhaustedLoaderIdSeq() {
return (loaderIdSeq.get() >= LIMIT_ID);
WeakReference<ClassLoader> loaderRef = loaderRefCache.getIfPresent(classLoader);
if ( loaderRef == null ) {
loaderRef = new WeakReference<>(classLoader);
loaderRefCache.put(classLoader, loaderRef);
}
final long provisionId() {
do {
long curId = loaderIdSeq.get();
if (curId >= LIMIT_ID) return EXHAUSTED_ID;
int loaderHash = classLoader.hashCode();
return createCachingTypePool(loaderHash, loaderRef, classFileLocator);
}
long newId = curId + 1;
boolean acquired = loaderIdSeq.compareAndSet(curId, newId);
if (acquired) return newId;
} while (!Thread.currentThread().isInterrupted());
private final TypePool createNonCachingTypePool(final ClassFileLocator classFileLocator) {
return new TypePool.Default.WithLazyResolution(
TypePool.CacheProvider.NoOp.INSTANCE, classFileLocator, TypePool.Default.ReaderMode.FAST);
}
return EXHAUSTED_ID;
}
private final TypePool.CacheProvider createCacheProvider(
final int loaderHash,
final WeakReference<ClassLoader> loaderRef)
{
return new SharedResolutionCacheAdapter(loaderHash, loaderRef, sharedResolutionCache);
}
private final TypePool createNonCachingTypePool(final ClassFileLocator classFileLocator) {
return new TypePool.Default.WithLazyResolution(
TypePool.CacheProvider.NoOp.INSTANCE, classFileLocator, TypePool.Default.ReaderMode.FAST);
}
private final TypePool createCachingTypePool(
final int loaderHash,
final WeakReference<ClassLoader> loaderRef,
final ClassFileLocator classFileLocator) {
return new TypePool.Default.WithLazyResolution(
createCacheProvider(loaderHash, loaderRef),
classFileLocator,
TypePool.Default.ReaderMode.FAST);
}
private final TypePool.CacheProvider createCacheProvider(final long loaderId) {
return new SharedResolutionCacheAdapter(loaderId, sharedResolutionCache);
}
private final TypePool createCachingTypePool(
final TypePool.CacheProvider cacheProvider, final ClassFileLocator classFileLocator) {
return new TypePool.Default.WithLazyResolution(
cacheProvider, classFileLocator, TypePool.Default.ReaderMode.FAST);
}
private final TypePool createCachingTypePool(
final long loaderId, final ClassFileLocator classFileLocator) {
return new TypePool.Default.WithLazyResolution(
createCacheProvider(loaderId), classFileLocator, TypePool.Default.ReaderMode.FAST);
}
private final TypePool createCachingTypePool(
final TypePool.CacheProvider cacheProvider, final ClassFileLocator classFileLocator) {
return new TypePool.Default.WithLazyResolution(
cacheProvider, classFileLocator, TypePool.Default.ReaderMode.FAST);
}
final long approximateSize() {
return sharedResolutionCache.size();
}
final long approximateSize() {
return sharedResolutionCache.size();
}
/**
@ -211,16 +138,22 @@ public class DDCachingPoolStrategy implements PoolStrategy {
* name.
*/
static final class TypeCacheKey {
private final long cacheId;
private final String name;
private final int loaderHash;
private final WeakReference<ClassLoader> loaderRef;
private final String className;
private final int hashCode;
TypeCacheKey(final long cacheId, final String name) {
this.cacheId = cacheId;
this.name = name;
TypeCacheKey(
final int loaderHash,
final WeakReference<ClassLoader> loaderRef,
final String className)
{
this.loaderHash = loaderHash;
this.loaderRef = loaderRef;
this.className = className;
hashCode = (int) (31 * cacheId) ^ name.hashCode();
hashCode = (int) (31 * this.loaderHash) ^ className.hashCode();
}
@Override
@ -230,10 +163,34 @@ public class DDCachingPoolStrategy implements PoolStrategy {
@Override
public boolean equals(final Object obj) {
if (!(obj instanceof TypeCacheKey)) return false;
if ( !(obj instanceof TypeCacheKey) ) return false;
TypeCacheKey that = (TypeCacheKey) obj;
return (cacheId == that.cacheId) && name.equals(that.name);
TypeCacheKey that = (TypeCacheKey)obj;
if ( loaderHash != that.loaderHash ) return false;
// Fastpath loaderRef equivalence -- works because of WeakReference cache used
// Also covers the bootstrap null loaderRef case
if ( loaderRef == that.loaderRef ) {
// still need to check name
return className.equals(that.className);
} else if ( className.equals(that.className) ) {
// need to perform a deeper loader check -- requires calling Reference.get
      // which can strengthen the Reference, so deliberately done last
// If either reference has gone null, they aren't considered equivalent
      // Technically, this is a bit of a violation of equals semantics, since
      // two logically equivalent keys can compare as not equal once collected.
ClassLoader thisLoader = loaderRef.get();
if ( thisLoader == null ) return false;
ClassLoader thatLoader = that.loaderRef.get();
if ( thatLoader == null ) return false;
return (thisLoader == thatLoader);
} else {
return false;
}
}
}
@ -242,22 +199,26 @@ public class DDCachingPoolStrategy implements PoolStrategy {
private static final TypePool.Resolution OBJECT_RESOLUTION =
new TypePool.Resolution.Simple(TypeDescription.OBJECT);
private final long cacheId;
private final int loaderHash;
private final WeakReference<ClassLoader> loaderRef;
private final Cache<TypeCacheKey, TypePool.Resolution> sharedResolutionCache;
SharedResolutionCacheAdapter(
final long cacheId, final Cache<TypeCacheKey, TypePool.Resolution> sharedResolutionCache) {
this.cacheId = cacheId;
final int loaderHash,
final WeakReference<ClassLoader> loaderRef,
final Cache<TypeCacheKey, TypePool.Resolution> sharedResolutionCache) {
this.loaderHash = loaderHash;
this.loaderRef = loaderRef;
this.sharedResolutionCache = sharedResolutionCache;
}
@Override
public TypePool.Resolution find(final String name) {
public TypePool.Resolution find(final String className) {
TypePool.Resolution existingResolution =
sharedResolutionCache.getIfPresent(new TypeCacheKey(cacheId, name));
sharedResolutionCache.getIfPresent(new TypeCacheKey(loaderHash, loaderRef, className));
if (existingResolution != null) return existingResolution;
if (OBJECT_NAME.equals(name)) {
if (OBJECT_NAME.equals(className)) {
return OBJECT_RESOLUTION;
}
@ -265,12 +226,12 @@ public class DDCachingPoolStrategy implements PoolStrategy {
}
@Override
public TypePool.Resolution register(final String name, final TypePool.Resolution resolution) {
if (OBJECT_NAME.equals(name)) {
public TypePool.Resolution register(final String className, final TypePool.Resolution resolution) {
if (OBJECT_NAME.equals(className)) {
return resolution;
}
sharedResolutionCache.put(new TypeCacheKey(cacheId, name), resolution);
sharedResolutionCache.put(new TypeCacheKey(loaderHash, loaderRef, className), resolution);
return resolution;
}

View File

@ -6,49 +6,97 @@ import net.bytebuddy.dynamic.ClassFileLocator
import net.bytebuddy.pool.TypePool
import spock.lang.Timeout
import java.lang.ref.WeakReference
import java.security.SecureClassLoader
@Timeout(5)
class CacheProviderTest extends DDSpecification {
def "key equivalence"() {
setup:
def key1 = new DDCachingPoolStrategy.TypeCacheKey(1, "foo")
def key2 = new DDCachingPoolStrategy.TypeCacheKey(1, "foo")
def "key bootstrap equivalence"() {
def loader = null
def loaderHash = DDCachingPoolStrategy.BOOTSTRAP_HASH
def loaderRef = null
def key1 = new DDCachingPoolStrategy.TypeCacheKey(loaderHash, loaderRef, "foo")
def key2 = new DDCachingPoolStrategy.TypeCacheKey(loaderHash, loaderRef, "foo")
expect:
key1.hashCode() == key2.hashCode()
key1.equals(key2)
}
def "different loader - same name"() {
def "key same ref equivalence"() {
setup:
def key1 = new DDCachingPoolStrategy.TypeCacheKey(1, "foo")
def key2 = new DDCachingPoolStrategy.TypeCacheKey(2, "foo")
def loader = newClassLoader()
def loaderHash = loader.hashCode()
def loaderRef = new WeakReference<ClassLoader>(loader)
def key1 = new DDCachingPoolStrategy.TypeCacheKey(loaderHash, loaderRef, "foo")
def key2 = new DDCachingPoolStrategy.TypeCacheKey(loaderHash, loaderRef, "foo")
expect:
// not strictly guaranteed, but important for performance
key1.hashCode() != key2.hashCode()
!key1.equals(key2)
key1.hashCode() == key2.hashCode()
key1.equals(key2)
}
def "same loader - different name"() {
def "key different ref equivalence"() {
setup:
def key1 = new DDCachingPoolStrategy.TypeCacheKey(1, "foo")
def key2 = new DDCachingPoolStrategy.TypeCacheKey(1, "foobar")
def loader = newClassLoader()
def loaderHash = loader.hashCode()
def loaderRef1 = new WeakReference<ClassLoader>(loader)
def loaderRef2 = new WeakReference<ClassLoader>(loader)
def key1 = new DDCachingPoolStrategy.TypeCacheKey(loaderHash, loaderRef1, "foo")
def key2 = new DDCachingPoolStrategy.TypeCacheKey(loaderHash, loaderRef2, "foo")
expect:
// not strictly guaranteed, but important for performance
key1.hashCode() != key2.hashCode()
loaderRef1 != loaderRef2
!key1.equals(key2)
key1.hashCode() == key2.hashCode()
key1.equals(key2)
}
def "key mismatch -- same loader - diff name"() {
setup:
def loader = newClassLoader()
def loaderHash = loader.hashCode()
def loaderRef = new WeakReference<ClassLoader>(loader)
def fooKey = new DDCachingPoolStrategy.TypeCacheKey(loaderHash, loaderRef, "foo")
def barKey = new DDCachingPoolStrategy.TypeCacheKey(loaderHash, loaderRef, "bar")
expect:
// not strictly guaranteed -- but important for performance
fooKey.hashCode() != barKey.hashCode()
!fooKey.equals(barKey)
}
def "key mismatch -- same name - diff loader"() {
setup:
def loader1 = newClassLoader()
def loader1Hash = loader1.hashCode()
def loaderRef1 = new WeakReference<ClassLoader>(loader1)
def loader2 = newClassLoader()
def loader2Hash = loader2.hashCode()
def loaderRef2 = new WeakReference<ClassLoader>(loader2)
def fooKey1 = new DDCachingPoolStrategy.TypeCacheKey(loader1Hash, loaderRef1, "foo")
def fooKey2 = new DDCachingPoolStrategy.TypeCacheKey(loader2Hash, loaderRef2, "foo")
expect:
// not strictly guaranteed -- but important for performance
fooKey1.hashCode() != fooKey2.hashCode()
!fooKey1.equals(fooKey2)
}
def "test basic caching"() {
setup:
def cacheInstance = new DDCachingPoolStrategy.CacheInstance()
def poolStrat = new DDCachingPoolStrategy()
def cacheProvider = cacheInstance.createCacheProvider(1)
def loader = newClassLoader()
def loaderHash = loader.hashCode()
def loaderRef = new WeakReference<ClassLoader>(loader)
def cacheProvider = poolStrat.createCacheProvider(loaderHash, loaderRef)
when:
cacheProvider.register("foo", new TypePool.Resolution.Simple(TypeDescription.VOID))
@ -56,15 +104,20 @@ class CacheProviderTest extends DDSpecification {
then:
// not strictly guaranteed, but fine for this test
cacheProvider.find("foo") != null
cacheInstance.approximateSize() == 1
poolStrat.approximateSize() == 1
}
def "test ID equivalence"() {
def "test loader equivalence"() {
setup:
def cacheInstance = new DDCachingPoolStrategy.CacheInstance()
def poolStrat = new DDCachingPoolStrategy()
def cacheProvider1A = cacheInstance.createCacheProvider(1)
def cacheProvider1B = cacheInstance.createCacheProvider(1)
def loader1 = newClassLoader()
def loaderHash1 = loader1.hashCode()
def loaderRef1A = new WeakReference<ClassLoader>(loader1)
def loaderRef1B = new WeakReference<ClassLoader>(loader1)
def cacheProvider1A = poolStrat.createCacheProvider(loaderHash1, loaderRef1A)
def cacheProvider1B = poolStrat.createCacheProvider(loaderHash1, loaderRef1B)
when:
cacheProvider1A.register("foo", newVoid())
@ -75,15 +128,23 @@ class CacheProviderTest extends DDSpecification {
cacheProvider1B.find("foo") != null
cacheProvider1A.find("foo").is(cacheProvider1B.find("foo"))
cacheInstance.approximateSize() == 1
poolStrat.approximateSize() == 1
}
def "test ID separation"() {
def "test loader separation"() {
setup:
def cacheInstance = new DDCachingPoolStrategy.CacheInstance()
def poolStrat = new DDCachingPoolStrategy()
def cacheProvider1 = cacheInstance.createCacheProvider(1)
def cacheProvider2 = cacheInstance.createCacheProvider(2)
def loader1 = newClassLoader()
def loaderHash1 = loader1.hashCode()
def loaderRef1 = new WeakReference<ClassLoader>(loader1)
def loader2 = newClassLoader()
def loaderHash2 = loader2.hashCode()
def loaderRef2 = new WeakReference<ClassLoader>(loader2)
def cacheProvider1 = poolStrat.createCacheProvider(loaderHash1, loaderRef1)
def cacheProvider2 = poolStrat.createCacheProvider(loaderHash2, loaderRef2)
when:
cacheProvider1.register("foo", newVoid())
@ -95,80 +156,24 @@ class CacheProviderTest extends DDSpecification {
cacheProvider2.find("foo") != null
!cacheProvider1.find("foo").is(cacheProvider2.find("foo"))
cacheInstance.approximateSize() == 2
poolStrat.approximateSize() == 2
}
def "test loader ID assignment"() {
def "test capacity"() {
setup:
def cacheInstance = new DDCachingPoolStrategy.CacheInstance()
def poolStrat = new DDCachingPoolStrategy()
def capacity = DDCachingPoolStrategy.TYPE_CAPACITY
def locator1 = newLocator()
def loader1 = newClassLoader()
def loaderHash1 = loader1.hashCode()
def loaderRef1 = new WeakReference<ClassLoader>(loader1)
def locator2 = newLocator()
def loader2 = newClassLoader()
def loaderHash2 = loader2.hashCode()
def loaderRef2 = new WeakReference<ClassLoader>(loader2)
when:
cacheInstance.typePool(locator1, loader1)
cacheInstance.typePool(locator2, loader2)
then:
def loaderId1 = cacheInstance.loaderIdCache.getIfPresent(loader1)
def loaderId2 = cacheInstance.loaderIdCache.getIfPresent(loader2)
// both were assigned an ID -- technically these can fall out of the ID cache
loaderId1 != null
loaderId2 != null
// both IDs are not the BOOTSTRAP_ID
loaderId1 != DDCachingPoolStrategy.CacheInstance.BOOTSTRAP_ID
loaderId2 != DDCachingPoolStrategy.CacheInstance.BOOTSTRAP_ID
// class loaders don't share an ID
cacheInstance.loaderIdCache.getIfPresent(loader1) != cacheInstance.loaderIdCache.getIfPresent(loader2)
}
def "test loader ID exhaustion"() {
setup:
def cacheInstance = new DDCachingPoolStrategy.CacheInstance()
when:
cacheInstance.loaderIdSeq.set(DDCachingPoolStrategy.CacheInstance.LIMIT_ID - 2)
then:
cacheInstance.provisionId() != DDCachingPoolStrategy.CacheInstance.EXHAUSTED_ID
then:
// once exhausted provisioning -- stays exhausted
cacheInstance.provisionId() == DDCachingPoolStrategy.CacheInstance.EXHAUSTED_ID
cacheInstance.exhaustedLoaderIdSeq()
cacheInstance.provisionId() == DDCachingPoolStrategy.CacheInstance.EXHAUSTED_ID
cacheInstance.exhaustedLoaderIdSeq()
cacheInstance.provisionId() == DDCachingPoolStrategy.CacheInstance.EXHAUSTED_ID
cacheInstance.exhaustedLoaderIdSeq()
}
def "test exhaustion cacheInstance switch"() {
setup:
def cachingStrat = new DDCachingPoolStrategy()
def origCacheInstance = cachingStrat.cacheInstance
cachingStrat.cacheInstance.loaderIdSeq.set(DDCachingPoolStrategy.CacheInstance.LIMIT_ID)
when:
cachingStrat.typePool(newLocator(), newClassLoader())
then:
cachingStrat.cacheInstance != origCacheInstance
}
def "test cacheInstance capacity"() {
setup:
def cacheInstance = new DDCachingPoolStrategy.CacheInstance()
def capacity = DDCachingPoolStrategy.CacheInstance.TYPE_CAPACITY
def cacheProvider1 = cacheInstance.createCacheProvider(1)
def cacheProvider2 = cacheInstance.createCacheProvider(2)
def cacheProvider1 = poolStrat.createCacheProvider(loaderHash1, loaderRef1)
def cacheProvider2 = poolStrat.createCacheProvider(loaderHash2, loaderRef2)
def id = 0
@ -181,7 +186,7 @@ class CacheProviderTest extends DDSpecification {
then:
// cache will start to proactively free slots & size calc is approximate
cacheInstance.approximateSize() > capacity - 4
poolStrat.approximateSize() > capacity - 4
when:
10.times {
@ -192,7 +197,7 @@ class CacheProviderTest extends DDSpecification {
then:
// cache will start to proactively free slots & size calc is approximate
cacheInstance.approximateSize() > capacity - 4
poolStrat.approximateSize() > capacity - 4
}
static newVoid() {