mirror of https://github.com/containers/podman.git
Improve documentation and unit tests for SHM locks
Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
parent 52d95f5072
commit 35cc71a9e8
@@ -50,7 +50,7 @@ func CreateSHMLock(numLocks uint32) (*SHMLocks, error) {
 // OpenSHMLock opens an existing shared-memory segment holding a given number of
 // POSIX semaphores. numLocks must match the number of locks the shared memory
 // segment was created with and be a multiple of the lock bitmap size (default
-// 32)
+// 32).
 func OpenSHMLock(numLocks uint32) (*SHMLocks, error) {
 	if numLocks % bitmapSize != 0 || numLocks == 0 {
 		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a multiple of %d", C.bitmap_size_c)
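A minimal caller-side sketch of the constraint documented above, assuming the package is imported as shm and that the segment was created earlier with CreateSHMLock(128); the surrounding function is illustrative, not part of this commit:

func openExistingLocks() (*shm.SHMLocks, error) {
	// 128 matches the creation-time count and is a multiple of the
	// 32-lock bitmap size, so this call is accepted.
	locks, err := shm.OpenSHMLock(128)
	if err != nil {
		return nil, err
	}
	// A numLocks of 0, or one that is not a multiple of 32 (e.g. 100),
	// would instead fail with EINVAL before the segment is opened.
	return locks, nil
}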
@@ -95,7 +95,9 @@ func (locks *SHMLocks) Close() error {
 
 // AllocateSemaphore allocates a semaphore from a shared-memory segment for use
 // by a container or pod.
-// Returns the index of the semaphore that was allocated
+// Returns the index of the semaphore that was allocated.
+// Allocations past the maximum number of locks given when the SHM segment was
+// created will result in an error, and no semaphore will be allocated.
 func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
 	if !locks.valid {
 		return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
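A hedged sketch of a caller of AllocateSemaphore, based only on the behavior documented above; the wrapping function and error message are hypothetical:

func reserveLock(locks *shm.SHMLocks) (uint32, error) {
	sem, err := locks.AllocateSemaphore()
	if err != nil {
		// Either the segment is already full (all creation-time locks are
		// in use) or the SHMLocks has been closed; no semaphore was taken.
		return 0, errors.Wrapf(err, "cannot allocate lock for new container")
	}
	return sem, nil
}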
@@ -110,8 +112,9 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
 	return uint32(retCode), nil
 }
 
-// DeallocateSemaphore frees a semaphore in a shared-memory segment for use by
-// a container of pod
+// DeallocateSemaphore frees a semaphore in a shared-memory segment so it can be
+// reallocated to another container or pod.
+// The given semaphore must be already allocated, or an error will be returned.
 func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
 	if !locks.valid {
 		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
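And the matching release path; per the new comment, the index must have been allocated earlier, so this sketch only frees indices returned by AllocateSemaphore (the helper itself is invented):

func releaseLock(locks *shm.SHMLocks, sem uint32) error {
	// sem must be an index previously returned by AllocateSemaphore;
	// freeing an unallocated index is reported as an error.
	if err := locks.DeallocateSemaphore(sem); err != nil {
		return errors.Wrapf(err, "cannot free lock %d", sem)
	}
	return nil
}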
@@ -130,7 +133,13 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
 	return nil
 }
 
-// LockSemaphore locks the given semaphore
+// LockSemaphore locks the given semaphore.
+// If the semaphore is already locked, LockSemaphore will block until the lock
+// can be acquired.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
 func (locks *SHMLocks) LockSemaphore(sem uint32) error {
 	if !locks.valid {
 		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
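Since LockSemaphore blocks until the semaphore is free, callers typically pair it with a deferred unlock; a sketch under the same assumptions (the helper name is invented):

func withLock(locks *shm.SHMLocks, sem uint32, fn func() error) error {
	// Blocks until the semaphore is available; works even for indices that
	// were never allocated, as described in the comment above.
	if err := locks.LockSemaphore(sem); err != nil {
		return err
	}
	defer func() {
		// Ignoring the unlock error keeps the sketch short; real callers
		// should log or propagate it.
		_ = locks.UnlockSemaphore(sem)
	}()
	return fn()
}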
@@ -149,7 +158,12 @@ func (locks *SHMLocks) LockSemaphore(sem uint32) error {
 	return nil
 }
 
-// UnlockSemaphore locks the given semaphore
+// UnlockSemaphore unlocks the given semaphore.
+// Unlocking a semaphore that is already unlocked will return EBUSY.
+// There is no requirement that the given semaphore be allocated.
+// This ensures that attempts to lock a container after it has been deleted,
+// but before the caller has queried the database to determine this, will
+// succeed.
 func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
 	if !locks.valid {
 		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
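Because an already-unlocked semaphore reports EBUSY, a caller can distinguish a double unlock from other failures; a sketch that assumes the error wraps the raw syscall value, as the other errors in this file do:

func unlockChecked(locks *shm.SHMLocks, sem uint32) error {
	err := locks.UnlockSemaphore(sem)
	if err != nil && errors.Cause(err) == syscall.EBUSY {
		// The semaphore was not locked to begin with; treat as a caller bug.
		return errors.Wrapf(err, "semaphore %d was unlocked twice", sem)
	}
	return err
}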
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"syscall"
+	"time"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -145,3 +146,99 @@ func TestAllocateTwoLocksGetsDifferentLocks(t *testing.T) {
 		assert.NotEqual(t, sem1, sem2)
 	})
 }
+
+// Test allocate all locks successful and all are unique
+func TestAllocateAllLocksSucceeds(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		sems := make(map[uint32]bool)
+		for i := 0; i < numLocks; i++ {
+			sem, err := locks.AllocateSemaphore()
+			assert.NoError(t, err)
+
+			// Ensure the allocated semaphore is unique
+			_, ok := sems[sem]
+			assert.False(t, ok)
+
+			sems[sem] = true
+		}
+	})
+}
+
+// Test allocating more than the given max fails
+func TestAllocateTooManyLocksFails(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		// Allocate all locks
+		for i := 0; i < numLocks; i++ {
+			_, err := locks.AllocateSemaphore()
+			assert.NoError(t, err)
+		}
+
+		// Try and allocate one more
+		_, err := locks.AllocateSemaphore()
+		assert.Error(t, err)
+	})
+}
+
+// Test allocating max locks, deallocating one, and then allocating again succeeds
+func TestAllocateDeallocateCycle(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		// Allocate all locks
+		for i := 0; i < numLocks; i++ {
+			_, err := locks.AllocateSemaphore()
+			assert.NoError(t, err)
+		}
+
+		// Now loop through again, deallocating and reallocating.
+		// Each time we free 1 semaphore, allocate again, and make sure
+		// we get the same semaphore back.
+		var j uint32
+		for j = 0; j < numLocks; j++ {
+			err := locks.DeallocateSemaphore(j)
+			assert.NoError(t, err)
+
+			newSem, err := locks.AllocateSemaphore()
+			assert.NoError(t, err)
+			assert.Equal(t, j, newSem)
+		}
+	})
+}
+
+// Test that locks actually lock
+func TestLockSemaphoreActuallyLocks(t *testing.T) {
+	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
+		// This entire test is very ugly - lots of sleeps to try and get
+		// things to occur in the right order.
+		// It also doesn't even exercise the multiprocess nature of the
+		// locks.
+
+		// Get the current time
+		startTime := time.Now()
+
+		// Start a goroutine to take the lock and then release it after
+		// a second.
+		go func() {
+			err := locks.LockSemaphore(0)
+			assert.NoError(t, err)
+
+			time.Sleep(1 * time.Second)
+
+			err = locks.UnlockSemaphore(0)
+			assert.NoError(t, err)
+		}()
+
+		// Sleep for a quarter of a second to give the goroutine time
+		// to kick off and grab the lock
+		time.Sleep(250 * time.Millisecond)
+
+		// Take the lock
+		err := locks.LockSemaphore(0)
+		assert.NoError(t, err)
+
+		// Get the current time
+		endTime := time.Now()
+
+		// Verify that at least 1 second has passed since start
+		duration := endTime.Sub(startTime)
+		assert.True(t, duration.Seconds() > 1.0)
+	})
+}
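The new tests all go through a runLockTest helper that is not shown in this diff; a plausible shape for it, offered only as an assumption about what it does (create a fresh segment sized to the numLocks constant, run the test body, then tear the segment down):

func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
	// Fresh shared-memory segment per test so allocations do not leak
	// between test cases.
	locks, err := CreateSHMLock(numLocks)
	if err != nil {
		t.Fatalf("error creating SHM lock segment: %v", err)
	}
	defer func() {
		if err := locks.Close(); err != nil {
			t.Errorf("error closing SHM lock segment: %v", err)
		}
	}()

	testFunc(t, locks)
}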