Initial skeleton of in-memory locks

Signed-off-by: Matthew Heon <matthew.heon@gmail.com>
This commit is contained in:
Matthew Heon 2018-04-27 16:06:02 -04:00 committed by Matthew Heon
parent 6868b5aa14
commit 27cebb780b
4 changed files with 634 additions and 0 deletions

167
libpod/lock/locks.go Normal file
View File

@ -0,0 +1,167 @@
package lock
// #cgo LDFLAGS: -lrt -lpthread
// #include "shm_lock.h"
// const uint32_t bitmap_size_c = BITMAP_SIZE;
import "C"
import (
"syscall"
"github.com/pkg/errors"
)
var (
	// bitmapSize is the number of locks covered by one allocation bitmap,
	// as defined by the C side (BITMAP_SIZE, 32 by default)
	bitmapSize uint32 = uint32(C.bitmap_size_c)
)

// SHMLocks is a struct enabling POSIX semaphore locking in a shared memory
// segment
type SHMLocks struct {
	lockStruct *C.shm_struct_t // pointer to the mmap'd C lock structure
	valid      bool            // set false once Close() has been called
	maxLocks   uint32          // total number of semaphores in the segment
}
// CreateSHMLock sets up a shared-memory segment holding a given number of POSIX
// semaphores, and returns a struct that can be used to operate on those locks.
// numLocks must be a nonzero multiple of the lock bitmap size (by default, 32).
func CreateSHMLock(numLocks uint32) (*SHMLocks, error) {
	// Reject zero up front: 0 % bitmapSize == 0, so without this check a
	// zero count would reach the C allocator and fail with a generic error
	// instead of a clear EINVAL.
	if numLocks == 0 || numLocks%bitmapSize != 0 {
		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a nonzero multiple of %d", C.bitmap_size_c)
	}

	locks := new(SHMLocks)

	// setup_lock_shm returns NULL on any failure (creation, truncation,
	// mapping, or semaphore init)
	lockStruct := C.setup_lock_shm(C.uint32_t(numLocks))
	if lockStruct == nil {
		// We got a null pointer, so something errored
		return nil, errors.Wrapf(syscall.ENOENT, "error creating shared memory locks")
	}

	locks.lockStruct = lockStruct
	locks.maxLocks = numLocks
	locks.valid = true

	return locks, nil
}
// OpenSHMLock opens an existing shared-memory segment holding a given number of
// POSIX semaphores. numLocks must match the number of locks the shared memory
// segment was created with and be a nonzero multiple of the lock bitmap size
// (default 32)
func OpenSHMLock(numLocks uint32) (*SHMLocks, error) {
	// Reject zero as well as non-multiples; zero would otherwise pass the
	// modulo check and fail deeper in the C open path.
	if numLocks == 0 || numLocks%bitmapSize != 0 {
		return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be a nonzero multiple of %d", C.bitmap_size_c)
	}

	locks := new(SHMLocks)

	// open_lock_shm returns NULL if the segment does not exist, cannot be
	// mapped, or does not match the expected magic/lock count
	lockStruct := C.open_lock_shm(C.uint32_t(numLocks))
	if lockStruct == nil {
		// We got a null pointer, so something errored
		// (message previously said "creating", which was misleading here)
		return nil, errors.Wrapf(syscall.ENOENT, "error opening shared memory locks")
	}

	locks.lockStruct = lockStruct
	locks.maxLocks = numLocks
	locks.valid = true

	return locks, nil
}
// Close closes an existing shared-memory segment.
// The segment will be rendered unusable after closing.
// WARNING: If you Close() while there are still locks locked, these locks may
// fail to release, causing a program freeze.
// Close() is only intended to be used while testing the locks.
func (locks *SHMLocks) Close() error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	// Invalidate the struct before unmapping so it cannot be reused even if
	// the unmap below reports a failure
	locks.valid = false

	if retCode := C.close_lock_shm(locks.lockStruct); retCode < 0 {
		// The C helpers report failure as negated errno values
		return syscall.Errno(-retCode)
	}

	return nil
}
// AllocateSemaphore allocates a semaphore from a shared-memory segment for use
// by a container or pod.
// Returns the index of the semaphore that was allocated
func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
	if !locks.valid {
		return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	retCode := C.allocate_semaphore(locks.lockStruct)
	if retCode >= 0 {
		// A non-negative return is the index of the allocated semaphore
		return uint32(retCode), nil
	}

	// Negative return values are negated errno codes
	return 0, syscall.Errno(-retCode)
}
// DeallocateSemaphore frees a semaphore in a shared-memory segment for use by
// a container or pod
func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	// Valid indices run from 0 to maxLocks-1, so sem == maxLocks is out of
	// range too (the previous > comparison let it through)
	if sem >= locks.maxLocks {
		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
	}

	retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}
// LockSemaphore locks the given semaphore
func (locks *SHMLocks) LockSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	// Valid indices run from 0 to maxLocks-1, so sem == maxLocks is out of
	// range too (the previous > comparison let it through)
	if sem >= locks.maxLocks {
		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
	}

	retCode := C.lock_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}
// UnlockSemaphore unlocks the given semaphore
func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
	if !locks.valid {
		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
	}

	// Valid indices run from 0 to maxLocks-1, so sem == maxLocks is out of
	// range too (the previous > comparison let it through)
	if sem >= locks.maxLocks {
		return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
	}

	retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
	if retCode < 0 {
		// Negative errno returned
		return syscall.Errno(-1 * retCode)
	}

	return nil
}

84
libpod/lock/locks_test.go Normal file
View File

@ -0,0 +1,84 @@
package lock
import (
"fmt"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// All tests here are in the same process, which somewhat limits their utility
// The big intent of this package is multiprocess locking, which is really hard
// to test without actually having multiple processes...
// We can at least verify that the locks work within the local process.
// 4 * BITMAP_SIZE to ensure we have to traverse bitmaps
const numLocks = 128
// We need a test main to ensure that the SHM is created before the tests run
func TestMain(m *testing.M) {
	shmLock, err := CreateSHMLock(numLocks)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error creating SHM for tests: %v\n", err)
		// Exit with 1, not -1: negative exit codes are implementation-
		// defined and truncate to 255 on Unix
		os.Exit(1)
	}

	// Close the SHM - every subsequent test will reopen
	if err := shmLock.Close(); err != nil {
		fmt.Fprintf(os.Stderr, "Error closing SHM locks: %v\n", err)
		os.Exit(1)
	}

	exitCode := m.Run()

	// We need to remove the SHM segment to clean up after ourselves.
	// Best effort: the error is deliberately ignored since we are exiting.
	os.RemoveAll("/dev/shm/libpod_lock")

	os.Exit(exitCode)
}
// runLockTest opens the shared test SHM segment, runs testFunc against it as
// a subtest, and closes the segment again when the subtest finishes.
func runLockTest(t *testing.T, testFunc func(*testing.T, *SHMLocks)) {
	locks, err := OpenSHMLock(numLocks)
	if err != nil {
		t.Fatalf("Error opening locks: %v", err)
	}
	defer func() {
		// Errorf, not Fatalf: FailNow from a deferred function cannot
		// usefully halt a test that is already unwinding
		if err := locks.Close(); err != nil {
			t.Errorf("Error closing locks: %v", err)
		}
	}()

	// A failing subtest automatically marks this parent test as failed, so
	// the return value of t.Run needs no extra handling
	t.Run("locks", func(t *testing.T) {
		testFunc(t, locks)
	})
}
// Test that creating an SHM with a bad size fails
func TestCreateNewSHMBadSize(t *testing.T) {
	// Odd number, not a power of 2, should never be a word size on a system
	if _, err := CreateSHMLock(7); err == nil {
		t.Error("expected an error creating SHM with an invalid lock count")
	}
}
// Test allocating - lock - unlock - deallocate cycle, single lock
func TestLockLifecycleSingleLock(t *testing.T) {
	runLockTest(t, func(t *testing.T, locks *SHMLocks) {
		// Allocation must succeed before the rest of the cycle is meaningful
		sem, err := locks.AllocateSemaphore()
		require.NoError(t, err)

		assert.NoError(t, locks.LockSemaphore(sem))
		assert.NoError(t, locks.UnlockSemaphore(sem))
		assert.NoError(t, locks.DeallocateSemaphore(sem))
	})
}

340
libpod/lock/shm_lock.c Normal file
View File

@ -0,0 +1,340 @@
#include <errno.h>
#include <fcntl.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "shm_lock.h"
// Compute the size of the SHM struct: the fixed header plus one lock_group_t
// per bitmap (the flexible array member at the end of shm_struct_t).
// NOTE(review): the multiplication is unchecked — presumably callers keep
// num_bitmaps small enough that it cannot overflow; confirm.
size_t compute_shm_size(uint32_t num_bitmaps) {
  return sizeof(shm_struct_t) + (num_bitmaps * sizeof(lock_group_t));
}
// Set up an SHM segment holding locks for libpod
// num_locks must be a nonzero multiple of BITMAP_SIZE (32 by default)
// Returns a valid pointer on success or NULL on error
// On error the segment is unlinked again, so a failed setup leaves nothing
// behind.
shm_struct_t *setup_lock_shm(uint32_t num_locks) {
  int shm_fd, i, j, ret_code;
  uint32_t num_bitmaps;
  size_t shm_size;
  shm_struct_t *shm;

  // TODO maybe set errno so we can get errors back to libpod?

  // We need a nonzero number of locks
  if (num_locks == 0) {
    return NULL;
  }

  // Calculate the number of bitmaps required
  if (num_locks % BITMAP_SIZE != 0) {
    // Number of locks not a multiple of BITMAP_SIZE
    return NULL;
  }
  num_bitmaps = num_locks / BITMAP_SIZE;

  // Calculate size of the shm segment
  shm_size = compute_shm_size(num_bitmaps);

  // Create a new SHM segment for us.
  // O_EXCL ensures we fail rather than reuse a preexisting segment.
  shm_fd = shm_open(SHM_NAME, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (shm_fd < 0) {
    return NULL;
  }

  // Increase its size to what we need
  ret_code = ftruncate(shm_fd, shm_size);
  if (ret_code < 0) {
    goto CLEANUP_UNLINK;
  }

  // Map the shared memory in
  shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
  if (shm == MAP_FAILED) {
    goto CLEANUP_UNLINK;
  }

  // We have successfully mapped the memory, now initialize the region
  shm->magic = MAGIC;
  shm->num_locks = num_locks;
  shm->num_bitmaps = num_bitmaps;

  // Initialize the semaphore that protects the bitmaps.
  // pshared = 1 because the semaphore lives in shared memory and is used
  // across processes; initial value 1 makes it a mutex.
  ret_code = sem_init(&(shm->segment_lock), 1, 1);
  if (ret_code < 0) {
    goto CLEANUP_UNMAP;
  }

  // Initialize all bitmaps to 0 initially
  // And initialize all semaphores they use
  for (i = 0; i < num_bitmaps; i++) {
    shm->locks[i].bitmap = 0;
    for (j = 0; j < BITMAP_SIZE; j++) {
      // Each per-lock semaphore is also process-shared, initially unlocked
      ret_code = sem_init(&(shm->locks[i].locks[j]), 1, 1);
      if (ret_code < 0) {
        goto CLEANUP_UNMAP;
      }
    }
  }

  // Close the file descriptor, we're done with it
  // Ignore errors, it's ok if we leak a single FD and this should only run once
  close(shm_fd);

  return shm;

  // Cleanup after an error
  CLEANUP_UNMAP:
  munmap(shm, shm_size);
  CLEANUP_UNLINK:
  close(shm_fd);
  shm_unlink(SHM_NAME);
  return NULL;
}
// Open an existing SHM segment holding libpod locks
// num_locks is the number of locks that will be configured in the SHM segment
// num_locks must be a nonzero multiple of BITMAP_SIZE (32 by default)
// Returns a valid pointer on success or NULL on error
// The segment's magic number and stored lock count are verified against the
// caller's expectations before the pointer is handed back.
shm_struct_t *open_lock_shm(uint32_t num_locks) {
  int shm_fd;
  shm_struct_t *shm;
  size_t shm_size;
  uint32_t num_bitmaps;

  // We need a nonzero number of locks
  if (num_locks == 0) {
    return NULL;
  }

  // Calculate the number of bitmaps required
  if (num_locks % BITMAP_SIZE != 0) {
    // Number of locks not a multiple of BITMAP_SIZE
    return NULL;
  }
  num_bitmaps = num_locks / BITMAP_SIZE;

  // Calculate size of the shm segment
  shm_size = compute_shm_size(num_bitmaps);

  // No O_CREAT: the segment must already exist
  shm_fd = shm_open(SHM_NAME, O_RDWR, 0600);
  if (shm_fd < 0) {
    return NULL;
  }

  // Map the shared memory in
  shm = mmap(NULL, shm_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);

  // Ignore errors, it's ok if we leak a single FD since this only runs once
  close(shm_fd);

  // Check if we successfully mmap'd
  if (shm == MAP_FAILED) {
    return NULL;
  }

  // Need to check the SHM to see if it's actually our locks
  if (shm->magic != MAGIC) {
    goto CLEANUP;
  }
  if (shm->num_locks != num_locks) {
    goto CLEANUP;
  }

  return shm;

  CLEANUP:
  munmap(shm, shm_size);
  return NULL;
}
// Close an open SHM lock struct, unmapping the backing memory.
// The given shm_struct_t will be rendered unusable as a result.
// On success, 0 is returned. On failure, negative ERRNO values are returned.
int32_t close_lock_shm(shm_struct_t *shm) {
  size_t shm_size;

  // We can't unmap null...
  if (shm == NULL) {
    return -1 * EINVAL;
  }

  // Recompute the mapping size from the bitmap count stored in the segment
  shm_size = compute_shm_size(shm->num_bitmaps);

  if (munmap(shm, shm_size) != 0) {
    // Hand back the errno munmap set, negated
    return -1 * errno;
  }

  return 0;
}
// Allocate the first available semaphore
// Returns a positive integer guaranteed to be less than UINT32_MAX on success,
// or negative errno values on failure
// On success, the returned integer is the number of the semaphore allocated
int64_t allocate_semaphore(shm_struct_t *shm) {
  int ret_code, i;
  bitmap_t test_map;
  int64_t sem_number, num_within_bitmap;

  if (shm == NULL) {
    return -1 * EINVAL;
  }

  // Lock the semaphore controlling access to our shared memory.
  // BUGFIX: sem_wait() returns -1 and sets errno on failure; the previous
  // loop compared the return value to EINTR and so never actually retried
  // interrupted waits.
  do {
    ret_code = sem_wait(&(shm->segment_lock));
  } while (ret_code == -1 && errno == EINTR);
  if (ret_code != 0) {
    return -1 * errno;
  }

  // Loop through our bitmaps to search for one that is not full
  for (i = 0; i < shm->num_bitmaps; i++) {
    if (shm->locks[i].bitmap != 0xFFFFFFFF) {
      test_map = 0x1;
      num_within_bitmap = 0;
      while (test_map != 0) {
        if ((test_map & shm->locks[i].bitmap) == 0) {
          // Compute the number of the semaphore we are allocating
          sem_number = (BITMAP_SIZE * i) + num_within_bitmap;
          // OR in the bitmap to mark the semaphore as allocated
          shm->locks[i].bitmap = shm->locks[i].bitmap | test_map;
          // Post to the semaphore to release the segment lock
          sem_post(&(shm->segment_lock));
          // Return the semaphore we've allocated
          return sem_number;
        }
        test_map = test_map << 1;
        num_within_bitmap++;
      }
      // We should never fall through this loop: a bitmap that is not full
      // must contain at least one clear bit
      // TODO maybe an assert() here to panic if we do?
    }
  }

  // Post to the semaphore to release the segment lock
  sem_post(&(shm->segment_lock));

  // All bitmaps are full
  // We have no available semaphores, report allocation failure
  return -1 * ENOSPC;
}
// Deallocate a given semaphore
// Returns 0 on success, negative ERRNO values on failure
// Fails with ENOENT if the semaphore is not currently allocated
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index) {
  bitmap_t test_map;
  int bitmap_index, index_in_bitmap, ret_code;

  if (shm == NULL) {
    return -1 * EINVAL;
  }

  // Check if the lock index is valid
  if (sem_index >= shm->num_locks) {
    return -1 * EINVAL;
  }

  bitmap_index = sem_index / BITMAP_SIZE;
  index_in_bitmap = sem_index % BITMAP_SIZE;

  // This should never happen if the sem_index test above succeeded, but better
  // safe than sorry
  if (bitmap_index >= shm->num_bitmaps) {
    return -1 * EFAULT;
  }

  // Build the single-bit mask directly; index_in_bitmap < BITMAP_SIZE so the
  // shift is always in range (replaces the old shift-in-a-loop)
  test_map = (bitmap_t)1 << index_in_bitmap;

  // Lock the semaphore controlling access to our shared memory.
  // BUGFIX: sem_wait() returns -1 and sets errno on failure; the previous
  // loop compared the return value to EINTR and so never actually retried
  // interrupted waits.
  do {
    ret_code = sem_wait(&(shm->segment_lock));
  } while (ret_code == -1 && errno == EINTR);
  if (ret_code != 0) {
    return -1 * errno;
  }

  // Check if the semaphore is allocated
  if ((test_map & shm->locks[bitmap_index].bitmap) == 0) {
    // Post to the semaphore to release the segment lock
    sem_post(&(shm->segment_lock));

    return -1 * ENOENT;
  }

  // The semaphore is allocated, clear it
  // Invert the bitmask we used to test to clear the bit
  test_map = ~test_map;
  shm->locks[bitmap_index].bitmap = shm->locks[bitmap_index].bitmap & test_map;

  // Post to the semaphore to release the segment lock
  sem_post(&(shm->segment_lock));

  return 0;
}
// Lock a given semaphore
// Does not check if the semaphore is allocated - this ensures that, even for
// removed containers, we can still successfully lock to check status (and
// subsequently realize they have been removed).
// Returns 0 on success, negative ERRNO values on failure
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
  int bitmap_index, index_in_bitmap, ret_code;

  if (shm == NULL) {
    return -1 * EINVAL;
  }

  if (sem_index >= shm->num_locks) {
    return -1 * EINVAL;
  }

  bitmap_index = sem_index / BITMAP_SIZE;
  index_in_bitmap = sem_index % BITMAP_SIZE;

  // Wait on the per-lock semaphore.
  // BUGFIX: sem_wait() returns -1 and sets errno on failure; the previous
  // loop compared the return value to EINTR and so never actually retried
  // interrupted waits.
  do {
    ret_code = sem_wait(&(shm->locks[bitmap_index].locks[index_in_bitmap]));
  } while (ret_code == -1 && errno == EINTR);
  if (ret_code != 0) {
    return -1 * errno;
  }

  return 0;
}
// Unlock a given semaphore
// Does not check if the semaphore is allocated - this ensures that, even for
// removed containers, we can still successfully unlock after a container has
// been removed.
// Returns 0 on success, negative ERRNO values on failure
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index) {
  int bitmap_index, index_in_bitmap;

  if (shm == NULL) {
    return -1 * EINVAL;
  }

  if (sem_index >= shm->num_locks) {
    return -1 * EINVAL;
  }

  bitmap_index = sem_index / BITMAP_SIZE;
  index_in_bitmap = sem_index % BITMAP_SIZE;

  // BUGFIX: the sem_post() result was previously discarded even though the
  // documented contract promises negative errno values on failure
  if (sem_post(&(shm->locks[bitmap_index].locks[index_in_bitmap])) < 0) {
    return -1 * errno;
  }

  return 0;
}

43
libpod/lock/shm_lock.h Normal file
View File

@ -0,0 +1,43 @@
#ifndef shm_locks_h_
#define shm_locks_h_

#include <semaphore.h>
#include <stdint.h>

// Magic number to ensure we open the right SHM segment
#define MAGIC 0xA5A5

// Name of the SHM
#define SHM_NAME "/libpod_lock"

// Type for our bitmaps
typedef uint32_t bitmap_t;

// bitmap size: number of bits in bitmap_t, i.e. locks tracked per lock_group
#define BITMAP_SIZE (sizeof(bitmap_t) * 8)

// Struct to hold a single bitmap and associated locks
typedef struct lock_group {
  bitmap_t bitmap;            // allocation bitmap: bit set == lock in use
  sem_t locks[BITMAP_SIZE];   // one process-shared semaphore per bit
} lock_group_t;

// Struct to hold our SHM locks.
// Laid out at the start of the shared memory segment, followed by num_bitmaps
// lock_group_t entries in the flexible array member.
typedef struct shm_struct {
  uint16_t magic;             // must equal MAGIC for a valid segment
  sem_t segment_lock;         // protects the bitmaps in all lock groups
  uint32_t num_bitmaps;       // number of entries in locks[]
  uint32_t num_locks;         // total semaphores (num_bitmaps * BITMAP_SIZE)
  lock_group_t locks[];       // flexible array of lock groups
} shm_struct_t;

// Size in bytes of a segment holding num_bitmaps lock groups
size_t compute_shm_size(uint32_t num_bitmaps);
// Create and initialize a new segment; NULL on error
shm_struct_t *setup_lock_shm(uint32_t num_locks);
// Open and validate an existing segment; NULL on error
shm_struct_t *open_lock_shm(uint32_t num_locks);
// Unmap a segment; 0 on success, negative errno on failure
int32_t close_lock_shm(shm_struct_t *shm);
// Allocate a free semaphore; its index on success, negative errno on failure
int64_t allocate_semaphore(shm_struct_t *shm);
// Free an allocated semaphore; 0 on success, negative errno on failure
int32_t deallocate_semaphore(shm_struct_t *shm, uint32_t sem_index);
// Lock a semaphore by index; 0 on success, negative errno on failure
int32_t lock_semaphore(shm_struct_t *shm, uint32_t sem_index);
// Unlock a semaphore by index; 0 on success, negative errno on failure
int32_t unlock_semaphore(shm_struct_t *shm, uint32_t sem_index);

#endif