#define GEN_NAME_SIZE 32

/* magic number identifying an arena */
#define ARENA_MAGIC 0xCB35CC0D

/* segment flag bits, stored in the segment footer */
#define IS_FREE     (01 << 0)
#define WITH_RWLOCK (01 << 1)

/* round x up to the nearest multiple of s */
#define PLUS_SIZE(x,s) ((((x)+(s)-1)/(s))*(s))

/* round x up to a whole number of pages */
#define PAGES(x, pagesize) PLUS_SIZE((x), (pagesize))

/* round x up to a whole number of chunks */
#define CHUNKS(x) PLUS_SIZE((x), CHUNK)
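/* As a worked illustration of the round-up arithmetic above:
 * PLUS_SIZE(x, s) yields the smallest multiple of s that is >= x.
 * This standalone sketch assumes CHUNK is 16 purely for
 * demonstration; the real value is defined elsewhere in the
 * library. */
#include <stdio.h>

#define PLUS_SIZE(x,s) ((((x)+(s)-1)/(s))*(s))
#define CHUNK 16                      /* assumed value for this sketch */
#define CHUNKS(x) PLUS_SIZE((x), CHUNK)

int main(void)
{
  printf("CHUNKS(1)  = %d\n", CHUNKS(1));   /* prints 16 */
  printf("CHUNKS(16) = %d\n", CHUNKS(16));  /* prints 16 */
  printf("CHUNKS(17) = %d\n", CHUNKS(17));  /* prints 32 */
  return 0;
}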
/* mapping index; -1 serves as a sentinel value */
typedef int16_t mapnum_t;
#define MAPNUM_FORMAT "%"PRId16

/* offset into a mapping, measured in chunks */
typedef uint32_t offset_t;
#define OFFSET_FORMAT "%"PRIu32

typedef int32_t height_t;
#define HEIGHT_FORMAT "%"PRId32
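/* The *_FORMAT macros keep the printf() conversion for each typedef
 * in one place, so a change to a type's width only touches these
 * definitions.  A standalone usage sketch (the variable values here
 * are illustrative only): */
#include <stdio.h>
#include <inttypes.h>

typedef int16_t mapnum_t;
typedef uint32_t offset_t;
#define MAPNUM_FORMAT "%"PRId16
#define OFFSET_FORMAT "%"PRIu32

int main(void)
{
  mapnum_t mapnum = -1;
  offset_t offset = 42;
  printf("mapnum=" MAPNUM_FORMAT " offset=" OFFSET_FORMAT "\n",
      mapnum, offset);
  return 0;
}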
/* store this thread's rwlock flags under the arena's
 * thread-specific key */
if((err = pthread_setspecific(a->rwlock_key, attr)))
  /* ... */
  SPEW_SYS_ERR_RET(NULL, _WARN, err,
      "pthread_setspecific() failed");

/* ... */

SPEW(_DEBUG,
    "Initialized thread arena recursive read-write lock flags");
char *get_seg_name(const struct seg_header *seg)
{
  /* ... the name is stored past the optional per-segment
   * read-write lock ... */
      ((get_seg_footer(seg)->flags & WITH_RWLOCK) ?
          CHUNKS(sizeof(pthread_rwlock_t)) : 0))));
/* Convert a segment address into its chunk offset within mapping
 * seg_mapnum. */
offset_t get_offset(const struct shm_arena *a,
    mapnum_t seg_mapnum, const struct seg_header *seg)
{
  return (offset_t)
      ((((uintptr_t) seg) -
        ((uintptr_t)(a->mapping[seg_mapnum].start)))/CHUNK);
}

/* The inverse: convert a (mapping, chunk offset) pair back into an
 * address. */
void *get_ptr(const struct shm_arena *a,
    mapnum_t map_num, offset_t offset)
{
  return (((uint8_t *) (a->mapping[map_num].start)) + (offset*CHUNK));
}
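/* A standalone sketch of the offset <-> pointer round trip above.
 * The CHUNK value and the trimmed-down mapping struct are
 * assumptions made only so this example compiles on its own. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define CHUNK 16                 /* assumed chunk size */

typedef uint32_t offset_t;

struct mapping { uint8_t *start; };

static offset_t get_offset(const struct mapping *m, const void *ptr)
{
  return (offset_t)
      ((((uintptr_t) ptr) - ((uintptr_t) m->start))/CHUNK);
}

static void *get_ptr(const struct mapping *m, offset_t offset)
{
  return m->start + offset*CHUNK;
}

int main(void)
{
  struct mapping m = { malloc(64*CHUNK) };
  void *p = m.start + 3*CHUNK;   /* a chunk-aligned address */

  /* offsets survive the round trip for chunk-aligned pointers */
  assert(get_ptr(&m, get_offset(&m, p)) == p);

  free(m.start);
  return 0;
}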
ASSERT(mapnum <= a->num_mappings && mapnum >= -1);
/* ... */
if(mapnum != 0 && mapnum != -1)
/* Find a segment's read-write lock given a user pointer into the
 * segment. */
pthread_rwlock_t *ptr_rwlock(const void *ptr)
{
  /* the segment header sits one chunk-padded header size before
   * the user memory */
  const struct seg_header *seg = (const struct seg_header *)
      (((uint8_t *) ptr) - CHUNKS(sizeof(struct seg_header)));
  /* ... */
  if(get_seg_footer(seg)->flags & WITH_RWLOCK)
    return (pthread_rwlock_t *) /* ... */
pthread_rwlock_t *get_seg_rwlock(const struct seg_header *seg)
{
  /* only segments allocated with a lock may ask for it */
  ASSERT(get_seg_footer(seg)->flags & WITH_RWLOCK);
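/* A standalone sketch of the pointer arithmetic ptr_rwlock() relies
 * on: user memory begins CHUNKS(sizeof(struct seg_header)) bytes
 * after the segment header, so the header can be recovered by
 * stepping back that padded size.  The CHUNK value and the stand-in
 * seg_header below are assumptions, not the library's real layout. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PLUS_SIZE(x,s) ((((x)+(s)-1)/(s))*(s))
#define CHUNK 16                        /* assumed chunk size */
#define CHUNKS(x) PLUS_SIZE((x), CHUNK)

struct seg_header { uint32_t length; };  /* stand-in header */

int main(void)
{
  /* one padded header followed by one chunk of user memory */
  uint8_t *block = malloc(CHUNKS(sizeof(struct seg_header)) + CHUNK);
  void *user = block + CHUNKS(sizeof(struct seg_header));

  /* recover the header from the user pointer, as ptr_rwlock() does */
  struct seg_header *seg = (struct seg_header *)
      (((uint8_t *) user) - CHUNKS(sizeof(struct seg_header)));

  assert((void *) seg == (void *) block);
  free(block);
  return 0;
}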
/* IF_SPEW() includes its argument only in builds with spew logging
 * enabled */
_shm_arena_sync_mappings(IF_SPEW(const char *func,)
    /* ... */

/* members of struct shm_arena: */
struct arena_header *header;    /* the arena header in shared memory */
struct shm_mapping *mapping;    /* mappings array, indexed by mapnum */
pthread_mutex_t mapping_mutex;
pthread_mutex_t rwlock_mutex;