get.c
1 /*
2  shm-arena shared memory arena
3  Copyright (C) 2006-2008 Lance Arsenault (LGPL v3)
4 
5 
6  This file is part of shm-arena.
7 
8  shm-arena is free software; you can redistribute it and/or modify
9  it under the terms of the GNU Lesser General Public License as
10  published by the Free Software Foundation; either version 3 of the
11  License, or (at your option) any later version.
12 
13  shm-arena is distributed in the hope that it will be useful, but
14  WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  Lesser General Public License for more details.
17 
18  You should have received a copy of the GNU Lesser General Public
19  License along with this program. If not, see
20  <http://www.gnu.org/licenses/>.
21 */
22 
27 #include "config.h"
28 #include <stdio.h>
29 #include <sys/types.h>
30 #include <sys/stat.h>
31 #include <fcntl.h>
32 #include <errno.h>
33 #include <string.h>
34 #include <unistd.h>
35 #include <stdlib.h>
36 #include <sys/mman.h>
37 #include <stdint.h>
38 #include <pthread.h>
39 
40 #include "spew.h"
41 #include "assert.h"
42 #include "debug_lock.h"
43 #include "shm_arena.h"
44 #include "arena.h"
45 #include "arena_lock.h"
46 #include "avl.h"
47 
48 #define DEFAULT_NAME_PREFIX "~"
49 #define DEFAULT_NAME_SUFFIX "~"
50 
51 
/* Allocate a new named segment of `size' user bytes in arena `a'.
 *
 * Segment layout, each piece rounded up to whole CHUNKs:
 *   seg_header | user data | [pthread_rwlock_t] | name string | seg_footer
 *
 * NOTE(review): caller must already hold the arena file write lock --
 * this routine mutates shared arena state (free-segment tree, mapping
 * headers, arena file length) with no locking of its own; confirm
 * against callers (shm_get() takes the write lock when O_CREAT is set).
 *
 * a           arena object, assumed in sync with the shared header
 * name        NUL-terminated segment name, copied into the segment
 * size        user-data length in bytes
 * with_rwlock non-zero to embed a process-shared pthread rwlock
 *
 * Returns a pointer to the segment's user data, or NULL with errno set
 * on failure.
 */
static inline
void *allocate_segment(struct shm_arena *a,
                       const char *name, size_t size,
                       int with_rwlock)
{
  size_t total_seg_size; /* total size needed for the allocated segment */
  struct seg_header *seg;
  mapnum_t mapnum = -1;
  struct seg_footer *fdr;

  SPEW(_DEBUG, "Creating segment \"%s\" with size=%zu",
       name, size);

  /* Our locally cached view must match the shared arena header before
   * we touch any mappings. */
  ASSERT(a->change_count == ((struct arena_header *)
                             (a->mapping[0].start))->change_count);
  ASSERT(a->num_mappings == ((struct arena_header *)
                             (a->mapping[0].start))->num_mappings);

  /* total size needed for the allocated segment: header + user data +
   * optional rwlock + name string (with its NUL) + footer, each piece
   * rounded up to whole CHUNKs */
  total_seg_size =
    CHUNKS(sizeof(struct seg_header)) +
    CHUNKS(size) +
    (with_rwlock?CHUNKS(sizeof(pthread_rwlock_t)):0) +
    CHUNKS(strlen(name)+1) +
    CHUNKS(sizeof(struct seg_footer));

  seg = find_free_segment(a, total_seg_size, &mapnum);

  /********************************************************/
  /**** There are 6 cases of code paths to check here. ****/
  /********************************************************/

  if(seg)
  {
    /* We found a big enough free segment. */

#ifdef SHM_DEBUG
    int detach_err;
    detach_err =
#endif

    /* Remove the segment from the free-segment tree; the return value
     * is only checked in debug builds via the ASSERT below. */
    _shm_detach_free_segment(a, seg, mapnum);

    ASSERT(!detach_err);

    /* Split only if the leftover could hold at least its own header
     * and footer; otherwise hand out the whole segment as-is. */
    if(seg->length*CHUNK >= (CHUNKS(sizeof(struct seg_header)) +
                             total_seg_size + CHUNKS(sizeof(struct seg_footer))))
    {
      /* We will split the segment into two, first the allocated
       * one and then a free one. So if the free one is at the end
       * of the last mapping we can truncate the arena file, and
       * maybe unmap it. */
      struct seg_header *free_hdr;

      SPEW(_DEBUG, "Case 1: Found Free & Split");

      /* This is where the free memory will be. */
      free_hdr = (struct seg_header *)(((uint8_t *)seg) + total_seg_size);

      free_hdr->length = (seg->length*CHUNK - total_seg_size)/CHUNK;
      /* The footer occupies the last CHUNKs of the new free segment. */
      fdr = (struct seg_footer *)
        (((uint8_t *)free_hdr) + (free_hdr->length*CHUNK -
                                  CHUNKS(sizeof(struct seg_footer))));
      fdr->length = free_hdr->length;
      fdr->flags = IS_FREE;

      seg->length = total_seg_size/CHUNK;

      insert_free_segment(a, free_hdr, mapnum);
    }
#ifdef WITH_SPEW
    else
    {
      /* else *
       * We are not splitting the segment. We'll just use what we
       * found. */
      /* The seg->length does not change. */
      SPEW(_DEBUG, "Case 2: Found Free With This Size");
    }
#endif /* #ifdef WITH_SPEW */

  }
  else /* make more shared memory */
  {
    /* Here we make more memory by extending the arena file and
     * mapping more if required. There could be more memory
     * available in the last mapping. All other mappings would have
     * free segments that would have been found with
     * find_free_segment() above if they were big enough. */
    struct mapping_header *mhdr = NULL;
    struct stat st;

    if(fstat(a->fd, &st)) /* Get the size of the arena file. */
      return /* We are screwed now. */
        SPEW_SYS_ERR_RET(NULL, _WARN, errno,
                         "fstat(fd=%d,) failed", a->fd);

    mhdr = get_mapping_header(a, a->num_mappings-1);

#ifdef SHM_DEBUG
    /* Debug consistency check on the last segment of the last mapping. */
    if(mhdr->length_used*CHUNK >
       (CHUNKS(sizeof(struct mapping_header)) +
        ((a->num_mappings-1 == 0)?CHUNKS(sizeof(struct arena_header)):0)))
    {
      /* We can find the footer in the last segment at the end of
       * the mapping, if there are any segments at all. */
      fdr = (struct seg_footer *)
        (a->mapping[a->num_mappings-1].start +
         ((mhdr->length_used*CHUNK - CHUNKS(sizeof(struct seg_footer)))));

      /* The last segment in the last mapping should not be free,
       * otherwise it would have been removed. We have a coding
       * error if it is free. */
      ASSERT(!(fdr->flags & IS_FREE));
    }
#endif /* #ifdef SHM_DEBUG */


    /* Is there enough unused room left in the last mapping? */
    if((mhdr->map_length - mhdr->length_used)*CHUNK >= total_seg_size)
    {
      SPEW(_DEBUG, "Case 3: Ftrunate & Add Segment");
      /* We extend the end of the arena file in the last mapping,
       * without adding another mapping. */

      mapnum = a->num_mappings - 1;

      ASSERT(st.st_size >= (off_t) (mhdr->length_used*CHUNK));

      if(ftruncate(a->fd, st.st_size + total_seg_size))
        return /* Shit. */
          SPEW_SYS_ERR_RET(NULL, _WARN, errno,
                           "ftruncate(fd=%d,) failed", a->fd);

      SPEW(_DEBUG, "Total arena file size has increased to %lu bytes",
           st.st_size + total_seg_size);


      /* The memory will be at what was the end of the last
       * mapping. */
      seg = (struct seg_header *)
        (a->mapping[mapnum].start + mhdr->length_used*CHUNK);

      mhdr->length_used += total_seg_size/CHUNK;
    }
    else
    {
      /* We need to create a new arena file mapping */
      void *map_start = NULL;
      int pagesize;
      off_t old_map_end;

      ASSERT(mhdr->map_length >= mhdr->length_used);

      pagesize = getpagesize();

      /* First we need to pad in the end of the arena file with a
       * free segment, or extend the last segment if it's not big
       * enough to hold a free segment, plus the size needed and
       * used in the new mapping to come. */
      old_map_end = st.st_size + (mhdr->map_length - mhdr->length_used)*CHUNK;

      if(ftruncate(a->fd, old_map_end +
                   total_seg_size + CHUNKS(sizeof(struct mapping_header))))
        return /* error */
          SPEW_SYS_ERR_RET(NULL, _WARN, errno,
                           "ftruncate(fd=%d,) failed", a->fd);

      SPEW(_DEBUG, "Total arena file size has increased to %lu bytes",
           old_map_end + total_seg_size + CHUNKS(sizeof(struct mapping_header)));

      /* Can the leftover tail of the last mapping hold a standalone
       * free segment (header + footer)? */
      if(mhdr->map_length - mhdr->length_used >=
         CHUNKS(sizeof(struct seg_header)) + CHUNKS(sizeof(struct seg_footer)))
      {
        SPEW(_DEBUG, "Case 4: Ftrunate & Add Free Pad & Add Mapping");
        /* add a free memory to the end of the mapping */

        /* adding one free segment to the end of the mapping
         * before we add the next mapping with an allocated
         * segment at the top. */
        seg = (struct seg_header *) /* using seg as a dummy var */
          (a->mapping[a->num_mappings-1].start + mhdr->length_used*CHUNK);

        seg->length = mhdr->map_length - mhdr->length_used;
        seg->left_mapnum = -1;
        seg->right_mapnum = -1;
        seg->left_offset = 0;
        seg->right_offset = 0;

        /* Put a footer at the new end of the mapping. */
        fdr = (struct seg_footer *)
          (((uint8_t *) seg) + (seg->length*CHUNK -
                                CHUNKS(sizeof(struct seg_footer))));
        fdr->length = seg->length;
        fdr->flags = IS_FREE;

        mhdr->length_used = mhdr->map_length;

        insert_free_segment(a, seg, a->num_mappings-1);
      }
      else if(mhdr->map_length > mhdr->length_used)
      {
        SPEW(_DEBUG, "Case 5: Ftrunate & Add Pad To Last Segment & Add Mapping");
        /* We have an unusable small piece of memory at the end
         * of the mapping, so we must put it in the last segment
         * in the mapping so that we do not leak it. */
        fdr = (struct seg_footer *)
          (a->mapping[a->num_mappings-1].start +
           ((mhdr->length_used*CHUNK - CHUNKS(sizeof(struct seg_footer)))));

        /* Walk back from that footer to the header of the segment it
         * belongs to. */
        seg = (struct seg_header *) /* using seg as a dummy var */
          (((uint8_t *) fdr) -
           (fdr->length*CHUNK - CHUNKS(sizeof(struct seg_footer))));

        seg->length += mhdr->map_length - mhdr->length_used;

        /* The footer moves to the bottom of the now longer segment. */
        fdr = (struct seg_footer *)
          (((uint8_t *) seg) +
           (seg->length*CHUNK - CHUNKS(sizeof(struct seg_footer))));

        /* NOTE(review): this rewrites the *existing* last segment's
         * footer flags based on this call's with_rwlock, instead of
         * preserving that segment's original WITH_RWLOCK bit -- looks
         * suspicious; TODO confirm intended behavior. */
        fdr->flags = 0;
        if(with_rwlock) fdr->flags |= WITH_RWLOCK;
        fdr->length = seg->length;

        mhdr->length_used = mhdr->map_length;
      }
#ifdef WITH_SPEW
      else
      {
        /* mhdr->map_length == mhdr->length_used */
        SPEW(_DEBUG, "Case 6: Ftrunate & Add Mapping");
      }
#endif

      /* now add the new mapping and create a segment. */
      mapnum = a->num_mappings;
      (a->num_mappings)++;
      /* NOTE(review): on realloc() failure the old a->mapping pointer
       * is overwritten (leaked) and a->num_mappings was already
       * incremented -- TODO confirm whether callers can recover. */
      a->mapping = (struct shm_mapping *)
        realloc(a->mapping, a->num_mappings*sizeof(struct shm_mapping));

      if(!(a->mapping))
        return /* error */
          SPEW_SYS_ERR_RET(NULL, _WARN, errno, "realloc() failed");

      /* map_length is computed in bytes (whole pages) here; it is
       * converted to CHUNK units after the mmap() below. */
      a->mapping[mapnum].map_length =
        PAGES(total_seg_size + CHUNKS(sizeof(struct mapping_header)),
              pagesize);
      if(a->user_shm_address)
        /* If this arena struct was created when env SHM_ADDRESS was set,
         * request the new mapping contiguous with the previous one. */
        map_start = (void*)(((uintptr_t) a->mapping[mapnum-1].start) +
                            a->mapping[mapnum-1].map_length*CHUNK);
      a->mapping[mapnum].start =
        mmap(map_start, a->mapping[mapnum].map_length, PROT_READ|PROT_WRITE,
             MAP_SHARED, a->fd, old_map_end);


      /* map_length in CHUNKs */
      a->mapping[mapnum].map_length /= CHUNK;

      if(a->mapping[mapnum].start == MAP_FAILED)
      {
        (a->num_mappings)--;
        return /* error */
          SPEW_SYS_ERR_RET(NULL, _WARN, errno, "mmap() failed");
      }
      else if(a->user_shm_address && a->mapping[mapnum].start != map_start)
      {
        /* With a user-fixed base address the mappings must be
         * contiguous; landing anywhere else is unusable. */
        (a->num_mappings)--;
        errno = EFAULT;
        return /* error */
          SPEW_SYS_ERR_RET(NULL, _WARN, errno,
                           "mmap() failed with SHM_ADDRESS set");
      }

      /* sync the arena change_count */
      a->change_count = ++(a->header->change_count);

      (a->header->num_mappings)++;


      mhdr = get_mapping_header(a, mapnum);

      mhdr->map_length = a->mapping[mapnum].map_length;

      mhdr->length_used =
        (CHUNKS(sizeof(struct mapping_header)) + total_seg_size)/CHUNK;

      /* The new segment starts right after the mapping header. */
      seg = (struct seg_header *)
        (a->mapping[mapnum].start + CHUNKS(sizeof(struct mapping_header)));

      SPEW(_DEBUG, "Created new mapping of %zu pages = %zu bytes",
           mhdr->map_length*CHUNK/pagesize, mhdr->map_length*CHUNK);
    }

    seg->length = total_seg_size/CHUNK; /* in units of CHUNKs. */
  }

  /* The allocated/named memory will start after seg. seg points
   * to its struct seg_header that is before the user
   * data and then name string. */

  seg->user_length = size;
  seg->left_mapnum = -1;
  seg->right_mapnum = -1;
  seg->left_offset = 0;
  seg->right_offset = 0;
  fdr = (struct seg_footer *)
    (((uint8_t *)seg) + (seg->length*CHUNK - CHUNKS(sizeof(struct seg_footer))));
  fdr->length = seg->length;
  fdr->flags = 0;
  if(with_rwlock) fdr->flags |= WITH_RWLOCK;

  strcpy(get_seg_name(seg), name);

  insert_alloc_segment(a, seg, mapnum);

  if(with_rwlock)
  {
    /* Initialize the process-shared rwlock embedded in the segment. */
    int err;
    pthread_rwlockattr_t attr;
    if((err = pthread_rwlockattr_init(&attr)))
      return
        SPEW_SYS_ERR_RET(NULL, _WARN, err, "pthread_rwlockattr_init() failed");
    if((err = pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)))
      return
        SPEW_SYS_ERR_RET(NULL, _WARN, err, "pthread_rwlockattr_setpshared() failed");
    if((err = pthread_rwlock_init(get_seg_rwlock(seg), &attr)))
      return
        SPEW_SYS_ERR_RET(NULL, _WARN, err, "pthread_rwlock_init() failed");
    SPEW(_DEBUG, "Initialized segment rwlock");
  }

  return get_seg_ptr(seg);
}
389 
/* shm_get() -- find or create a named shared memory segment.
 *
 * arena  arena object (resolved/locked by
 *        get_arena_min_size_and_autolock(); write lock if O_CREAT,
 *        read lock otherwise)
 * size   user-data size in bytes, or SHM_UNKNOWN_SIZE to accept an
 *        existing segment's size (invalid together with O_CREAT)
 * name   segment name; NULL or "" requires O_CREAT|O_EXCL and makes
 *        this function generate a unique name
 * flags  O_CREAT and O_EXCL bits, plus SHM_WITHOUT_RWLOCK
 *
 * Returns a pointer to the segment's user data, or NULL with errno
 * set on error.  Returning NULL with err/errno unchanged is NOT an
 * error when O_CREAT is unset and the segment simply does not exist
 * (lets callers probe for existence).
 */
void *shm_get(shm_arena_t arena, size_t size, const char *name, int flags)
{
  char gen_name[GEN_NAME_SIZE] = "";
  void *ret_ptr = NULL;
  int err = 0;

  SPEW(_INFO, "%s(arena=%p, size=%zu, name=\"%s\", flags=%d)",
       __func__, arena, (size==SHM_UNKNOWN_SIZE)?0:size,
       name, flags);

  /* A missing name means "create with a generated name", which is
   * only meaningful with O_CREAT|O_EXCL. */
  if((!name || !name[0]) && !((flags & O_CREAT) && (flags & O_EXCL)))
    /* name is not set, so we must be creating a segment */
    return SPEW_SYS_ERR_RET(NULL, _WARN, EINVAL,
                            "%s() failed: flags O_CREAT and "
                            "O_EXCL bits must be set if name "
                            "is not set", __func__);

  /* Creating a segment requires a known size. */
  if((flags & O_CREAT) && size == SHM_UNKNOWN_SIZE)
    return SPEW_SYS_ERR_RET(NULL, _WARN, EINVAL,
                            "%s() failed: O_CREAT is set "
                            "and size == SHM_UNKNOWN_SIZE",
                            __func__);

  /* This will get the arena object and in case it creates the arena
   * file we give the first mapping a size that is at least big enough
   * to hold this new segment, in case size is larger than one
   * page. */
  arena = get_arena_min_size_and_autolock(arena,
                                          (flags & O_CREAT)? 2/* write */: 1/* read */,
                                          &err IF_SPEW(, __func__), size);
  if(!arena)
  {
    ASSERT(err);
    errno = err;
    return NULL;
  }

  ASSERT(arena->header);

  /* Make sure that we have the current number of memory mappings for
   * this arena file, by comparing the local and arena file
   * change_count.
   *
   * On mutex locking:
   *
   * Case 1: We have an arena file write lock from the O_CREAT flag:
   * We are excluding all other threads, so we do not need a mutex
   * lock at all.
   *
   * Case 2: We have an arena file read lock: The arena file read lock
   * will stop any allocations, but we may be out of sync in which
   * case we need to change the local arena object mapping data, and
   * so we need a mutex lock to change it, because other threads may
   * be doing the same thing. After this syncing we are guaranteed
   * that the mapping data will not change while we keep the arena
   * file read lock. We will not be allocating in this case.
   */

  if(!(flags & O_CREAT))
    pthread_mutex_lock(&arena->mapping_mutex);
  if(arena->change_count != arena->header->change_count)
    if(_shm_arena_sync_mappings(IF_SPEW(__func__,) arena))
    {
      /* NOTE(review): this unlock also runs when O_CREAT is set, in
       * which case the mutex above was never locked -- unlocking an
       * unheld default mutex is undefined behavior; TODO confirm. */
      pthread_mutex_unlock(&arena->mapping_mutex);
      /* errno should already be set */
      goto shm_get_ret;
    }
  if(!(flags & O_CREAT))
    pthread_mutex_unlock(&arena->mapping_mutex);

  /* The mappings will stay in sync or we will be messing with them
   * here in this one thread if we allocate. We have the arena file
   * write lock to block all other threads if we are allocating. We
   * have an arena file read lock which will keep the mapping data
   * fixed until we release the arena file read lock. */

  if(!name || !name[0])
  {
    if(!((flags & O_CREAT) && (flags & O_EXCL)))
    {
      err = EINVAL; /* "Invalid argument" */
      SPEW_SYS_ERR(_WARN, EINVAL,
                   "%s() failed: flags O_CREAT and "
                   "O_EXCL bits must be set if segment name "
                   "is not set", __func__);
      goto shm_get_ret;
    }
    /* generate a new segment name */

    /* Bump the shared name counter until the generated name is not
     * already taken (loop ends when find_segment_name() misses). */
    for(ret_ptr = (void *) 1; ret_ptr; (arena->header->name_count)++)
    {
      snprintf(gen_name, GEN_NAME_SIZE,
               DEFAULT_NAME_PREFIX"%u"DEFAULT_NAME_SUFFIX,
               arena->header->name_count);
      ret_ptr = find_segment_name(arena, gen_name, NULL);
    }
    name = gen_name;
    SPEW(_DEBUG, "Generated segment name \"%s\"", name);
  }
  else /* we have a name string from the user */
  {
    size_t user_size;

    ret_ptr = find_segment_name(arena, name, &user_size);

    if((flags & O_CREAT) && (flags & O_EXCL) && ret_ptr)
    {
      /* NOTE(review): the spewed errno (EINVAL) and the returned err
       * (EBADRQC) disagree here -- TODO confirm which is intended. */
      SPEW_SYS_ERR(_WARN, EINVAL,
                   "%s() failed: flags O_CREAT and "
                   "O_EXCL bits must not be set: segment named "
                   "\"%s\" exists.", __func__, name);
      err = EBADRQC; /* "Invalid request code" */
      goto shm_get_ret;
    }
    else if(!ret_ptr && !(flags & O_CREAT))
    {
      /* This case is NOT an ERROR: we still return NULL but the
       * err and errno is not set. We need this to not be an
       * error so that a user can use it to test if a segment
       * exists. */
      SPEW(_INFO,
           "%s() failed: flag O_CREAT is not set and segment named "
           "\"%s\" does not exist.", __func__, name);
      goto shm_get_ret;
    }
    else if(ret_ptr)
    {
      /* return an existing segment if it's the right size */
      if(user_size != size && size != SHM_UNKNOWN_SIZE)
      {
        /* This sucks as an error code, but it's better than
         * none. */
        err = EADDRINUSE; /* "Address already in use" */
        SPEW_SYS_ERR(_WARN, EINVAL,
                     "%s() failed: segment \"%s\" exists and has "
                     "a size %zu bytes not %zu bytes", __func__,
                     name, user_size, size);
      }
      else
      {
        /* Size matches; also require the rwlock-ness of the existing
         * segment to agree with the caller's SHM_WITHOUT_RWLOCK flag. */
        struct seg_header *seg;
        seg = (struct seg_header *)
          (((uint8_t *) ret_ptr) - CHUNKS(sizeof(struct seg_header)));
        if(get_seg_footer(seg)->flags & WITH_RWLOCK &&
           flags & SHM_WITHOUT_RWLOCK)
        {
          err = EADDRINUSE; /* "Address already in use" */
          SPEW_SYS_ERR(_WARN, EINVAL,
                       "%s() failed: segment \"%s\" exists and has "
                       "read-write locks. SHM_WITHOUT_RWLOCK"
                       " should not be set.",
                       __func__, name);
        }
        else if(!(get_seg_footer(seg)->flags & WITH_RWLOCK) &&
                !(flags & SHM_WITHOUT_RWLOCK))
        {
          err = EADDRINUSE; /* "Address already in use" */
          SPEW_SYS_ERR(_WARN, EINVAL,
                       "%s() failed: segment \"%s\" exists and has "
                       "no read-write locks. SHM_WITHOUT_RWLOCK"
                       " should be set.",
                       __func__, name);
        }
#ifdef WITH_SPEW
        else
        {
          if(size == SHM_UNKNOWN_SIZE)
            size = seg->user_length;
          SPEW(_INFO, "Connected to existing segment \"%s\" with size %zu bytes",
               name, size);
        }
#endif
      }
      goto shm_get_ret;
    }
    /* Else -- we will create it outside this block. */
  }

  /* we can and need to create a new segment with name string name */
  /* We must have an arena write lock so we do not need a mutex lock
   * here. */
  ret_ptr = allocate_segment(arena, name, size, !(flags & SHM_WITHOUT_RWLOCK));


#ifdef WITH_SPEW
  if(ret_ptr)
    SPEW(_INFO, "Created segment \"%s\" with size %zu bytes", name, size);
  else
    SPEW(_WARN, "Failed to create segment \"%s\" with size %zu bytes", name, size);
#endif

  shm_get_ret:

  /* Release the arena file lock; a failure to unlock becomes the
   * returned error only if no earlier error was recorded. */
  if(!err)
    err = arena_autounlock(arena IF_SPEW(, __func__));
  else
    arena_autounlock(arena IF_SPEW(, __func__));

  if(err)
  {
    errno = err;
    ret_ptr = NULL;
  }

  return ret_ptr;
}
struct arena_header * header
Definition: arena.h:446
struct shm_mapping * mapping
Definition: arena.h:454
int num_mappings
Definition: arena.h:459
pthread_mutex_t mapping_mutex
Definition: arena.h:468
mapnum_t num_mappings
Definition: arena.h:387
mapnum_t right_mapnum
Definition: arena.h:142
offset_t right_offset
Definition: arena.h:147
#define SHM_UNKNOWN_SIZE
shm_get() flag for when you don't know the segment size
Definition: shm_arena.h:165
void * shm_get(shm_arena_t arena, size_t size, const char *name, int flags)
get a shared memory segment
Definition: get.c:437
offset_t length_used
Definition: arena.h:352
mapnum_t left_mapnum
Definition: arena.h:142
uint32_t change_count
Definition: arena.h:464
offset_t map_length
Definition: arena.h:345
offset_t length
Definition: arena.h:153
void * user_shm_address
Definition: arena.h:484
offset_t map_length
Definition: arena.h:410
offset_t left_offset
Definition: arena.h:147
uint32_t change_count
Definition: arena.h:383
size_t user_length
Definition: arena.h:156
uint8_t * start
Definition: arena.h:405
#define SHM_WITHOUT_RWLOCK
shm_get() flag for not having segment read-write lock
Definition: shm_arena.h:150
int fd
Definition: arena.h:436
uint32_t name_count
Definition: arena.h:377

Shared Memory Arena version RC-0.0.25