Fast Auxiliary Space Preconditioning  1.8.4 Feb/15/2016
dlmalloc.h
1 /*
2  Default header file for malloc-2.8.x, written by Doug Lea
3  and released to the public domain, as explained at
4  http://creativecommons.org/licenses/publicdomain.
5 
6  last update: Wed May 27 14:25:17 2009 Doug Lea (dl at gee)
7 
8  This header is for ANSI C/C++ only. You can set any of
9  the following #defines before including:
10 
11  * If USE_DL_PREFIX is defined, it is assumed that malloc.c
12  was also compiled with this option, so all routines
13  have names starting with "dl".
14 
15  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
16  file will be #included AFTER <malloc.h>. This is needed only if
17  your system defines a struct mallinfo that is incompatible with the
18  standard one declared here. Otherwise, you can include this file
19  INSTEAD of your system <malloc.h>. At least on ANSI, all
20  declarations should be compatible with system versions
21 
22  * If MSPACES is defined, declarations for mspace versions are included.
23 */
24 
25 #ifndef MALLOC_280_H
26 #define MALLOC_280_H
27 
28 #ifdef __cplusplus
29 extern "C" {
30 #endif
31 
32 #include <stddef.h> /* for size_t */
33 
34 #ifndef ONLY_MSPACES
35 #define ONLY_MSPACES 0 /* define to a value */
36 #endif /* ONLY_MSPACES */
37 #ifndef NO_MALLINFO
38 #define NO_MALLINFO 0
39 #endif /* NO_MALLINFO */
40 
41 
42 #if !ONLY_MSPACES
43 
44 #ifndef USE_DL_PREFIX
45 #define dlcalloc calloc
46 #define dlfree free
47 #define dlmalloc malloc
48 #define dlmemalign memalign
49 #define dlrealloc realloc
50 #define dlvalloc valloc
51 #define dlpvalloc pvalloc
52 #define dlmallinfo mallinfo
53 #define dlmallopt mallopt
54 #define dlmalloc_trim malloc_trim
55 #define dlmalloc_stats malloc_stats
56 #define dlmalloc_usable_size malloc_usable_size
57 #define dlmalloc_footprint malloc_footprint
58 #define dlindependent_calloc independent_calloc
59 #define dlindependent_comalloc independent_comalloc
60 #endif /* USE_DL_PREFIX */
61 #if !NO_MALLINFO
62 #ifndef HAVE_USR_INCLUDE_MALLOC_H
63 #ifndef _MALLOC_H
64 #ifndef MALLINFO_FIELD_TYPE
65 #define MALLINFO_FIELD_TYPE size_t
66 #endif /* MALLINFO_FIELD_TYPE */
67 #ifndef STRUCT_MALLINFO_DECLARED
68 #define STRUCT_MALLINFO_DECLARED 1
69 struct mallinfo {
70  MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
71  MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
72  MALLINFO_FIELD_TYPE smblks; /* always 0 */
73  MALLINFO_FIELD_TYPE hblks; /* always 0 */
74  MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
75  MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
76  MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
77  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
78  MALLINFO_FIELD_TYPE fordblks; /* total free space */
79  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
80 };
81 #endif /* STRUCT_MALLINFO_DECLARED */
82 #endif /* _MALLOC_H */
83 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
84 #endif /* !NO_MALLINFO */
85 
86 /*
87  malloc(size_t n)
88  Returns a pointer to a newly allocated chunk of at least n bytes, or
89  null if no space is available, in which case errno is set to ENOMEM
90  on ANSI C systems.
91 
92  If n is zero, malloc returns a minimum-sized chunk. (The minimum
93  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
94  systems.) Note that size_t is an unsigned type, so calls with
95  arguments that would be negative if signed are interpreted as
96  requests for huge amounts of space, which will often fail. The
97  maximum supported value of n differs across systems, but is in all
98  cases less than the maximum representable value of a size_t.
99 */
100 void* dlmalloc(size_t);
101 
102 /*
103  free(void* p)
104  Releases the chunk of memory pointed to by p, that had been previously
105  allocated using malloc or a related routine such as realloc.
106  It has no effect if p is null. If p was not malloced or already
107  freed, free(p) will by default cause the current program to abort.
108 */
109 void dlfree(void*);
110 
111 /*
112  calloc(size_t n_elements, size_t element_size);
113  Returns a pointer to n_elements * element_size bytes, with all locations
114  set to zero.
115 */
116 void* dlcalloc(size_t, size_t);
117 
118 /*
119  realloc(void* p, size_t n)
120  Returns a pointer to a chunk of size n that contains the same data
121  as does chunk p up to the minimum of (n, p's size) bytes, or null
122  if no space is available.
123 
124  The returned pointer may or may not be the same as p. The algorithm
125  prefers extending p in most cases when possible, otherwise it
126  employs the equivalent of a malloc-copy-free sequence.
127 
128  If p is null, realloc is equivalent to malloc.
129 
130  If space is not available, realloc returns null, errno is set (if on
131  ANSI) and p is NOT freed.
132 
133  if n is for fewer bytes than already held by p, the newly unused
134  space is lopped off and freed if possible. realloc with a size
135  argument of zero (re)allocates a minimum-sized chunk.
136 
137  The old unix realloc convention of allowing the last-free'd chunk
138  to be used as an argument to realloc is not supported.
139 */
140 
141 void* dlrealloc(void*, size_t);
142 
143 /*
144  memalign(size_t alignment, size_t n);
145  Returns a pointer to a newly allocated chunk of n bytes, aligned
146  in accord with the alignment argument.
147 
148  The alignment argument should be a power of two. If the argument is
149  not a power of two, the nearest greater power is used.
150  8-byte alignment is guaranteed by normal malloc calls, so don't
151  bother calling memalign with an argument of 8 or less.
152 
153  Overreliance on memalign is a sure way to fragment space.
154 */
155 void* dlmemalign(size_t, size_t);
156 
157 /*
158  valloc(size_t n);
159  Equivalent to memalign(pagesize, n), where pagesize is the page
160  size of the system. If the pagesize is unknown, 4096 is used.
161 */
162 void* dlvalloc(size_t);
163 
164 /*
165  mallopt(int parameter_number, int parameter_value)
166  Sets tunable parameters The format is to provide a
167  (parameter-number, parameter-value) pair. mallopt then sets the
168  corresponding parameter to the argument value if it can (i.e., so
169  long as the value is meaningful), and returns 1 if successful else
170  0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
171  normally defined in malloc.h. None of these are used in this malloc,
172  so setting them has no effect. But this malloc also supports other
173  options in mallopt:
174 
175  Symbol param # default allowed param values
176  M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
177  M_GRANULARITY -2 page size any power of 2 >= page size
178  M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
179 */
180 int dlmallopt(int, int);
181 
182 #define M_TRIM_THRESHOLD (-1)
183 #define M_GRANULARITY (-2)
184 #define M_MMAP_THRESHOLD (-3)
185 
186 
187 /*
188  malloc_footprint();
189  Returns the number of bytes obtained from the system. The total
190  number of bytes allocated by malloc, realloc etc., is less than this
191  value. Unlike mallinfo, this function returns only a precomputed
192  result, so can be called frequently to monitor memory consumption.
193  Even if locks are otherwise defined, this function does not use them,
194  so results might not be up to date.
195 */
196 size_t dlmalloc_footprint();
197 
198 #if !NO_MALLINFO
199 /*
200  mallinfo()
201  Returns (by copy) a struct containing various summary statistics:
202 
203  arena: current total non-mmapped bytes allocated from system
204  ordblks: the number of free chunks
205  smblks: always zero.
206  hblks: current number of mmapped regions
207  hblkhd: total bytes held in mmapped regions
208  usmblks: the maximum total allocated space. This will be greater
209  than current total if trimming has occurred.
210  fsmblks: always zero
211  uordblks: current total allocated space (normal or mmapped)
212  fordblks: total free space
213  keepcost: the maximum number of bytes that could ideally be released
214  back to system via malloc_trim. ("ideally" means that
215  it ignores page restrictions etc.)
216 
217  Because these fields are ints, but internal bookkeeping may
218  be kept as longs, the reported values may wrap around zero and
219  thus be inaccurate.
220 */
221 
222 struct mallinfo dlmallinfo(void);
223 #endif /* !NO_MALLINFO */
224 
225 /*
226  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
227 
228  independent_calloc is similar to calloc, but instead of returning a
229  single cleared space, it returns an array of pointers to n_elements
230  independent elements that can hold contents of size elem_size, each
231  of which starts out cleared, and can be independently freed,
232  realloc'ed etc. The elements are guaranteed to be adjacently
233  allocated (this is not guaranteed to occur with multiple callocs or
234  mallocs), which may also improve cache locality in some
235  applications.
236 
237  The "chunks" argument is optional (i.e., may be null, which is
238  probably the most typical usage). If it is null, the returned array
239  is itself dynamically allocated and should also be freed when it is
240  no longer needed. Otherwise, the chunks array must be of at least
241  n_elements in length. It is filled in with the pointers to the
242  chunks.
243 
244  In either case, independent_calloc returns this pointer array, or
245  null if the allocation failed. If n_elements is zero and "chunks"
246  is null, it returns a chunk representing an array with zero elements
247  (which should be freed if not wanted).
248 
249  Each element must be individually freed when it is no longer
250  needed. If you'd like to instead be able to free all at once, you
251  should instead use regular calloc and assign pointers into this
252  space to represent elements. (In this case though, you cannot
253  independently free elements.)
254 
255  independent_calloc simplifies and speeds up implementations of many
256  kinds of pools. It may also be useful when constructing large data
257  structures that initially have a fixed number of fixed-sized nodes,
258  but the number is not known at compile time, and some of the nodes
259  may later need to be freed. For example:
260 
261  struct Node { int item; struct Node* next; };
262 
263  struct Node* build_list() {
264  struct Node** pool;
265  int n = read_number_of_nodes_needed();
266  if (n <= 0) return 0;
267  pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
268  if (pool == 0) die();
269  // organize into a linked list...
270  struct Node* first = pool[0];
271  for (i = 0; i < n-1; ++i)
272  pool[i]->next = pool[i+1];
273  free(pool); // Can now free the array (or not, if it is needed later)
274  return first;
275  }
276 */
277 void** dlindependent_calloc(size_t, size_t, void**);
278 
279 /*
280  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
281 
282  independent_comalloc allocates, all at once, a set of n_elements
283  chunks with sizes indicated in the "sizes" array. It returns
284  an array of pointers to these elements, each of which can be
285  independently freed, realloc'ed etc. The elements are guaranteed to
286  be adjacently allocated (this is not guaranteed to occur with
287  multiple callocs or mallocs), which may also improve cache locality
288  in some applications.
289 
290  The "chunks" argument is optional (i.e., may be null). If it is null
291  the returned array is itself dynamically allocated and should also
292  be freed when it is no longer needed. Otherwise, the chunks array
293  must be of at least n_elements in length. It is filled in with the
294  pointers to the chunks.
295 
296  In either case, independent_comalloc returns this pointer array, or
297  null if the allocation failed. If n_elements is zero and chunks is
298  null, it returns a chunk representing an array with zero elements
299  (which should be freed if not wanted).
300 
301  Each element must be individually freed when it is no longer
302  needed. If you'd like to instead be able to free all at once, you
303  should instead use a single regular malloc, and assign pointers at
304  particular offsets in the aggregate space. (In this case though, you
305  cannot independently free elements.)
306 
307  independent_comalloc differs from independent_calloc in that each
308  element may have a different size, and also that it does not
309  automatically clear elements.
310 
311  independent_comalloc can be used to speed up allocation in cases
312  where several structs or objects must always be allocated at the
313  same time. For example:
314 
315  struct Head { ... }
316  struct Foot { ... }
317 
318  void send_message(char* msg) {
319  int msglen = strlen(msg);
320  size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
321  void* chunks[3];
322  if (independent_comalloc(3, sizes, chunks) == 0)
323  die();
324  struct Head* head = (struct Head*)(chunks[0]);
325  char* body = (char*)(chunks[1]);
326  struct Foot* foot = (struct Foot*)(chunks[2]);
327  // ...
328  }
329 
330  In general though, independent_comalloc is worth using only for
331  larger values of n_elements. For small values, you probably won't
332  detect enough difference from series of malloc calls to bother.
333 
334  Overuse of independent_comalloc can increase overall memory usage,
335  since it cannot reuse existing noncontiguous small chunks that
336  might be available for some of the elements.
337 */
338 void** dlindependent_comalloc(size_t, size_t*, void**);
339 
340 
341 /*
342  pvalloc(size_t n);
343  Equivalent to valloc(minimum-page-that-holds(n)), that is,
344  round up n to nearest pagesize.
345  */
346 void* dlpvalloc(size_t);
347 
348 /*
349  malloc_trim(size_t pad);
350 
351  If possible, gives memory back to the system (via negative arguments
352  to sbrk) if there is unused memory at the `high' end of the malloc
353  pool or in unused MMAP segments. You can call this after freeing
354  large blocks of memory to potentially reduce the system-level memory
355  requirements of a program. However, it cannot guarantee to reduce
356  memory. Under some allocation patterns, some large free blocks of
357  memory will be locked between two used chunks, so they cannot be
358  given back to the system.
359 
360  The `pad' argument to malloc_trim represents the amount of free
361  trailing space to leave untrimmed. If this argument is zero, only
362  the minimum amount of memory to maintain internal data structures
363  will be left. Non-zero arguments can be supplied to maintain enough
364  trailing space to service future expected allocations without having
365  to re-obtain memory from the system.
366 
367  Malloc_trim returns 1 if it actually released any memory, else 0.
368 */
369 int dlmalloc_trim(size_t);
370 
371 /*
372  malloc_stats();
373  Prints on stderr the amount of space obtained from the system (both
374  via sbrk and mmap), the maximum amount (which may be more than
375  current if malloc_trim and/or munmap got called), and the current
376  number of bytes allocated via malloc (or realloc, etc) but not yet
377  freed. Note that this is the number of bytes allocated, not the
378  number requested. It will be larger than the number requested
379  because of alignment and bookkeeping overhead. Because it includes
380  alignment wastage as being in use, this figure may be greater than
381  zero even when no user-level chunks are allocated.
382 
383  The reported current and maximum system memory can be inaccurate if
384  a program makes other calls to system memory allocation functions
385  (normally sbrk) outside of malloc.
386 
387  malloc_stats prints only the most commonly interesting statistics.
388  More information can be obtained by calling mallinfo.
389 */
390 void dlmalloc_stats();
391 
392 #endif /* !ONLY_MSPACES */
393 
394 /*
395  malloc_usable_size(void* p);
396 
397  Returns the number of bytes you can actually use in
398  an allocated chunk, which may be more than you requested (although
399  often not) due to alignment and minimum size constraints.
400  You can use this many bytes without worrying about
401  overwriting other allocated objects. This is not a particularly great
402  programming practice. malloc_usable_size can be more useful in
403  debugging and assertions, for example:
404 
405  p = malloc(n);
406  assert(malloc_usable_size(p) >= 256);
407 */
408 size_t dlmalloc_usable_size(void*);
409 
410 
411 #if MSPACES
412 
413 /*
414  mspace is an opaque type representing an independent
415  region of space that supports mspace_malloc, etc.
416 */
417 typedef void* mspace;
418 
419 /*
420  create_mspace creates and returns a new independent space with the
421  given initial capacity, or, if 0, the default granularity size. It
422  returns null if there is no system memory available to create the
423  space. If argument locked is non-zero, the space uses a separate
424  lock to control access. The capacity of the space will grow
425  dynamically as needed to service mspace_malloc requests. You can
426  control the sizes of incremental increases of this space by
427  compiling with a different DEFAULT_GRANULARITY or dynamically
428  setting with mallopt(M_GRANULARITY, value).
429 */
430 mspace create_mspace(size_t capacity, int locked);
431 
432 /*
433  destroy_mspace destroys the given space, and attempts to return all
434  of its memory back to the system, returning the total number of
435  bytes freed. After destruction, the results of access to all memory
436  used by the space become undefined.
437 */
438 size_t destroy_mspace(mspace msp);
439 
440 /*
441  create_mspace_with_base uses the memory supplied as the initial base
442  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
443  space is used for bookkeeping, so the capacity must be at least this
444  large. (Otherwise 0 is returned.) When this initial space is
445  exhausted, additional memory will be obtained from the system.
446  Destroying this space will deallocate all additionally allocated
447  space (if possible) but not the initial base.
448 */
449 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
450 
451 /*
452  mspace_track_large_chunks controls whether requests for large chunks
453  are allocated in their own untracked mmapped regions, separate from
454  others in this mspace. By default large chunks are not tracked,
455  which reduces fragmentation. However, such chunks are not
456  necessarily released to the system upon destroy_mspace. Enabling
457  tracking by setting to true may increase fragmentation, but avoids
458  leakage when relying on destroy_mspace to release all memory
459  allocated using this space. The function returns the previous
460  setting.
461 */
462 int mspace_track_large_chunks(mspace msp, int enable);
463 
464 /*
465  mspace_malloc behaves as malloc, but operates within
466  the given space.
467 */
468 void* mspace_malloc(mspace msp, size_t bytes);
469 
470 /*
471  mspace_free behaves as free, but operates within
472  the given space.
473 
474  If compiled with FOOTERS==1, mspace_free is not actually needed.
475  free may be called instead of mspace_free because freed chunks from
476  any space are handled by their originating spaces.
477 */
478 void mspace_free(mspace msp, void* mem);
479 
480 /*
481  mspace_realloc behaves as realloc, but operates within
482  the given space.
483 
484  If compiled with FOOTERS==1, mspace_realloc is not actually
485  needed. realloc may be called instead of mspace_realloc because
486  realloced chunks from any space are handled by their originating
487  spaces.
488 */
489 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
490 
491 /*
492  mspace_calloc behaves as calloc, but operates within
493  the given space.
494 */
495 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
496 
497 /*
498  mspace_memalign behaves as memalign, but operates within
499  the given space.
500 */
501 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
502 
503 /*
504  mspace_independent_calloc behaves as independent_calloc, but
505  operates within the given space.
506 */
507 void** mspace_independent_calloc(mspace msp, size_t n_elements,
508  size_t elem_size, void* chunks[]);
509 
510 /*
511  mspace_independent_comalloc behaves as independent_comalloc, but
512  operates within the given space.
513 */
514 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
515  size_t sizes[], void* chunks[]);
516 
517 /*
518  mspace_footprint() returns the number of bytes obtained from the
519  system for this space.
520 */
521 size_t mspace_footprint(mspace msp);
522 
523 
524 #if !NO_MALLINFO
525 /*
526  mspace_mallinfo behaves as mallinfo, but reports properties of
527  the given space.
528 */
529 struct mallinfo mspace_mallinfo(mspace msp);
530 #endif /* !NO_MALLINFO */
531 
532 /*
533  mspace_usable_size(void* p) behaves the same as malloc_usable_size;
534 */
535  size_t mspace_usable_size(void* mem);
536 
537 /*
538  mspace_malloc_stats behaves as malloc_stats, but reports
539  properties of the given space.
540 */
541 void mspace_malloc_stats(mspace msp);
542 
543 /*
544  mspace_trim behaves as malloc_trim, but
545  operates within the given space.
546 */
547 int mspace_trim(mspace msp, size_t pad);
548 
549 /*
550  An alias for mallopt.
551 */
552 int mspace_mallopt(int, int);
553 
554 #endif /* MSPACES */
555 
556 #ifdef __cplusplus
557 }; /* end of extern "C" */
558 #endif
559 
560 #endif /* MALLOC_280_H */