Thanks to Martin Fong and others for supplying this.
*/
-
#ifdef WIN32
#define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
				~(malloc_getpagesize-1))
-
void* wsbrk (long size)
{
void* tmp;
#endif
-
-
/*
Type declarations
*/
-
struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk* mchunkptr;

/*
   An allocated chunk looks like this:
-
    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
Where "chunk" is the front of the chunk for the purpose of most of
the malloc code, but "mem" is the pointer that is returned to the
user. "Nextchunk" is the beginning of the next contiguous chunk.
#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
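/*
  A minimal sketch of the chunk/mem conversion implied above. The file's
  real macros are chunk2mem() and mem2chunk(); the two-word offset here
  assumes the usual layout of prev_size followed by size.
*/
#if 0
#define chunk2mem_sketch(p)   ((Void_t*)((char*)(p) + 2*sizeof(INTERNAL_SIZE_T)))
#define mem2chunk_sketch(mem) ((mchunkptr)((char*)(mem) - 2*sizeof(INTERNAL_SIZE_T)))
#endif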
-
-
-
/*
Physical chunk operations
*/
-
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2

/* Bits to mask off when extracting size */
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
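/*
  Sketch: the net size of a chunk is recovered by masking the flag bits
  off the size field; the file's own chunksize() macro does exactly this.
*/
#if 0
#define chunksize_sketch(p) ((p)->size & ~(SIZE_BITS))
#endif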
-
/* Ptr to next physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
#define prev_chunk(p)\
((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
-
/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
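/*
  Illustration of walking physically adjacent chunks with the macros
  above. A sketch only: `first' and `beyond' are hypothetical bounds,
  and next_chunk() advances by the size field with PREV_INUSE masked off.
*/
#if 0
static void walk_chunks_sketch(mchunkptr first, char *beyond)
{
  mchunkptr p = first;
  while ((char*)p < beyond)
    p = next_chunk(p);  /* step to the next contiguous chunk */
}
#endif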
-
-
-
/*
Dealing with use bits
*/
#define clear_inuse_bit_at_offset(p, s)\
(((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
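/*
  Sketch: a chunk's in-use status is stored in the PREV_INUSE bit of the
  chunk that physically follows it, which is why the bit is manipulated
  "at offset". Hypothetical helper, equivalent to the file's inuse() macro:
*/
#if 0
static int inuse_sketch(mchunkptr p)
{
  return ((mchunkptr)((char*)(p) + ((p)->size & ~PREV_INUSE)))->size
	 & PREV_INUSE;
}
#endif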
-
-
-
/*
Dealing with size fields
*/
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
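/*
  Sketch: when a chunk becomes free its size is recorded twice -- in its
  own head and, via set_foot(), in the prev_size field of the following
  chunk -- so prev_chunk() can walk backwards. Assumes a set_head() of
  the usual form ((p)->size = (s)).
*/
#if 0
static void mark_free_sketch(mchunkptr p, INTERNAL_SIZE_T sz)
{
  p->size = sz | PREV_INUSE;  /* what set_head(p, sz | PREV_INUSE) does */
  set_foot(p, sz);            /* following chunk's prev_size = sz */
}
#endif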
-
-
-
-
/*
   Bins
*/

#define top (av_[2]) /* The topmost chunk */
#define last_remainder (bin_at(1)) /* remainder from last split */
-
/*
   Because top initially points to its own bin with initial
   zero size, thus forcing extension on the first malloc request,
   we avoid having any special code in malloc to check whether
   it even exists yet. But we still need to in malloc_extend_top.
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
-
-
/*
  To help compensate for the large number of bins, a one-level index
  structure is used for bin-by-bin searching.  `binblocks' is a
  bitvector recording whether groups of BINBLOCKWIDTH bins have any
  (possibly) non-empty bins, so they can be skipped over all at once
  during traversals.
*/

#define mark_binblock(ii)  (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
#define clear_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))
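/*
  Sketch of how the binblock bitvector lets a search skip whole groups of
  (possibly) empty bins at once. BINBLOCKWIDTH, NAV, binblocks_r and
  idx2binblock() are the file's own names; the loop itself is illustrative.
*/
#if 0
static int first_marked_block_sketch(int idx)
{
  while (idx < NAV && (binblocks_r & idx2binblock(idx)) == 0)
    idx += BINBLOCKWIDTH - (idx % BINBLOCKWIDTH); /* jump to next block */
  return idx;  /* first index whose block may hold a non-empty bin */
}
#endif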
-
-
-
-
/* Other static bookkeeping data */
/* variables holding tunable values */
#ifdef DEBUG
-
/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/
-
#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
/* ... and alignment */
assert(aligned_OK(chunk2mem(p)));
-
/* ... and was allocated at front of an available chunk */
assert(prev_inuse(p));
}
-
#define check_free_chunk(P) do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#endif
-
-
/*
Macro-based internal utilities
*/
-
/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.

  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/
-
#define frontlink(P, S, IDX, BK, FD) \
{ \
if (S < MAX_SMALLBIN_SIZE) \
} \
}
-
/* take a chunk off a list */
#define unlink(P, BK, FD) \
{ \
  BK = P->bk; \
  FD = P->fd; \
  FD->bk = BK; \
  BK->fd = FD; \
}
#define clear_last_remainder \
(last_remainder->fd = last_remainder->bk = last_remainder)
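/*
  Usage sketch: backward coalescing during free() first unlink()s the
  free neighbour from its bin, then frontlink()s the merged chunk.
  `prevsz', `bck', `fwd' and `idx' are illustrative locals.
*/
#if 0
  prevsz = p->prev_size;
  p = chunk_at_offset(p, -((long) prevsz));  /* merge with previous chunk */
  sz += prevsz;
  unlink(p, bck, fwd);                       /* remove it from its bin */
  frontlink(p, sz, idx, bck, fwd);           /* relink the merged chunk */
#endif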
-
-
-
-
/* Routines dealing with mmap(). */
#if HAVE_MMAP
assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
}
-
-
-
/* Main public routines */
-
/*
  Malloc Algorithm:
contiguous memory. Thus, it should be safe to intersperse
mallocs with other sbrk calls.
-
    All allocations are made from the `lowest' part of any found
    chunk. (The implementation invariant is that prev_inuse is
    always true of any allocated chunk; i.e., that each allocated
    chunk borders either a previously allocated and still in-use chunk,
    or the base of its memory arena.)
*/
}
}
-
/* Try to use top chunk */
/* Require that there be a remainder, ensuring top always exists */
}
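/*
  Sketch of the "use top" step referred to above: the request is carved
  from the bottom of top only if a remainder of at least MINSIZE is left,
  so top always continues to exist. Illustrative, using the file's macros;
  `victim', `nb' and `remainder_size' are locals of mALLOc.
*/
#if 0
  if ((remainder_size = chunksize(top) - nb) >= (long)MINSIZE)
  {
    victim = top;
    top = chunk_at_offset(victim, nb);
    set_head(top, remainder_size | PREV_INUSE);
    set_head(victim, nb | PREV_INUSE);
    return chunk2mem(victim);
  }
#endif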
-
-
-
/*
  free() algorithm :

    free(0) has no effect. Chunks allocated via mmap are released via
    munmap(). A chunk bordering the current high end of memory is
    consolidated into top (and if the total unused topmost memory
    exceeds the trim threshold, malloc_trim is called). Other chunks
    are consolidated as they arrive and placed in corresponding bins.
*/
-
STATIC_IF_MCHECK
#if __STD_C
void fREe_impl(Void_t* mem)
unlink(next, bck, fwd);
}
-
set_head(p, sz | PREV_INUSE);
set_foot(p, sz);
if (!islr)
frontlink(p, sz, idx, bck, fwd);
}
-
-
-
-
/*
Realloc algorithm:
and allowing it would also allow too many other incorrect
usages of realloc to be sensible.
-
*/
-
STATIC_IF_MCHECK
#if __STD_C
Void_t* rEALLOc_impl(Void_t* oldmem, size_t bytes)
newp = oldp = mem2chunk(oldmem);
newsize = oldsize = chunksize(oldp);
-
nb = request2size(bytes);
#if HAVE_MMAP
VALGRIND_MAKE_MEM_DEFINED(oldmem, bytes);
}
-
split: /* split off extra room in old or expanded chunk */
if (newsize - nb >= MINSIZE) /* split off remainder */
return chunk2mem(newp);
}
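/*
  Sketch of the split step above: if the (possibly expanded) chunk has at
  least MINSIZE spare bytes, the tail becomes a separate chunk and is
  handed back through free(). Illustrative; `remainder' is a local
  mchunkptr and the macros are the file's own.
*/
#if 0
  remainder = chunk_at_offset(newp, nb);
  set_head_size(newp, nb);
  set_head(remainder, (newsize - nb) | PREV_INUSE);
  set_inuse_bit_at_offset(remainder, newsize - nb);
  fREe_impl(chunk2mem(remainder));  /* let free() place it in a bin */
#endif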
-
-
-
/*
  memalign algorithm:

    memalign requests more than enough space from malloc, finds a spot
    within that chunk that meets the alignment request, and then
    possibly frees the leading and trailing space.
*/
-
STATIC_IF_MCHECK
#if __STD_C
Void_t* mEMALIGn_impl(size_t alignment, size_t bytes)
}
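/*
  Usage sketch (hypothetical caller): requesting a page-aligned buffer.
  Alignment must be a power of two; memalign rounds it up otherwise.
*/
#if 0
  void *buf = memalign(4096, 16384);  /* 16 KiB at a 4 KiB boundary */
  if (buf)
    free(buf);  /* memalign'ed memory is released with ordinary free */
#endif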
-
-
-
/*
  valloc just invokes memalign with alignment argument equal
  to the page size of the system (or as near to this as can
  be figured out from all the includes/defines above.)

  pvalloc just invokes valloc for the nearest pagesize
  that will accommodate the request.
*/
-
#if __STD_C
Void_t* pvALLOc(size_t bytes)
#else
Void_t* pvALLOc(bytes) size_t bytes;
#endif
{
  size_t pagesize = malloc_getpagesize;
  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
}
INTERNAL_SIZE_T sz = n * elem_size;
-
/* check if expand_top called, in which case don't need to clear */
#if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
#if MORECORE_CLEARS
/* Two optional cases in which clearing not necessary */
-
#if HAVE_MMAP
if (chunk_is_mmapped(p)) return mem;
#endif
}
#endif
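/*
  Sketch of the clearing decision described above: calloc can skip the
  memset when the memory is known to be zero already -- mmapped chunks,
  and freshly sbrk'ed space under MORECORE_CLEARS. The oldtop bookkeeping
  that detects fresh sbrk space is elided here.
*/
#if 0
  Void_t* mem = mALLOc_impl(sz);
  if (mem != NULL && !chunk_is_mmapped(mem2chunk(mem)))
    MALLOC_ZERO(mem, sz);  /* clear only when the system did not */
#endif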
-
#ifdef MCHECK_HEAP_PROTECTION
#include "mcheck_core.inc.h"
#if !__STD_C
  #error "must have __STD_C"
#endif
-
/*
  Malloc_trim gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program.
*/
}
}
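/*
  Usage sketch: after releasing large buffers, unused space at the top of
  the heap can be returned to the system, keeping `pad' bytes of slack.
  `big_buffer' is a hypothetical large allocation.
*/
#if 0
  free(big_buffer);
  malloc_trim(0);  /* shrink the heap via a negative sbrk if possible */
#endif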
-
-
/*
  malloc_usable_size:

    This routine tells you how many bytes you can actually use in an
    allocated chunk, which may be more than you requested (although
    often not).
*/
}
}
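/*
  Usage sketch: the usable size may exceed the requested size because
  requests are padded up to chunk granularity.
*/
#if 0
  char *p = malloc(100);
  size_t n = malloc_usable_size(p);  /* n >= 100 for a non-NULL p */
#endif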
-
-
-
/* Utility to update current_mallinfo for malloc_stats and mallinfo() */
#ifdef DEBUG
}
#endif /* DEBUG */
-
-
/*
  malloc_stats:

    Prints the amount of space obtained from the system (both via
    sbrk and mmap), the maximum amount (which may be more than
    current if malloc_trim and/or munmap got called), and the
    current number of bytes allocated via malloc (or realloc, etc).
*/
}
#endif /* DEBUG */
-
-
-
/*
mallopt: