/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992 Free Software Foundation

   Written May 1989 by Mike Haertel.
   Heavily modified Mar 1992 by Fred Fish for mmap'd version.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   The author may be reached (Email) at the address [email protected],
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <string.h>	/* Prototypes for memcpy, memmove, memset, etc */

#include "mmprivate.h"

/* Prototypes for local functions */

static int initialize PARAMS ((struct mdesc *));
static PTR morecore PARAMS ((struct mdesc *, size_t));
static PTR align PARAMS ((struct mdesc *, size_t));

/* Aligned allocation.  */
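/* Worked example (illustrative only; it assumes BLOCKSIZE is 4096, which is
   actually target-dependent and defined in mmprivate.h): if the morecore
   handler returns 0x1000000c, RESIDUAL yields adj = 0xc, so we request
   another 4096 - 12 bytes and hand back 0x10001000, the next block
   boundary.  */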

static PTR
align (mdp, size)
  struct mdesc *mdp;
  size_t size;
{
  PTR result;
  unsigned long int adj;

  result = mdp -> morecore (mdp, size);
  adj = RESIDUAL (result, BLOCKSIZE);
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      mdp -> morecore (mdp, adj);
      result = (char *) result + adj;
    }
  return (result);
}

/* Set everything up and remember that we have. */
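/* The heap info table keeps one malloc_info entry per BLOCKSIZE-sized block
   of the managed region.  Sizing sketch (assuming the usual definitions in
   mmprivate.h, e.g. HEAP = 4 Mbytes and BLOCKSIZE = 4096): the initial table
   holds HEAP / BLOCKSIZE = 1024 entries and is itself carved out of the
   region via align ().  */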

static int
initialize (mdp)
  struct mdesc *mdp;
{
  mdp -> heapsize = HEAP / BLOCKSIZE;
  mdp -> heapinfo = (malloc_info *)
    align (mdp, mdp -> heapsize * sizeof (malloc_info));
  if (mdp -> heapinfo == NULL)
    {
      return (0);
    }
  memset ((PTR)mdp -> heapinfo, 0, mdp -> heapsize * sizeof (malloc_info));
  mdp -> heapinfo[0].free.size = 0;
  mdp -> heapinfo[0].free.next = mdp -> heapinfo[0].free.prev = 0;
  mdp -> heapindex = 0;
  mdp -> heapbase = (char *) mdp -> heapinfo;
  mdp -> flags |= MMALLOC_INITIALIZED;
  return (1);
}

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary. */

static PTR
morecore (mdp, size)
  struct mdesc *mdp;
  size_t size;
{
  PTR result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  result = align (mdp, size);
  if (result == NULL)
    {
      return (NULL);
    }

  /* Check if we need to grow the info table. */
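  /* Growth sketch: the table doubles until it can index the highest block
     just handed out.  For instance, with heapsize at 1024 entries and the
     new core ending in block 3000, newsize becomes 2048 and then 4096
     before the copy below is made.  */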
  if ((size_t) BLOCK ((char *) result + size) > mdp -> heapsize)
    {
      newsize = mdp -> heapsize;
      while ((size_t) BLOCK ((char *) result + size) > newsize)
        {
          newsize *= 2;
        }
      newinfo = (malloc_info *) align (mdp, newsize * sizeof (malloc_info));
      if (newinfo == NULL)
        {
          mdp -> morecore (mdp, -size);
          return (NULL);
        }
      memset ((PTR) newinfo, 0, newsize * sizeof (malloc_info));
      memcpy ((PTR) newinfo, (PTR) mdp -> heapinfo,
              mdp -> heapsize * sizeof (malloc_info));
      oldinfo = mdp -> heapinfo;
      newinfo[BLOCK (oldinfo)].busy.type = 0;
      newinfo[BLOCK (oldinfo)].busy.info.size
        = BLOCKIFY (mdp -> heapsize * sizeof (malloc_info));
      mdp -> heapinfo = newinfo;
      __mmalloc_free (mdp, (PTR)oldinfo);
      mdp -> heapsize = newsize;
    }

  mdp -> heaplimit = BLOCK ((char *) result + size);
  return (result);
}

/* Allocate memory from the heap. */

PTR
mmalloc (md, size)
  PTR md;
  size_t size;
{
  struct mdesc *mdp;
  PTR result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;
  register size_t log;

  if (size == 0)
    {
      return (NULL);
    }

  mdp = MD_TO_MDP (md);

  if (mdp -> mmalloc_hook != NULL)
    {
      return ((*mdp -> mmalloc_hook) (md, size));
    }

  if (!(mdp -> flags & MMALLOC_INITIALIZED))
    {
      if (!initialize (mdp))
        {
          return (NULL);
        }
    }

  if (size < sizeof (struct list))
    {
      size = sizeof (struct list);
    }

  /* Determine the allocation policy based on the request size. */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size. */
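      /* For example, a 24-byte request becomes size = 23 after the decrement,
         and the halving loop leaves log = 5, i.e. a 32-byte fragment, the
         smallest power of two that can hold the request.  */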
      log = 1;
      --size;
      while ((size /= 2) != 0)
        {
          ++log;
        }

      /* Look in the fragment lists for a
         free fragment of the desired size. */
      next = mdp -> fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters. */
          result = (PTR) next;
          next -> prev -> next = next -> next;
          if (next -> next != NULL)
            {
              next -> next -> prev = next -> prev;
            }
          block = BLOCK (result);
          if (--mdp -> heapinfo[block].busy.info.frag.nfree != 0)
            {
              mdp -> heapinfo[block].busy.info.frag.first =
                RESIDUAL (next -> next, BLOCKSIZE) >> log;
            }

          /* Update the statistics. */
          mdp -> heapstats.chunks_used++;
          mdp -> heapstats.bytes_used += 1 << log;
          mdp -> heapstats.chunks_free--;
          mdp -> heapstats.bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first. */
          result = mmalloc (md, BLOCKSIZE);
          if (result == NULL)
            {
              return (NULL);
            }

          /* Link all fragments but the first into the free list. */
          for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next -> next = mdp -> fraghead[log].next;
              next -> prev = &mdp -> fraghead[log];
              next -> prev -> next = next;
              if (next -> next != NULL)
                {
                  next -> next -> prev = next;
                }
            }

          /* Initialize the nfree and first counters for this block. */
          block = BLOCK (result);
          mdp -> heapinfo[block].busy.type = log;
          mdp -> heapinfo[block].busy.info.frag.nfree = i - 1;
          mdp -> heapinfo[block].busy.info.frag.first = i - 1;

          mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1;
          mdp -> heapstats.bytes_free += BLOCKSIZE - (1 << log);
          mdp -> heapstats.bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system. */
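      /* BLOCKIFY rounds the request up to whole blocks; for instance, with a
         4096-byte BLOCKSIZE (the real value comes from mmprivate.h), a
         10000-byte request needs blocks = 3.  */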
      blocks = BLOCKIFY(size);
      start = block = MALLOC_SEARCH_START;
      while (mdp -> heapinfo[block].free.size < blocks)
        {
          block = mdp -> heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Check to see if
                 the new core will be contiguous with the final free
                 block; if so we don't need to get as much. */
              block = mdp -> heapinfo[0].free.prev;
              lastblocks = mdp -> heapinfo[block].free.size;
              if (mdp -> heaplimit != 0 &&
                  block + lastblocks == mdp -> heaplimit &&
                  mdp -> morecore (mdp, 0) == ADDRESS(block + lastblocks) &&
                  (morecore (mdp, (blocks - lastblocks) * BLOCKSIZE)) != NULL)
                {
                  /* Which block we are extending (the `final free
                     block' referred to above) might have changed, if
                     it got combined with a freed info table. */
                  block = mdp -> heapinfo[0].free.prev;

                  mdp -> heapinfo[block].free.size += (blocks - lastblocks);
                  mdp -> heapstats.bytes_free +=
                    (blocks - lastblocks) * BLOCKSIZE;
                  continue;
                }
              result = morecore(mdp, blocks * BLOCKSIZE);
              if (result == NULL)
                {
                  return (NULL);
                }
              block = BLOCK (result);
              mdp -> heapinfo[block].busy.type = 0;
              mdp -> heapinfo[block].busy.info.size = blocks;
              mdp -> heapstats.chunks_used++;
              mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
              return (result);
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list. */
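      /* For instance, if a 5-block free run satisfies a 3-block request, the
         2-block tail is relinked into the free list at index block + blocks
         by the first branch below; an exact fit is simply unlinked.  */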
      result = ADDRESS(block);
      if (mdp -> heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list. */
          mdp -> heapinfo[block + blocks].free.size
            = mdp -> heapinfo[block].free.size - blocks;
          mdp -> heapinfo[block + blocks].free.next
            = mdp -> heapinfo[block].free.next;
          mdp -> heapinfo[block + blocks].free.prev
            = mdp -> heapinfo[block].free.prev;
          mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next
            = mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev
            = mdp -> heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list. */
          mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev
            = mdp -> heapinfo[block].free.prev;
          mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next
            = mdp -> heapindex = mdp -> heapinfo[block].free.next;
          mdp -> heapstats.chunks_free--;
        }

      mdp -> heapinfo[block].busy.type = 0;
      mdp -> heapinfo[block].busy.info.size = blocks;
      mdp -> heapstats.chunks_used++;
      mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
      mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE;
    }

  return (result);
}

/* When using this package, provide a version of malloc/realloc/free built
   on top of it, so that if we use the default sbrk() region we will not
   collide with another malloc package trying to do the same thing, if
   the application contains any "hidden" calls to malloc/realloc/free (such
   as inside a system library). */

PTR
malloc (size)
  size_t size;
{
  PTR result;

  result = mmalloc ((PTR) NULL, size);
  return (result);
}
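
/* The matching wrappers for free () and realloc () are presumably provided
   by companion files in this package (mfree.c and mrealloc.c in the GDB
   sources), built on mfree () and mrealloc () in the same fashion as the
   malloc () wrapper above.  */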