/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992 Free Software Foundation

   Written May 1989 by Mike Haertel.
   Heavily modified Mar 1992 by Fred Fish for mmap'd version.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB. If
   not, write to the Free Software Foundation, Inc., 675 Mass Ave,
   Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation. */

#include "mmalloc.h"

/* Prototypes for local functions */

static int initialize PARAMS ((struct mdesc *));
static PTR morecore PARAMS ((struct mdesc *, size_t));
static PTR align PARAMS ((struct mdesc *, size_t));

/* Aligned allocation. */

static PTR
align (mdp, size)
  struct mdesc *mdp;
  size_t size;
{
  PTR result;
  unsigned long int adj;

  result = mdp -> morecore (mdp, size);
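  /* If the address we got back is not on a BLOCKSIZE boundary, request
     just enough additional core to advance to the next boundary and
     return that aligned address.  The bytes skipped over this way are
     never handed out; they are simply wasted. */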
  adj = RESIDUAL (result, BLOCKSIZE);
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (void) mdp -> morecore (mdp, adj);
      result = (char *) result + adj;
    }
  return (result);
}

/* Set everything up and remember that we have. */

static int
initialize (mdp)
  struct mdesc *mdp;
{
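  /* The heap info table gets one malloc_info entry per block of heap;
     start with enough entries to describe HEAP bytes.  Entry 0 does not
     describe a real block; it serves as the anchor of the circular free
     list, so its size and links begin as zero. */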
  mdp -> heapsize = HEAP / BLOCKSIZE;
  mdp -> heapinfo = (malloc_info *)
    align (mdp, mdp -> heapsize * sizeof (malloc_info));
  if (mdp -> heapinfo == NULL)
    {
      return (0);
    }
  memset ((PTR)mdp -> heapinfo, 0, mdp -> heapsize * sizeof (malloc_info));
  mdp -> heapinfo[0].free.size = 0;
  mdp -> heapinfo[0].free.next = mdp -> heapinfo[0].free.prev = 0;
  mdp -> heapindex = 0;
  mdp -> heapbase = (char *) mdp -> heapinfo;
  mdp -> flags |= MMALLOC_INITIALIZED;
  return (1);
}

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary. */

static PTR
morecore (mdp, size)
  struct mdesc *mdp;
  size_t size;
{
  PTR result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  result = align (mdp, size);
  if (result == NULL)
    {
      return (NULL);
    }

  /* Check if we need to grow the info table. */
  if ((size_t) BLOCK ((char *) result + size) > mdp -> heapsize)
    {
      newsize = mdp -> heapsize;
      while ((size_t) BLOCK ((char *) result + size) > newsize)
        {
          newsize *= 2;
        }
      newinfo = (malloc_info *) align (mdp, newsize * sizeof (malloc_info));
      if (newinfo == NULL)
        {
          mdp -> morecore (mdp, -size);
          return (NULL);
        }
      memset ((PTR)newinfo, 0, newsize * sizeof (malloc_info));
      memcpy ((PTR)newinfo, (PTR)mdp -> heapinfo,
              mdp -> heapsize * sizeof (malloc_info));
      oldinfo = mdp -> heapinfo;
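      /* Describe the blocks holding the old info table as an ordinary busy
         region in the new table, so that once heapinfo points at the new
         table the old one can be released through __mmalloc_free. */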
      newinfo[BLOCK (oldinfo)].busy.type = 0;
      newinfo[BLOCK (oldinfo)].busy.info.size
        = BLOCKIFY (mdp -> heapsize * sizeof (malloc_info));
      mdp -> heapinfo = newinfo;
      __mmalloc_free (mdp, (PTR)oldinfo);
      mdp -> heapsize = newsize;
    }

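  /* Record the new top of the heap in block units; mmalloc compares
     against this limit to decide whether fresh core would be contiguous
     with the final free block. */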
  mdp -> heaplimit = BLOCK ((char *) result + size);
  return (result);
}

/* Allocate memory from the heap. */

PTR
mmalloc (md, size)
  PTR md;
  size_t size;
{
  struct mdesc *mdp;
  PTR result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;
  register size_t log;

  if (size == 0)
    {
      return (NULL);
    }

  mdp = MD_TO_MDP (md);

  if (mdp -> mmalloc_hook != NULL)
    {
      return ((*mdp -> mmalloc_hook) (md, size));
    }

  if (!(mdp -> flags & MMALLOC_INITIALIZED))
    {
      if (!initialize (mdp))
        {
          return (NULL);
        }
    }

  if (size < sizeof (struct list))
    {
      size = sizeof (struct list);
    }

  /* Determine the allocation policy based on the request size. */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size. */
      log = 1;
      --size;
      while ((size /= 2) != 0)
        {
          ++log;
        }
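      /* At this point log is the base-two logarithm of the request, rounded
         up, so the fragment handed out is the smallest power of two that is
         at least sizeof (struct list) and large enough for the request. */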

      /* Look in the fragment lists for a
         free fragment of the desired size. */
      next = mdp -> fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters. */
          result = (PTR) next;
          next -> prev -> next = next -> next;
          if (next -> next != NULL)
            {
              next -> next -> prev = next -> prev;
            }
          block = BLOCK (result);
          if (--mdp -> heapinfo[block].busy.info.frag.nfree != 0)
            {
              mdp -> heapinfo[block].busy.info.frag.first =
                RESIDUAL (next -> next, BLOCKSIZE) >> log;
            }

          /* Update the statistics. */
          mdp -> heapstats.chunks_used++;
          mdp -> heapstats.bytes_used += 1 << log;
          mdp -> heapstats.chunks_free--;
          mdp -> heapstats.bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first. */
          result = mmalloc (md, BLOCKSIZE);
          if (result == NULL)
            {
              return (NULL);
            }

          /* Link all fragments but the first into the free list. */
          for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next -> next = mdp -> fraghead[log].next;
              next -> prev = &mdp -> fraghead[log];
              next -> prev -> next = next;
              if (next -> next != NULL)
                {
                  next -> next -> prev = next;
                }
            }
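          /* Fragment zero of the new block is the one being returned.  At
             this point i is the number of fragments in the block, so i - 1
             of them remain on the free list. */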

          /* Initialize the nfree and first counters for this block. */
          block = BLOCK (result);
          mdp -> heapinfo[block].busy.type = log;
          mdp -> heapinfo[block].busy.info.frag.nfree = i - 1;
          mdp -> heapinfo[block].busy.info.frag.first = i - 1;

          mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1;
          mdp -> heapstats.bytes_free += BLOCKSIZE - (1 << log);
          mdp -> heapstats.bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system. */
      blocks = BLOCKIFY(size);
      start = block = MALLOC_SEARCH_START;
      while (mdp -> heapinfo[block].free.size < blocks)
        {
          block = mdp -> heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system. Check to see if
                 the new core will be contiguous with the final free
                 block; if so we don't need to get as much. */
              block = mdp -> heapinfo[0].free.prev;
              lastblocks = mdp -> heapinfo[block].free.size;
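              /* A zero-sized call to the descriptor's morecore is expected
                 to report the current top of core without changing it, so
                 the test below checks whether new core would land right
                 after the last free block.  If it would, that block is
                 grown in place by just the difference, and the continue
                 re-enters the loop where the enlarged block now satisfies
                 the request. */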
              if (mdp -> heaplimit != 0 &&
                  block + lastblocks == mdp -> heaplimit &&
                  mdp -> morecore (mdp, 0) == ADDRESS(block + lastblocks) &&
                  (morecore (mdp, (blocks - lastblocks) * BLOCKSIZE)) != NULL)
                {
                  mdp -> heapinfo[block].free.size = blocks;
                  mdp -> heapstats.bytes_free +=
                    (blocks - lastblocks) * BLOCKSIZE;
                  continue;
                }
              result = morecore(mdp, blocks * BLOCKSIZE);
              if (result == NULL)
                {
                  return (NULL);
                }
              block = BLOCK (result);
              mdp -> heapinfo[block].busy.type = 0;
              mdp -> heapinfo[block].busy.info.size = blocks;
              mdp -> heapstats.chunks_used++;
              mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
              return (result);
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list. */
      result = ADDRESS(block);
      if (mdp -> heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list. */
          mdp -> heapinfo[block + blocks].free.size
            = mdp -> heapinfo[block].free.size - blocks;
          mdp -> heapinfo[block + blocks].free.next
            = mdp -> heapinfo[block].free.next;
          mdp -> heapinfo[block + blocks].free.prev
            = mdp -> heapinfo[block].free.prev;
          mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next
            = mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev
            = mdp -> heapindex = block + blocks;
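          /* The chained assignment above stores the index of the shortened
             tail into both neighbors' links, replacing the original entry in
             the circular free list, and remembers it in heapindex as the
             place where the next search starts. */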
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list. */
          mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev
            = mdp -> heapinfo[block].free.prev;
          mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next
            = mdp -> heapindex = mdp -> heapinfo[block].free.next;
          mdp -> heapstats.chunks_free--;
        }

      mdp -> heapinfo[block].busy.type = 0;
      mdp -> heapinfo[block].busy.info.size = blocks;
      mdp -> heapstats.chunks_used++;
      mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
      mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE;
    }

  return (result);
}

/* When using this package, provide a version of malloc/realloc/free built
   on top of it, so that if we use the default sbrk() region we will not
   collide with another malloc package trying to do the same thing, if
   the application contains any "hidden" calls to malloc/realloc/free (such
   as inside a system library). */

PTR
malloc (size)
  size_t size;
{
  return (mmalloc ((void *) NULL, size));
}