1 /* Synched up with: Not synched up with FSF 19.28, even though I
4 /* Get the configuration files if we're being compiled for Emacs. */
7 # include "getpagesize.h"
13 #if defined (__STDC__) && !defined (STDC_HEADERS)
14 /* The ANSI standard says that defining __STDC__ to a non-zero value means
15 that the compiler conforms to that standard. The standard requires
16 certain header files and library functions to be present. Therefore,
17 if your compiler defines __STDC__ to non-0 but does not have ANSI headers
18 and the ANSI library routines, then your compiler is buggy. Conversely,
19 an ANSI-conforming environment (which has both the ANSI headers and
20 library routines, i.e., stdlib.h and `memmove') does not necessarily
21 define the STDC_HEADERS flag. Lucid Emacs requires an ANSI compiler.
22 Therefore, there is no need to consult the abominable STDC_HEADERS flag.
31 /* DO NOT EDIT THIS FILE -- it is automagically generated. -*- C -*- */
32 /* Bwaa-haa-haa! Not a chance that this is actually true! */
34 #define _MALLOC_INTERNAL
36 /* The malloc headers and source files from the C library follow here. */
38 /* Declarations for `malloc' and friends.
39 Copyright 1990, 1991, 1992, 1993 Free Software Foundation, Inc.
40 Written May 1989 by Mike Haertel.
42 This library is free software; you can redistribute it and/or
43 modify it under the terms of the GNU Library General Public License as
44 published by the Free Software Foundation; either version 2 of the
45 License, or (at your option) any later version.
47 This library is distributed in the hope that it will be useful,
48 but WITHOUT ANY WARRANTY; without even the implied warranty of
49 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
50 Library General Public License for more details.
52 You should have received a copy of the GNU General Public License
53 along with this library; see the file COPYING. If not, write to
54 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
55 Boston, MA 02111-1307, USA.
57 The author may be reached (Email) at the address mike@ai.mit.edu,
58 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
/* malloc.h section: compatibility macros and ANSI prototypes for the
   allocator's public entry points.
   NOTE(review): this file is a sampled dump -- the leading number on each
   line is the original source line number and intervening lines are
   elided, so spans below are not contiguous or compilable as shown. */
64 #ifdef _MALLOC_INTERNAL
77 #endif /* _MALLOC_INTERNAL. */
/* __P(args) passes ANSI prototype argument lists through unchanged;
   this build assumes an ANSI compiler (see the STDC_HEADERS note above). */
86 #define __P(args) args
88 #define __ptr_t void *
91 #define __malloc_size_t size_t
97 /* XEmacs: I thought this should be int under SunOS, but that
98 apparently fails. Curses on all this shit. */
/* Return type of free(): void on ANSI systems. */
99 #define __free_ret_t void
101 /* XEmacs: I tried commenting these out and including stdlib.h,
102 but that fails badly. Urk! This sucks. */
103 /* Allocate SIZE bytes of memory. */
104 extern __ptr_t malloc __P ((size_t __size));
105 /* Re-allocate the previously allocated block
106 in __ptr_t, making the new block SIZE bytes long. */
107 extern __ptr_t realloc __P ((__ptr_t __ptr, size_t __size));
108 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
109 extern __ptr_t calloc __P ((size_t __nmemb, size_t __size));
110 /* Free a block allocated by `malloc', `realloc' or `calloc'. */
111 extern __free_ret_t free __P ((__ptr_t __ptr));
113 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
114 extern __ptr_t memalign __P ((size_t __alignment, size_t __size));
116 /* Allocate SIZE bytes on a page boundary. */
117 extern __ptr_t valloc __P ((size_t __size));
/* Internal (_MALLOC_INTERNAL) data model: the heap is an array of
   BLOCKSIZE blocks described by the _heapinfo table; small requests are
   carved into power-of-two fragments within a block.
   NOTE(review): the struct/union declarations that normally wrap the
   member lines below (malloc_info, struct list, struct alignlist) are
   elided from this sampled dump -- verify against the full source. */
120 #ifdef _MALLOC_INTERNAL
122 /* The allocator divides the heap into blocks of fixed size; large
123 requests receive one or more whole blocks, and small requests
124 receive a fragment of a block. Fragment sizes are powers of two,
125 and all fragments of a block are the same size. When all the
126 fragments in a block have been freed, the block itself is freed. */
/* Bits in an int; selects 4K blocks on >16-bit-int hosts, 512 otherwise. */
127 #define INT_BIT (CHAR_BIT * sizeof(int))
128 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
129 #define BLOCKSIZE (1 << BLOCKLOG)
/* Round a byte count up to a whole number of blocks. */
130 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
132 /* Determine the amount of memory spanned by the initial heap table
133 (not an absolute limit). */
134 #define HEAP (INT_BIT > 16 ? 4194304 : 65536)
136 /* Number of contiguous free blocks allowed to build up at the end of
137 memory before they will be returned to the system. */
138 #define FINAL_FREE_BLOCKS 8
140 /* Data structure giving per-block information. */
143 /* Heap information for a busy block. */
146 /* Zero for a large block, or positive giving the
147 logarithm to the base two of the fragment size. */
148 __malloc_size_t nfree; /* Free frags in a fragmented block. */
154 __malloc_size_t first; /* First free fragment of the block. */
156 /* Size (in blocks) of a large cluster. */
157 __malloc_size_t size;
160 /* Heap information for a free block
161 (that may be the first of a free cluster). */
164 __malloc_size_t size; /* Size (in blocks) of a free cluster. */
165 __malloc_size_t next; /* Index of next free cluster. */
166 __malloc_size_t prev; /* Index of previous free cluster. */
170 /* Pointer to first block of the heap. */
171 extern char *_heapbase;
173 /* Table indexed by block number giving per-block information. */
174 extern malloc_info *_heapinfo;
176 /* Address to block number and vice versa. */
/* Block indices are 1-based: block 1 starts at _heapbase. */
177 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
178 #define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
180 /* Current search index for the heap table. */
181 extern __malloc_size_t _heapindex;
183 /* Limit of valid info table indices. */
184 extern __malloc_size_t _heaplimit;
186 /* Doubly linked lists of free fragments. */
193 /* Free list headers for each fragment size. */
194 extern struct list _fraghead[];
196 /* List of blocks allocated with `memalign' (or `valloc'). */
199 struct alignlist *next;
200 __ptr_t aligned; /* The address that memaligned returned. */
201 __ptr_t exact; /* The address that malloc returned. */
203 extern struct alignlist *_aligned_blocks;
205 /* Instrumentation. */
206 extern __malloc_size_t _chunks_used;
207 extern __malloc_size_t _bytes_used;
208 extern __malloc_size_t _chunks_free;
209 extern __malloc_size_t _bytes_free;
211 /* Internal version of `free' used in `morecore' (malloc.c). */
212 extern void _free_internal __P ((__ptr_t __ptr));
214 #endif /* _MALLOC_INTERNAL. */
/* Public hook and debugging interface of malloc.h: the sbrk-style
   __morecore function pointer, the malloc/free/realloc hooks, and the
   mcheck/mtrace/mstats consistency-checking API.
   NOTE(review): the enum mcheck_status and struct mstats wrappers around
   the member lines below are elided in this sampled dump. */
216 /* Underlying allocation function; successive calls should
217 return contiguous pieces of memory. */
218 extern __ptr_t (*__morecore) __P ((ptrdiff_t __size));
220 /* Default value of `__morecore'. */
221 extern __ptr_t __default_morecore __P ((ptrdiff_t __size));
223 /* If not NULL, this function is called after each time
224 `__morecore' is called to increase the data size. */
225 extern void (*__after_morecore_hook) __P ((void));
227 /* Nonzero if `malloc' has been called and done its initialization. */
228 /* extern int __malloc_initialized; */
230 /* Hooks for debugging versions. */
231 extern void (*__free_hook) __P ((__ptr_t __ptr));
232 extern __ptr_t (*__malloc_hook) __P ((size_t __size));
233 extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, size_t __size));
235 /* Return values for `mprobe': these are the kinds of inconsistencies that
236 `mcheck' enables detection of. */
239 MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
240 MCHECK_OK, /* Block is fine. */
241 MCHECK_FREE, /* Block freed twice. */
242 MCHECK_HEAD, /* Memory before the block was clobbered. */
243 MCHECK_TAIL /* Memory after the block was clobbered. */
246 /* Activate a standard collection of debugging hooks. This must be called
247 before `malloc' is ever called. ABORTFUNC is called with an error code
248 (see enum above) when an inconsistency is detected. If ABORTFUNC is
249 null, the standard function prints on stderr and then calls `abort'. */
250 extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));
252 /* Check for aberrations in a particular malloc'd block. You must have
253 called `mcheck' already. These are the same checks that `mcheck' does
254 when you free or reallocate a block. */
255 extern enum mcheck_status mprobe __P ((__ptr_t __ptr));
257 /* Activate a standard collection of tracing hooks. */
258 extern void mtrace __P ((void));
259 extern void muntrace __P ((void));
261 /* Statistics available to the user. */
264 __malloc_size_t bytes_total; /* Total size of the heap. */
265 __malloc_size_t chunks_used; /* Chunks allocated by the user. */
266 __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
267 __malloc_size_t chunks_free; /* Chunks in the free list. */
268 __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
271 /* Pick up the current statistics. */
272 extern struct mstats mstats __P ((void));
274 /* Call WARNFUN with a warning message when memory usage is high. */
275 extern void memory_warnings __P ((__ptr_t __start,
276 void (*__warnfun) __P ((__const char *))));
/* Relocating-allocator prototypes are compiled out (#if 0): they clash
   with the real r_alloc declarations elsewhere in the tree. */
279 #if 0 /* unused in this file, and conflicting prototypes anyway */
280 /* Relocating allocator. */
282 /* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
283 extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, size_t __size));
285 /* Free the storage allocated in HANDLEPTR. */
286 extern void r_alloc_free __P ((__ptr_t *__handleptr));
288 /* Adjust the block at HANDLEPTR to be SIZE bytes long. */
289 extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, size_t __size));
296 #endif /* malloc.h */
297 /* Allocate memory on a page boundary.
298 Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
300 This library is free software; you can redistribute it and/or
301 modify it under the terms of the GNU Library General Public License as
302 published by the Free Software Foundation; either version 2 of the
303 License, or (at your option) any later version.
305 This library is distributed in the hope that it will be useful,
306 but WITHOUT ANY WARRANTY; without even the implied warranty of
307 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
308 Library General Public License for more details.
310 You should have received a copy of the GNU General Public License
311 along with this library; see the file COPYING. If not, write to
312 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
313 Boston, MA 02111-1307, USA.
315 The author may be reached (Email) at the address mike@ai.mit.edu,
316 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
/* valloc.c section: page-aligned allocation. __getpagesize is resolved
   either from the GNU C library or via the local getpagesize.h shim,
   then valloc simply delegates to memalign with the page size.
   NOTE(review): lines are elided in this dump (e.g. valloc's return
   type, braces, and the pagesize==0 guard around line 339) -- verify
   against the full source. */
318 #if defined (__GNU_LIBRARY__) || defined (_LIBC)
320 #include <sys/cdefs.h>
321 #if ! (defined (__GLIBC__) && (__GLIBC__ >= 2))
322 extern size_t __getpagesize __P ((void));
325 #include "getpagesize.h"
326 #define __getpagesize() getpagesize()
329 #ifndef _MALLOC_INTERNAL
330 #define _MALLOC_INTERNAL
/* Cached page size; filled in lazily on first call. */
334 static __malloc_size_t pagesize;
337 valloc (__malloc_size_t size)
340 pagesize = __getpagesize ();
342 return memalign (pagesize, size);
344 /* Memory allocator `malloc'.
345 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation
346 Written May 1989 by Mike Haertel.
348 This library is free software; you can redistribute it and/or
349 modify it under the terms of the GNU Library General Public License as
350 published by the Free Software Foundation; either version 2 of the
351 License, or (at your option) any later version.
353 This library is distributed in the hope that it will be useful,
354 but WITHOUT ANY WARRANTY; without even the implied warranty of
355 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
356 Library General Public License for more details.
358 You should have received a copy of the GNU General Public License
359 along with this library; see the file COPYING. If not, write to
360 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
361 Boston, MA 02111-1307, USA.
363 The author may be reached (Email) at the address mike@ai.mit.edu,
364 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
/* malloc.c configuration: how to obtain more core. With the XEmacs
   static (dumped) heap, __morecore starts as more_static_core and
   PURE_DATA(x) detects pointers into the dumped region (free/realloc on
   such pointers must be no-ops). Otherwise __default_morecore is used
   and PURE_DATA is always 0.
   NOTE(review): the #ifdef/#else that selects between the two
   __morecore initializers (lines 384 vs 386) is elided in this dump. */
366 #ifndef _MALLOC_INTERNAL
367 #define _MALLOC_INTERNAL
371 /* How to really get more memory. */
373 /* once dumped, free() & realloc() on static heap space will fail */
374 #define PURE_DATA(x) \
375 ((static_heap_dumped && (char*)x >= static_heap_base \
376 && (char*)x <= (static_heap_base + static_heap_size) ) ? 1 : 0)
377 extern int initialized;
378 extern int purify_flag;
379 extern char* static_heap_base;
380 extern char* static_heap_ptr;
381 extern char* static_heap_dumped;
382 extern unsigned long static_heap_size;
383 extern __ptr_t more_static_core __P ((ptrdiff_t __size));
384 __ptr_t (*__morecore) __P ((ptrdiff_t __size)) = more_static_core;
386 __ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
387 #define PURE_DATA(x) 0
/* Allocator state definitions plus `align': grab SIZE bytes from
   __morecore and, if the result is not BLOCKSIZE-aligned, extend the
   break by the shortfall and bump the pointer forward to the next
   block boundary.
   NOTE(review): align's return type, braces, the `result' declaration,
   and the failure checks on the __morecore results are elided in this
   sampled dump. */
390 /* Debugging hook for `malloc'. */
391 __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
393 /* Pointer to the base of the first block. */
396 /* Block information table. Allocated with align/__free (not malloc/free). */
397 malloc_info *_heapinfo;
399 /* Number of info entries. */
400 static __malloc_size_t heapsize;
402 /* Search index in the info table. */
403 __malloc_size_t _heapindex;
405 /* Limit of valid info table indices. */
406 __malloc_size_t _heaplimit;
408 /* Free lists for each fragment size. */
409 struct list _fraghead[BLOCKLOG];
411 /* Instrumentation. */
412 __malloc_size_t _chunks_used;
413 __malloc_size_t _bytes_used;
414 __malloc_size_t _chunks_free;
415 __malloc_size_t _bytes_free;
417 /* Are you experienced? */
418 int __malloc_initialized;
420 void (*__after_morecore_hook) __P ((void));
422 /* Aligned allocation. */
423 static __ptr_t align __P ((__malloc_size_t));
425 align (__malloc_size_t size)
428 unsigned long int adj;
430 result = (*__morecore) (size);
/* Misalignment of the returned core relative to BLOCKSIZE, computed
   via the (char*)result - (char*)NULL pointer-to-integer idiom. */
431 adj = (unsigned long int) ((unsigned long int) ((char *) result -
432 (char *) NULL)) % BLOCKSIZE;
/* Non-zero adj: ask for the remainder of the block and skip past it. */
435 adj = BLOCKSIZE - adj;
436 (void) (*__morecore) (adj);
437 result = (char *) result + adj;
440 if (__after_morecore_hook)
441 (*__after_morecore_hook) ();
/* `initialize': one-time setup of the heap bookkeeping. Allocates and
   zeroes the initial _heapinfo table, seeds the free-list sentinel at
   index 0, records _heapbase, and marks the allocator initialized.
   NOTE(review): the function header, braces, and the error-return path
   after the NULL check (line 460) are elided in this sampled dump. */
446 /* Set everything up and remember that we have. */
447 static int initialize __P ((void));
/* After dumping, the static heap must not be extended -- fall back to
   the real sbrk-style __morecore. */
452 if (static_heap_dumped && __morecore == more_static_core)
454 __morecore = __default_morecore;
457 heapsize = HEAP / BLOCKSIZE;
458 _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
459 if (_heapinfo == NULL)
461 memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
462 memset (_fraghead, 0, BLOCKLOG * sizeof (struct list));
/* Entry 0 is the free-list anchor: size 0, circularly linked to itself. */
463 _heapinfo[0].free.size = 0;
464 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
467 _heapbase = (char *) _heapinfo;
469 /* Account for the _heapinfo block itself in the statistics. */
470 _bytes_used = heapsize * sizeof (malloc_info);
476 __malloc_initialized = 1;
/* `morecore': obtain SIZE aligned bytes from the system and grow the
   _heapinfo table when the new core pushes the highest block number past
   the table's current capacity. The old table is copied into a freshly
   aligned larger one, registered as an ordinary busy region, and then
   released through _free_internal.
   NOTE(review): the function's return type, braces, NULL checks, the
   newsize doubling loop body, and the oldinfo/heapsize updates between
   the numbered lines are elided in this sampled dump. */
480 /* Get neatly aligned memory, initializing or
481 growing the heap info table as necessary. */
482 static __ptr_t morecore __P ((__malloc_size_t));
484 morecore (__malloc_size_t size)
487 malloc_info *newinfo, *oldinfo;
488 __malloc_size_t newsize;
490 result = align (size);
494 /* Check if we need to grow the info table. */
495 if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
/* Double newsize until the table covers the new top block. */
498 while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize)
500 newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
/* Table allocation failed: give the just-acquired core back. */
503 (*__morecore) (-(int)size);
506 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
507 memset (&newinfo[heapsize], 0,
508 (newsize - heapsize) * sizeof (malloc_info));
/* Describe the old table as a busy large region so it can be freed. */
510 newinfo[BLOCK (oldinfo)].busy.type = 0;
511 newinfo[BLOCK (oldinfo)].busy.info.size
512 = BLOCKIFY (heapsize * sizeof (malloc_info));
514 /* Account for the _heapinfo block itself in the statistics. */
515 _bytes_used += newsize * sizeof (malloc_info);
517 _free_internal (oldinfo);
521 _heaplimit = BLOCK ((char *) result + size);
/* `malloc': the main allocator. Requests <= BLOCKSIZE/2 are served as
   power-of-two fragments from the per-size _fraghead lists (breaking a
   fresh block into fragments when the list is empty); larger requests
   take whole blocks from the circular free list, extending the heap via
   morecore when no cluster is big enough.
   NOTE(review): this is a sampled dump -- the return type, braces,
   declarations of `result'/`next'/`i', NULL checks after inner malloc
   and morecore calls, and several statistics lines are elided between
   the numbered lines; verify control flow against the full source. */
525 /* Allocate memory from the heap. */
527 malloc (__malloc_size_t size)
530 __malloc_size_t block, blocks, lastblocks, start;
534 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
535 valid address you can realloc and free (though not dereference).
537 It turns out that some extant code (sunrpc, at least Ultrix's version)
538 expects `malloc (0)' to return non-NULL and breaks otherwise.
541 #ifdef HAVE_X_WINDOWS
542 /* there is at least one Xt bug where calloc(n,x) is blindly called
543 where n can be 0, and yet if 0 is returned, Xt barfs */
/* Round a zero-size request up to the smallest usable size. */
545 size = sizeof (struct list);
551 if (__malloc_hook != NULL)
552 return (*__malloc_hook) (size);
554 if (!__malloc_initialized)
558 #ifdef SUNOS_LOCALTIME_BUG
559 /* Workaround for localtime() allocating 8 bytes and writing 9 bug... */
564 if (size < sizeof (struct list))
565 size = sizeof (struct list);
567 /* Determine the allocation policy based on the request size. */
568 if (size <= BLOCKSIZE / 2)
570 /* Small allocation to receive a fragment of a block.
571 Determine the logarithm to base two of the fragment size. */
572 __malloc_size_t log = 1;
/* log = ceil(log2(size)); the loop body (++log) is elided here. */
574 while ((size /= 2) != 0)
577 /* Look in the fragment lists for a
578 free fragment of the desired size. */
579 next = _fraghead[log].next;
582 /* There are free fragments of this size.
583 Pop a fragment out of the fragment list and return it.
584 Update the block's nfree and first counters. */
585 result = (__ptr_t) next;
586 next->prev->next = next->next;
587 if (next->next != NULL)
588 next->next->prev = next->prev;
589 block = BLOCK (result);
/* If fragments remain, record the offset of the new first free one. */
590 if (--_heapinfo[block].busy.info.frag.nfree != 0)
591 _heapinfo[block].busy.info.frag.first = (unsigned long int)
592 ((unsigned long int) ((char *) next->next - (char *) NULL)
595 /* Update the statistics. */
597 _bytes_used += 1 << log;
599 _bytes_free -= 1 << log;
603 /* No free fragments of the desired size, so get a new block
604 and break it into fragments, returning the first. */
605 result = malloc (BLOCKSIZE);
609 /* Link all fragments but the first into the free list. */
610 for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
612 next = (struct list *) ((char *) result + (i << log));
613 next->next = _fraghead[log].next;
614 next->prev = &_fraghead[log];
615 next->prev->next = next;
616 if (next->next != NULL)
617 next->next->prev = next;
620 /* Initialize the nfree and first counters for this block. */
621 block = BLOCK (result);
622 _heapinfo[block].busy.type = log;
623 _heapinfo[block].busy.info.frag.nfree = i - 1;
624 _heapinfo[block].busy.info.frag.first = i - 1;
626 _chunks_free += (BLOCKSIZE >> log) - 1;
627 _bytes_free += BLOCKSIZE - (1 << log);
628 _bytes_used -= BLOCKSIZE - (1 << log);
633 /* Large allocation to receive one or more blocks.
634 Search the free list in a circle starting at the last place visited.
635 If we loop completely around without finding a large enough
636 space we will have to get more memory from the system. */
637 blocks = BLOCKIFY (size);
638 start = block = _heapindex;
639 while (_heapinfo[block].free.size < blocks)
641 block = _heapinfo[block].free.next;
644 /* Need to get more from the system. Check to see if
645 the new core will be contiguous with the final free
646 block; if so we don't need to get as much. */
647 block = _heapinfo[0].free.prev;
648 lastblocks = _heapinfo[block].free.size;
/* (*__morecore)(0) probes the current break without moving it. */
649 if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
650 (*__morecore) (0) == ADDRESS (block + lastblocks) &&
651 (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
653 /* Which block we are extending (the `final free
654 block' referred to above) might have changed, if
655 it got combined with a freed info table. */
656 block = _heapinfo[0].free.prev;
657 _heapinfo[block].free.size += (blocks - lastblocks);
658 _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
/* Contiguous extension impossible: get the whole request fresh. */
661 result = morecore (blocks * BLOCKSIZE);
664 block = BLOCK (result);
665 _heapinfo[block].busy.type = 0;
666 _heapinfo[block].busy.info.size = blocks;
668 _bytes_used += blocks * BLOCKSIZE;
673 /* At this point we have found a suitable free list entry.
674 Figure out how to remove what we need from the list. */
675 result = ADDRESS (block);
676 if (_heapinfo[block].free.size > blocks)
678 /* The block we found has a bit left over,
679 so relink the tail end back into the free list. */
680 _heapinfo[block + blocks].free.size
681 = _heapinfo[block].free.size - blocks;
682 _heapinfo[block + blocks].free.next
683 = _heapinfo[block].free.next;
684 _heapinfo[block + blocks].free.prev
685 = _heapinfo[block].free.prev;
686 _heapinfo[_heapinfo[block].free.prev].free.next
687 = _heapinfo[_heapinfo[block].free.next].free.prev
688 = _heapindex = block + blocks;
692 /* The block exactly matches our requirements,
693 so just remove it from the list. */
694 _heapinfo[_heapinfo[block].free.next].free.prev
695 = _heapinfo[block].free.prev;
696 _heapinfo[_heapinfo[block].free.prev].free.next
697 = _heapindex = _heapinfo[block].free.next;
/* Mark the claimed cluster busy and fix up the statistics. */
701 _heapinfo[block].busy.type = 0;
702 _heapinfo[block].busy.info.size = blocks;
704 _bytes_used += blocks * BLOCKSIZE;
705 _bytes_free -= blocks * BLOCKSIZE;
/* Underscore-prefixed aliases: some libc implementations call _malloc,
   _free and _realloc internally; route them to the GNU versions.
   NOTE(review): _free's body and the wrappers' braces/return-type lines
   are elided in this sampled dump. */
713 /* On some ANSI C systems, some libc functions call _malloc, _free
714 and _realloc. Make them use the GNU functions. */
716 __ptr_t _malloc (__malloc_size_t size);
718 _malloc (__malloc_size_t size)
720 return malloc (size);
723 void _free (__ptr_t ptr);
730 __ptr_t _realloc (__ptr_t ptr, __malloc_size_t size);
732 _realloc (__ptr_t ptr, __malloc_size_t size)
734 return realloc (ptr, size);
738 /* Free a block of memory allocated by `malloc'.
739 Copyright 1990, 1991, 1992, 1994 Free Software Foundation
740 Written May 1989 by Mike Haertel.
742 This library is free software; you can redistribute it and/or
743 modify it under the terms of the GNU Library General Public License as
744 published by the Free Software Foundation; either version 2 of the
745 License, or (at your option) any later version.
747 This library is distributed in the hope that it will be useful,
748 but WITHOUT ANY WARRANTY; without even the implied warranty of
749 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
750 Library General Public License for more details.
752 You should have received a copy of the GNU General Public License
753 along with this library; see the file COPYING. If not, write to
754 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
755 Boston, MA 02111-1307, USA.
757 The author may be reached (Email) at the address mike@ai.mit.edu,
758 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
/* free.c section: `_free_internal' returns PTR's storage to the heap
   without invoking __free_hook. Large blocks are linked back into the
   ordered circular free list, coalesced with adjacent clusters, and the
   tail of the heap is handed back to the system once FINAL_FREE_BLOCKS
   contiguous blocks accumulate at the top. Fragments are pushed onto
   the per-size list; when every fragment of a block is free the whole
   block is converted back to a large block and freed.
   NOTE(review): sampled dump -- braces, the `type' and `i' declarations,
   the PURE_DATA early-return, the switch(type) skeleton, and several
   statistics lines are elided between the numbered lines. */
760 #ifndef _MALLOC_INTERNAL
761 #define _MALLOC_INTERNAL
765 /* Debugging hook for free. */
766 void (*__free_hook) __P ((__ptr_t __ptr));
768 /* List of blocks allocated by memalign. */
769 struct alignlist *_aligned_blocks = NULL;
771 /* Return memory to the heap.
772 Like `free' but don't call a __free_hook if there is one. */
774 _free_internal (__ptr_t ptr)
777 __malloc_size_t block, blocks;
779 struct list *prev, *next;
/* type 0 = large block; positive = log2 of the fragment size. */
783 type = _heapinfo[block].busy.type;
787 /* Get as many statistics as early as we can. */
789 _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
790 _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
792 /* Find the free cluster previous to this one in the free list.
793 Start searching at the last block referenced; this may benefit
794 programs with locality of allocation. */
798 i = _heapinfo[i].free.prev;
/* Walk forward until i is the last free cluster below `block'. */
802 i = _heapinfo[i].free.next;
803 while (i > 0 && i < block);
804 i = _heapinfo[i].free.prev;
807 /* Determine how to link this block into the free list. */
808 if (block == i + _heapinfo[i].free.size)
810 /* Coalesce this block with its predecessor. */
811 _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
816 /* Really link this block back into the free list. */
817 _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
818 _heapinfo[block].free.next = _heapinfo[i].free.next;
819 _heapinfo[block].free.prev = i;
820 _heapinfo[i].free.next = block;
821 _heapinfo[_heapinfo[block].free.next].free.prev = block;
825 /* Now that the block is linked in, see if we can coalesce it
826 with its successor (by deleting its successor from the list
827 and adding in its size). */
828 if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
830 _heapinfo[block].free.size
831 += _heapinfo[_heapinfo[block].free.next].free.size;
832 _heapinfo[block].free.next
833 = _heapinfo[_heapinfo[block].free.next].free.next;
834 _heapinfo[_heapinfo[block].free.next].free.prev = block;
838 /* Now see if we can return stuff to the system. */
839 blocks = _heapinfo[block].free.size;
840 if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
841 && (*__morecore) (0) == ADDRESS (block + blocks))
843 __malloc_size_t bytes = blocks * BLOCKSIZE;
844 _heaplimit -= blocks;
/* Negative argument shrinks the break, returning core to the OS. */
845 (*__morecore) (-(int)bytes);
846 _heapinfo[_heapinfo[block].free.prev].free.next
847 = _heapinfo[block].free.next;
848 _heapinfo[_heapinfo[block].free.next].free.prev
849 = _heapinfo[block].free.prev;
850 block = _heapinfo[block].free.prev;
852 _bytes_free -= bytes;
855 /* Set the next search to begin at this block. */
860 /* Do some of the statistics. */
862 _bytes_used -= 1 << type;
864 _bytes_free += 1 << type;
866 /* Get the address of the first free fragment in this block. */
867 prev = (struct list *) ((char *) ADDRESS (block) +
868 (_heapinfo[block].busy.info.frag.first << type));
870 if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
872 /* If all fragments of this block are free, remove them
873 from the fragment list and free the whole block. */
875 for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
877 prev->prev->next = next;
879 next->prev = prev->prev;
/* Reclassify as a one-block large allocation, then free it. */
880 _heapinfo[block].busy.type = 0;
881 _heapinfo[block].busy.info.size = 1;
883 /* Keep the statistics accurate. */
885 _bytes_used += BLOCKSIZE;
886 _chunks_free -= BLOCKSIZE >> type;
887 _bytes_free -= BLOCKSIZE;
889 free (ADDRESS (block));
891 else if (_heapinfo[block].busy.info.frag.nfree != 0)
893 /* If some fragments of this block are free, link this
894 fragment into the fragment list after the first free
895 fragment of this block. */
896 next = (struct list *) ptr;
897 next->next = prev->next;
900 if (next->next != NULL)
901 next->next->prev = next;
902 ++_heapinfo[block].busy.info.frag.nfree;
906 /* No fragments of this block are free, so link this
907 fragment into the fragment list and announce that
908 it is the first free fragment of this block. */
909 prev = (struct list *) ptr;
910 _heapinfo[block].busy.info.frag.nfree = 1;
/* first = fragment index of ptr within its block. */
911 _heapinfo[block].busy.info.frag.first = (unsigned long int)
912 ((unsigned long int) ((char *) ptr - (char *) NULL)
913 % BLOCKSIZE >> type);
914 prev->next = _fraghead[type].next;
915 prev->prev = &_fraghead[type];
916 prev->prev->next = prev;
917 if (prev->next != NULL)
918 prev->next->prev = prev;
/* `free': public entry point. First releases any memalign bookkeeping
   slot for PTR, then dispatches to __free_hook if installed, otherwise
   to _free_internal.
   NOTE(review): the function header, the `l' declaration, braces, and
   the NULL-ptr / PURE_DATA guards are elided in this sampled dump. */
924 /* Return memory to the heap. */
938 for (l = _aligned_blocks; l != NULL; l = l->next)
939 if (l->aligned == ptr)
941 l->aligned = NULL; /* Mark the slot in the list as free. */
946 if (__free_hook != NULL)
947 (*__free_hook) (ptr);
949 _free_internal (ptr);
951 /* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
952 This file is part of the GNU C Library.
954 The GNU C Library is free software; you can redistribute it and/or
955 modify it under the terms of the GNU Library General Public License as
956 published by the Free Software Foundation; either version 2 of the
957 License, or (at your option) any later version.
959 The GNU C Library is distributed in the hope that it will be useful,
960 but WITHOUT ANY WARRANTY; without even the implied warranty of
961 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
962 Library General Public License for more details.
964 You should have received a copy of the GNU General Public License
965 along with this library; see the file COPYING. If not, write to
966 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
967 Boston, MA 02111-1307, USA. */
/* cfree.c section: `cfree' is a historical alias for `free'. Inside
   the GNU C library it is created with function_alias(); elsewhere it
   is declared and (in elided lines) defined to call free directly. */
969 #ifndef _MALLOC_INTERNAL
970 #define _MALLOC_INTERNAL
976 #include <ansidecl.h>
977 #include <gnu-stabs.h>
981 function_alias(cfree, free, void, (ptr),
982 DEFUN(cfree, (ptr), PTR ptr))
986 void cfree (__ptr_t ptr);
994 /* Change the size of a block allocated by `malloc'.
995 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
996 Written May 1989 by Mike Haertel.
998 This library is free software; you can redistribute it and/or
999 modify it under the terms of the GNU Library General Public License as
1000 published by the Free Software Foundation; either version 2 of the
1001 License, or (at your option) any later version.
1003 This library is distributed in the hope that it will be useful,
1004 but WITHOUT ANY WARRANTY; without even the implied warranty of
1005 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1006 Library General Public License for more details.
1008 You should have received a copy of the GNU General Public License
1009 along with this library; see the file COPYING. If not, write to
1010 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
1011 Boston, MA 02111-1307, USA.
1013 The author may be reached (Email) at the address mike@ai.mit.edu,
1014 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
/* realloc.c section: `realloc' resizes PTR's block. Dumped-heap
   (PURE_DATA) pointers are copied to fresh storage; large blocks shrink
   in place by splitting, stay put when the block count is unchanged, or
   are freed-then-reallocated (with an elaborate "unfree" recovery when
   the new allocation fails); fragments are reallocated unless the new
   size maps to the same power-of-two class.
   NOTE(review): sampled dump -- the return type, braces, `result'/`type'
   declarations, NULL checks after the inner malloc calls, the
   _free_internal call around line 1107, and the recovery-path frees
   near lines 1121-1124 are elided between the numbered lines. */
1016 #ifndef _MALLOC_INTERNAL
1017 #define _MALLOC_INTERNAL
1022 #define min(A, B) ((A) < (B) ? (A) : (B))
1025 /* Debugging hook for realloc. */
1026 __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
1028 /* Resize the given region to the new size, returning a pointer
1029 to the (possibly moved) region. This is optimized for speed;
1030 some benchmarks seem to indicate that greater compactness is
1031 achieved by unconditionally allocating and copying to a
1032 new region. This module has incestuous knowledge of the
1033 internals of both free and malloc. */
1035 realloc (__ptr_t ptr, __malloc_size_t size)
1039 __malloc_size_t block, blocks, oldlimit;
/* Dumped static-heap pointer: cannot resize in place, copy out.
   NOTE(review): copying `size' bytes reads past the old object when
   growing -- verify the guard on the elided lines around 1041-1042. */
1043 result = malloc (size);
1044 memcpy(result, ptr, size);
1053 else if (ptr == NULL)
1054 return malloc (size);
1056 if (__realloc_hook != NULL)
1057 return (*__realloc_hook) (ptr, size);
1059 block = BLOCK (ptr);
1061 type = _heapinfo[block].busy.type;
1065 /* Maybe reallocate a large block to a small fragment. */
1066 if (size <= BLOCKSIZE / 2)
1068 result = malloc (size);
1071 memcpy (result, ptr, size);
1072 _free_internal (ptr);
1077 /* The new size is a large allocation as well;
1078 see if we can hold it in place. */
1079 blocks = BLOCKIFY (size);
1080 if (blocks < _heapinfo[block].busy.info.size)
1082 /* The new size is smaller; return
1083 excess memory to the free list. */
1084 _heapinfo[block + blocks].busy.type = 0;
1085 _heapinfo[block + blocks].busy.info.size
1086 = _heapinfo[block].busy.info.size - blocks;
1087 _heapinfo[block].busy.info.size = blocks;
1088 /* We have just created a new chunk by splitting a chunk in two.
1089 Now we will free this chunk; increment the statistics counter
1090 so it doesn't become wrong when _free_internal decrements it. */
1092 _free_internal (ADDRESS (block + blocks));
1095 else if (blocks == _heapinfo[block].busy.info.size)
1096 /* No size change necessary. */
1100 /* Won't fit, so allocate a new region that will.
1101 Free the old region first in case there is sufficient
1102 adjacent free space to grow without moving. */
1103 blocks = _heapinfo[block].busy.info.size;
1104 /* Prevent free from actually returning memory to the system. */
1105 oldlimit = _heaplimit;
1108 _heaplimit = oldlimit;
1109 result = malloc (size);
1112 /* Now we're really in trouble. We have to unfree
1113 the thing we just freed. Unfortunately it might
1114 have been coalesced with its neighbors. */
1115 if (_heapindex == block)
1116 (void) malloc (blocks * BLOCKSIZE);
/* Re-claim the intervening space first so the original block is
   re-allocated at its old address, then release the placeholder. */
1119 __ptr_t previous = malloc ((block - _heapindex) * BLOCKSIZE);
1120 (void) malloc (blocks * BLOCKSIZE);
1126 memmove (result, ptr, blocks * BLOCKSIZE);
1131 /* Old size is a fragment; type is logarithm
1132 to base two of the fragment size. */
1133 if (size > (__malloc_size_t) (1 << (type - 1)) &&
1134 size <= (__malloc_size_t) (1 << type))
1135 /* The new size is the same kind of fragment. */
1139 /* The new size is different; allocate a new space,
1140 and copy the lesser of the new size and the old. */
1141 result = malloc (size);
1144 memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
1152 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1154 This library is free software; you can redistribute it and/or
1155 modify it under the terms of the GNU Library General Public License as
1156 published by the Free Software Foundation; either version 2 of the
1157 License, or (at your option) any later version.
1159 This library is distributed in the hope that it will be useful,
1160 but WITHOUT ANY WARRANTY; without even the implied warranty of
1161 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1162 Library General Public License for more details.
1164 You should have received a copy of the GNU General Public License
1165 along with this library; see the file COPYING. If not, write to
1166 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
1167 Boston, MA 02111-1307, USA.
1169 The author may be reached (Email) at the address mike@ai.mit.edu,
1170 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
1172 #ifndef _MALLOC_INTERNAL
1173 #define _MALLOC_INTERNAL
1177 /* Allocate an array of NMEMB elements each SIZE bytes long.
1178 The entire array is initialized to zeros. */
1180 calloc (__malloc_size_t nmemb, __malloc_size_t size)
1182 __ptr_t result = malloc (nmemb * size);
1185 (void) memset (result, 0, nmemb * size);
1189 /* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1190 This file is part of the GNU C Library.
1192 The GNU C Library is free software; you can redistribute it and/or modify
1193 it under the terms of the GNU General Public License as published by
1194 the Free Software Foundation; either version 2, or (at your option)
1197 The GNU C Library is distributed in the hope that it will be useful,
1198 but WITHOUT ANY WARRANTY; without even the implied warranty of
1199 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1200 GNU General Public License for more details.
1202 You should have received a copy of the GNU General Public License
1203 along with the GNU C Library; see the file COPYING. If not, write to
1204 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
1205 Boston, MA 02111-1307, USA. */
1207 #ifndef _MALLOC_INTERNAL
1208 #define _MALLOC_INTERNAL
1212 /* #ifndef __GNU_LIBRARY__ */
1216 #ifdef GMALLOC_NEEDS_SBRK_DECL
1217 /* some versions of OSF1 need this */
1218 extern __ptr_t __sbrk __P ((ssize_t increment));
1220 #ifdef __GNU_LIBRARY__
1221 /* It is best not to declare this and cast its result on foreign operating
1222 systems with potentially hostile include files. */
1223 #if !(defined(linux) && defined(sparc))
1224 extern __ptr_t __sbrk __P ((int increment));
1233 /* Allocate INCREMENT more bytes of data space,
1234    and return the start of data space, or NULL on errors.
1235    If INCREMENT is negative, shrink data space.  */
/* NOTE(review): this extract omits the return type and parameter line of
   __default_morecore as well as the surrounding preprocessor conditionals;
   presumably the full signature is
   `__ptr_t __default_morecore (__malloc_ptrdiff_t increment)' -- confirm
   against the complete source.  */
1237 __default_morecore (
/* The two __sbrk calls below appear to be alternative #if branches (the
   directives are elided here): one casts INCREMENT to ssize_t, the other
   to int, matching whichever __sbrk prototype the platform declares.  */
1250       __ptr_t result = (__ptr_t) __sbrk ((ssize_t) increment);
1252   __ptr_t result = (__ptr_t) __sbrk ((int) increment);
/* sbrk reports failure by returning (void *) -1, not NULL, so the failure
   value must be translated before returning to the caller.  */
1254   if (result == (__ptr_t) -1)
1258 /* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1260 This library is free software; you can redistribute it and/or
1261 modify it under the terms of the GNU Library General Public License as
1262 published by the Free Software Foundation; either version 2 of the
1263 License, or (at your option) any later version.
1265 This library is distributed in the hope that it will be useful,
1266 but WITHOUT ANY WARRANTY; without even the implied warranty of
1267 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1268 Library General Public License for more details.
1270 You should have received a copy of the GNU General Public License
1271 along with this library; see the file COPYING. If not, write to
1272 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
1273 Boston, MA 02111-1307, USA. */
1275 #ifndef _MALLOC_INTERNAL
1276 #define _MALLOC_INTERNAL
1281 memalign (__malloc_size_t alignment, __malloc_size_t size)
1284 unsigned long int adj;
1286 size = ((size + alignment - 1) / alignment) * alignment;
1288 result = malloc (size);
1291 adj = (unsigned long int) ((unsigned long int) ((char *) result -
1292 (char *) NULL)) % alignment;
1295 struct alignlist *l;
1296 for (l = _aligned_blocks; l != NULL; l = l->next)
1297 if (l->aligned == NULL)
1298 /* This slot is free. Use it. */
1302 l = (struct alignlist *) malloc (sizeof (struct alignlist));
1308 l->next = _aligned_blocks;
1309 _aligned_blocks = l;
1312 result = l->aligned = (char *) result + alignment - adj;