1 /* Block-relocating memory allocator.
2 Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
4 This file is part of XEmacs.
6 XEmacs is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2, or (at your option) any
11 XEmacs is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with XEmacs; see the file COPYING. If not, write to
18 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA.
21 Synched Up with: FSF 20.2 (non-mmap portion only)
26 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
27 rather than all of them. This means allowing for a possible
28 hole between the first bloc and the end of malloc storage. */
35 #include <unistd.h> /* for getpagesize() */
42 /* The important properties of this type are that 1) it's a pointer, and
43 2) arithmetic on it should work as if the object pointed to
44 has a size of 1. */
45 #if 0 /* Arithmetic on void* is a GCC extension. */
46 #ifdef __STDC__
47 typedef void *POINTER;
48 #else
49 typedef unsigned char *POINTER;
50 #endif
51 #endif /* 0 */
53 /* Unconditionally use unsigned char * for this. */
54 typedef unsigned char *POINTER;
56 #ifdef DOUG_LEA_MALLOC
61 #include "getpagesize.h"
64 void refill_memory_reserve (void);
66 #else /* Not emacs. */
70 typedef void *POINTER;
78 void init_ralloc (void);
80 #define NIL ((POINTER) 0)
83 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC)
85 /* A flag to indicate whether we have initialized ralloc yet. For
86 Emacs's sake, please do not make this local to malloc_init; on some
87 machines, the dumping procedure makes all static variables
88 read-only. On these machines, the word static is #defined to be
89 the empty string, meaning that r_alloc_initialized becomes an
90 automatic variable, and loses its value each time Emacs is started up. */
91 static int r_alloc_initialized = 0;
94 /* Declarations for working with the malloc, ralloc, and system breaks. */
96 /* Function to set the real break value. */
97 static POINTER (*real_morecore) (ptrdiff_t size);
99 /* The break value, as seen by malloc (). */
100 static POINTER virtual_break_value;
102 /* The break value, viewed by the relocatable blocs. */
103 static POINTER break_value;
105 /* This is the size of a page. We round memory requests to this boundary. */
106 static size_t page_size;
108 /* Whenever we get memory from the system, get this many extra bytes. This
109 must be a multiple of page_size. */
110 static int extra_bytes;
112 /* Macros for rounding. Note that rounding to any value is possible
113 by changing the definition of PAGE. */
114 #define PAGE (getpagesize ())
115 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
116 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
117 & ~(page_size - 1))
118 #define ROUND_TO_PAGE(addr) ((addr) & ~(page_size - 1))
120 #define MEM_ALIGN sizeof(double)
121 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
122 & ~(MEM_ALIGN - 1))
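/* A worked example of the rounding macros; a minimal sketch assuming
   page_size == 4096 and MEM_ALIGN == sizeof (double) == 8 (both vary
   by host): */
#if 0 /* illustration only */
static void
rounding_example (void)
{
  assert (ROUNDUP (1) == 4096);      /* pad a request to whole pages */
  assert (ROUNDUP (4096) == 4096);
  assert (ROUNDUP (4097) == 8192);
  assert (ALIGNED (8192) && ! ALIGNED (8193)); /* page-boundary test */
  assert (ROUND_TO_PAGE (8200) == 8192);  /* truncate to a boundary */
  assert (MEM_ROUNDUP (13) == 16);   /* scalar alignment, not pages */
}
#endif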
124 /* Data structures of heaps and blocs. */
126 /* The relocatable objects, or blocs, and the malloc data
127 both reside within one or more heaps.
128 Each heap contains malloc data, running from `start' to `bloc_start',
129 and relocatable objects, running from `bloc_start' to `free'.
131 Relocatable objects may relocate within the same heap
132 or may move into another heap; the heaps themselves may grow
135 We try to make just one heap and make it larger as necessary.
136 But sometimes we can't do that, because we can't get contiguous
137 space to add onto the heap. When that happens, we start a new heap. */
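/* Schematically, a single heap (addresses increase to the right):

   start           bloc_start                    free           end
     |  malloc data     |    relocatable blocs     |   unused     |
*/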
143 /* Start of memory range of this heap. */
145 /* End of memory range of this heap. */
147 /* Start of relocatable data in this heap. */
149 /* Start of unused space in this heap. */
151 /* First bloc in this heap. */
152 struct bp *first_bloc;
153 /* Last bloc in this heap. */
154 struct bp *last_bloc;
157 #define NIL_HEAP ((heap_ptr) 0)
158 #define HEAP_PTR_SIZE (sizeof (struct heap))
160 /* This is the first heap object.
161 If we need additional heap objects, each one resides at the beginning of
162 the space it covers. */
163 static struct heap heap_base;
165 /* Head and tail of the list of heaps. */
166 static heap_ptr first_heap, last_heap;
168 /* These structures are allocated in the malloc arena.
169 The linked list is kept in order of increasing '.data' members.
170 The data blocks abut each other; if b->next is non-nil, then
171 b->data + b->size == b->next->data.
173 An element with variable==NIL denotes a freed block, which has not yet
174 been collected. They may only appear while r_alloc_freeze_level > 0,
175 and will be freed when the arena is thawed. Currently, these blocs are
176 not reusable while the arena is frozen. Very inefficient. */
185 POINTER new_data; /* temporarily used for relocation */
186 struct heap *heap; /* Heap this bloc is in. */
189 #define NIL_BLOC ((bloc_ptr) 0)
190 #define BLOC_PTR_SIZE (sizeof (struct bp))
192 /* Head and tail of the list of relocatable blocs. */
193 static bloc_ptr first_bloc, last_bloc;
195 static int use_relocatable_buffers;
197 /* If >0, no relocation whatsoever takes place. */
198 static int r_alloc_freeze_level;
200 /* Obtain SIZE bytes of space. If enough space is not presently available
201 in our process reserve (i.e., page_break_value - break_value),
202 this means getting more page-aligned space from the system.
204 Return non-zero if all went well, or zero if we couldn't allocate
207 /* Functions to get and return memory from the system. */
209 /* Find the heap that ADDRESS falls within. */
212 find_heap (POINTER address)
216 for (heap = last_heap; heap; heap = heap->prev)
218 if (heap->start <= address && address <= heap->end)
225 /* Find SIZE bytes of space in a heap.
226 Try to get them at ADDRESS (which must fall within some heap's range)
227 if we can get that many within one heap.
229 If enough space is not presently available in our reserve, this means
230 getting more page-aligned space from the system. If the returned space
231 is not contiguous to the last heap, allocate a new heap, and append it
232 to the heap list.
233 obtain does not try to keep track of whether space is in use
234 or not in use. It just returns the address of SIZE bytes that
235 fall within a single heap. If you call obtain twice in a row
236 with the same arguments, you typically get the same value.
237 It's the caller's responsibility to keep
238 track of what space is in use.
240 Return the address of the space if all went well, or zero if we couldn't
241 allocate the memory. */
244 obtain (POINTER address, size_t size)
247 size_t already_available;
249 /* Find the heap that ADDRESS falls within. */
250 for (heap = last_heap; heap; heap = heap->prev)
252 if (heap->start <= address && address <= heap->end)
259 /* If we can't fit SIZE bytes in that heap,
260 try successive later heaps. */
261 while (heap && address + size > heap->end)
264 if (heap == NIL_HEAP)
266 address = heap->bloc_start;
269 /* If we can't fit them within any existing heap,
271 if (heap == NIL_HEAP)
273 POINTER new = (*real_morecore)(0);
276 already_available = (char *)last_heap->end - (char *)address;
278 if (new != last_heap->end)
280 /* Someone else called sbrk. Make a new heap. */
282 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
283 POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));
285 if ((*real_morecore) (bloc_start - new) != new)
288 new_heap->start = new;
289 new_heap->end = bloc_start;
290 new_heap->bloc_start = bloc_start;
291 new_heap->free = bloc_start;
292 new_heap->next = NIL_HEAP;
293 new_heap->prev = last_heap;
294 new_heap->first_bloc = NIL_BLOC;
295 new_heap->last_bloc = NIL_BLOC;
296 last_heap->next = new_heap;
297 last_heap = new_heap;
299 address = bloc_start;
300 already_available = 0;
303 /* Add space to the last heap (which we may have just created).
304 Get some extra, so we can come here less often. */
306 get = size + extra_bytes - already_available;
307 get = (char *) ROUNDUP ((char *)last_heap->end + get)
308 - (char *) last_heap->end;
310 if ((*real_morecore) (get) != last_heap->end)
313 last_heap->end += get;
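/* A worked example, assuming last_heap->end is page-aligned, with
   page_size == 4096 and extra_bytes == ROUNDUP (50000) == 53248 (as
   set in init_ralloc): a request for size == 10000 when
   already_available == 2000 grows the heap by
   ROUNDUP (10000 + 53248 - 2000) == 61440 bytes, so several further
   requests are served without calling morecore again. */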
320 /* Obtain SIZE bytes of space and return a pointer to the new area.
321 If we could not allocate the space, return zero. */
324 get_more_space (size_t size)
326 POINTER ptr = break_value;
334 /* Note that SIZE bytes of space have been relinquished by the process.
335 If SIZE is more than a page, return the space to the system. */
343 /* Add the amount of space beyond break_value
344 in all heaps which extend beyond break_value at all.
346 for (h = last_heap; h && break_value < h->end; h = h->prev)
348 excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
349 ? h->bloc_start : break_value);
352 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
354 /* Keep extra_bytes worth of empty space.
355 And don't free anything unless we can free at least extra_bytes. */
356 excess -= extra_bytes;
358 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
360 /* This heap should have no blocs in it. */
361 if (last_heap->first_bloc != NIL_BLOC
362 || last_heap->last_bloc != NIL_BLOC)
365 /* Return the last heap, with its header, to the system. */
366 excess = (char *)last_heap->end - (char *)last_heap->start;
367 last_heap = last_heap->prev;
368 last_heap->next = NIL_HEAP;
372 excess = (char *) last_heap->end
373 - (char *) ROUNDUP ((char *)last_heap->end - excess);
374 last_heap->end -= excess;
377 if ((*real_morecore) (- excess) == 0)
382 /* Return the total size in use by the relocating allocator,
383 above where malloc gets space. */
385 long r_alloc_size_in_use (void);
387 r_alloc_size_in_use (void)
389 return break_value - virtual_break_value;
392 /* The meat - allocating, freeing, and relocating blocs. */
395 /* Find the bloc referenced by the address in PTR. Returns a pointer
396 to that block. */
399 find_bloc (POINTER *ptr)
401 register bloc_ptr p = first_bloc;
403 while (p != NIL_BLOC)
405 if (p->variable == ptr && p->data == *ptr)
414 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
415 Returns a pointer to the new bloc, or zero if we couldn't allocate
416 memory for the new block. */
419 get_bloc (size_t size)
421 register bloc_ptr new_bloc;
422 register heap_ptr heap;
424 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
425 || ! (new_bloc->data = obtain (break_value, size)))
433 break_value = new_bloc->data + size;
435 new_bloc->size = size;
436 new_bloc->next = NIL_BLOC;
437 new_bloc->variable = (POINTER *) NIL;
438 new_bloc->new_data = 0;
440 /* Record in the heap that this space is in use. */
441 heap = find_heap (new_bloc->data);
442 heap->free = break_value;
444 /* Maintain the correspondence between heaps and blocs. */
445 new_bloc->heap = heap;
446 heap->last_bloc = new_bloc;
447 if (heap->first_bloc == NIL_BLOC)
448 heap->first_bloc = new_bloc;
450 /* Put this bloc on the doubly-linked list of blocs. */
453 new_bloc->prev = last_bloc;
454 last_bloc->next = new_bloc;
455 last_bloc = new_bloc;
459 first_bloc = last_bloc = new_bloc;
460 new_bloc->prev = NIL_BLOC;
466 /* Calculate new locations of blocs in the list beginning with BLOC,
467 relocating it to start at ADDRESS, in heap HEAP. If enough space is
468 not presently available in our reserve, call obtain for
471 Store the new location of each bloc in its new_data field.
472 Do not touch the contents of blocs or break_value. */
475 relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
477 register bloc_ptr b = bloc;
479 /* No need to ever call this if arena is frozen, bug somewhere! */
480 if (r_alloc_freeze_level)
485 /* If bloc B won't fit within HEAP,
486 move to the next heap and try again. */
487 while (heap && address + b->size > heap->end)
490 if (heap == NIL_HEAP)
492 address = heap->bloc_start;
495 /* If BLOC won't fit in any heap,
496 get enough new space to hold BLOC and all following blocs. */
497 if (heap == NIL_HEAP)
499 register bloc_ptr tb = b;
500 register size_t s = 0;
502 /* Add up the size of all the following blocs. */
503 while (tb != NIL_BLOC)
511 /* Get that space. */
512 address = obtain (address, s);
519 /* Record the new address of this bloc
520 and update where the next bloc can start. */
521 b->new_data = address;
531 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
532 This is necessary if we put the memory space of BLOC
533 before that of BEFORE. */
536 reorder_bloc (bloc_ptr bloc, bloc_ptr before)
540 /* Splice BLOC out from where it is. */
549 /* Splice it in before BEFORE. */
561 /* Update the records of which heaps contain which blocs, starting
562 with heap HEAP and bloc BLOC. */
565 update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
569 /* Initialize HEAP's status to reflect blocs before BLOC. */
570 if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
572 /* The previous bloc is in HEAP. */
573 heap->last_bloc = bloc->prev;
574 heap->free = bloc->prev->data + bloc->prev->size;
578 /* HEAP contains no blocs before BLOC. */
579 heap->first_bloc = NIL_BLOC;
580 heap->last_bloc = NIL_BLOC;
581 heap->free = heap->bloc_start;
584 /* Advance through blocs one by one. */
585 for (b = bloc; b != NIL_BLOC; b = b->next)
587 /* Advance through heaps, marking them empty,
588 till we get to the one that B is in. */
591 if (heap->bloc_start <= b->data && b->data <= heap->end)
594 /* We know HEAP is not null now,
595 because there has to be space for bloc B. */
596 heap->first_bloc = NIL_BLOC;
597 heap->last_bloc = NIL_BLOC;
598 heap->free = heap->bloc_start;
601 /* Update HEAP's status for bloc B. */
602 heap->free = b->data + b->size;
604 if (heap->first_bloc == NIL_BLOC)
605 heap->first_bloc = b;
607 /* Record that B is in HEAP. */
611 /* If there are any remaining heaps and no blocs left,
612 mark those heaps as empty. */
616 heap->first_bloc = NIL_BLOC;
617 heap->last_bloc = NIL_BLOC;
618 heap->free = heap->bloc_start;
623 /* Resize BLOC to SIZE bytes. This relocates the blocs
624 that come after BLOC in memory. */
627 resize_bloc (bloc_ptr bloc, size_t size)
634 /* No need to ever call this if arena is frozen, bug somewhere! */
635 if (r_alloc_freeze_level)
638 if (bloc == NIL_BLOC || size == bloc->size)
641 for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
643 if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
647 if (heap == NIL_HEAP)
650 old_size = bloc->size;
653 /* Note that bloc could be moved into the previous heap. */
654 address = (bloc->prev ? bloc->prev->data + bloc->prev->size
655 : first_heap->bloc_start);
658 if (heap->bloc_start <= address && address <= heap->end)
663 if (! relocate_blocs (bloc, heap, address))
665 bloc->size = old_size;
671 for (b = last_bloc; b != bloc; b = b->prev)
676 b->data = b->new_data;
680 memmove (b->new_data, b->data, b->size);
681 *b->variable = b->data = b->new_data;
687 bloc->data = bloc->new_data;
691 memmove (bloc->new_data, bloc->data, old_size);
692 memset (bloc->new_data + old_size, 0, size - old_size);
693 *bloc->variable = bloc->data = bloc->new_data;
698 for (b = bloc; b != NIL_BLOC; b = b->next)
703 b->data = b->new_data;
707 memmove (b->new_data, b->data, b->size);
708 *b->variable = b->data = b->new_data;
713 update_heap_bloc_correspondence (bloc, heap);
715 break_value = (last_bloc ? last_bloc->data + last_bloc->size
716 : first_heap->bloc_start);
720 /* Free BLOC from the chain of blocs, relocating any blocs above it
721 and returning BLOC->size bytes to the free area. */
724 free_bloc (bloc_ptr bloc)
726 heap_ptr heap = bloc->heap;
728 if (r_alloc_freeze_level)
730 bloc->variable = (POINTER *) NIL;
734 resize_bloc (bloc, 0);
736 if (bloc == first_bloc && bloc == last_bloc)
738 first_bloc = last_bloc = NIL_BLOC;
740 else if (bloc == last_bloc)
742 last_bloc = bloc->prev;
743 last_bloc->next = NIL_BLOC;
745 else if (bloc == first_bloc)
747 first_bloc = bloc->next;
748 first_bloc->prev = NIL_BLOC;
752 bloc->next->prev = bloc->prev;
753 bloc->prev->next = bloc->next;
756 /* Update the records of which blocs are in HEAP. */
757 if (heap->first_bloc == bloc)
759 if (bloc->next != 0 && bloc->next->heap == heap)
760 heap->first_bloc = bloc->next;
762 heap->first_bloc = heap->last_bloc = NIL_BLOC;
764 if (heap->last_bloc == bloc)
766 if (bloc->prev != 0 && bloc->prev->heap == heap)
767 heap->last_bloc = bloc->prev;
769 heap->first_bloc = heap->last_bloc = NIL_BLOC;
776 /* Interface routines. */
778 /* Obtain SIZE bytes of storage from the free pool, or the system, as
779 necessary. If relocatable blocs are in use, this means relocating
780 them. This function gets plugged into the GNU malloc's __morecore
783 We provide hysteresis, never relocating by less than extra_bytes.
785 If we're out of memory, we should return zero, to imitate the other
786 __morecore hook values - in particular, __default_morecore in the
787 GNU malloc package. */
789 POINTER r_alloc_sbrk (ptrdiff_t size);
791 r_alloc_sbrk (ptrdiff_t size)
796 if (! r_alloc_initialized)
799 if (! use_relocatable_buffers)
800 return (*real_morecore) (size);
803 return virtual_break_value;
807 /* Allocate a page-aligned space. GNU malloc would reclaim the
808 extra space if we passed an unaligned one. But we cannot
809 always find a space which is contiguous to the previous one. */
810 POINTER new_bloc_start;
811 heap_ptr h = first_heap;
812 size_t get = ROUNDUP (size);
814 address = (POINTER) ROUNDUP (virtual_break_value);
816 /* Search the list upward for a heap which is large enough. */
817 while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
822 address = (POINTER) ROUNDUP (h->start);
825 /* If not found, obtain more space. */
828 get += extra_bytes + page_size;
830 if (! obtain (address, get))
833 if (first_heap == last_heap)
834 address = (POINTER) ROUNDUP (virtual_break_value);
836 address = (POINTER) ROUNDUP (last_heap->start);
840 new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);
842 if (first_heap->bloc_start < new_bloc_start)
844 /* This is no clean solution - no idea how to do it better. */
845 if (r_alloc_freeze_level)
848 /* There is a bug here: if the above obtain call succeeded, but the
849 relocate_blocs call below does not succeed, we need to free
850 the memory that we got with obtain. */
852 /* Move all blocs upward. */
853 if (! relocate_blocs (first_bloc, h, new_bloc_start))
856 /* Note that (POINTER)(h+1) <= new_bloc_start since
857 get >= page_size, so the following does not destroy the heap
858 header. */
859 for (b = last_bloc; b != NIL_BLOC; b = b->prev)
861 memmove (b->new_data, b->data, b->size);
862 *b->variable = b->data = b->new_data;
865 h->bloc_start = new_bloc_start;
867 update_heap_bloc_correspondence (first_bloc, h);
871 /* Give up managing heaps below the one the new
872 virtual_break_value points to. */
873 first_heap->prev = NIL_HEAP;
874 first_heap->next = h->next;
875 first_heap->start = h->start;
876 first_heap->end = h->end;
877 first_heap->free = h->free;
878 first_heap->first_bloc = h->first_bloc;
879 first_heap->last_bloc = h->last_bloc;
880 first_heap->bloc_start = h->bloc_start;
882 if (first_heap->next)
883 first_heap->next->prev = first_heap;
885 last_heap = first_heap;
888 memset (address, 0, size);
892 EMACS_INT excess = (char *)first_heap->bloc_start
893 - ((char *)virtual_break_value + size);
895 address = virtual_break_value;
897 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
899 excess -= extra_bytes;
900 first_heap->bloc_start
901 = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);
903 relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);
905 for (b = first_bloc; b != NIL_BLOC; b = b->next)
907 memmove (b->new_data, b->data, b->size);
908 *b->variable = b->data = b->new_data;
912 if ((char *)virtual_break_value + size < (char *)first_heap->start)
914 /* We found additional space below the first heap. */
915 first_heap->start = (POINTER) ((char *)virtual_break_value + size);
919 virtual_break_value = (POINTER) ((char *)address + size);
920 break_value = (last_bloc
921 ? last_bloc->data + last_bloc->size
922 : first_heap->bloc_start);
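/* The contract mirrors sbrk(): positive SIZE grows the virtual break
   (relocating blocs out of the way), negative SIZE shrinks it, zero
   queries it. A minimal caller sketch (hypothetical; in practice
   malloc itself calls this through __morecore): */
#if 0 /* illustration only */
static void
sbrk_example (void)
{
  POINTER brk = r_alloc_sbrk (0);      /* query the virtual break */
  POINTER chunk = r_alloc_sbrk (4096); /* grow; blocs above may move */
  if (chunk == 0)
    return;                            /* out of memory, as with sbrk */
  r_alloc_sbrk (-4096);                /* give the space back */
}
#endif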
929 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to
930 the data is returned in *PTR. PTR is thus the address of some variable
931 which will use the data area.
933 The allocation of 0 bytes is valid.
934 In case r_alloc_freeze is set, a best fit of unused blocs could be done
935 before allocating a new area. Not yet done.
937 If we can't allocate the necessary memory, set *PTR to zero, and
940 POINTER r_alloc (POINTER *ptr, size_t size);
942 r_alloc (POINTER *ptr, size_t size)
946 if (! r_alloc_initialized)
949 new_bloc = get_bloc (size);
952 new_bloc->variable = ptr;
953 *ptr = new_bloc->data;
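/* Typical use, a minimal sketch: the caller hands over the address of
   the variable that will hold the bloc, so the allocator can update it
   whenever the bloc relocates: */
#if 0 /* illustration only */
static void
r_alloc_example (void)
{
  POINTER buf;
  if (r_alloc (&buf, 8192) == 0)
    return;            /* allocation failed; buf was set to zero */
  /* Use buf, but re-read it after anything that can call
     r_alloc_sbrk: the bloc may relocate and buf be updated. */
  r_alloc_free (&buf); /* release the bloc; buf is zeroed */
}
#endif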
961 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
962 Store 0 in *PTR to show there's no block allocated. */
964 void r_alloc_free (POINTER *ptr);
966 r_alloc_free (POINTER *ptr)
968 register bloc_ptr dead_bloc;
970 if (! r_alloc_initialized)
973 dead_bloc = find_bloc (ptr);
974 if (dead_bloc == NIL_BLOC)
977 free_bloc (dead_bloc);
981 refill_memory_reserve ();
985 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
986 Do this by shifting all blocks above this one up in memory, unless
987 SIZE is less than or equal to the current bloc size, in which case
990 In case r_alloc_freeze is set, a new bloc is allocated, and the
991 memory copied to it. Not very efficient. We could traverse the
992 bloc_list for a best fit of free blocs first.
994 Change *PTR to reflect the new bloc, and return this value.
996 If more memory cannot be allocated, then leave *PTR unchanged, and
999 POINTER r_re_alloc (POINTER *ptr, size_t size);
1001 r_re_alloc (POINTER *ptr, size_t size)
1003 register bloc_ptr bloc;
1005 if (! r_alloc_initialized)
1009 return r_alloc (ptr, size);
1013 return r_alloc (ptr, 0);
1016 bloc = find_bloc (ptr);
1017 if (bloc == NIL_BLOC)
1020 if (size < bloc->size)
1022 /* Wouldn't it be useful to actually resize the bloc here? */
1023 /* I think so too, but not if it's too expensive... */
1024 if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
1025 && r_alloc_freeze_level == 0)
1027 resize_bloc (bloc, MEM_ROUNDUP (size));
1028 /* Never mind if this fails, just do nothing... */
1029 /* It *should* be infallible! */
1032 else if (size > bloc->size)
1034 if (r_alloc_freeze_level)
1037 new_bloc = get_bloc (MEM_ROUNDUP (size));
1040 new_bloc->variable = ptr;
1041 *ptr = new_bloc->data;
1042 bloc->variable = (POINTER *) NIL;
1049 if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
1056 /* Disable relocations, after making room for at least SIZE bytes
1057 of non-relocatable heap if possible. The relocatable blocs are
1058 guaranteed to hold still until thawed, even if this means that
1059 malloc must return a null pointer. */
1061 void r_alloc_freeze (long size);
1063 r_alloc_freeze (long size)
1065 if (! r_alloc_initialized)
1068 /* If already frozen, we can't make any more room, so don't try. */
1069 if (r_alloc_freeze_level > 0)
1071 /* If we can't get the amount requested, half is better than nothing. */
1072 while (size > 0 && r_alloc_sbrk (size) == 0)
1074 ++r_alloc_freeze_level;
1076 r_alloc_sbrk (-size);
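/* Freeze and thaw calls nest and must be paired; a minimal sketch: */
#if 0 /* illustration only */
static void
freeze_example (void)
{
  r_alloc_freeze (65536); /* make room first, then pin every bloc */
  /* ... code that holds raw pointers into bloc data ... */
  r_alloc_thaw ();        /* unpin; blocs freed meanwhile get reclaimed */
}
#endif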
1079 void r_alloc_thaw (void);
1084 if (! r_alloc_initialized)
1087 if (--r_alloc_freeze_level < 0)
1090 /* This frees all unused blocs. It is not too inefficient, as the resize
1091 and memmove are done only once. Afterwards, all unreferenced blocs are
1092 already shrunk to zero size. */
1093 if (!r_alloc_freeze_level)
1095 bloc_ptr *b = &first_bloc;
1097 if (!(*b)->variable)
1105 /* The hook `malloc' uses for the function which gets more space
1106 from the system. */
1107 #ifndef DOUG_LEA_MALLOC
1108 extern POINTER (*__morecore) (ptrdiff_t size);
1111 /* Initialize various things for memory allocation. */
1116 if (r_alloc_initialized)
1119 r_alloc_initialized = 1;
1120 real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
1123 (__typeof__ (__morecore))
1127 first_heap = last_heap = &heap_base;
1128 first_heap->next = first_heap->prev = NIL_HEAP;
1129 first_heap->start = first_heap->bloc_start
1130 = virtual_break_value = break_value = (*real_morecore) (0);
1131 if (break_value == NIL)
1135 extra_bytes = ROUNDUP (50000);
1137 #ifdef DOUG_LEA_MALLOC
1138 mallopt (M_TOP_PAD, 64 * 4096);
1140 #if 0 /* Hasn't been synched yet */
1141 /* Give GNU malloc's morecore some hysteresis
1142 so that we move all the relocatable blocks much less often. */
1143 __malloc_extra_blocks = 64;
1147 first_heap->end = (POINTER) ROUNDUP (first_heap->start);
1149 /* The extra call to real_morecore guarantees that the end of the
1150 address space is a multiple of page_size, even if page_size is
1151 not really the page size of the system running the binary in
1152 which page_size is stored. This allows a binary to be built on a
1153 system with one page size and run on a system with a smaller page
1154 size. */
1155 (*real_morecore) (first_heap->end - first_heap->start);
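/* For example, a binary dumped on a host with 8192-byte pages stores
   page_size == 8192; run on a 4096-byte-page host, the call above
   still advances the real break to the stored (larger) boundary, so
   the end of the address space stays a multiple of the dumped
   page_size. */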
1157 /* Clear the rest of the last page; this memory is in our address space
1158 even though it is after the sbrk value. */
1159 /* Doubly true, with the additional call that explicitly adds the
1160 rest of that page to the address space. */
1161 memset (first_heap->start, 0, first_heap->end - first_heap->start);
1162 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
1163 use_relocatable_buffers = 1;
1166 #if defined (emacs) && defined (DOUG_LEA_MALLOC)
1168 /* Reinitialize the morecore hook variables after restarting a dumped
1169 Emacs. This is needed when using Doug Lea's malloc from GNU libc. */
1170 void r_alloc_reinit (void);
1172 r_alloc_reinit (void)
1174 /* Only do this if the hook has been reset, so that we don't get an
1175 infinite loop, in case Emacs was linked statically. */
1176 if ( (POINTER (*) (ptrdiff_t)) __morecore != r_alloc_sbrk)
1178 real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
1181 (__typeof__ (__morecore))
1190 r_alloc_check (void)
1196 if (!r_alloc_initialized)
1199 assert (first_heap);
1200 assert (last_heap->end <= (POINTER) sbrk (0));
1201 assert ((POINTER) first_heap < first_heap->start);
1202 assert (first_heap->start <= virtual_break_value);
1203 assert (virtual_break_value <= first_heap->end);
1205 for (h = first_heap; h; h = h->next)
1207 assert (h->prev == ph);
1208 assert ((POINTER) ROUNDUP (h->end) == h->end);
1209 #if 0 /* ??? The code in ralloc.c does not really try to ensure
1210 the heap start has any sort of alignment.
1211 Perhaps it should. */
1212 assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
1214 assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
1215 assert (h->start <= h->bloc_start && h->bloc_start <= h->end);
1219 assert (ph->end < h->start);
1220 assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
1223 if (h->bloc_start <= break_value && break_value <= h->end)
1230 assert (last_heap == ph);
1232 for (b = first_bloc; b; b = b->next)
1234 assert (b->prev == pb);
1235 assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
1236 assert ((size_t) MEM_ROUNDUP (b->size) == b->size);
1239 for (h = first_heap; h; h = h->next)
1241 if (h->bloc_start <= b->data && b->data + b->size <= h->end)
1248 if (pb && pb->data + pb->size != b->data)
1250 assert (ph && b->data == h->bloc_start);
1253 if (ph->bloc_start <= pb->data
1254 && pb->data + pb->size <= ph->end)
1256 assert (pb->data + pb->size + b->size > ph->end);
1261 assert (ph->bloc_start + b->size > ph->end);
1269 assert (last_bloc == pb);
1272 assert (last_bloc->data + last_bloc->size == break_value);
1274 assert (first_heap->bloc_start == break_value);
1281 #else /* HAVE_MMAP */
1284 A relocating allocator built using the mmap(2) facility available
1285 in some OSes. Based on another version written by Paul Flinders,
1286 from which code (and comments) are snarfed.
1288 The OS should support mmap() with MAP_ANONYMOUS attribute, or have
1289 /dev/zero. It should support private memory mapping.
1291 Paul Flinders wrote a version which works well for systems that
1292 allow callers to specify (virtual) addresses to mmap().
1293 Unfortunately, such a scheme doesn't work for certain systems like
1294 HP-UX that have a system-wide virtual->real address map, and
1295 consequently impose restrictions on the virtual address values
1298 NB: The mapping scheme in HP-UX is motivated by the inverted page
1299 table design in some HP processors.
1301 This alternate implementation allows for the addresses to be
1302 optionally chosen by the system. Fortunately, buffer allocation
1303 doesn't insist upon the contiguous memory which Flinders' scheme
1304 provides and this one doesn't.
1306 We don't really provide for hysteresis here, but add some metering
1307 to monitor how poorly the allocator actually works. See the
1308 documentation for `mmap-hysteresis'.
1310 This implementation actually cycles through the blocks allocated
1311 via mmap() and only sends a pointer to free() if it wasn't one of them.
1312 Unfortunately, this is O(n) in the number of mmapped blocks. (Not
1313 really, as we have a hash table which tries to reduce the cost.)
1314 Also, this dereferences the pointer passed, so it would cause a
1315 segfault if garbage was passed to it. */
1318 #include <sys/mman.h>
1321 typedef void *VM_ADDR; /* VM addresses */
1322 static const VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
1324 /* Configuration for relocating allocator. */
1326 /* #define MMAP_GENERATE_ADDRESSES */
1327 /* Define this if you want Emacs to manage the address table.
1328 It is not recommended unless you have major problems with the
1329 default scheme, which allows the OS to pick addresses. */
1331 /* USELESS_LOWER_ADDRESS_BITS defines the number of bits which can be
1332 discarded while computing the hash, as they're always zero. The
1333 default is appropriate for a page size of 4096 bytes. */
1335 #define USELESS_LOWER_ADDRESS_BITS 12
1338 /* Size of hash table for inverted VM_ADDR->MMAP_HANDLE lookup */
1340 #define MHASH_PRIME 89
1343 /* Whether we want to enable metering of some ralloc performance.
1344 This incurs a constant penalty for each mmap operation. */
1346 #define MMAP_METERING
1349 /* Rename the following to protect against some smartness elsewhere.
1350 We need access to the allocator used for non-mmap allocation
1351 elsewhere, in case we get passed a handle that we didn't allocate
1352 ourselves. Currently, this default allocator is also used to
1353 maintain local structures for relocatable blocks. */
1355 #define UNDERLYING_MALLOC malloc
1356 #define UNDERLYING_FREE free
1357 #define UNDERLYING_REALLOC realloc
1359 /* MAP_ADDRCHOICE_FLAG is set to MAP_FIXED if MMAP_GENERATE_ADDRESSES
1360 is defined, and MAP_VARIABLE otherwise. Some losing systems don't
1361 define the _FIXED/_VARIABLE flags, in which case it is set to 0 */
1363 #ifdef MMAP_GENERATE_ADDRESSES
1365 # define MAP_ADDRCHOICE_FLAG MAP_FIXED
1367 #else /* !MMAP_GENERATE_ADDRESSES */
1368 # ifdef MAP_VARIABLE
1369 # define MAP_ADDRCHOICE_FLAG MAP_VARIABLE
1371 #endif /* MMAP_GENERATE_ADDRESSES */
1374 #ifndef MAP_ADDRCHOICE_FLAG
1375 # define MAP_ADDRCHOICE_FLAG 0
1376 #endif /* MAP_ADDRCHOICE_FLAG */
1378 #ifdef MAP_ANONYMOUS
1379 # define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG | MAP_ANONYMOUS)
1381 # define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG)
1382 #endif /* MAP_ANONYMOUS */
1385 /* (ptf): A flag to indicate whether we have initialized ralloc yet. For
1386 Emacs's sake, please do not make this local to malloc_init; on some
1387 machines, the dumping procedure makes all static variables
1388 read-only. On these machines, the word static is #defined to be
1389 the empty string, meaning that r_alloc_initialized becomes an
1390 automatic variable, and loses its value each time Emacs is started up.
1392 If we're using mmap this flag has three possible values
1393 0 - initial value
1394 1 - Normal value when running temacs. In this case buffers
1395 are allocated using malloc so that any data that they
1396 contain becomes part of the undumped executable.
1397 2 - Normal value when running emacs */
1398 static int r_alloc_initialized = 0;
1400 /* (ptf): Macros for rounding. Note that rounding to any value is possible
1401 by changing the definition of PAGE. */
1402 #define PAGE (getpagesize ())
1403 #define PAGES_FOR(size) (((unsigned long int) (size) + page_size - 1)/page_size)
1404 #define ROUNDUP(size) ((unsigned long int)PAGES_FOR(size)*page_size)
1407 /* DEV_ZERO_FD is -1 normally, but for systems without MAP_ANONYMOUS
1408 it points to a file descriptor opened on /dev/zero. */
1410 static int DEV_ZERO_FD = -1;
1413 /* We actually need a data structure that can be usefully structured
1414 based on the VM address, and allows an ~O(1) lookup on an arbitrary
1415 address, i.e. a hash table. Maybe the XEmacs hash table can be
1416 coaxed enough. At the moment, we use lookup on a hash table to
1417 decide whether to do an O(n) search on the malloced block list.
1418 Addresses are hashed to a bucket modulo MHASH_PRIME. */
1421 /* We settle for a standard doubly-linked-list. The dynarr type isn't
1422 very amenable to deletion of items in the middle, so we conjure up
1423 yet another stupid data structure. The structure is maintained as a
1424 ring, and the singleton ring has the sole element as its left and
1425 right neighbours. */
1427 static void init_MHASH_table (void); /* Forward reference */
1429 typedef struct alloc_dll
1431 size_t size; /* #bytes currently in use */
1432 size_t space_for; /* #bytes we really have */
1433 POINTER* aliased_address; /* Address of aliased variable, to tweak if relocating */
1434 VM_ADDR vm_addr; /* VM address returned by mmap */
1435 struct alloc_dll *left; /* Left link in circular doubly linked list */
1436 struct alloc_dll *right;
1439 static MMAP_HANDLE mmap_start = 0; /* Head of linked list */
1440 static size_t page_size = 0; /* Size of VM pages */
1441 static Fixnum mmap_hysteresis; /* Logically a "size_t" */
1443 /* Get a new handle for a fresh block. */
1445 new_mmap_handle (size_t nsiz)
1447 MMAP_HANDLE h = (MMAP_HANDLE) UNDERLYING_MALLOC( sizeof (struct alloc_dll));
1448 if ( h == 0) return 0;
1450 if (mmap_start == 0)
1452 init_MHASH_table ();
1453 mmap_start = h; mmap_start->left = h; mmap_start->right = h;
1456 MMAP_HANDLE prev = mmap_start->left;
1457 MMAP_HANDLE nex = mmap_start;
1459 /* Four pointers need fixing. */
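#if 0 /* A sketch of that four-pointer fix (assumed, not verbatim):
         splice H in between PREV and NEX, the standard insertion
         into a circular doubly-linked list. */
h->left = prev;
h->right = nex;
prev->right = h;
nex->left = h;
#endif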
1468 /* Find a handle given the aliased address using linear search. */
1470 find_mmap_handle_lsearch (POINTER *alias)
1472 MMAP_HANDLE h = mmap_start;
1473 if (h == 0) return 0;
1475 if (h->aliased_address == alias && *alias == h->vm_addr)
1478 } while( h != mmap_start );
1479 return 0; /* Bogus alias passed. */
1482 /* Free a handle. */
1484 free_mmap_handle (MMAP_HANDLE h)
1486 MMAP_HANDLE prev = h->left;
1487 MMAP_HANDLE nex = h->right;
1488 if (prev == h || nex == h) /* In fact, this should be && */
1489 { /* We're the singleton dll */
1490 UNDERLYING_FREE( h ); /* Free the sole item */
1491 mmap_start = 0; return;
1493 else if (h == mmap_start)
1495 mmap_start = nex; /* Make sure mmap_start isn't bogus. */
1499 UNDERLYING_FREE( h );
1502 /* A simple hash table to speed up the inverted lookup of
1503 VM_ADDR->MMAP_HANDLE. We maintain the number of hits for a
1504 particular bucket. We invalidate a hash table entry during block
1505 deletion if the hash has cached the deleted block's address. */
1507 /* Simple hash check. */
1508 static struct {
1509 int n_hits; /* How many addresses map to this? */
1510 MMAP_HANDLE handle; /* What is the current handle? */
1511 VM_ADDR addr; /* What is its VM address? */
1512 } MHASH_HITS[ MHASH_PRIME ];
1515 init_MHASH_table (void)
1518 for (; i < MHASH_PRIME; i++)
1520 MHASH_HITS[i].n_hits = 0;
1521 MHASH_HITS[i].addr = 0;
1522 MHASH_HITS[i].handle = 0;
1526 /* Compute the hash value for an address. */
1528 MHASH (VM_ADDR addr)
1530 #if (LONGBITS == 64)
1531 unsigned long int addr_shift = (unsigned long int)(addr) >> USELESS_LOWER_ADDRESS_BITS;
1533 unsigned int addr_shift = (unsigned int)(addr) >> USELESS_LOWER_ADDRESS_BITS;
1535 int hval = addr_shift % MHASH_PRIME; /* We could have addresses which are -ve
1536 when converted to signed ints */
1537 return ((hval >= 0) ? hval : MHASH_PRIME + hval);
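/* Worked example: with 4096-byte pages the low 12 bits are always
   zero, so MHASH ((VM_ADDR) 0x8000000) == (0x8000000 >> 12) % 89
   == 32768 % 89 == 16. */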
1540 /* Add a VM address with its corresponding handle to the table. */
1542 MHASH_ADD (VM_ADDR addr, MMAP_HANDLE h)
1544 int kVal = MHASH( addr );
1545 if (MHASH_HITS[kVal].n_hits++ == 0)
1546 { /* Only overwrite the table if there were no hits so far. */
1547 MHASH_HITS[kVal].addr = addr;
1548 MHASH_HITS[kVal].handle = h;
1552 /* Delete a VM address entry from the hash table. */
1554 MHASH_DEL (VM_ADDR addr)
1556 int kVal = MHASH( addr );
1557 MHASH_HITS[kVal].n_hits--;
1558 if (addr == MHASH_HITS[kVal].addr)
1560 MHASH_HITS[kVal].addr = 0; /* Invalidate cache. */
1561 MHASH_HITS[kVal].handle = 0;
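/* Thus if two live blocks hash to the same bucket, n_hits is 2 while
   the cached (addr, handle) pair still belongs to whichever block was
   added first; lookups on that bucket fall back to the linear
   search. */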
1565 /* End of hash buckets */
1567 /* Metering malloc performance. */
1568 #ifdef MMAP_METERING
1569 /* If we're metering, we introduce some extra symbols to aid the noble
1570 cause of bloating XEmacs core size. */
1572 static Lisp_Object Qmmap_times_mapped;
1573 static Lisp_Object Qmmap_pages_mapped;
1574 static Lisp_Object Qmmap_times_unmapped;
1575 static Lisp_Object Qmmap_times_remapped;
1576 static Lisp_Object Qmmap_didnt_copy;
1577 static Lisp_Object Qmmap_pages_copied;
1578 static Lisp_Object Qmmap_average_bumpval;
1579 static Lisp_Object Qmmap_wastage;
1580 static Lisp_Object Qmmap_live_pages;
1581 static Lisp_Object Qmmap_addr_looked_up;
1582 static Lisp_Object Qmmap_hash_worked;
1583 static Lisp_Object Qmmap_addrlist_size;
1585 #define M_Map 0 /* How many times allocated? */
1586 #define M_Pages_Map 1 /* How many pages allocated? */
1587 #define M_Unmap 2 /* How many times freed? */
1588 #define M_Remap 3 /* How many times increased in size? */
1589 #define M_Didnt_Copy 4 /* How many times didn't need to copy? */
1590 #define M_Copy_Pages 5 /* Total # pages copied */
1591 #define M_Average_Bumpval 6 /* Average bump value */
1592 #define M_Wastage 7 /* Remaining (unused space) */
1593 #define M_Live_Pages 8 /* #live pages */
1594 #define M_Address_Lookup 9 /* How many times did we need to check if an addr is in the block? */
1595 #define M_Hash_Worked 10 /* How many times did the simple hash check work? */
1596 #define M_Addrlist_Size 11 /* What is the size of the XEmacs memory map? */
1598 #define N_Meterables 12 /* Total number of meterables */
1599 #define MEMMETER(x) {x;}
1600 #define MVAL(x) (meter[x])
1601 #define MLVAL(x) (make_int (meter[x]))
1602 static int meter[N_Meterables];
1604 DEFUN ("mmap-allocator-status", Fmmap_allocator_status, 0, 0, 0, /*
1605 Return some information about the mmap-based allocator.
1607 mmap-times-mapped: number of times r_alloc was called.
1608 mmap-pages-mapped: number of pages mapped by r_alloc calls only.
1609 mmap-times-unmapped: number of times r_free was called.
1610 mmap-times-remapped: number of times r_re_alloc was called.
1611 mmap-didnt-copy: number of times re-alloc did NOT have to move the block.
1612 mmap-pages-copied: total number of pages copied.
1613 mmap-average-bumpval: average increase in size demanded to re-alloc.
1614 mmap-wastage: total number of bytes allocated, but not currently in use.
1615 mmap-live-pages: total number of pages live.
1616 mmap-addr-looked-up: total number of times needed to check if addr is in block.
1617 mmap-hash-worked: total number of times the simple hash check worked.
1618 mmap-addrlist-size: number of entries in address picking list.
1622 Lisp_Object result = Qnil;
1624 result = cons3 (Qmmap_addrlist_size, MLVAL (M_Addrlist_Size), result);
1625 result = cons3 (Qmmap_hash_worked, MLVAL (M_Hash_Worked), result);
1626 result = cons3 (Qmmap_addr_looked_up, MLVAL (M_Address_Lookup), result);
1627 result = cons3 (Qmmap_live_pages, MLVAL (M_Live_Pages), result);
1628 result = cons3 (Qmmap_wastage, MLVAL (M_Wastage), result);
1629 result = cons3 (Qmmap_average_bumpval,MLVAL (M_Average_Bumpval), result);
1630 result = cons3 (Qmmap_pages_copied, MLVAL (M_Copy_Pages), result);
1631 result = cons3 (Qmmap_didnt_copy, MLVAL (M_Didnt_Copy), result);
1632 result = cons3 (Qmmap_times_remapped, MLVAL (M_Remap), result);
1633 result = cons3 (Qmmap_times_unmapped, MLVAL (M_Unmap), result);
1634 result = cons3 (Qmmap_pages_mapped, MLVAL (M_Pages_Map), result);
1635 result = cons3 (Qmmap_times_mapped, MLVAL (M_Map), result);
1640 #else /* !MMAP_METERING */
1645 #endif /* MMAP_METERING */
1648 find_mmap_handle (POINTER *alias)
1650 int kval = MHASH( *alias );
1651 MEMMETER( MVAL(M_Address_Lookup)++ )
1652 switch( MHASH_HITS[kval].n_hits)
1655 MEMMETER( MVAL( M_Hash_Worked )++ )
1659 if (*alias == MHASH_HITS[kval].addr)
1661 MEMMETER( MVAL( M_Hash_Worked) ++ );
1662 return MHASH_HITS[kval].handle;
1666 return find_mmap_handle_lsearch( alias );
1671 Some kernels don't like being asked to pick addresses for mapping
1672 themselves---IRIX is known to become extremely slow if mmap is
1673 passed a ZERO as the first argument. In such cases, we use an
1674 address map which is managed local to the XEmacs process. The
1675 address map maintains an ordered linked list of (address, size,
1676 occupancy) triples ordered by the absolute address. Initially, a
1677 large address area is marked as being empty. The address picking
1678 scheme takes bites off the first block which is still empty and
1679 large enough. If mmap with the specified address fails, it is
1680 marked unavailable and not attempted thereafter. The scheme will
1681 keep fragmenting the large empty block until it finds an address
1682 which can be successfully mmapped, or until there are no free
1683 blocks of the given size left.
1685 Note that this scheme, given its first-fit strategy, is prone to
1686 fragmentation of the first part of memory earmarked for this
1687 purpose. [ACP Vol I]. We can't use the workaround of using a
1688 randomized first fit because we don't want to presume too much
1689 about the memory map. Instead, we try to coalesce empty or
1690 unavailable blocks at any available opportunity. */
1692 /* Initialization procedure for address picking scheme */
1693 static void Addr_Block_initialize(void);
1695 /* Get a suitable VM_ADDR via mmap */
1696 static VM_ADDR New_Addr_Block (size_t sz);
1698 /* Free a VM_ADDR allocated via New_Addr_Block */
1699 static void Free_Addr_Block (VM_ADDR addr, size_t sz);
1701 #ifdef MMAP_GENERATE_ADDRESSES
1702 /* Implementation of the three calls for address picking when XEmacs is in charge */
1704 /* The enum denotes the status of the following block. */
1705 typedef enum { empty = 0, occupied, unavailable } addr_status;
1707 typedef struct addr_chain
1712 struct addr_chain *next;
1713 } ADDRESS_BLOCK, *ADDRESS_CHAIN;
1714 /* NB: empty and unavailable blocks are concatenated. */
1716 static ADDRESS_CHAIN addr_chain = 0;
1717 /* Start off the address block chain with a humongous address block
1718 which is empty to start with. Note that addr_chain is invariant
1719 WRT the addition/deletion of address blocks because of the assert
1720 in Coalesce() and the strict ordering of blocks by their address
1723 Addr_Block_initialize (void)
1725 MEMMETER( MVAL( M_Addrlist_Size )++)
1726 addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ));
1727 addr_chain->next = 0; /* Last block in chain */
1728 addr_chain->sz = 0x0c000000; /* Size */
1729 addr_chain->addr = (POINTER) (0x04000000);
1730 addr_chain->flag = empty;
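/* With these constants, addresses are handed out first-fit from the
   192MB range [0x04000000, 0x10000000). */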
1733 /* Coalesce address blocks if they are contiguous. Only empty and
1734 unavailable slots are coalesced. */
1736 Coalesce_Addr_Blocks (void)
1739 for (p = addr_chain; p; p = p->next)
1741 while (p->next && p->flag == p->next->flag)
1746 if (p->flag == occupied) break; /* No cigar */
1748 /* Check if the addresses are contiguous. */
1749 if (p->addr + p->sz != np->addr) break;
1751 MEMMETER( MVAL( M_Addrlist_Size )--)
1752 /* We can coalesce these two. */
1755 assert( np != addr_chain ); /* We're not freeing the head of the list. */
1756 UNDERLYING_FREE( np );
1761 /* Get an empty address block of specified size. */
1763 New_Addr_Block (size_t sz)
1765 ADDRESS_CHAIN p = addr_chain;
1766 VM_ADDR new_addr = VM_FAILURE_ADDR;
1767 for (; p; p = p->next)
1769 if (p->flag == empty && p->sz > sz)
1771 /* Create a new entry following p which is empty. */
1772 ADDRESS_CHAIN remainder = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ) );
1773 remainder->next = p->next;
1774 remainder->flag = empty;
1775 remainder->addr = p->addr + sz;
1776 remainder->sz = p->sz - sz;
1778 MEMMETER( MVAL( M_Addrlist_Size )++)
1780 /* Now make p become an occupied block with the appropriate size */
1781 p->next = remainder;
1783 new_addr = mmap( (VM_ADDR) p->addr, p->sz, PROT_READ|PROT_WRITE,
1784 MAP_FLAGS, DEV_ZERO_FD, 0 );
1785 if (new_addr == VM_FAILURE_ADDR)
1787 p->flag = unavailable;
1794 Coalesce_Addr_Blocks();
1798 /* Free an address block. We mark the block as being empty, and attempt to
1799 do any coalescing that may have resulted from this. */
1801 Free_Addr_Block (VM_ADDR addr, size_t sz)
1803 ADDRESS_CHAIN p = addr_chain;
1804 for (; p; p = p->next )
1806 if (p->addr == addr)
1808 if (p->sz != sz) abort(); /* ACK! Shouldn't happen at all. */
1809 munmap( (VM_ADDR) p->addr, p->sz );
1814 if (!p) abort(); /* Can't happen... we've got a block to free which is not in
1815 the address list. */
1816 Coalesce_Addr_Blocks();
1818 #else /* !MMAP_GENERATE_ADDRESSES */
1819 /* This is an alternate (simpler) implementation in cases where the
1820 address is picked by the kernel. */
1823 Addr_Block_initialize (void)
1829 New_Addr_Block (size_t sz)
1831 return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS,
1836 Free_Addr_Block (VM_ADDR addr, size_t sz)
1838 munmap ((caddr_t) addr, sz );
1841 #endif /* MMAP_GENERATE_ADDRESSES */
1844 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */
1847 r_alloc (POINTER, SIZE): Allocate a relocatable area with the start
1848 address aliased to the first parameter.
1851 POINTER r_alloc (POINTER *ptr, size_t size);
1853 r_alloc (POINTER *ptr, size_t size)
1857 switch(r_alloc_initialized)
1862 *ptr = (POINTER) UNDERLYING_MALLOC(size);
1865 mh = new_mmap_handle( size );
1868 size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
1869 size_t mmapped_size = ROUNDUP( size + hysteresis );
1870 MEMMETER( MVAL(M_Map)++ )
1871 MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) )
1872 MEMMETER( MVAL(M_Wastage) += mmapped_size - size )
1873 MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) )
1874 mh->vm_addr = New_Addr_Block( mmapped_size );
1875 if (mh->vm_addr == VM_FAILURE_ADDR) {
1876 free_mmap_handle( mh ); /* Free the loser */
1878 return 0; /* ralloc failed due to mmap() failure. */
1880 MHASH_ADD( mh->vm_addr, mh );
1881 mh->space_for = mmapped_size;
1882 mh->aliased_address = ptr;
1883 *ptr = (POINTER) mh->vm_addr;
1886 *ptr = 0; /* Malloc of block failed */
1892 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
1893 Store 0 in *PTR to show there's no block allocated. */
1895 void r_alloc_free (POINTER *ptr);
1897 r_alloc_free (POINTER *ptr)
1899 switch( r_alloc_initialized) {
1904 UNDERLYING_FREE( *ptr ); /* Certain this is from the heap. */
1909 MMAP_HANDLE dead_handle = find_mmap_handle( ptr );
1910 /* Check if we've got it. */
1911 if (dead_handle == 0) /* Didn't find it in the list of mmap handles */
1913 UNDERLYING_FREE( *ptr );
1917 MEMMETER( MVAL( M_Wastage ) -= (dead_handle->space_for - dead_handle->size) )
1918 MEMMETER( MVAL( M_Live_Pages ) -= (dead_handle->space_for / page_size ))
1919 MEMMETER(MVAL(M_Unmap)++)
1920 MHASH_DEL( dead_handle->vm_addr );
1921 Free_Addr_Block( dead_handle->vm_addr, dead_handle->space_for );
1922 free_mmap_handle (dead_handle);
1926 } /* r_alloc_initialized */
1927 *ptr = 0; /* Zap the pointer's contents. */
1930 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
1932 Change *PTR to reflect the new bloc, and return this value.
1934 If more memory cannot be allocated, then leave *PTR unchanged, and
1937 POINTER r_re_alloc (POINTER *ptr, size_t sz);
1939 r_re_alloc (POINTER *ptr, size_t sz)
1941 if (r_alloc_initialized == 0)
1944 return 0; /* suppress compiler warning */
1946 else if (r_alloc_initialized == 1)
1948 POINTER tmp = (POINTER) realloc(*ptr, sz);
1955 size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
1956 size_t actual_sz = ROUNDUP( sz + hysteresis );
1957 MMAP_HANDLE h = find_mmap_handle( ptr );
1958 VM_ADDR new_vm_addr;
1960 if ( h == 0 ) /* Was allocated using malloc. */
1962 POINTER tmp = (POINTER) UNDERLYING_REALLOC(*ptr, sz);
1968 MEMMETER(
1969 MVAL(M_Average_Bumpval) =
1970 (((double) MVAL(M_Remap) * MVAL(M_Average_Bumpval)) + (sz - h->size))
1971 / (double) (MVAL(M_Remap) + 1))
1972 MEMMETER(MVAL(M_Remap)++)
1973 if (h->space_for > sz) /* We've got some more room */
1974 { /* Also, if a shrinkage was asked for. */
1975 MEMMETER( MVAL(M_Didnt_Copy)++ )
1976 MEMMETER( MVAL(M_Wastage) -= (sz - h->size))
1977 /* We're pretty dumb at handling shrinkage. We should check for
1978 a larger gap than the standard hysteresis allowable, and if so,
1979 shrink the number of pages. Right now, we simply reset the size
1980 component and return. */
1985 new_vm_addr = New_Addr_Block( actual_sz );
1986 if (new_vm_addr == VM_FAILURE_ADDR)
1987 {/* Failed to realloc. */
1992 MHASH_ADD( new_vm_addr, h );
1993 /* We got a block OK: now we should move the old contents to the
1994 new address. We use the old size of this block. */
1995 memmove(new_vm_addr, h->vm_addr, h->size);
1996 MHASH_DEL( h->vm_addr );
1997 Free_Addr_Block( h->vm_addr, h->space_for ); /* Unmap old area. */
1999 MEMMETER( MVAL( M_Copy_Pages ) += (h->space_for/page_size) )
2000 MEMMETER( MVAL( M_Live_Pages ) -= (h->space_for / page_size))
2001 MEMMETER( MVAL( M_Live_Pages ) += (actual_sz / page_size))
2002 MEMMETER( MVAL( M_Wastage ) -= (h->space_for - h->size))
2003 MEMMETER( MVAL( M_Wastage ) += (actual_sz - sz) )
2005 /* Update block datastructure. */
2006 h->space_for = actual_sz; /* New total space */
2007 h->size = sz; /* New (requested) size */
2008 h->vm_addr = new_vm_addr; /* New VM start address */
2009 h->aliased_address = ptr; /* Change alias to reflect block relocation. */
2010 *ptr = (POINTER) h->vm_addr;
2016 /* Initialize various things for memory allocation.
2022 if (r_alloc_initialized > 1)
2023 return; /* used to return 1 */
2025 #ifdef PDUMP
2026 /* Under pdump, we need to activate ralloc on the first go. */
2027 ++r_alloc_initialized;
2028 #endif
2029 if (++r_alloc_initialized == 1)
2030 return; /* used to return 1 */
2032 Addr_Block_initialize(); /* Initialize the address picker, if required. */
2034 assert( page_size > 0 ); /* getpagesize() bogosity check. */
2036 #ifndef MAP_ANONYMOUS
2037 DEV_ZERO_FD = open( "/dev/zero", O_RDWR );
2038 if (DEV_ZERO_FD < 0)
2039 /* Failed. Perhaps we should abort here? */
2040 return; /* used to return 0 */
2043 #ifdef MMAP_METERING
2044 for(i = 0; i < N_Meterables; i++ )
2048 #endif /* MMAP_METERING */
2052 syms_of_ralloc (void)
2054 #ifdef MMAP_METERING
2055 defsymbol (&Qmmap_times_mapped, "mmap-times-mapped");
2056 defsymbol (&Qmmap_pages_mapped, "mmap-pages-mapped");
2057 defsymbol (&Qmmap_times_unmapped, "mmap-times-unmapped");
2058 defsymbol (&Qmmap_times_remapped, "mmap-times-remapped");
2059 defsymbol (&Qmmap_didnt_copy, "mmap-didnt-copy");
2060 defsymbol (&Qmmap_pages_copied, "mmap-pages-copied");
2061 defsymbol (&Qmmap_average_bumpval, "mmap-average-bumpval");
2062 defsymbol (&Qmmap_wastage, "mmap-wastage");
2063 defsymbol (&Qmmap_live_pages, "mmap-live-pages");
2064 defsymbol (&Qmmap_addr_looked_up, "mmap-addr-looked-up");
2065 defsymbol (&Qmmap_hash_worked, "mmap-hash-worked");
2066 defsymbol (&Qmmap_addrlist_size, "mmap-addrlist-size");
2067 DEFSUBR (Fmmap_allocator_status);
2068 #endif /* MMAP_METERING */
2072 vars_of_ralloc (void)
2074 DEFVAR_INT ("mmap-hysteresis", &mmap_hysteresis /*
2075 Extra room left at the end of an allocated arena,
2076 so that a re-alloc requesting extra space smaller than this
2077 does not actually cause a new arena to be allocated.
2079 A negative value is considered equal to zero. This is the
2080 minimum amount of space guaranteed to be left at the end of
2081 the arena. Because allocation happens in multiples of the OS
2082 page size, it is possible for more space to be left unused.
2084 mmap_hysteresis = 0;
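/* Worked example: with mmap-hysteresis 2000 on a 4096-byte-page host,
   r_alloc (&ptr, 3000) maps ROUNDUP (3000 + 2000) == 8192 bytes, so a
   later r_re_alloc (&ptr, n) for any n < 8192 relocates nothing. */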
2087 #endif /* HAVE_MMAP */