/* Block-relocating memory allocator.
   Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.

This file is part of XEmacs.

XEmacs is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

XEmacs is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with XEmacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* Synched up with: FSF 20.2 (non-mmap portion only) */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */
#include <unistd.h>		/* for getpagesize () */

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the object pointed to has a
   size of 1.  */
#if 0 /* Arithmetic on void * is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else
typedef unsigned char *POINTER;
#endif
#endif /* 0 */

/* Unconditionally use unsigned char * for this.  */
typedef unsigned char *POINTER;

typedef unsigned long SIZE;
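/* With unsigned char *, pointer arithmetic is always in bytes: if p is
   a POINTER, p + n advances exactly n bytes.  All of the heap and bloc
   address computations below rely on this.  */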
#ifdef DOUG_LEA_MALLOC
#include <malloc.h>
#endif

#include "getpagesize.h"

void refill_memory_reserve (void);

#else /* Not emacs.  */

typedef void *POINTER;

void init_ralloc (void);
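/* bcopy-style argument order (from, to, size), mapped onto memmove's
   (to, from, size); memmove also makes overlapping moves safe.  */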
#define safe_bcopy(x, y, z) memmove (y, x, z)

#endif /* emacs */

#define NIL ((POINTER) 0)
#if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) (long size);

/* The break value, as seen by malloc ().  */
static POINTER virtual_break_value;

/* The break value, viewed by the relocatable blocs.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;
/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
                       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) ((addr) & ~(page_size - 1))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))
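/* A quick illustration of the rounding macros (hypothetical numbers,
   assuming page_size = 4096 and MEM_ALIGN = 8):

     ROUNDUP (1)      => 4096   - one byte still costs a whole page
     ROUNDUP (4097)   => 8192
     MEM_ROUNDUP (13) => 16     - next multiple of MEM_ALIGN
     ALIGNED (0x2000) => 1, ALIGNED (0x2001) => 0  */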
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   or shrink.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;
/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;
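/* A minimal sketch (illustrative only; the real consistency check is
   r_alloc_check near the end of this file) of how the abutment
   invariant above can be verified by walking the bloc list:  */
#if 0
static void
check_blocs_abut (void)
{
  bloc_ptr b;
  for (b = first_bloc; b != NIL_BLOC && b->next != NIL_BLOC; b = b->next)
    /* Each bloc's data must end exactly where the next bloc's begins.  */
    assert (b->data + b->size == b->next->data);
}
#endif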
static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

/* Obtain SIZE bytes of space.  If enough space is not presently available
   in our process reserve (i.e., page_break_value - break_value), this
   means getting more page-aligned space from the system.

   Return non-zero if all went well, or zero if we couldn't allocate
   the memory.  */

/* Functions to get and return memory from the system.  */
/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */
static POINTER
obtain (POINTER address, SIZE size)
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk.  Make a new heap.  */
          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

          if ((*real_morecore) (bloc_start - new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often.  */
      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
        - (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
        return 0;

      last_heap->end += get;
    }

  return address;
}
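/* To make the hysteresis arithmetic above concrete (hypothetical
   numbers, assuming page_size = 4096 and extra_bytes = ROUNDUP (50000)
   = 53248): a request for size = 10000 with already_available = 0
   computes get = 10000 + 53248 = 63248, which the ROUNDUP line then
   pads to a page multiple - 65536 if last_heap->end is already
   page-aligned - so sixteen whole pages are taken at once.  */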
/* Obtain SIZE bytes of space and return a pointer to the new area.
   If we could not allocate the space, return zero.  */

static POINTER
get_more_space (SIZE size)
{
  POINTER ptr = break_value;

  if (obtain (break_value, size))
    {
      break_value += size;
      return ptr;
    }
  else
    return 0;
}
/* Note that SIZE bytes of space have been relinquished by the process.
   If SIZE is more than a page, return the space to the system.  */

static void
relinquish (void)
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
        {
          /* This heap should have no blocs in it.  */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            abort ();

          /* Return the last heap, with its header, to the system.  */
          excess = (char *)last_heap->end - (char *)last_heap->start;
          last_heap = last_heap->prev;
          last_heap->next = NIL_HEAP;
        }
      else
        {
          excess = (char *) last_heap->end
            - (char *) ROUNDUP ((char *)last_heap->end - excess);
          last_heap->end -= excess;
        }

      if ((*real_morecore) (- excess) == 0)
        abort ();
    }
}
/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long r_alloc_size_in_use (void);
long
r_alloc_size_in_use (void)
{
  return break_value - virtual_break_value;
}
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}
/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
        free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
        {
          register bloc_ptr tb = b;
          register SIZE s = 0;

          /* Add up the size of all the following blocs.  */
          while (tb != NIL_BLOC)
            {
              if (tb->variable)
                s += tb->size;

              tb = tb->next;
            }

          /* Get that space.  */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
        address += b->size;
      b = b->next;
    }

  return 1;
}
/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc_ptr bloc, bloc_ptr before)
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in.  */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B.  */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
             : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
      if (!bloc->variable)
        {
          bloc->size = 0;
          bloc->data = bloc->new_data;
        }
      else
        {
          safe_bcopy (bloc->data, bloc->new_data, old_size);
          memset (bloc->new_data + old_size, 0, size - old_size);
          *bloc->variable = bloc->data = bloc->new_data;
        }
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it
   and returning BLOC->size bytes to the free area.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
        heap->first_bloc = bloc->next;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
        heap->last_bloc = bloc->prev;
      else
        heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER r_alloc_sbrk (long size);
POINTER
r_alloc_sbrk (long size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    init_ralloc ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
         extra space if we passed an unaligned one.  But we could
         not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (POINTER) ROUNDUP (h->start);
        }

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (POINTER) ROUNDUP (virtual_break_value);
          else
            address = (POINTER) ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* This is no clean solution - no idea how to do it better.  */
          if (r_alloc_freeze_level)
            return NIL;

          /* There is a bug here: if the above obtain call succeeded, but the
             relocate_blocs call below does not succeed, we need to free
             the memory that we got with obtain.  */

          /* Move all blocs upward.  */
          if (! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (POINTER)(h+1) <= new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header.  */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }

      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to.  */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;

          last_heap = first_heap;
        }

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
        - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              safe_bcopy (b->data, b->new_data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
        {
          /* We found additional space below the first heap.  */
          first_heap->start = (POINTER) ((char *)virtual_break_value + size);
        }
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
                 ? last_bloc->data + last_bloc->size
                 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return virtual_break_value;
}
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze is set, a best fit of unused blocs could be done
   before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER r_alloc (POINTER *ptr, SIZE size);
POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    init_ralloc ();

  new_bloc = get_bloc (size);
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
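/* Typical use of r_alloc and friends (a hedged sketch, not code from
   this file): the caller owns a POINTER variable, passes its address
   in, and from then on accesses the storage only through that
   variable, because relocation rewrites it behind the caller's back.  */
#if 0
static void
example_r_alloc_usage (void)
{
  POINTER buf;				/* the "aliased variable" */

  if (r_alloc (&buf, 1024) == 0)
    return;				/* failed; buf was set to zero */
  buf[0] = 'x';				/* always go through buf */
  if (r_re_alloc (&buf, 4096))		/* may relocate and update buf */
    buf[4095] = 'y';
  r_alloc_free (&buf);			/* frees storage and zeroes buf */
}
#endif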
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void r_alloc_free (POINTER *ptr);
void
r_alloc_free (POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    init_ralloc ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER r_re_alloc (POINTER *ptr, SIZE size);
POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    init_ralloc ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
        {
          resize_bloc (bloc, MEM_ROUNDUP (size));
          /* Never mind if this fails, just do nothing...  */
          /* It *should* be infallible!  */
        }
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
        {
          bloc_ptr new_bloc;
          new_bloc = get_bloc (MEM_ROUNDUP (size));
          if (new_bloc)
            {
              new_bloc->variable = ptr;
              *ptr = new_bloc->data;
              bloc->variable = (POINTER *) NIL;
            }
          else
            return NIL;
        }
      else
        {
          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
            return NIL;
        }
    }
  return *ptr;
}
/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void r_alloc_freeze (long size);
void
r_alloc_freeze (long size)
{
  if (! r_alloc_initialized)
    init_ralloc ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}
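/* A minimal sketch of the intended freeze/thaw pairing (illustrative
   only; operate_on_stable_address is a hypothetical callee): a caller
   that must hand an address into relocatable data to code that cannot
   tolerate relocation brackets the call so that no bloc moves in
   between.  */
#if 0
static void
example_freeze_thaw (POINTER data)
{
  r_alloc_freeze (0);			/* just pin the blocs */
  operate_on_stable_address (data);	/* hypothetical */
  r_alloc_thaw ();			/* unused blocs are freed here */
}
#endif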
void r_alloc_thaw (void);
void
r_alloc_thaw (void)
{
  if (! r_alloc_initialized)
    init_ralloc ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
        {
          if (!(*b)->variable)
            free_bloc (*b);
          else
            b = &(*b)->next;
        }
    }
}
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
#ifndef DOUG_LEA_MALLOC
extern POINTER (*__morecore) (long size);
#endif

/* Initialize various things for memory allocation.  */

#define SET_FUN_PTR(fun_ptr, fun_val) \
  (*((void **) (&fun_ptr)) = ((void *) (fun_val)))

void
init_ralloc (void)
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  SET_FUN_PTR (real_morecore, __morecore);
  SET_FUN_PTR (__morecore, r_alloc_sbrk);

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

#ifdef DOUG_LEA_MALLOC
  mallopt (M_TOP_PAD, 64 * 4096);
#endif
#if 0 /* Hasn't been synched yet */
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void r_alloc_reinit (void);
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if ( ((void*) __morecore) != (void *) (r_alloc_sbrk))
    {
      SET_FUN_PTR (real_morecore, __morecore);
      SET_FUN_PTR (__morecore, r_alloc_sbrk);
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */
#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
         the heap start has any sort of alignment.
         Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          else
            ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */
#else /* HAVE_MMAP */

/*
   A relocating allocator built using the mmap(2) facility available
   in some OSes.  Based on another version written by Paul Flinders,
   from which code (and comments) were snarfed.

   The OS should support mmap() with the MAP_ANONYMOUS attribute, or
   have /dev/zero.  It should support private memory mapping.

   Paul Flinders wrote a version which works well for systems that
   allow callers to specify (virtual) addresses to mmap().
   Unfortunately, such a scheme doesn't work for certain systems like
   HP-UX that have a system-wide virtual->real address map, and
   consequently impose restrictions on the virtual address values
   permitted.

   NB: The mapping scheme in HP-UX is motivated by the inverted page
   table design in some HP processors.

   This alternate implementation allows for the addresses to be
   optionally chosen by the system.  Fortunately, buffer allocation
   doesn't insist upon the contiguous memory which Flinders' scheme
   provides and this one doesn't.

   We don't really provide for hysteresis here, but add some metering
   to monitor how poorly the allocator actually works.  See the
   documentation for `mmap-hysteresis'.

   This implementation actually cycles through the blocks allocated
   via mmap() and only hands a pointer to free() if it wasn't one of
   them.  Unfortunately, this is O(n) in the number of mmapped blocks.
   (Not really, as we have a hash table which tries to reduce the
   cost.)  Also, this dereferences the pointer passed, so it would
   cause a segfault if garbage was passed to it.  */
#include <sys/mman.h>

typedef void *VM_ADDR;		/* VM addresses */
static CONST VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails.  */
/* Configuration for relocating allocator.  */

/* #define MMAP_GENERATE_ADDRESSES */
/* Define this if you want Emacs to manage the address table.
   It is not recommended unless you have major problems with the
   default scheme, which allows the OS to pick addresses.  */

/* USELESS_LOWER_ADDRESS_BITS defines the number of bits which can be
   discarded while computing the hash, as they're always zero.  The
   default is appropriate for a page size of 4096 bytes.  */

#define USELESS_LOWER_ADDRESS_BITS 12

/* Size of hash table for inverted VM_ADDR->MMAP_HANDLE lookup */

#define MHASH_PRIME 89

/* Whether we want to enable metering of some ralloc performance.
   This incurs a constant penalty for each mmap operation.  */

#define MMAP_METERING
/* Rename the following to protect against some smartness elsewhere.
   We need access to the allocator used for non-mmap allocation
   elsewhere, in case we get passed a handle that we didn't allocate
   ourselves.  Currently, this default allocator is also used to
   maintain local structures for relocatable blocks.  */

#define UNDERLYING_MALLOC   malloc
#define UNDERLYING_FREE     free
#define UNDERLYING_REALLOC  realloc
/* MAP_ADDRCHOICE_FLAG is set to MAP_FIXED if MMAP_GENERATE_ADDRESSES
   is defined, and MAP_VARIABLE otherwise.  Some losing systems don't
   define the _FIXED/_VARIABLE flags, in which case it is set to 0.  */

#ifdef MMAP_GENERATE_ADDRESSES
# ifdef MAP_FIXED
#  define MAP_ADDRCHOICE_FLAG MAP_FIXED
# endif
#else /* !MMAP_GENERATE_ADDRESSES */
# ifdef MAP_VARIABLE
#  define MAP_ADDRCHOICE_FLAG MAP_VARIABLE
# endif
#endif /* MMAP_GENERATE_ADDRESSES */

#ifndef MAP_ADDRCHOICE_FLAG
# define MAP_ADDRCHOICE_FLAG 0
#endif /* MAP_ADDRCHOICE_FLAG */

#ifdef MAP_ANONYMOUS
# define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG | MAP_ANONYMOUS)
#else /* !MAP_ANONYMOUS */
# define MAP_FLAGS (MAP_PRIVATE | MAP_ADDRCHOICE_FLAG)
#endif /* MAP_ANONYMOUS */
/* (ptf): A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.

   If we're using mmap this flag has three possible values:
   0 - initial value
   1 - Normal value when running temacs.  In this case buffers
       are allocated using malloc so that any data that they
       contain becomes part of the undumped executable.
   2 - Normal value when running emacs */

static int r_alloc_initialized = 0;
/* (ptf): Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define PAGES_FOR(size) (((unsigned long int) (size) + page_size - 1)/page_size)
#define ROUNDUP(size) ((unsigned long int)PAGES_FOR(size)*page_size)
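/* For instance, with page_size = 4096: PAGES_FOR (4096) = 1,
   PAGES_FOR (4097) = 2, and ROUNDUP (10000) = 3 * 4096 = 12288.
   (Hypothetical numbers; page_size is whatever getpagesize () says.)  */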
/* DEV_ZERO_FD is -1 normally, but for systems without MAP_ANONYMOUS
   points to a file descriptor opened on /dev/zero */

static int DEV_ZERO_FD = -1;


/* We actually need a data structure that can be usefully structured
   based on the VM address, and allows an ~O(1) lookup on an arbitrary
   address, i.e., a hash table.  Maybe the XEmacs hash table can be
   coaxed enough.  At the moment, we use lookup on a hash table to
   decide whether to do an O(n) search on the malloced block list.
   Addresses are hashed to a bucket modulo MHASH_PRIME.  */
/* We settle for a standard doubly-linked list.  The dynarr type isn't
   very amenable to deletion of items in the middle, so we conjure up
   yet another stupid data structure.  The structure is maintained as a
   ring, and the singleton ring has the sole element as its left and
   right neighbours.  */

static void init_MHASH_table (void); /* Forward reference */

typedef struct alloc_dll
{
  size_t size;			/* #bytes currently in use */
  size_t space_for;		/* #bytes we really have */
  POINTER* aliased_address;	/* Address of aliased variable, to tweak if relocating */
  VM_ADDR vm_addr;		/* VM address returned by mmap */
  struct alloc_dll *left;	/* Left link in circular doubly linked list */
  struct alloc_dll *right;
} *MMAP_HANDLE;

static MMAP_HANDLE mmap_start = 0; /* Head of linked list */
static size_t page_size = 0;	   /* Size of VM pages */
static int mmap_hysteresis;	   /* Should be size_t, really.  */
1442 /* Get a new handle for a fresh block. */
1444 new_mmap_handle (size_t nsiz)
1446 MMAP_HANDLE h = (MMAP_HANDLE) UNDERLYING_MALLOC( sizeof (struct alloc_dll));
1447 if ( h == 0) return 0;
1449 if (mmap_start == 0)
1451 init_MHASH_table ();
1452 mmap_start = h; mmap_start->left = h; mmap_start->right = h;
1455 MMAP_HANDLE prev = mmap_start->left;
1456 MMAP_HANDLE nex = mmap_start;
1458 /* Four pointers need fixing. */
/* Find a handle given the aliased address using linear search.  */
static MMAP_HANDLE
find_mmap_handle_lsearch (POINTER *alias)
{
  MMAP_HANDLE h = mmap_start;
  if (h == 0) return 0;
  do
    {
      if (h->aliased_address == alias && *alias == h->vm_addr)
        return h;
      h = h->right;
    } while( h != mmap_start );
  return 0;			/* Bogus alias passed.  */
}
/* Free a handle.  */
static void
free_mmap_handle (MMAP_HANDLE h)
{
  MMAP_HANDLE prev = h->left;
  MMAP_HANDLE nex = h->right;
  if (prev == h || nex == h)	/* In fact, this should be && */
    {				/* We're the singleton dll */
      UNDERLYING_FREE( h );	/* Free the sole item */
      mmap_start = 0; return;
    }
  else if (h == mmap_start)
    {
      mmap_start = nex;		/* Make sure mmap_start isn't bogus.  */
    }
  prev->right = nex;
  nex->left = prev;
  UNDERLYING_FREE( h );
}
/* A simple hash table to speed up the inverted lookup of
   VM_ADDR->MMAP_HANDLE.  We maintain the number of hits for a
   particular bucket.  We invalidate a hash table entry during block
   deletion if the hash has cached the deleted block's address.  */

/* Simple hash check.  */
static struct
{
  int n_hits;			/* How many addresses map to this? */
  MMAP_HANDLE handle;		/* What is the current handle? */
  VM_ADDR addr;			/* What is its VM address? */
} MHASH_HITS[ MHASH_PRIME ];
static void
init_MHASH_table (void)
{
  int i = 0;
  for (; i < MHASH_PRIME; i++)
    {
      MHASH_HITS[i].n_hits = 0;
      MHASH_HITS[i].addr = 0;
      MHASH_HITS[i].handle = 0;
    }
}
/* Compute the hash value for an address.  */
static int
MHASH (VM_ADDR addr)
{
#if (LONGBITS == 64)
  unsigned long int addr_shift = (unsigned long int)(addr) >> USELESS_LOWER_ADDRESS_BITS;
#else
  unsigned int addr_shift = (unsigned int)(addr) >> USELESS_LOWER_ADDRESS_BITS;
#endif
  int hval = addr_shift % MHASH_PRIME;	/* We could have addresses which are
					   negative when converted to signed
					   ints */
  return ((hval >= 0) ? hval : MHASH_PRIME + hval);
}
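/* Worked example (hypothetical address; with 4096-byte pages the low
   12 bits are discarded): MHASH ((VM_ADDR) 0x8050000) computes
   0x8050000 >> 12 = 0x8050 = 32848, and 32848 % 89 = 7, so this
   address is cached in bucket 7 of MHASH_HITS.  */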
/* Add a VM address with its corresponding handle to the table.  */
static void
MHASH_ADD (VM_ADDR addr, MMAP_HANDLE h)
{
  int kVal = MHASH( addr );
  if (MHASH_HITS[kVal].n_hits++ == 0)
    { /* Only overwrite the table if there were no hits so far.  */
      MHASH_HITS[kVal].addr = addr;
      MHASH_HITS[kVal].handle = h;
    }
}
/* Delete a VM address entry from the hash table.  */
static void
MHASH_DEL (VM_ADDR addr)
{
  int kVal = MHASH( addr );
  MHASH_HITS[kVal].n_hits--;
  if (addr == MHASH_HITS[kVal].addr)
    {
      MHASH_HITS[kVal].addr = 0; /* Invalidate cache.  */
      MHASH_HITS[kVal].handle = 0;
    }
}

/* End of hash buckets */
/* Metering malloc performance.  */
#ifdef MMAP_METERING
/* If we're metering, we introduce some extra symbols to aid the noble
   cause of bloating XEmacs core size.  */

static Lisp_Object Qmmap_times_mapped;
static Lisp_Object Qmmap_pages_mapped;
static Lisp_Object Qmmap_times_unmapped;
static Lisp_Object Qmmap_times_remapped;
static Lisp_Object Qmmap_didnt_copy;
static Lisp_Object Qmmap_pages_copied;
static Lisp_Object Qmmap_average_bumpval;
static Lisp_Object Qmmap_wastage;
static Lisp_Object Qmmap_live_pages;
static Lisp_Object Qmmap_addr_looked_up;
static Lisp_Object Qmmap_hash_worked;
static Lisp_Object Qmmap_addrlist_size;

#define M_Map 0			/* How many times allocated? */
#define M_Pages_Map 1		/* How many pages allocated? */
#define M_Unmap 2		/* How many times freed? */
#define M_Remap 3		/* How many times increased in size? */
#define M_Didnt_Copy 4		/* How many times didn't need to copy? */
#define M_Copy_Pages 5		/* Total # pages copied */
#define M_Average_Bumpval 6	/* Average bump value */
#define M_Wastage 7		/* Remaining (unused) space */
#define M_Live_Pages 8		/* #live pages */
#define M_Address_Lookup 9	/* How many times did we need to check if an addr is in the block? */
#define M_Hash_Worked 10	/* How many times did the simple hash check work? */
#define M_Addrlist_Size 11	/* What is the size of the XEmacs memory map? */

#define N_Meterables 12		/* Total number of meterables */
#define MEMMETER(x) {x;}
#define MVAL(x) (meter[x])
#define MLVAL(x) (make_int (meter[x]))
static int meter[N_Meterables];
DEFUN ("mmap-allocator-status", Fmmap_allocator_status, 0, 0, 0, /*
Return some information about mmap-based allocator.

mmap-times-mapped:    number of times r_alloc was called.
mmap-pages-mapped:    number of pages mapped by r_alloc calls only.
mmap-times-unmapped:  number of times r_free was called.
mmap-times-remapped:  number of times r_re_alloc was called.
mmap-didnt-copy:      number of times re-alloc did NOT have to move the block.
mmap-pages-copied:    total number of pages copied.
mmap-average-bumpval: average increase in size demanded to re-alloc.
mmap-wastage:         total number of bytes allocated, but not currently in use.
mmap-live-pages:      total number of pages live.
mmap-addr-looked-up:  total number of times needed to check if addr is in block.
mmap-hash-worked:     total number of times the simple hash check worked.
mmap-addrlist-size:   number of entries in address picking list.
*/
       ())
{
  Lisp_Object result = Qnil;

  result = cons3 (Qmmap_addrlist_size, MLVAL (M_Addrlist_Size), result);
  result = cons3 (Qmmap_hash_worked, MLVAL (M_Hash_Worked), result);
  result = cons3 (Qmmap_addr_looked_up, MLVAL (M_Address_Lookup), result);
  result = cons3 (Qmmap_live_pages, MLVAL (M_Live_Pages), result);
  result = cons3 (Qmmap_wastage, MLVAL (M_Wastage), result);
  result = cons3 (Qmmap_average_bumpval, MLVAL (M_Average_Bumpval), result);
  result = cons3 (Qmmap_pages_copied, MLVAL (M_Copy_Pages), result);
  result = cons3 (Qmmap_didnt_copy, MLVAL (M_Didnt_Copy), result);
  result = cons3 (Qmmap_times_remapped, MLVAL (M_Remap), result);
  result = cons3 (Qmmap_times_unmapped, MLVAL (M_Unmap), result);
  result = cons3 (Qmmap_pages_mapped, MLVAL (M_Pages_Map), result);
  result = cons3 (Qmmap_times_mapped, MLVAL (M_Map), result);

  return result;
}
#else /* !MMAP_METERING */

#define MEMMETER(x)
#define MVAL(x)

#endif /* MMAP_METERING */
/* Check if a given address is mmapped, and find its handle: try the
   hash table first, falling back to linear search.  */
static MMAP_HANDLE
find_mmap_handle (POINTER *alias)
{
  int kval = MHASH( *alias );
  MEMMETER( MVAL(M_Address_Lookup)++ )
  switch( MHASH_HITS[kval].n_hits)
    {
    case 0:
      MEMMETER( MVAL( M_Hash_Worked )++ )
      return 0;

    case 1:
      if (*alias == MHASH_HITS[kval].addr)
        {
          MEMMETER( MVAL( M_Hash_Worked) ++ );
          return MHASH_HITS[kval].handle;
        }
      /* FALL THROUGH */
    default:
      return find_mmap_handle_lsearch( alias );
    } /* switch */
}
/* Address picking scheme, for kernels that won't pick for us.
   Some kernels don't like being asked to pick addresses for mapping
   themselves---IRIX is known to become extremely slow if mmap is
   passed a ZERO as the first argument.  In such cases, we use an
   address map which is managed local to the XEmacs process.  The
   address map maintains an ordered linked list of (address, size,
   occupancy) triples ordered by the absolute address.  Initially, a
   large address area is marked as being empty.  The address picking
   scheme takes bites off the first block which is still empty and
   large enough.  If mmap with the specified address fails, it is
   marked unavailable and not attempted thereafter.  The scheme will
   keep fragmenting the large empty block until it finds an address
   which can be successfully mmapped, or until there are no free
   blocks of the given size left.

   Note that this scheme, given its first-fit strategy, is prone to
   fragmentation of the first part of memory earmarked for this
   purpose.  [ACP Vol I].  We can't use the workaround of using a
   randomized first fit because we don't want to presume too much
   about the memory map.  Instead, we try to coalesce empty or
   unavailable blocks at any available opportunity.  */
/* Initialization procedure for address picking scheme */
static void Addr_Block_initialize(void);

/* Get a suitable VM_ADDR via mmap */
static VM_ADDR New_Addr_Block( SIZE sz );

/* Free a VM_ADDR allocated via New_Addr_Block */
static void Free_Addr_Block( VM_ADDR addr, SIZE sz );
#ifdef MMAP_GENERATE_ADDRESSES
/* Implementation of the three calls for address picking when XEmacs is in charge */

/* The enum denotes the status of the following block.  */
typedef enum { empty = 0, occupied, unavailable } addr_status;

typedef struct addr_chain
{
  POINTER addr;
  SIZE sz;
  addr_status flag;
  struct addr_chain *next;
} ADDRESS_BLOCK, *ADDRESS_CHAIN;
/* NB: empty and unavailable blocks are concatenated.  */
static ADDRESS_CHAIN addr_chain = 0;
/* Start off the address block chain with a humongous address block
   which is empty to start with.  Note that addr_chain is invariant
   WRT the addition/deletion of address blocks because of the assert
   in Coalesce() and the strict ordering of blocks by their address.  */
static void Addr_Block_initialize()
{
  MEMMETER( MVAL( M_Addrlist_Size )++)
  addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ));
  addr_chain->next = 0;		/* Last block in chain */
  addr_chain->sz = 0x0c000000;	/* Size */
  addr_chain->addr = (POINTER) (0x04000000 | DATA_SEG_BITS);
  addr_chain->flag = empty;
}
/* Coalesce address blocks if they are contiguous.  Only empty and
   unavailable slots are coalesced.  */
static void Coalesce_Addr_Blocks()
{
  ADDRESS_CHAIN p;
  for (p = addr_chain; p; p = p->next)
    {
      while (p->next && p->flag == p->next->flag)
        {
          ADDRESS_CHAIN np;
          np = p->next;

          if (p->flag == occupied) break; /* No cigar */

          /* Check if the addresses are contiguous.  */
          if (p->addr + p->sz != np->addr) break;

          MEMMETER( MVAL( M_Addrlist_Size )--)
          /* We can coalesce these two.  */
          p->sz += np->sz;
          p->next = np->next;
          assert( np != addr_chain ); /* We're not freeing the head of the list.  */
          UNDERLYING_FREE( np );
        }
    } /* for all p */
}
/* Get an empty address block of specified size.  */
static VM_ADDR New_Addr_Block( SIZE sz )
{
  ADDRESS_CHAIN p = addr_chain;
  VM_ADDR new_addr = VM_FAILURE_ADDR;
  for (; p; p = p->next)
    {
      if (p->flag == empty && p->sz > sz)
        {
          /* Create a new entry following p which is empty.  */
          ADDRESS_CHAIN remainder = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ) );
          remainder->next = p->next;
          remainder->flag = empty;
          remainder->addr = p->addr + sz;
          remainder->sz = p->sz - sz;

          MEMMETER( MVAL( M_Addrlist_Size )++)

          /* Now make p become an occupied block with the appropriate size */
          p->next = remainder;
          p->sz = sz;
          new_addr = mmap( (VM_ADDR) p->addr, p->sz, PROT_READ|PROT_WRITE,
                           MAP_FLAGS, DEV_ZERO_FD, 0 );
          if (new_addr == VM_FAILURE_ADDR)
            {
              p->flag = unavailable;
              continue;
            }
          p->flag = occupied;
          break;
        }
    }
  Coalesce_Addr_Blocks();
  return new_addr;
}
/* Free an address block.  We mark the block as being empty, and attempt to
   do any coalescing that may have resulted from this.  */
static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
{
  ADDRESS_CHAIN p = addr_chain;
  for (; p; p = p->next )
    {
      if (p->addr == addr)
        {
          if (p->sz != sz) abort(); /* ACK! Shouldn't happen at all.  */
          munmap( (VM_ADDR) p->addr, p->sz );
          p->flag = empty;
          break;
        }
    }
  if (!p) abort(); /* Can't happen... we've got a block to free which is not in
                      the address list.  */
  Coalesce_Addr_Blocks();
}
#else /* !MMAP_GENERATE_ADDRESSES */
/* This is an alternate (simpler) implementation in cases where the
   address is picked by the kernel.  */

static void Addr_Block_initialize(void)
{
  /* Nothing.  */
}

static VM_ADDR New_Addr_Block( SIZE sz )
{
  return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS,
               DEV_ZERO_FD, 0);
}

static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
{
  munmap ((caddr_t) addr, sz);
}

#endif /* MMAP_GENERATE_ADDRESSES */
/* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */

/*
   r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start
   address aliased to the first parameter.
 */

POINTER r_alloc (POINTER *ptr, SIZE size);
POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  MMAP_HANDLE mh;

  switch(r_alloc_initialized)
    {
    case 0:
      abort();
    case 1:
      *ptr = (POINTER) UNDERLYING_MALLOC(size);
      break;
    default:
      mh = new_mmap_handle( size );
      if (mh)
        {
          SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
          SIZE mmapped_size = ROUNDUP( size + hysteresis );
          MEMMETER( MVAL(M_Map)++ )
          MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) )
          MEMMETER( MVAL(M_Wastage) += mmapped_size - size )
          MEMMETER( MVAL(M_Live_Pages) += (mmapped_size/page_size) )
          mh->vm_addr = New_Addr_Block( mmapped_size );
          if (mh->vm_addr == VM_FAILURE_ADDR) {
            free_mmap_handle( mh ); /* Free the loser */
            *ptr = 0;
            return 0;		/* ralloc failed due to mmap() failure.  */
          }
          MHASH_ADD( mh->vm_addr, mh );
          mh->space_for = mmapped_size;
          mh->aliased_address = ptr;
          *ptr = (POINTER) mh->vm_addr;
        }
      else
        *ptr = 0;		/* Malloc of block failed */
      break;
    }
  return *ptr;
}
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void r_alloc_free (POINTER *ptr);
void
r_alloc_free (POINTER *ptr)
{
  switch( r_alloc_initialized) {
    case 0:
      abort();

    case 1:
      UNDERLYING_FREE( *ptr );	/* Certain this is from the heap.  */
      break;

    default:
      {
        MMAP_HANDLE dead_handle = find_mmap_handle( ptr );
        /* Check if we've got it.  */
        if (dead_handle == 0)	/* Didn't find it in the list of mmap handles */
          {
            UNDERLYING_FREE( *ptr );
          }
        else
          {
            MEMMETER( MVAL( M_Wastage ) -= (dead_handle->space_for - dead_handle->size) )
            MEMMETER( MVAL( M_Live_Pages ) -= (dead_handle->space_for / page_size ))
            MEMMETER(MVAL(M_Unmap)++)
            MHASH_DEL( dead_handle->vm_addr );
            Free_Addr_Block( dead_handle->vm_addr, dead_handle->space_for );
            free_mmap_handle (dead_handle);
          }
      }
      break;
    } /* r_alloc_initialized */
  *ptr = 0;			/* Zap the pointer's contents.  */
}
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER r_re_alloc (POINTER *ptr, SIZE sz);
POINTER
r_re_alloc (POINTER *ptr, SIZE sz)
{
  if (r_alloc_initialized == 0)
    {
      abort ();
      return 0;			/* suppress compiler warning */
    }
  else if (r_alloc_initialized == 1)
    {
      POINTER tmp = (POINTER) realloc(*ptr, sz);
      if (tmp)
        *ptr = tmp;
      return tmp;
    }
  else
    {
      SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
      SIZE actual_sz = ROUNDUP( sz + hysteresis );
      MMAP_HANDLE h = find_mmap_handle( ptr );
      VM_ADDR new_vm_addr;

      if ( h == 0 )		/* Was allocated using malloc.  */
        {
          POINTER tmp = (POINTER) UNDERLYING_REALLOC(*ptr, sz);
          if (tmp)
            *ptr = tmp;
          return tmp;
        }

      MEMMETER(
               MVAL(M_Average_Bumpval) =
               (((double) MVAL(M_Remap) * MVAL(M_Average_Bumpval))
                + (sz - h->size)) / (double) (MVAL(M_Remap) + 1);
               )
      MEMMETER(MVAL(M_Remap)++)
      if (h->space_for > sz)	/* We've got some more room */
        {			/* Also, if a shrinkage was asked for.  */
          MEMMETER( MVAL(M_Didnt_Copy)++ )
          MEMMETER( MVAL(M_Wastage) -= (sz - h->size))
          /* We're pretty dumb at handling shrinkage.  We should check for
             a larger gap than the standard hysteresis allowable, and if so,
             shrink the number of pages.  Right now, we simply reset the size
             component and return.  */
          h->size = sz;
          return *ptr;
        }

      new_vm_addr = New_Addr_Block( actual_sz );
      if (new_vm_addr == VM_FAILURE_ADDR)
        {/* Failed to realloc.  */
          /* Leave *PTR unchanged, per the contract above.  */
          return 0;
        }

      MHASH_ADD( new_vm_addr, h );
      /* We got a block OK: now we should move the old contents to the
         new address.  We use the old size of this block.  */
      memmove(new_vm_addr, h->vm_addr, h->size);
      MHASH_DEL( h->vm_addr );
      Free_Addr_Block( h->vm_addr, h->space_for ); /* Unmap old area.  */

      MEMMETER( MVAL( M_Copy_Pages ) += (h->space_for/page_size) )
      MEMMETER( MVAL( M_Live_Pages ) -= (h->space_for / page_size))
      MEMMETER( MVAL( M_Live_Pages ) += (actual_sz / page_size))
      MEMMETER( MVAL( M_Wastage ) -= (h->space_for - h->size))
      MEMMETER( MVAL( M_Wastage ) += (actual_sz - sz) )

      /* Update block data structure.  */
      h->space_for = actual_sz;	/* New total space */
      h->size = sz;		/* New (requested) size */
      h->vm_addr = new_vm_addr;	/* New VM start address */
      h->aliased_address = ptr;	/* Change alias to reflect block relocation.  */
      *ptr = (POINTER) h->vm_addr;
      return *ptr;
    }
}
/* Initialize various things for memory allocation.  */

void
init_ralloc (void)
{
#ifdef MMAP_METERING
  int i;
#endif

  if (r_alloc_initialized > 1)
    return;			/* used to return 1 */

  if (++r_alloc_initialized == 1)
    return;			/* used to return 1 */

  Addr_Block_initialize();	/* Initialize the address picker, if required.  */
  page_size = PAGE;
  assert( page_size > 0 );	/* getpagesize() bogosity check.  */

#ifndef MAP_ANONYMOUS
  DEV_ZERO_FD = open( "/dev/zero", O_RDWR );
  if (DEV_ZERO_FD < 0)
    /* Failed.  Perhaps we should abort here?  */
    return;			/* used to return 0 */
#endif

#ifdef MMAP_METERING
  for (i = 0; i < N_Meterables; i++)
    {
      meter[i] = 0;
    }
#endif /* MMAP_METERING */
}
void
syms_of_ralloc (void)
{
#ifdef MMAP_METERING
  defsymbol (&Qmmap_times_mapped, "mmap-times-mapped");
  defsymbol (&Qmmap_pages_mapped, "mmap-pages-mapped");
  defsymbol (&Qmmap_times_unmapped, "mmap-times-unmapped");
  defsymbol (&Qmmap_times_remapped, "mmap-times-remapped");
  defsymbol (&Qmmap_didnt_copy, "mmap-didnt-copy");
  defsymbol (&Qmmap_pages_copied, "mmap-pages-copied");
  defsymbol (&Qmmap_average_bumpval, "mmap-average-bumpval");
  defsymbol (&Qmmap_wastage, "mmap-wastage");
  defsymbol (&Qmmap_live_pages, "mmap-live-pages");
  defsymbol (&Qmmap_addr_looked_up, "mmap-addr-looked-up");
  defsymbol (&Qmmap_hash_worked, "mmap-hash-worked");
  defsymbol (&Qmmap_addrlist_size, "mmap-addrlist-size");
  DEFSUBR (Fmmap_allocator_status);
#endif /* MMAP_METERING */
}
void
vars_of_ralloc (void)
{
  DEFVAR_INT ("mmap-hysteresis", &mmap_hysteresis /*
Extra room left at the end of an allocated arena,
so that a re-alloc requesting extra space smaller than this
does not actually cause a new arena to be allocated.

A negative value is considered equal to zero.  This is the
minimum amount of space guaranteed to be left at the end of
the arena.  Because allocation happens in multiples of the OS
page size, it is possible for more space to be left unused.
*/ );
  mmap_hysteresis = 0;
}

#endif /* HAVE_MMAP */