X-Git-Url: http://git.chise.org/gitweb/?a=blobdiff_plain;f=src%2Fralloc.c;h=17b297689b01801922497fe373343cbae4aa956d;hb=1a1aa5f685a384c2798f6e1dcb75132059acbd49;hp=4c97bfca2e0d8e0bedbd0606c9f6f4a7309e339a;hpb=669565bfdc5d704dfb1d5ac1a0ec01fb3615a1ae;p=chise%2Fxemacs-chise.git.1

diff --git a/src/ralloc.c b/src/ralloc.c
index 4c97bfc..17b2976 100644
--- a/src/ralloc.c
+++ b/src/ralloc.c
@@ -53,8 +53,6 @@ typedef unsigned char *POINTER;
 /* Unconditionally use unsigned char * for this. */
 typedef unsigned char *POINTER;
 
-typedef unsigned long SIZE;
-
 #ifdef DOUG_LEA_MALLOC
 #define M_TOP_PAD -2
 #include <malloc.h>
@@ -69,7 +67,6 @@ void refill_memory_reserve (void);
 
 #include <stddef.h>
 
-typedef size_t SIZE;
 typedef void *POINTER;
 
 #include <unistd.h>
@@ -79,7 +76,6 @@ typedef void *POINTER;
 
 #endif /* emacs. */
 
 void init_ralloc (void);
-#define safe_bcopy(x, y, z) memmove (y, x, z)
 
 #define NIL ((POINTER) 0)
@@ -98,7 +94,7 @@ static int r_alloc_initialized = 0;
 /* Declarations for working with the malloc, ralloc, and system breaks. */
 
 /* Function to set the real break value. */
-static POINTER (*real_morecore) (long size);
+static POINTER (*real_morecore) (ptrdiff_t size);
 
 /* The break value, as seen by malloc (). */
 static POINTER virtual_break_value;
@@ -107,7 +103,7 @@ static POINTER virtual_break_value;
 static POINTER break_value;
 
 /* This is the size of a page. We round memory requests to this boundary. */
-static int page_size;
+static size_t page_size;
 
 /* Whenever we get memory from the system, get this many extra bytes. This
    must be a multiple of page_size. */
@@ -185,7 +181,7 @@ typedef struct bp
   struct bp *prev;
   POINTER *variable;
   POINTER data;
-  SIZE size;
+  size_t size;
   POINTER new_data;		/* temporarily used for relocation */
   struct heap *heap;		/* Heap this bloc is in. */
 } *bloc_ptr;
@@ -245,10 +241,10 @@ find_heap (POINTER address)
    allocate the memory. */
 
 static POINTER
-obtain (POINTER address, SIZE size)
+obtain (POINTER address, size_t size)
 {
   heap_ptr heap;
-  SIZE already_available;
+  size_t already_available;
 
   /* Find the heap that ADDRESS falls within. */
   for (heap = last_heap; heap; heap = heap->prev)
@@ -258,7 +254,7 @@ obtain (POINTER address, SIZE size)
     }
 
   if (! heap)
-    abort ();
+    ABORT ();
 
   /* If we can't fit SIZE bytes in that heap,
      try successive later heaps. */
@@ -275,7 +271,7 @@ obtain (POINTER address, SIZE size)
   if (heap == NIL_HEAP)
     {
       POINTER new = (*real_morecore)(0);
-      SIZE get;
+      size_t get;
 
       already_available = (char *)last_heap->end - (char *)address;
 
@@ -325,7 +321,7 @@ obtain (POINTER address, SIZE size)
    If we could not allocate the space, return zero. */
 
 static POINTER
-get_more_space (SIZE size)
+get_more_space (size_t size)
 {
   POINTER ptr = break_value;
   if (obtain (size))
@@ -339,7 +335,7 @@ get_more_space (SIZE size)
    If SIZE is more than a page, return the space to the system. */
 
 static void
-relinquish ()
+relinquish (void)
 {
   register heap_ptr h;
   int excess = 0;
@@ -364,7 +360,7 @@ relinquish ()
 	  /* This heap should have no blocs in it. */
 	  if (last_heap->first_bloc != NIL_BLOC
 	      || last_heap->last_bloc != NIL_BLOC)
-	    abort ();
+	    ABORT ();
 
 	  /* Return the last heap, with its header, to the system. */
 	  excess = (char *)last_heap->end - (char *)last_heap->start;
@@ -379,7 +375,7 @@ relinquish ()
 	}
 
       if ((*real_morecore) (- excess) == 0)
-	abort ();
+	ABORT ();
     }
 }
 
@@ -388,7 +384,7 @@ relinquish ()
 
 long r_alloc_size_in_use (void);
 long
-r_alloc_size_in_use ()
+r_alloc_size_in_use (void)
 {
   return break_value - virtual_break_value;
 }
@@ -420,7 +416,7 @@ find_bloc (POINTER *ptr)
    memory for the new block. */
 
 static bloc_ptr
-get_bloc (SIZE size)
+get_bloc (size_t size)
 {
   register bloc_ptr new_bloc;
   register heap_ptr heap;
@@ -482,7 +478,7 @@ relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
 
   /* No need to ever call this if arena is frozen, bug somewhere! */
   if (r_alloc_freeze_level)
-    abort();
+    ABORT();
 
   while (b)
     {
@@ -501,7 +497,7 @@ relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
 	  if (heap == NIL_HEAP)
 	    {
 	      register bloc_ptr tb = b;
-	      register SIZE s = 0;
+	      register size_t s = 0;
 
 	      /* Add up the size of all the following blocs. */
 	      while (tb != NIL_BLOC)
@@ -628,16 +624,16 @@ update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
    that come after BLOC in memory. */
 
 static int
-resize_bloc (bloc_ptr bloc, SIZE size)
+resize_bloc (bloc_ptr bloc, size_t size)
 {
   register bloc_ptr b;
   heap_ptr heap;
   POINTER address;
-  SIZE old_size;
+  size_t old_size;
 
   /* No need to ever call this if arena is frozen, bug somewhere! */
   if (r_alloc_freeze_level)
-    abort();
+    ABORT();
 
   if (bloc == NIL_BLOC || size == bloc->size)
     return 1;
@@ -649,7 +645,7 @@ resize_bloc (bloc_ptr bloc, SIZE size)
     }
 
   if (heap == NIL_HEAP)
-    abort ();
+    ABORT ();
 
   old_size = bloc->size;
   bloc->size = size;
@@ -681,7 +677,7 @@ resize_bloc (bloc_ptr bloc, SIZE size)
 	    }
 	  else
 	    {
-	      safe_bcopy (b->data, b->new_data, b->size);
+	      memmove (b->new_data, b->data, b->size);
 	      *b->variable = b->data = b->new_data;
 	    }
 	}
@@ -692,7 +688,7 @@ resize_bloc (bloc_ptr bloc, SIZE size)
 	}
       else
 	{
-	  safe_bcopy (bloc->data, bloc->new_data, old_size);
+	  memmove (bloc->new_data, bloc->data, old_size);
 	  memset (bloc->new_data + old_size, 0, size - old_size);
 	  *bloc->variable = bloc->data = bloc->new_data;
 	}
@@ -708,7 +704,7 @@ resize_bloc (bloc_ptr bloc, SIZE size)
 	    }
 	  else
 	    {
-	      safe_bcopy (b->data, b->new_data, b->size);
+	      memmove (b->new_data, b->data, b->size);
 	      *b->variable = b->data = b->new_data;
 	    }
 	}
@@ -790,9 +786,9 @@ free_bloc (bloc_ptr bloc)
    __morecore hook values - in particular, __default_morecore in the
    GNU malloc package. */
 
-POINTER r_alloc_sbrk (long size);
+POINTER r_alloc_sbrk (ptrdiff_t size);
 POINTER
-r_alloc_sbrk (long size)
+r_alloc_sbrk (ptrdiff_t size)
 {
   register bloc_ptr b;
   POINTER address;
@@ -813,7 +809,7 @@ r_alloc_sbrk (long size)
 	 not always find a space which is contiguous to the previous. */
       POINTER new_bloc_start;
       heap_ptr h = first_heap;
-      SIZE get = ROUNDUP (size);
+      size_t get = ROUNDUP (size);
 
       address = (POINTER) ROUNDUP (virtual_break_value);
 
@@ -862,7 +858,7 @@ r_alloc_sbrk (long size)
	 header. */
       for (b = last_bloc; b != NIL_BLOC; b = b->prev)
 	{
-	  safe_bcopy (b->data, b->new_data, b->size);
+	  memmove (b->new_data, b->data, b->size);
 	  *b->variable = b->data = b->new_data;
 	}
 
@@ -893,7 +889,7 @@ r_alloc_sbrk (long size)
     }
   else /* size < 0 */
     {
-      SIZE excess = (char *)first_heap->bloc_start
+      EMACS_INT excess = (char *)first_heap->bloc_start
 		      - ((char *)virtual_break_value + size);
 
       address = virtual_break_value;
@@ -908,7 +904,7 @@ r_alloc_sbrk (long size)
 
 	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
 	    {
-	      safe_bcopy (b->data, b->new_data, b->size);
+	      memmove (b->new_data, b->data, b->size);
 	      *b->variable = b->data = b->new_data;
 	    }
 	}
@@ -941,9 +937,9 @@ r_alloc_sbrk (long size)
    If we can't allocate the necessary memory, set *PTR to zero, and
   return zero. */
 
-POINTER r_alloc (POINTER *ptr, SIZE size);
+POINTER r_alloc (POINTER *ptr, size_t size);
 POINTER
-r_alloc (POINTER *ptr, SIZE size)
+r_alloc (POINTER *ptr, size_t size)
 {
   bloc_ptr new_bloc;
 
@@ -976,7 +972,7 @@ r_alloc_free (POINTER *ptr)
 
   dead_bloc = find_bloc (ptr);
   if (dead_bloc == NIL_BLOC)
-    abort ();
+    ABORT ();
 
   free_bloc (dead_bloc);
   *ptr = 0;
@@ -1000,9 +996,9 @@ r_alloc_free (POINTER *ptr)
    If more memory cannot be allocated, then leave *PTR unchanged, and
    return zero. */
 
-POINTER r_re_alloc (POINTER *ptr, SIZE size);
+POINTER r_re_alloc (POINTER *ptr, size_t size);
 POINTER
-r_re_alloc (POINTER *ptr, SIZE size)
+r_re_alloc (POINTER *ptr, size_t size)
 {
   register bloc_ptr bloc;
 
@@ -1019,7 +1015,7 @@ r_re_alloc (POINTER *ptr, SIZE size)
 
   bloc = find_bloc (ptr);
   if (bloc == NIL_BLOC)
-    abort ();
+    ABORT ();
 
   if (size < bloc->size)
     {
@@ -1082,17 +1078,17 @@ r_alloc_freeze (long size)
 
 void r_alloc_thaw (void);
 void
-r_alloc_thaw ()
+r_alloc_thaw (void)
 {
 
   if (! r_alloc_initialized)
     init_ralloc ();
 
   if (--r_alloc_freeze_level < 0)
-    abort ();
+    ABORT ();
 
   /* This frees all unused blocs. It is not too inefficient, as the resize
-     and bcopy is done only once. Afterwards, all unreferenced blocs are
+     and memmove is done only once. Afterwards, all unreferenced blocs are
      already shrunk to zero size. */
   if (!r_alloc_freeze_level)
     {
@@ -1109,14 +1105,11 @@ r_alloc_thaw ()
 /* The hook `malloc' uses for the function which gets more space
    from the system. */
 #ifndef DOUG_LEA_MALLOC
-extern POINTER (*__morecore) (long size);
+extern POINTER (*__morecore) (ptrdiff_t size);
 #endif
 
 /* Initialize various things for memory allocation. */
 
-#define SET_FUN_PTR(fun_ptr, fun_val) \
-  (*((void **) (&fun_ptr)) = ((void *) (fun_val)))
-
 void
 init_ralloc (void)
 {
@@ -1124,15 +1117,19 @@ init_ralloc (void)
     return;
 
   r_alloc_initialized = 1;
-  SET_FUN_PTR (real_morecore, __morecore);
-  SET_FUN_PTR (__morecore, r_alloc_sbrk);
+  real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
+  __morecore =
+#ifdef __GNUC__
+    (__typeof__ (__morecore))
+#endif
+    r_alloc_sbrk;
 
   first_heap = last_heap = &heap_base;
   first_heap->next = first_heap->prev = NIL_HEAP;
   first_heap->start = first_heap->bloc_start
     = virtual_break_value = break_value = (*real_morecore) (0);
   if (break_value == NIL)
-    abort ();
+    ABORT ();
 
   page_size = PAGE;
   extra_bytes = ROUNDUP (50000);
@@ -1172,21 +1169,25 @@ init_ralloc (void)
    Emacs. This is needed when using Doug Lea's malloc from GNU libc. */
 void r_alloc_reinit (void);
 void
-r_alloc_reinit ()
+r_alloc_reinit (void)
 {
   /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically. */
-  if ( ((void*) __morecore) != (void *) (r_alloc_sbrk))
+  if ( (POINTER (*) (ptrdiff_t)) __morecore != r_alloc_sbrk)
     {
-      SET_FUN_PTR (real_morecore, __morecore);
-      SET_FUN_PTR (__morecore, r_alloc_sbrk);
+      real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
+      __morecore =
+#ifdef __GNUC__
+	(__typeof__ (__morecore))
+#endif
+	r_alloc_sbrk;
     }
 }
 
 #if 0
 #ifdef DEBUG
 void
-r_alloc_check ()
+r_alloc_check (void)
 {
   int found = 0;
   heap_ptr h, ph = 0;
@@ -1232,7 +1233,7 @@ r_alloc_check ()
     {
       assert (b->prev == pb);
       assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
-      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);
+      assert ((size_t) MEM_ROUNDUP (b->size) == b->size);
 
       ph = 0;
       for (h = first_heap; h; h = h->next)
@@ -1318,7 +1319,7 @@ r_alloc_check ()
 
 #include <sys/mman.h>
 
 typedef void *VM_ADDR;	/* VM addresses */
-static CONST VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
+static const VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
 
 /* Configuration for relocating allocator. */
@@ -1409,12 +1410,12 @@ static int r_alloc_initialized = 0;
 
 static int DEV_ZERO_FD = -1;
 
-/* We actually need a datastructure that can be usefully structured
+/* We actually need a data structure that can be usefully structured
    based on the VM address, and allows an ~O(1) lookup on an arbitrary
-   address, ie a hash-table. Maybe the XEmacs hash table can be
-   coaxed enough. At the moment, we use lookup on a hash-table to
+   address, i.e. a hash table. Maybe the XEmacs hash table can be
+   coaxed enough. At the moment, we use lookup on a hash table to
    decide whether to do an O(n) search on the malloced block list.
-   Addresses are hashed to a bucket modulo MHASH_PRIME */
+   Addresses are hashed to a bucket modulo MHASH_PRIME. */
 
 
 /* We settle for a standard doubly-linked-list. The dynarr type isn't
@@ -1437,7 +1438,7 @@ typedef struct alloc_dll
 
 static MMAP_HANDLE mmap_start = 0; /* Head of linked list */
 static size_t page_size = 0;	   /* Size of VM pages */
-static int mmap_hysteresis;	   /* Should be size_t, really. */
+static Fixnum mmap_hysteresis;	   /* Logically a "size_t" */
 
 /* Get a new handle for a fresh block. */
 static MMAP_HANDLE
@@ -1692,10 +1693,10 @@ find_mmap_handle (POINTER *alias)
 static void Addr_Block_initialize(void);
 
 /* Get a suitable VM_ADDR via mmap */
-static VM_ADDR New_Addr_Block( SIZE sz );
+static VM_ADDR New_Addr_Block (size_t sz);
 
 /* Free a VM_ADDR allocated via New_Addr_Block */
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz );
+static void Free_Addr_Block (VM_ADDR addr, size_t sz);
 
 #ifdef MMAP_GENERATE_ADDRESSES
 /* Implementation of the three calls for address picking when XEmacs is incharge */
@@ -1706,7 +1707,7 @@ typedef enum { empty = 0, occupied, unavailable } addr_status;
 typedef struct addr_chain
 {
   POINTER addr;
-  SIZE sz;
+  size_t sz;
   addr_status flag;
   struct addr_chain *next;
 } ADDRESS_BLOCK, *ADDRESS_CHAIN;
@@ -1718,19 +1719,21 @@ static ADDRESS_CHAIN addr_chain = 0;
    WRT the addition/deletion of address blocks because of the assert
    in Coalesce() and the strict ordering of blocks by their address
    */
-static void Addr_Block_initialize()
+static void
+Addr_Block_initialize (void)
 {
   MEMMETER( MVAL( M_Addrlist_Size )++)
   addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ));
   addr_chain->next = 0;		/* Last block in chain */
   addr_chain->sz = 0x0c000000;	/* Size */
-  addr_chain->addr = (POINTER) (0x04000000 | DATA_SEG_BITS);
+  addr_chain->addr = (POINTER) (0x04000000);
   addr_chain->flag = empty;
 }
 
 /* Coalesce address blocks if they are contiguous. Only empty and
    unavailable slots are coalesced. */
-static void Coalesce_Addr_Blocks()
+static void
+Coalesce_Addr_Blocks (void)
 {
   ADDRESS_CHAIN p;
   for (p = addr_chain; p; p = p->next)
@@ -1756,7 +1759,8 @@ static void Coalesce_Addr_Blocks()
 }
 
 /* Get an empty address block of specified size. */
-static VM_ADDR New_Addr_Block( SIZE sz )
+static VM_ADDR
+New_Addr_Block (size_t sz)
 {
   ADDRESS_CHAIN p = addr_chain;
   VM_ADDR new_addr = VM_FAILURE_ADDR;
@@ -1793,20 +1797,21 @@ static VM_ADDR New_Addr_Block( SIZE sz )
 
 /* Free an address block. We mark the block as being empty, and attempt to
    do any coalescing that may have resulted from this. */
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
+static void
+Free_Addr_Block (VM_ADDR addr, size_t sz)
 {
   ADDRESS_CHAIN p = addr_chain;
   for (; p; p = p->next )
     {
      if (p->addr == addr)
	{
-	  if (p->sz != sz) abort(); /* ACK! Shouldn't happen at all. */
+	  if (p->sz != sz) ABORT(); /* ACK! Shouldn't happen at all. */
	  munmap( (VM_ADDR) p->addr, p->sz );
	  p->flag = empty;
	  break;
	}
     }
-  if (!p) abort(); /* Can't happen... we've got a block to free which is not in
+  if (!p) ABORT(); /* Can't happen... we've got a block to free which is not in
		      the address list. */
  Coalesce_Addr_Blocks();
}
@@ -1814,18 +1819,21 @@ static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
 /* This is an alternate (simpler) implementation in cases where the
    address is picked by the kernel. */
 
-static void Addr_Block_initialize(void)
+static void
+Addr_Block_initialize (void)
 {
   /* Nothing. */
 }
 
-static VM_ADDR New_Addr_Block( SIZE sz )
+static VM_ADDR
+New_Addr_Block (size_t sz)
 {
   return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS,
	       DEV_ZERO_FD, 0 );
 }
 
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
+static void
+Free_Addr_Block (VM_ADDR addr, size_t sz)
 {
   munmap ((caddr_t) addr, sz );
 }
@@ -1836,20 +1844,20 @@ static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */
 
 /*
- r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start
+ r_alloc (POINTER, SIZE): Allocate a relocatable area with the start
  address aliased to the first parameter.
 */
 
-POINTER r_alloc (POINTER *ptr, SIZE size);
+POINTER r_alloc (POINTER *ptr, size_t size);
 POINTER
-r_alloc (POINTER *ptr, SIZE size)
+r_alloc (POINTER *ptr, size_t size)
 {
   MMAP_HANDLE mh;
 
   switch(r_alloc_initialized)
     {
     case 0:
-      abort();
+      ABORT();
     case 1:
       *ptr = (POINTER) UNDERLYING_MALLOC(size);
       break;
@@ -1857,8 +1865,8 @@ r_alloc (POINTER *ptr, SIZE size)
       mh = new_mmap_handle( size );
       if (mh)
	{
-	  SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
-	  SIZE mmapped_size = ROUNDUP( size + hysteresis );
+	  size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
+	  size_t mmapped_size = ROUNDUP( size + hysteresis );
	  MEMMETER( MVAL(M_Map)++ )
	  MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) )
	  MEMMETER( MVAL(M_Wastage) += mmapped_size - size )
@@ -1890,7 +1898,7 @@ r_alloc_free (POINTER *ptr)
 {
   switch( r_alloc_initialized)
     {
     case 0:
-      abort();
+      ABORT();
     case 1:
       UNDERLYING_FREE( *ptr );		/* Certain this is from the heap. */
@@ -1926,13 +1934,13 @@ r_alloc_free (POINTER *ptr)
    If more memory cannot be allocated, then leave *PTR unchanged, and
    return zero. */
 
-POINTER r_re_alloc (POINTER *ptr, SIZE sz);
+POINTER r_re_alloc (POINTER *ptr, size_t sz);
 POINTER
-r_re_alloc (POINTER *ptr, SIZE sz)
+r_re_alloc (POINTER *ptr, size_t sz)
 {
   if (r_alloc_initialized == 0)
     {
-      abort ();
+      ABORT ();
       return 0; /* suppress compiler warning */
     }
   else if (r_alloc_initialized == 1)
@@ -1944,8 +1952,8 @@ r_re_alloc (POINTER *ptr, SIZE sz)
     }
   else
     {
-      SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
-      SIZE actual_sz = ROUNDUP( sz + hysteresis );
+      size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
+      size_t actual_sz = ROUNDUP( sz + hysteresis );
       MMAP_HANDLE h = find_mmap_handle( ptr );
       VM_ADDR new_vm_addr;
 
@@ -2014,6 +2022,10 @@ init_ralloc (void)
   if (r_alloc_initialized > 1)
     return;	/* used to return 1 */
 
+#ifdef PDUMP
+  /* Under pdump, we need to activate ralloc on the first go. */
+  ++r_alloc_initialized;
+#endif
   if (++r_alloc_initialized == 1)
     return;	/* used to return 1 */