X-Git-Url: http://git.chise.org/gitweb/?a=blobdiff_plain;f=src%2Fralloc.c;h=17b297689b01801922497fe373343cbae4aa956d;hb=e66c951a306d25a51e5339df42cd7ee04deb47d8;hp=9cc4416fb6aa66c4cc0034ef3a16a58d130ee39e;hpb=ea1ea793fe6e244ef5555ed983423a204101af13;p=chise%2Fxemacs-chise.git.1

diff --git a/src/ralloc.c b/src/ralloc.c
index 9cc4416..17b2976 100644
--- a/src/ralloc.c
+++ b/src/ralloc.c
@@ -53,8 +53,6 @@ typedef unsigned char *POINTER;
 /* Unconditionally use unsigned char * for this. */
 typedef unsigned char *POINTER;
 
-typedef unsigned long SIZE;
-
 #ifdef DOUG_LEA_MALLOC
 #define M_TOP_PAD -2
 #include <malloc.h>
@@ -69,7 +67,6 @@ void refill_memory_reserve (void);
 
 #include <stddef.h>
 
-typedef size_t SIZE;
 typedef void *POINTER;
 
 #include <unistd.h>
@@ -79,7 +76,6 @@ typedef void *POINTER;
 #endif /* emacs. */
 
 void init_ralloc (void);
-#define safe_bcopy(x, y, z) memmove (y, x, z)
 
 #define NIL ((POINTER) 0)
 
@@ -107,7 +103,7 @@ static POINTER virtual_break_value;
 static POINTER break_value;
 
 /* This is the size of a page. We round memory requests to this boundary. */
-static int page_size;
+static size_t page_size;
 
 /* Whenever we get memory from the system, get this many extra bytes. This
    must be a multiple of page_size. */
@@ -185,7 +181,7 @@ typedef struct bp
   struct bp *prev;
   POINTER *variable;
   POINTER data;
-  SIZE size;
+  size_t size;
   POINTER new_data;		/* temporarily used for relocation */
   struct heap *heap;		/* Heap this bloc is in. */
 } *bloc_ptr;
@@ -245,10 +241,10 @@ find_heap (POINTER address)
    allocate the memory. */
 
 static POINTER
-obtain (POINTER address, SIZE size)
+obtain (POINTER address, size_t size)
 {
   heap_ptr heap;
-  SIZE already_available;
+  size_t already_available;
 
   /* Find the heap that ADDRESS falls within. */
   for (heap = last_heap; heap; heap = heap->prev)
@@ -258,7 +254,7 @@ obtain (POINTER address, size_t size)
     }
 
   if (! heap)
-    abort ();
+    ABORT ();
 
   /* If we can't fit SIZE bytes in that heap,
      try successive later heaps. */
@@ -275,7 +271,7 @@ obtain (POINTER address, size_t size)
   if (heap == NIL_HEAP)
     {
       POINTER new = (*real_morecore)(0);
-      SIZE get;
+      size_t get;
 
       already_available = (char *)last_heap->end - (char *)address;
 
@@ -325,7 +321,7 @@ obtain (POINTER address, size_t size)
    If we could not allocate the space, return zero. */
 
 static POINTER
-get_more_space (SIZE size)
+get_more_space (size_t size)
 {
   POINTER ptr = break_value;
   if (obtain (size))
@@ -364,7 +360,7 @@ relinquish (void)
       /* This heap should have no blocs in it. */
       if (last_heap->first_bloc != NIL_BLOC
 	  || last_heap->last_bloc != NIL_BLOC)
-	abort ();
+	ABORT ();
 
       /* Return the last heap, with its header, to the system. */
       excess = (char *)last_heap->end - (char *)last_heap->start;
@@ -379,7 +375,7 @@ relinquish (void)
 	}
 
       if ((*real_morecore) (- excess) == 0)
-	abort ();
+	ABORT ();
     }
 }
 
@@ -388,7 +384,7 @@ relinquish (void)
 
 long r_alloc_size_in_use (void);
 long
-r_alloc_size_in_use ()
+r_alloc_size_in_use (void)
 {
   return break_value - virtual_break_value;
 }
@@ -420,7 +416,7 @@ find_bloc (POINTER *ptr)
    memory for the new block. */
 
 static bloc_ptr
-get_bloc (SIZE size)
+get_bloc (size_t size)
 {
   register bloc_ptr new_bloc;
   register heap_ptr heap;
@@ -482,7 +478,7 @@ relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
 
   /* No need to ever call this if arena is frozen, bug somewhere! */
   if (r_alloc_freeze_level)
-    abort();
+    ABORT();
 
   while (b)
     {
@@ -501,7 +497,7 @@ relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
       if (heap == NIL_HEAP)
 	{
 	  register bloc_ptr tb = b;
-	  register SIZE s = 0;
+	  register size_t s = 0;
 
 	  /* Add up the size of all the following blocs. */
 	  while (tb != NIL_BLOC)
@@ -628,16 +624,16 @@ update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
    that come after BLOC in memory. */
 
 static int
-resize_bloc (bloc_ptr bloc, SIZE size)
+resize_bloc (bloc_ptr bloc, size_t size)
 {
   register bloc_ptr b;
   heap_ptr heap;
   POINTER address;
-  SIZE old_size;
+  size_t old_size;
 
   /* No need to ever call this if arena is frozen, bug somewhere! */
   if (r_alloc_freeze_level)
-    abort();
+    ABORT();
 
   if (bloc == NIL_BLOC || size == bloc->size)
     return 1;
@@ -649,7 +645,7 @@ resize_bloc (bloc_ptr bloc, size_t size)
     }
 
   if (heap == NIL_HEAP)
-    abort ();
+    ABORT ();
 
   old_size = bloc->size;
   bloc->size = size;
@@ -681,7 +677,7 @@ resize_bloc (bloc_ptr bloc, size_t size)
 	    }
 	  else
 	    {
-	      safe_bcopy (b->data, b->new_data, b->size);
+	      memmove (b->new_data, b->data, b->size);
 	      *b->variable = b->data = b->new_data;
 	    }
 	}
@@ -692,7 +688,7 @@ resize_bloc (bloc_ptr bloc, size_t size)
 	}
       else
 	{
-	  safe_bcopy (bloc->data, bloc->new_data, old_size);
+	  memmove (bloc->new_data, bloc->data, old_size);
 	  memset (bloc->new_data + old_size, 0, size - old_size);
 	  *bloc->variable = bloc->data = bloc->new_data;
 	}
@@ -708,7 +704,7 @@ resize_bloc (bloc_ptr bloc, size_t size)
 	    }
 	  else
 	    {
-	      safe_bcopy (b->data, b->new_data, b->size);
+	      memmove (b->new_data, b->data, b->size);
 	      *b->variable = b->data = b->new_data;
 	    }
 	}
@@ -813,7 +809,7 @@ r_alloc_sbrk (ptrdiff_t size)
 	 not always find a space which is contiguous to the previous. */
       POINTER new_bloc_start;
       heap_ptr h = first_heap;
-      SIZE get = ROUNDUP (size);
+      size_t get = ROUNDUP (size);
 
       address = (POINTER) ROUNDUP (virtual_break_value);
 
@@ -862,7 +858,7 @@ r_alloc_sbrk (ptrdiff_t size)
	 header. */
      for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	{
-	  safe_bcopy (b->data, b->new_data, b->size);
+	  memmove (b->new_data, b->data, b->size);
	  *b->variable = b->data = b->new_data;
	}
 
@@ -893,7 +889,7 @@ r_alloc_sbrk (ptrdiff_t size)
     }
   else /* size < 0 */
     {
-      SIZE excess = (char *)first_heap->bloc_start
+      EMACS_INT excess = (char *)first_heap->bloc_start
		    - ((char *)virtual_break_value + size);
 
       address = virtual_break_value;
@@ -908,7 +904,7 @@ r_alloc_sbrk (ptrdiff_t size)
 
	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
-	      safe_bcopy (b->data, b->new_data, b->size);
+	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
@@ -941,9 +937,9 @@ r_alloc_sbrk (ptrdiff_t size)
    If we can't allocate the necessary memory, set *PTR to zero, and
    return zero. */
 
-POINTER r_alloc (POINTER *ptr, SIZE size);
+POINTER r_alloc (POINTER *ptr, size_t size);
 POINTER
-r_alloc (POINTER *ptr, SIZE size)
+r_alloc (POINTER *ptr, size_t size)
 {
   bloc_ptr new_bloc;
 
@@ -976,7 +972,7 @@ r_alloc_free (POINTER *ptr)
 
   dead_bloc = find_bloc (ptr);
   if (dead_bloc == NIL_BLOC)
-    abort ();
+    ABORT ();
 
   free_bloc (dead_bloc);
   *ptr = 0;
@@ -1000,9 +996,9 @@ r_alloc_free (POINTER *ptr)
    If more memory cannot be allocated, then leave *PTR unchanged, and
    return zero. */
 
-POINTER r_re_alloc (POINTER *ptr, SIZE size);
+POINTER r_re_alloc (POINTER *ptr, size_t size);
 POINTER
-r_re_alloc (POINTER *ptr, SIZE size)
+r_re_alloc (POINTER *ptr, size_t size)
 {
   register bloc_ptr bloc;
 
@@ -1019,7 +1015,7 @@ r_re_alloc (POINTER *ptr, size_t size)
 
   bloc = find_bloc (ptr);
   if (bloc == NIL_BLOC)
-    abort ();
+    ABORT ();
 
   if (size < bloc->size)
     {
@@ -1089,10 +1085,10 @@ r_alloc_thaw (void)
     init_ralloc ();
 
   if (--r_alloc_freeze_level < 0)
-    abort ();
+    ABORT ();
 
   /* This frees all unused blocs. It is not too inefficient, as the resize
-     and bcopy is done only once. Afterwards, all unreferenced blocs are
+     and memmove is done only once. Afterwards, all unreferenced blocs are
      already shrunk to zero size. */
   if (!r_alloc_freeze_level)
     {
@@ -1133,7 +1129,7 @@ init_ralloc (void)
   first_heap->start = first_heap->bloc_start
     = virtual_break_value = break_value = (*real_morecore) (0);
   if (break_value == NIL)
-    abort ();
+    ABORT ();
 
   page_size = PAGE;
   extra_bytes = ROUNDUP (50000);
@@ -1237,7 +1233,7 @@ r_alloc_check (void)
     {
       assert (b->prev == pb);
       assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
-      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);
+      assert ((size_t) MEM_ROUNDUP (b->size) == b->size);
 
       ph = 0;
       for (h = first_heap; h; h = h->next)
@@ -1323,7 +1319,7 @@ r_alloc_check (void)
 #include 
 
 typedef void *VM_ADDR;		/* VM addresses */
-static CONST VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
+static const VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
 
 /* Configuration for relocating allocator. */
 
@@ -1442,7 +1438,7 @@ typedef struct alloc_dll
 
 static MMAP_HANDLE mmap_start = 0; /* Head of linked list */
 static size_t page_size = 0;	/* Size of VM pages */
-static int mmap_hysteresis;	/* Should be size_t, really. */
+static Fixnum mmap_hysteresis;	/* Logically a "size_t" */
 
 /* Get a new handle for a fresh block. */
 static MMAP_HANDLE
@@ -1697,10 +1693,10 @@ find_mmap_handle (POINTER *alias)
 static void Addr_Block_initialize(void);
 
 /* Get a suitable VM_ADDR via mmap */
-static VM_ADDR New_Addr_Block( SIZE sz );
+static VM_ADDR New_Addr_Block (size_t sz);
 
 /* Free a VM_ADDR allocated via New_Addr_Block */
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz );
+static void Free_Addr_Block (VM_ADDR addr, size_t sz);
 
 #ifdef MMAP_GENERATE_ADDRESSES
 /* Implementation of the three calls for address picking when XEmacs is incharge */
@@ -1711,7 +1707,7 @@ typedef enum { empty = 0, occupied, unavailable } addr_status;
 typedef struct addr_chain
 {
   POINTER addr;
-  SIZE sz;
+  size_t sz;
   addr_status flag;
   struct addr_chain *next;
 } ADDRESS_BLOCK, *ADDRESS_CHAIN;
@@ -1723,19 +1719,21 @@ static ADDRESS_CHAIN addr_chain = 0;
   WRT the addition/deletion of address blocks because of the assert
   in Coalesce() and the strict ordering of blocks by their address
   */
-static void Addr_Block_initialize()
+static void
+Addr_Block_initialize (void)
 {
   MEMMETER( MVAL( M_Addrlist_Size )++)
   addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ));
   addr_chain->next = 0;		/* Last block in chain */
   addr_chain->sz = 0x0c000000;	/* Size */
-  addr_chain->addr = (POINTER) (0x04000000 | DATA_SEG_BITS);
+  addr_chain->addr = (POINTER) (0x04000000);
   addr_chain->flag = empty;
 }
 
 /* Coalesce address blocks if they are contiguous. Only empty and
   unavailable slots are coalesced. */
-static void Coalesce_Addr_Blocks()
+static void
+Coalesce_Addr_Blocks (void)
 {
   ADDRESS_CHAIN p;
   for (p = addr_chain; p; p = p->next)
@@ -1761,7 +1759,8 @@ static void Coalesce_Addr_Blocks()
 }
 
 /* Get an empty address block of specified size. */
-static VM_ADDR New_Addr_Block( SIZE sz )
+static VM_ADDR
+New_Addr_Block (size_t sz)
 {
   ADDRESS_CHAIN p = addr_chain;
   VM_ADDR new_addr = VM_FAILURE_ADDR;
@@ -1798,20 +1797,21 @@ static VM_ADDR New_Addr_Block( SIZE sz )
 
 /* Free an address block. We mark the block as being empty, and attempt to
    do any coalescing that may have resulted from this. */
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
+static void
+Free_Addr_Block (VM_ADDR addr, size_t sz)
 {
   ADDRESS_CHAIN p = addr_chain;
 
   for (; p; p = p->next )
     {
      if (p->addr == addr)
	{
-	  if (p->sz != sz) abort(); /* ACK! Shouldn't happen at all. */
+	  if (p->sz != sz) ABORT(); /* ACK! Shouldn't happen at all. */
	  munmap( (VM_ADDR) p->addr, p->sz );
	  p->flag = empty;
	  break;
	}
    }
-  if (!p) abort();		/* Can't happen... we've got a block to free which is not in
+  if (!p) ABORT();		/* Can't happen... we've got a block to free which is not in
				   the address list. */
  Coalesce_Addr_Blocks();
 }
@@ -1819,18 +1819,21 @@ static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
 
 /* This is an alternate (simpler) implementation in cases where the
    address is picked by the kernel. */
 
-static void Addr_Block_initialize(void)
+static void
+Addr_Block_initialize (void)
 {
   /* Nothing. */
 }
 
-static VM_ADDR New_Addr_Block( SIZE sz )
+static VM_ADDR
+New_Addr_Block (size_t sz)
 {
   return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS, DEV_ZERO_FD, 0 );
 }
 
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
+static void
+Free_Addr_Block (VM_ADDR addr, size_t sz)
 {
   munmap ((caddr_t) addr, sz );
 }
@@ -1841,20 +1844,20 @@ static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */
 
 /*
- r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start
+ r_alloc (POINTER, SIZE): Allocate a relocatable area with the start
  address aliased to the first parameter.
 */
 
-POINTER r_alloc (POINTER *ptr, SIZE size);
+POINTER r_alloc (POINTER *ptr, size_t size);
 POINTER
-r_alloc (POINTER *ptr, SIZE size)
+r_alloc (POINTER *ptr, size_t size)
 {
   MMAP_HANDLE mh;
 
   switch(r_alloc_initialized)
    {
      case 0:
-      abort();
+      ABORT();
      case 1:
       *ptr = (POINTER) UNDERLYING_MALLOC(size);
       break;
@@ -1862,8 +1865,8 @@ r_alloc (POINTER *ptr, size_t size)
       mh = new_mmap_handle( size );
       if (mh)
	{
-	  SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
-	  SIZE mmapped_size = ROUNDUP( size + hysteresis );
+	  size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
+	  size_t mmapped_size = ROUNDUP( size + hysteresis );
	  MEMMETER( MVAL(M_Map)++ )
	  MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) )
	  MEMMETER( MVAL(M_Wastage) += mmapped_size - size )
@@ -1895,7 +1898,7 @@ r_alloc_free (POINTER *ptr)
 {
   switch( r_alloc_initialized) {
     case 0:
-      abort();
+      ABORT();
 
    case 1:
      UNDERLYING_FREE( *ptr );		/* Certain this is from the heap. */
@@ -1931,13 +1934,13 @@ r_alloc_free (POINTER *ptr)
   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero. */
 
-POINTER r_re_alloc (POINTER *ptr, SIZE sz);
+POINTER r_re_alloc (POINTER *ptr, size_t sz);
 POINTER
-r_re_alloc (POINTER *ptr, SIZE sz)
+r_re_alloc (POINTER *ptr, size_t sz)
 {
   if (r_alloc_initialized == 0)
     {
-      abort ();
+      ABORT ();
       return 0; /* suppress compiler warning */
     }
   else if (r_alloc_initialized == 1)
@@ -1949,8 +1952,8 @@ r_re_alloc (POINTER *ptr, size_t sz)
     }
   else
     {
-      SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
-      SIZE actual_sz = ROUNDUP( sz + hysteresis );
+      size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
+      size_t actual_sz = ROUNDUP( sz + hysteresis );
      MMAP_HANDLE h = find_mmap_handle( ptr );
      VM_ADDR new_vm_addr;
 
@@ -2019,6 +2022,10 @@ init_ralloc (void)
   if (r_alloc_initialized > 1)
     return;	/* used to return 1 */
 
+#ifdef PDUMP
+  /* Under pdump, we need to activate ralloc on the first go. */
+  ++r_alloc_initialized;
+#endif
   if (++r_alloc_initialized == 1)
     return;	/* used to return 1 */
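
Usage sketch. Both implementations touched by this patch export the same three entry points, and the hunks keep their contracts unchanged: r_alloc stores the bloc's address through the registered pointer variable and returns it (on failure it sets *PTR to zero and returns zero), r_re_alloc resizes and leaves *PTR unchanged on failure, and r_alloc_free releases the bloc and zeroes *PTR. Because relocation rewrites *b->variable behind the caller's back (see resize_bloc and r_alloc_sbrk above), a client must always reach the data through the registered variable, never through a saved copy. A minimal sketch of a client under those contracts; the names `example' and `text' are hypothetical, the void return type of r_alloc_free is assumed from the bodies shown, and init_ralloc is assumed to have run at startup (under PDUMP the patch makes the first init_ralloc call activate the allocator immediately):

    #include <stddef.h>

    /* The emacs-side POINTER type and the declarations as they appear
       in the patched file. */
    typedef unsigned char *POINTER;

    POINTER r_alloc (POINTER *ptr, size_t size);
    POINTER r_re_alloc (POINTER *ptr, size_t size);
    void r_alloc_free (POINTER *ptr);

    /* Variable registered with the allocator; it may be rewritten
       whenever blocs are relocated. */
    static POINTER text;

    static void
    example (void)
    {
      /* Allocate 4096 relocatable bytes; on failure *ptr is zeroed
         and zero is returned. */
      if (r_alloc (&text, 4096) == 0)
        return;

      /* Grow the bloc; on failure `text' is left unchanged and the
         original 4096 bytes remain valid. */
      if (r_re_alloc (&text, 8192) == 0)
        { /* still have the old allocation */ }

      /* Always dereference the registered variable directly. */
      text[0] = 'x';

      /* Frees the bloc and sets text = 0. */
      r_alloc_free (&text);
    }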