X-Git-Url: http://git.chise.org/gitweb/?a=blobdiff_plain;f=src%2Fralloc.c;h=ea8f8f883ce5be483a9741ec4e597f100dd0fa7b;hb=1f0bb0efca5a3e69e0477db69b4b6f13fdaddc60;hp=9cc4416fb6aa66c4cc0034ef3a16a58d130ee39e;hpb=376658ea71d16dced8acff36c3e385ac3738d868;p=chise%2Fxemacs-chise.git

diff --git a/src/ralloc.c b/src/ralloc.c
index 9cc4416..ea8f8f8 100644
--- a/src/ralloc.c
+++ b/src/ralloc.c
@@ -53,8 +53,6 @@ typedef unsigned char *POINTER;
 /* Unconditionally use unsigned char * for this. */
 typedef unsigned char *POINTER;
 
-typedef unsigned long SIZE;
-
 #ifdef DOUG_LEA_MALLOC
 #define M_TOP_PAD -2
 #include
@@ -69,7 +67,6 @@ void refill_memory_reserve (void);
 
 #include
 
-typedef size_t SIZE;
 typedef void *POINTER;
 
 #include
@@ -79,7 +76,6 @@ typedef void *POINTER;
 #endif /* emacs. */
 
 void init_ralloc (void);
-#define safe_bcopy(x, y, z) memmove (y, x, z)
 
 #define NIL ((POINTER) 0)
 
@@ -185,7 +181,7 @@ typedef struct bp
   struct bp *prev;
   POINTER *variable;
   POINTER data;
-  SIZE size;
+  size_t size;
   POINTER new_data;		/* temporarily used for relocation */
   struct heap *heap;		/* Heap this bloc is in.  */
 } *bloc_ptr;
@@ -245,10 +241,10 @@ find_heap (POINTER address)
    allocate the memory.  */
 
 static POINTER
-obtain (POINTER address, SIZE size)
+obtain (POINTER address, size_t size)
 {
   heap_ptr heap;
-  SIZE already_available;
+  size_t already_available;
 
   /* Find the heap that ADDRESS falls within.  */
   for (heap = last_heap; heap; heap = heap->prev)
@@ -275,7 +271,7 @@ obtain (POINTER address, SIZE size)
   if (heap == NIL_HEAP)
     {
       POINTER new = (*real_morecore)(0);
-      SIZE get;
+      size_t get;
 
       already_available = (char *)last_heap->end - (char *)address;
 
@@ -325,7 +321,7 @@ obtain (POINTER address, SIZE size)
    If we could not allocate the space, return zero. */
 
 static POINTER
-get_more_space (SIZE size)
+get_more_space (size_t size)
 {
   POINTER ptr = break_value;
   if (obtain (size))
@@ -388,7 +384,7 @@ relinquish (void)
 
 long r_alloc_size_in_use (void);
 long
-r_alloc_size_in_use ()
+r_alloc_size_in_use (void)
 {
   return break_value - virtual_break_value;
 }
@@ -420,7 +416,7 @@ find_bloc (POINTER *ptr)
    memory for the new block.  */
 
 static bloc_ptr
-get_bloc (SIZE size)
+get_bloc (size_t size)
 {
   register bloc_ptr new_bloc;
   register heap_ptr heap;
@@ -501,7 +497,7 @@ relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
       if (heap == NIL_HEAP)
 	{
 	  register bloc_ptr tb = b;
-	  register SIZE s = 0;
+	  register size_t s = 0;
 
 	  /* Add up the size of all the following blocs.  */
 	  while (tb != NIL_BLOC)
@@ -628,12 +624,12 @@ update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
    that come after BLOC in memory.  */
 
 static int
-resize_bloc (bloc_ptr bloc, SIZE size)
+resize_bloc (bloc_ptr bloc, size_t size)
 {
   register bloc_ptr b;
   heap_ptr heap;
   POINTER address;
-  SIZE old_size;
+  size_t old_size;
 
   /* No need to ever call this if arena is frozen, bug somewhere! */
   if (r_alloc_freeze_level)
@@ -681,7 +677,7 @@ resize_bloc (bloc_ptr bloc, size_t size)
 	    }
 	  else
 	    {
-	      safe_bcopy (b->data, b->new_data, b->size);
+	      memmove (b->new_data, b->data, b->size);
 	      *b->variable = b->data = b->new_data;
 	    }
 	}
@@ -692,7 +688,7 @@ resize_bloc (bloc_ptr bloc, size_t size)
 	}
       else
 	{
-	  safe_bcopy (bloc->data, bloc->new_data, old_size);
+	  memmove (bloc->new_data, bloc->data, old_size);
 	  memset (bloc->new_data + old_size, 0, size - old_size);
 	  *bloc->variable = bloc->data = bloc->new_data;
 	}
@@ -708,7 +704,7 @@ resize_bloc (bloc_ptr bloc, size_t size)
 	}
       else
 	{
-	  safe_bcopy (b->data, b->new_data, b->size);
+	  memmove (b->new_data, b->data, b->size);
 	  *b->variable = b->data = b->new_data;
 	}
     }
@@ -813,7 +809,7 @@ r_alloc_sbrk (ptrdiff_t size)
	 not always find a space which is contiguous to the previous.  */
       POINTER new_bloc_start;
       heap_ptr h = first_heap;
-      SIZE get = ROUNDUP (size);
+      size_t get = ROUNDUP (size);
 
       address = (POINTER) ROUNDUP (virtual_break_value);
 
@@ -862,7 +858,7 @@ r_alloc_sbrk (ptrdiff_t size)
	 header.  */
       for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	{
-	  safe_bcopy (b->data, b->new_data, b->size);
+	  memmove (b->new_data, b->data, b->size);
	  *b->variable = b->data = b->new_data;
	}
 
@@ -893,7 +889,7 @@ r_alloc_sbrk (ptrdiff_t size)
     }
   else /* size < 0 */
     {
-      SIZE excess = (char *)first_heap->bloc_start
+      size_t excess = (char *)first_heap->bloc_start
		      - ((char *)virtual_break_value + size);
 
       address = virtual_break_value;
@@ -908,7 +904,7 @@ r_alloc_sbrk (ptrdiff_t size)
 
	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
-	      safe_bcopy (b->data, b->new_data, b->size);
+	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
@@ -941,9 +937,9 @@ r_alloc_sbrk (ptrdiff_t size)
    If we can't allocate the necessary memory, set *PTR to zero, and
    return zero.  */
 
-POINTER r_alloc (POINTER *ptr, SIZE size);
+POINTER r_alloc (POINTER *ptr, size_t size);
 POINTER
-r_alloc (POINTER *ptr, SIZE size)
+r_alloc (POINTER *ptr, size_t size)
 {
   bloc_ptr new_bloc;
 
@@ -1000,9 +996,9 @@ r_alloc_free (POINTER *ptr)
    If more memory cannot be allocated, then leave *PTR unchanged, and
    return zero.  */
 
-POINTER r_re_alloc (POINTER *ptr, SIZE size);
+POINTER r_re_alloc (POINTER *ptr, size_t size);
 POINTER
-r_re_alloc (POINTER *ptr, SIZE size)
+r_re_alloc (POINTER *ptr, size_t size)
 {
   register bloc_ptr bloc;
 
@@ -1092,7 +1088,7 @@ r_alloc_thaw (void)
     abort ();
 
   /* This frees all unused blocs. It is not too inefficient, as the resize
-     and bcopy is done only once. Afterwards, all unreferenced blocs are
+     and memmove is done only once. Afterwards, all unreferenced blocs are
      already shrunk to zero size. */
   if (!r_alloc_freeze_level)
     {
@@ -1237,7 +1233,7 @@ r_alloc_check (void)
     {
       assert (b->prev == pb);
       assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
-      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);
+      assert ((size_t) MEM_ROUNDUP (b->size) == b->size);
 
       ph = 0;
       for (h = first_heap; h; h = h->next)
@@ -1323,7 +1319,7 @@ r_alloc_check (void)
 #include
 
 typedef void *VM_ADDR;		/* VM addresses */
-static CONST VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
+static const VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
 
 /* Configuration for relocating allocator. */
 
@@ -1697,10 +1693,10 @@ find_mmap_handle (POINTER *alias)
 static void Addr_Block_initialize(void);
 
 /* Get a suitable VM_ADDR via mmap */
-static VM_ADDR New_Addr_Block( SIZE sz );
+static VM_ADDR New_Addr_Block (size_t sz);
 
 /* Free a VM_ADDR allocated via New_Addr_Block */
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz );
+static void Free_Addr_Block (VM_ADDR addr, size_t sz);
 
 #ifdef MMAP_GENERATE_ADDRESSES
 /* Implementation of the three calls for address picking when XEmacs is incharge */
@@ -1711,7 +1707,7 @@ typedef enum { empty = 0, occupied, unavailable } addr_status;
 typedef struct addr_chain
 {
   POINTER addr;
-  SIZE sz;
+  size_t sz;
   addr_status flag;
   struct addr_chain *next;
 } ADDRESS_BLOCK, *ADDRESS_CHAIN;
@@ -1723,19 +1719,21 @@ static ADDRESS_CHAIN addr_chain = 0;
    WRT the addition/deletion of address blocks because of the assert
   in Coalesce() and the strict ordering of blocks by their address
   */
-static void Addr_Block_initialize()
+static void
+Addr_Block_initialize (void)
 {
   MEMMETER( MVAL( M_Addrlist_Size )++)
   addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ));
   addr_chain->next = 0;		/* Last block in chain */
   addr_chain->sz = 0x0c000000;	/* Size */
-  addr_chain->addr = (POINTER) (0x04000000 | DATA_SEG_BITS);
+  addr_chain->addr = (POINTER) (0x04000000);
   addr_chain->flag = empty;
 }
 
 /* Coalesce address blocks if they are contiguous.  Only empty and
    unavailable slots are coalesced.  */
-static void Coalesce_Addr_Blocks()
+static void
+Coalesce_Addr_Blocks (void)
 {
   ADDRESS_CHAIN p;
   for (p = addr_chain; p; p = p->next)
@@ -1761,7 +1759,8 @@ static void Coalesce_Addr_Blocks()
 }
 
 /* Get an empty address block of specified size. */
-static VM_ADDR New_Addr_Block( SIZE sz )
+static VM_ADDR
+New_Addr_Block (size_t sz)
 {
   ADDRESS_CHAIN p = addr_chain;
   VM_ADDR new_addr = VM_FAILURE_ADDR;
@@ -1798,7 +1797,8 @@ static VM_ADDR New_Addr_Block( SIZE sz )
 
 /* Free an address block.  We mark the block as being empty, and attempt to
    do any coalescing that may have resulted from this. */
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
+static void
+Free_Addr_Block (VM_ADDR addr, size_t sz)
 {
   ADDRESS_CHAIN p = addr_chain;
   for (; p; p = p->next )
@@ -1819,18 +1819,21 @@ static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
 /* This is an alternate (simpler) implementation in cases where the
    address is picked by the kernel. */
 
-static void Addr_Block_initialize(void)
+static void
+Addr_Block_initialize (void)
 {
   /* Nothing. */
 }
 
-static VM_ADDR New_Addr_Block( SIZE sz )
+static VM_ADDR
+New_Addr_Block (size_t sz)
 {
   return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS,
	       DEV_ZERO_FD, 0 );
 }
 
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
+static void
+Free_Addr_Block (VM_ADDR addr, size_t sz)
 {
   munmap ((caddr_t) addr, sz );
 }
@@ -1841,13 +1844,13 @@ static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
 /* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */
 
 /*
- r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start
+ r_alloc (POINTER, SIZE): Allocate a relocatable area with the start
   address aliased to the first parameter.
 */
 
-POINTER r_alloc (POINTER *ptr, SIZE size);
+POINTER r_alloc (POINTER *ptr, size_t size);
 POINTER
-r_alloc (POINTER *ptr, SIZE size)
+r_alloc (POINTER *ptr, size_t size)
 {
   MMAP_HANDLE mh;
 
@@ -1862,8 +1865,8 @@ r_alloc (POINTER *ptr, SIZE size)
       mh = new_mmap_handle( size );
       if (mh)
	{
-	  SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
-	  SIZE mmapped_size = ROUNDUP( size + hysteresis );
+	  size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
+	  size_t mmapped_size = ROUNDUP( size + hysteresis );
	  MEMMETER( MVAL(M_Map)++ )
	  MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) )
	  MEMMETER( MVAL(M_Wastage) += mmapped_size - size )
@@ -1931,9 +1934,9 @@ r_alloc_free (POINTER *ptr)
    If more memory cannot be allocated, then leave *PTR unchanged, and
    return zero.  */
 
-POINTER r_re_alloc (POINTER *ptr, SIZE sz);
+POINTER r_re_alloc (POINTER *ptr, size_t sz);
 POINTER
-r_re_alloc (POINTER *ptr, SIZE sz)
+r_re_alloc (POINTER *ptr, size_t sz)
 {
   if (r_alloc_initialized == 0)
     {
@@ -1949,8 +1952,8 @@ r_re_alloc (POINTER *ptr, SIZE sz)
     }
   else
     {
-      SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
-      SIZE actual_sz = ROUNDUP( sz + hysteresis );
+      size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
+      size_t actual_sz = ROUNDUP( sz + hysteresis );
       MMAP_HANDLE h = find_mmap_handle( ptr );
       VM_ADDR new_vm_addr;
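
Note on the safe_bcopy -> memmove conversion above: the deleted compatibility
macro was defined as "#define safe_bcopy(x, y, z) memmove (y, x, z)", i.e.
safe_bcopy takes (source, destination, length) while memmove takes
(destination, source, length), which is why every converted call site in this
diff swaps its first two arguments.  A minimal stand-alone sketch of the
equivalence (the buffer names below are illustrative only, not from ralloc.c):

    #include <string.h>

    #define safe_bcopy(x, y, z) memmove (y, x, z)

    int
    main (void)
    {
      char src[4] = "abc";
      char dst[4];

      safe_bcopy (src, dst, 4);   /* old style: (source, destination, length) */
      memmove (dst, src, 4);      /* new style: (destination, source, length) */
      return 0;
    }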