/* Unconditionally use unsigned char * for this. */
typedef unsigned char *POINTER;
-typedef unsigned long SIZE;
-
#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
#include <malloc.h>
#include <stddef.h>
-typedef size_t SIZE;
typedef void *POINTER;
#include <unistd.h>
#endif /* emacs. */
void init_ralloc (void);
-#define safe_bcopy(x, y, z) memmove (y, x, z)
#define NIL ((POINTER) 0)
/* Declarations for working with the malloc, ralloc, and system breaks. */
/* Function to set the real break value. */
-static POINTER (*real_morecore) (long size);
+static POINTER (*real_morecore) (ptrdiff_t size);
/* The break value, as seen by malloc (). */
static POINTER virtual_break_value;
static POINTER break_value;
/* This is the size of a page. We round memory requests to this boundary. */
-static int page_size;
+static size_t page_size;
/* Whenever we get memory from the system, get this many extra bytes. This
must be a multiple of page_size. */
struct bp *prev;
POINTER *variable;
POINTER data;
- SIZE size;
+ size_t size;
POINTER new_data; /* temporarily used for relocation */
struct heap *heap; /* Heap this bloc is in. */
} *bloc_ptr;
allocate the memory. */
static POINTER
-obtain (POINTER address, SIZE size)
+obtain (POINTER address, size_t size)
{
heap_ptr heap;
- SIZE already_available;
+ size_t already_available;
/* Find the heap that ADDRESS falls within. */
for (heap = last_heap; heap; heap = heap->prev)
}
if (! heap)
- abort ();
+ ABORT ();
/* If we can't fit SIZE bytes in that heap,
try successive later heaps. */
if (heap == NIL_HEAP)
{
POINTER new = (*real_morecore)(0);
- SIZE get;
+ size_t get;
already_available = (char *)last_heap->end - (char *)address;
If we could not allocate the space, return zero. */
static POINTER
-get_more_space (SIZE size)
+get_more_space (size_t size)
{
POINTER ptr = break_value;
if (obtain (size))
If SIZE is more than a page, return the space to the system. */
static void
-relinquish ()
+relinquish (void)
{
register heap_ptr h;
int excess = 0;
/* This heap should have no blocs in it. */
if (last_heap->first_bloc != NIL_BLOC
|| last_heap->last_bloc != NIL_BLOC)
- abort ();
+ ABORT ();
/* Return the last heap, with its header, to the system. */
excess = (char *)last_heap->end - (char *)last_heap->start;
}
if ((*real_morecore) (- excess) == 0)
- abort ();
+ ABORT ();
}
}
long r_alloc_size_in_use (void);
long
-r_alloc_size_in_use ()
+r_alloc_size_in_use (void)
{
return break_value - virtual_break_value;
}
memory for the new block. */
static bloc_ptr
-get_bloc (SIZE size)
+get_bloc (size_t size)
{
register bloc_ptr new_bloc;
register heap_ptr heap;
/* No need to ever call this if arena is frozen, bug somewhere! */
if (r_alloc_freeze_level)
- abort();
+ ABORT();
while (b)
{
if (heap == NIL_HEAP)
{
register bloc_ptr tb = b;
- register SIZE s = 0;
+ register size_t s = 0;
/* Add up the size of all the following blocs. */
while (tb != NIL_BLOC)
that come after BLOC in memory. */
static int
-resize_bloc (bloc_ptr bloc, SIZE size)
+resize_bloc (bloc_ptr bloc, size_t size)
{
register bloc_ptr b;
heap_ptr heap;
POINTER address;
- SIZE old_size;
+ size_t old_size;
/* No need to ever call this if arena is frozen, bug somewhere! */
if (r_alloc_freeze_level)
- abort();
+ ABORT();
if (bloc == NIL_BLOC || size == bloc->size)
return 1;
}
if (heap == NIL_HEAP)
- abort ();
+ ABORT ();
old_size = bloc->size;
bloc->size = size;
}
else
{
- safe_bcopy (b->data, b->new_data, b->size);
+ memmove (b->new_data, b->data, b->size);
*b->variable = b->data = b->new_data;
}
}
}
else
{
- safe_bcopy (bloc->data, bloc->new_data, old_size);
+ memmove (bloc->new_data, bloc->data, old_size);
memset (bloc->new_data + old_size, 0, size - old_size);
*bloc->variable = bloc->data = bloc->new_data;
}
}
else
{
- safe_bcopy (b->data, b->new_data, b->size);
+ memmove (b->new_data, b->data, b->size);
*b->variable = b->data = b->new_data;
}
}
__morecore hook values - in particular, __default_morecore in the
GNU malloc package. */
-POINTER r_alloc_sbrk (long size);
+POINTER r_alloc_sbrk (ptrdiff_t size);
POINTER
-r_alloc_sbrk (long size)
+r_alloc_sbrk (ptrdiff_t size)
{
register bloc_ptr b;
POINTER address;
not always find a space which is contiguous to the previous. */
POINTER new_bloc_start;
heap_ptr h = first_heap;
- SIZE get = ROUNDUP (size);
+ size_t get = ROUNDUP (size);
address = (POINTER) ROUNDUP (virtual_break_value);
header. */
for (b = last_bloc; b != NIL_BLOC; b = b->prev)
{
- safe_bcopy (b->data, b->new_data, b->size);
+ memmove (b->new_data, b->data, b->size);
*b->variable = b->data = b->new_data;
}
}
else /* size < 0 */
{
- SIZE excess = (char *)first_heap->bloc_start
+ EMACS_INT excess = (char *)first_heap->bloc_start
- ((char *)virtual_break_value + size);
address = virtual_break_value;
for (b = first_bloc; b != NIL_BLOC; b = b->next)
{
- safe_bcopy (b->data, b->new_data, b->size);
+ memmove (b->new_data, b->data, b->size);
*b->variable = b->data = b->new_data;
}
}
If we can't allocate the necessary memory, set *PTR to zero, and
return zero. */
-POINTER r_alloc (POINTER *ptr, SIZE size);
+POINTER r_alloc (POINTER *ptr, size_t size);
POINTER
-r_alloc (POINTER *ptr, SIZE size)
+r_alloc (POINTER *ptr, size_t size)
{
bloc_ptr new_bloc;
dead_bloc = find_bloc (ptr);
if (dead_bloc == NIL_BLOC)
- abort ();
+ ABORT ();
free_bloc (dead_bloc);
*ptr = 0;
If more memory cannot be allocated, then leave *PTR unchanged, and
return zero. */
-POINTER r_re_alloc (POINTER *ptr, SIZE size);
+POINTER r_re_alloc (POINTER *ptr, size_t size);
POINTER
-r_re_alloc (POINTER *ptr, SIZE size)
+r_re_alloc (POINTER *ptr, size_t size)
{
register bloc_ptr bloc;
bloc = find_bloc (ptr);
if (bloc == NIL_BLOC)
- abort ();
+ ABORT ();
if (size < bloc->size)
{
void r_alloc_thaw (void);
void
-r_alloc_thaw ()
+r_alloc_thaw (void)
{
if (! r_alloc_initialized)
init_ralloc ();
if (--r_alloc_freeze_level < 0)
- abort ();
+ ABORT ();
/* This frees all unused blocs. It is not too inefficient, as the resize
- and bcopy is done only once. Afterwards, all unreferenced blocs are
+ and memmove is done only once. Afterwards, all unreferenced blocs are
already shrunk to zero size. */
if (!r_alloc_freeze_level)
{
/* The hook `malloc' uses for the function which gets more space
from the system. */
#ifndef DOUG_LEA_MALLOC
-extern POINTER (*__morecore) (long size);
+extern POINTER (*__morecore) (ptrdiff_t size);
#endif
/* Initialize various things for memory allocation. */
-#define SET_FUN_PTR(fun_ptr, fun_val) \
- (*((void **) (&fun_ptr)) = ((void *) (fun_val)))
-
void
init_ralloc (void)
{
return;
r_alloc_initialized = 1;
- SET_FUN_PTR (real_morecore, __morecore);
- SET_FUN_PTR (__morecore, r_alloc_sbrk);
+ real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
+ __morecore =
+#ifdef __GNUC__
+ (__typeof__ (__morecore))
+#endif
+ r_alloc_sbrk;
first_heap = last_heap = &heap_base;
first_heap->next = first_heap->prev = NIL_HEAP;
first_heap->start = first_heap->bloc_start
= virtual_break_value = break_value = (*real_morecore) (0);
if (break_value == NIL)
- abort ();
+ ABORT ();
page_size = PAGE;
extra_bytes = ROUNDUP (50000);
Emacs. This is needed when using Doug Lea's malloc from GNU libc. */
void r_alloc_reinit (void);
void
-r_alloc_reinit ()
+r_alloc_reinit (void)
{
/* Only do this if the hook has been reset, so that we don't get an
infinite loop, in case Emacs was linked statically. */
- if ( ((void*) __morecore) != (void *) (r_alloc_sbrk))
+ if ( (POINTER (*) (ptrdiff_t)) __morecore != r_alloc_sbrk)
{
- SET_FUN_PTR (real_morecore, __morecore);
- SET_FUN_PTR (__morecore, r_alloc_sbrk);
+ real_morecore = (POINTER (*) (ptrdiff_t)) __morecore;
+ __morecore =
+#ifdef __GNUC__
+ (__typeof__ (__morecore))
+#endif
+ r_alloc_sbrk;
}
}
#if 0
#ifdef DEBUG
void
-r_alloc_check ()
+r_alloc_check (void)
{
int found = 0;
heap_ptr h, ph = 0;
{
assert (b->prev == pb);
assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
- assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);
+ assert ((size_t) MEM_ROUNDUP (b->size) == b->size);
ph = 0;
for (h = first_heap; h; h = h->next)
#include <stdio.h>
typedef void *VM_ADDR; /* VM addresses */
-static CONST VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
+static const VM_ADDR VM_FAILURE_ADDR = (VM_ADDR) -1; /* mmap returns this when it fails. */
/* Configuration for relocating allocator. */
static int DEV_ZERO_FD = -1;
-/* We actually need a datastructure that can be usefully structured
+/* We actually need a data structure that can be usefully structured
based on the VM address, and allows an ~O(1) lookup on an arbitrary
- address, ie a hash-table. Maybe the XEmacs hash table can be
- coaxed enough. At the moment, we use lookup on a hash-table to
+ address, i.e. a hash table. Maybe the XEmacs hash table can be
+ coaxed enough. At the moment, we use lookup on a hash table to
decide whether to do an O(n) search on the malloced block list.
- Addresses are hashed to a bucket modulo MHASH_PRIME */
+ Addresses are hashed to a bucket modulo MHASH_PRIME. */
/* We settle for a standard doubly-linked-list. The dynarr type isn't
static MMAP_HANDLE mmap_start = 0; /* Head of linked list */
static size_t page_size = 0; /* Size of VM pages */
-static int mmap_hysteresis; /* Should be size_t, really. */
+static Fixnum mmap_hysteresis; /* Logically a "size_t" */
/* Get a new handle for a fresh block. */
static MMAP_HANDLE
static void Addr_Block_initialize(void);
/* Get a suitable VM_ADDR via mmap */
-static VM_ADDR New_Addr_Block( SIZE sz );
+static VM_ADDR New_Addr_Block (size_t sz);
/* Free a VM_ADDR allocated via New_Addr_Block */
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz );
+static void Free_Addr_Block (VM_ADDR addr, size_t sz);
#ifdef MMAP_GENERATE_ADDRESSES
/* Implementation of the three calls for address picking when XEmacs is in charge */
typedef struct addr_chain
{
POINTER addr;
- SIZE sz;
+ size_t sz;
addr_status flag;
struct addr_chain *next;
} ADDRESS_BLOCK, *ADDRESS_CHAIN;
WRT the addition/deletion of address blocks because of the assert
in Coalesce() and the strict ordering of blocks by their address
*/
-static void Addr_Block_initialize()
+static void
+Addr_Block_initialize (void)
{
MEMMETER( MVAL( M_Addrlist_Size )++)
addr_chain = (ADDRESS_CHAIN) UNDERLYING_MALLOC( sizeof( ADDRESS_BLOCK ));
addr_chain->next = 0; /* Last block in chain */
addr_chain->sz = 0x0c000000; /* Size */
- addr_chain->addr = (POINTER) (0x04000000 | DATA_SEG_BITS);
+ addr_chain->addr = (POINTER) (0x04000000);
addr_chain->flag = empty;
}
/* Coalesce address blocks if they are contiguous. Only empty and
unavailable slots are coalesced. */
-static void Coalesce_Addr_Blocks()
+static void
+Coalesce_Addr_Blocks (void)
{
ADDRESS_CHAIN p;
for (p = addr_chain; p; p = p->next)
}
/* Get an empty address block of specified size. */
-static VM_ADDR New_Addr_Block( SIZE sz )
+static VM_ADDR
+New_Addr_Block (size_t sz)
{
ADDRESS_CHAIN p = addr_chain;
VM_ADDR new_addr = VM_FAILURE_ADDR;
/* Free an address block. We mark the block as being empty, and attempt to
do any coalescing that may have resulted from this. */
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
+static void
+Free_Addr_Block (VM_ADDR addr, size_t sz)
{
ADDRESS_CHAIN p = addr_chain;
for (; p; p = p->next )
{
if (p->addr == addr)
{
- if (p->sz != sz) abort(); /* ACK! Shouldn't happen at all. */
+ if (p->sz != sz) ABORT(); /* ACK! Shouldn't happen at all. */
munmap( (VM_ADDR) p->addr, p->sz );
p->flag = empty;
break;
}
}
- if (!p) abort(); /* Can't happen... we've got a block to free which is not in
+ if (!p) ABORT(); /* Can't happen... we've got a block to free which is not in
the address list. */
Coalesce_Addr_Blocks();
}
/* This is an alternate (simpler) implementation in cases where the
address is picked by the kernel. */
-static void Addr_Block_initialize(void)
+static void
+Addr_Block_initialize (void)
{
/* Nothing. */
}
-static VM_ADDR New_Addr_Block( SIZE sz )
+static VM_ADDR
+New_Addr_Block (size_t sz)
{
return mmap (0, sz, PROT_READ|PROT_WRITE, MAP_FLAGS,
DEV_ZERO_FD, 0 );
}
-static void Free_Addr_Block( VM_ADDR addr, SIZE sz )
+static void
+Free_Addr_Block (VM_ADDR addr, size_t sz)
{
munmap ((caddr_t) addr, sz );
}
/* IMPLEMENTATION OF EXPORTED RELOCATOR INTERFACE */
/*
- r_alloc( POINTER, SIZE ): Allocate a relocatable area with the start
+ r_alloc (POINTER, SIZE): Allocate a relocatable area with the start
address aliased to the first parameter.
*/
-POINTER r_alloc (POINTER *ptr, SIZE size);
+POINTER r_alloc (POINTER *ptr, size_t size);
POINTER
-r_alloc (POINTER *ptr, SIZE size)
+r_alloc (POINTER *ptr, size_t size)
{
MMAP_HANDLE mh;
switch(r_alloc_initialized)
{
case 0:
- abort();
+ ABORT();
case 1:
*ptr = (POINTER) UNDERLYING_MALLOC(size);
break;
mh = new_mmap_handle( size );
if (mh)
{
- SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
- SIZE mmapped_size = ROUNDUP( size + hysteresis );
+ size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
+ size_t mmapped_size = ROUNDUP( size + hysteresis );
MEMMETER( MVAL(M_Map)++ )
MEMMETER( MVAL(M_Pages_Map) += (mmapped_size/page_size) )
MEMMETER( MVAL(M_Wastage) += mmapped_size - size )
{
switch( r_alloc_initialized) {
case 0:
- abort();
+ ABORT();
case 1:
UNDERLYING_FREE( *ptr ); /* Certain this is from the heap. */
If more memory cannot be allocated, then leave *PTR unchanged, and
return zero. */
-POINTER r_re_alloc (POINTER *ptr, SIZE sz);
+POINTER r_re_alloc (POINTER *ptr, size_t sz);
POINTER
-r_re_alloc (POINTER *ptr, SIZE sz)
+r_re_alloc (POINTER *ptr, size_t sz)
{
if (r_alloc_initialized == 0)
{
- abort ();
+ ABORT ();
return 0; /* suppress compiler warning */
}
else if (r_alloc_initialized == 1)
}
else
{
- SIZE hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
- SIZE actual_sz = ROUNDUP( sz + hysteresis );
+ size_t hysteresis = (mmap_hysteresis > 0 ? mmap_hysteresis : 0);
+ size_t actual_sz = ROUNDUP( sz + hysteresis );
MMAP_HANDLE h = find_mmap_handle( ptr );
VM_ADDR new_vm_addr;
if (r_alloc_initialized > 1)
return; /* used to return 1 */
+#ifdef PDUMP
+ /* Under pdump, we need to activate ralloc on the first go. */
+ ++r_alloc_initialized;
+#endif
if (++r_alloc_initialized == 1)
return; /* used to return 1 */