/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2011  Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include <setjmp.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#include <unistd.h>

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD           -2
extern int mallopt (int, int);
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#endif	/* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);

/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) (long int);

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
				   & ~(MEM_ALIGN - 1))
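
/* For illustration (not in the original sources): with page_size 4096,
   ROUNDUP (4097) is 8192 and ROUNDUP (4096) stays 4096; with 8-byte
   MEM_ALIGN, MEM_ROUNDUP (13) is 16.  The expression
   (x + a - 1) & ~(a - 1) rounds x up to a multiple of a, assuming a is
   a power of two.  */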

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) (long int);
#endif


/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

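/* Sketch of one heap's layout (illustrative, not from the original
   sources); addresses increase to the right, and
   start <= bloc_start <= free <= end always holds:

     start         bloc_start               free               end
       | malloc data |   relocatable blocs    |     unused      |

   [start, bloc_start) is malloc data, [bloc_start, free) holds blocs,
   and [free, end) has been obtained from the system but is unused.  */
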
/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.   */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */
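
/* For example (illustrative, not from the original sources): with two
   live blocs A and B in order, A->data + A->size == B->data, and
   break_value is B->data + B->size; freeing A slides B's data downward
   so the blocs stay contiguous.  */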

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap; 		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (POINTER address, SIZE size)
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
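
/* For instance (illustrative): if a request overruns the last heap by
   100 bytes, obtain asks real_morecore for roughly
   ROUNDUP (100 + extra_bytes) more, so several following small
   requests can be served without another system call.  */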

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
			- (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
             the end of the last heap to reflect that.  This can occur
             if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
             It should be, for the else clause above; the other case,
             which returns the entire last heap to the system, seems
             unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use (void)
{
  return (char *) break_value - (char *) virtual_break_value;
}

/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't, to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}

/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}

/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
            }
	  else
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
            }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
            }
	  else
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
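
/* For instance (illustrative): growing a bloc from 1 KB to 2 KB shifts
   every later bloc upward by 1 KB (relocate_blocs plans the moves,
   memmove performs them) and updates each bloc's registered *variable
   to point at the relocated data.  */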

/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}

/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (long int size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		      - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}


/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
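
/* Illustrative usage (a sketch, not from the original sources):

       POINTER buf;
       r_alloc (&buf, 1024);      -- buf receives the data address
       r_re_alloc (&buf, 4096);   -- may move the data, updating buf
       r_alloc_free (&buf);       -- frees the bloc and stores 0 in buf

   The caller registers the address of the one variable that holds the
   pointer, so the allocator can update it whenever the bloc relocates;
   any saved copy of the pointer may be invalidated.  */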

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (register POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free? PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed? PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
          else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
        }
    }
  return *ptr;
}

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (long int size)
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw (void)
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and memcpy are done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
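
/* Illustrative pairing (a sketch, not from the original sources):

       r_alloc_freeze (16 * 1024);  -- blocs hold still from here on
       ... code that keeps raw pointers into relocatable data ...
       r_alloc_thaw ();             -- relocation permitted again

   Freezes nest: r_alloc_freeze increments r_alloc_freeze_level and
   r_alloc_thaw decrements it; unused blocs are swept and relocation
   resumes only when the level drops back to zero.  */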

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void