ralloc.c 32.7 KB
Newer Older
1
/* Block-relocating memory allocator.
2
   Copyright (C) 1993, 1995, 2000-2012  Free Software Foundation, Inc.
Jim Blandy's avatar
Jim Blandy committed
3 4 5

This file is part of GNU Emacs.

6
GNU Emacs is free software: you can redistribute it and/or modify
Jim Blandy's avatar
Jim Blandy committed
7
it under the terms of the GNU General Public License as published by
8 9
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Jim Blandy's avatar
Jim Blandy committed
10 11 12 13 14 15 16

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
17
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */
Jim Blandy's avatar
Jim Blandy committed
18 19 20

/* NOTES:

21
   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
Jim Blandy's avatar
Jim Blandy committed
22
   rather than all of them.  This means allowing for a possible
23
   hole between the first bloc and the end of malloc storage.  */
Jim Blandy's avatar
Jim Blandy committed
24

25
#ifdef emacs
26

27
#include <config.h>
28

Jim Blandy's avatar
Jim Blandy committed
29
#include "lisp.h"		/* Needed for VALBITS.  */
Jan Djärv's avatar
Jan Djärv committed
30
#include "blockinput.h"
31

Dave Love's avatar
Dave Love committed
32
#include <unistd.h>
33

34
#ifdef DOUG_LEA_MALLOC
35
#define M_TOP_PAD           -2
36
extern int mallopt (int, int);
37
#else /* not DOUG_LEA_MALLOC */
38
#ifndef SYSTEM_MALLOC
39
extern size_t __malloc_extra_blocks;
40
#endif /* SYSTEM_MALLOC */
41
#endif /* not DOUG_LEA_MALLOC */
42

43
#else /* not emacs */
44

45
#include <stddef.h>
46 47 48 49

#include <unistd.h>
#include <malloc.h>

50
#endif	/* not emacs */
51

52

53
#include "getpagesize.h"
Jim Blandy's avatar
Jim Blandy committed
54

Paul Eggert's avatar
Paul Eggert committed
55 56
typedef size_t SIZE;
typedef void *POINTER;
Jim Blandy's avatar
Jim Blandy committed
57 58
#define NIL ((POINTER) 0)

59 60 61 62 63
/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
64 65 66
   automatic variable, and loses its value each time Emacs is started
   up.  */

67 68
static int r_alloc_initialized = 0;

69
static void r_alloc_init (void);
70

Jim Blandy's avatar
Jim Blandy committed
71

Jim Blandy's avatar
Jim Blandy committed
72 73
/* Declarations for working with the malloc, ralloc, and system breaks.  */

74
/* Function to set the real break value.  */
75
POINTER (*real_morecore) (ptrdiff_t);
Jim Blandy's avatar
Jim Blandy committed
76

77
/* The break value, as seen by malloc.  */
Jim Blandy's avatar
Jim Blandy committed
78 79
static POINTER virtual_break_value;

80 81
/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
Jim Blandy's avatar
Jim Blandy committed
82 83
static POINTER break_value;

84 85 86
/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

87
/* Whenever we get memory from the system, get this many extra bytes.  This
88
   must be a multiple of page_size.  */
89 90
static int extra_bytes;

Jim Blandy's avatar
Jim Blandy committed
91
/* Macros for rounding.  Note that rounding to any value is possible
92
   by changing the definition of PAGE.  */
Jim Blandy's avatar
Jim Blandy committed
93
#define PAGE (getpagesize ())
94
#define ROUNDUP(size) (((size_t) (size) + page_size - 1) \
95
		       & ~((size_t)(page_size - 1)))
96

Juanma Barranquero's avatar
Juanma Barranquero committed
97
#define MEM_ALIGN sizeof (double)
98
#define MEM_ROUNDUP(addr) (((size_t)(addr) + MEM_ALIGN - 1) \
99
			   & ~(MEM_ALIGN - 1))
100

101 102 103 104
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
105
extern POINTER (*__morecore) (ptrdiff_t);
106 107 108
#endif


109

110 111 112 113
/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/

114 115 116 117 118 119 120 121 122 123 124 125
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
Karl Heuer's avatar
Karl Heuer committed
126
   But sometimes we can't do that, because we can't get contiguous
127
   space to add onto the heap.  When that happens, we start a new heap.  */
128

129 130 131 132
typedef struct heap
{
  /* Doubly-linked list of heaps, kept in increasing address order;
     first_heap/last_heap are the list ends.  */
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.
     [start, bloc_start) holds malloc data; [bloc_start, free) holds blocs.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)

149 150 151 152 153 154
/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.   */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
155 156 157 158 159
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
160
   b->data + b->size == b->next->data.
161 162

   An element with variable==NIL denotes a freed block, which has not yet
Juanma Barranquero's avatar
Juanma Barranquero committed
163 164 165
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */
166

167 168 169 170 171 172 173
typedef struct bp
{
  /* Doubly-linked list of blocs, kept in increasing order of `data';
     adjacent blocs' data areas abut each other.  */
  struct bp *next;
  struct bp *prev;
  /* Address of the user's pointer variable, updated whenever the bloc
     relocates.  NIL marks a freed-but-uncollected bloc (only while
     r_alloc_freeze_level > 0; see the comment above).  */
  POINTER *variable;
  /* Current address of this bloc's data area.  */
  POINTER data;
  /* Size of the data area, in bytes.  */
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap; 		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

181
/* Head and tail of the list of relocatable blocs.  */
182 183
static bloc_ptr first_bloc, last_bloc;

184 185 186 187 188
static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

Jim Blandy's avatar
Jim Blandy committed
189

Jim Blandy's avatar
Jim Blandy committed
190 191
/* Functions to get and return memory from the system.  */

192 193 194
/* Find the heap that ADDRESS falls within.  */

static heap_ptr
195
find_heap (POINTER address)
196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (POINTER address, SIZE size)
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    emacs_abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      /* Ask the system where the break currently is, without moving it.  */
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.
	     The heap header itself lives at the start of the new space;
	     its bloc area begins just past the (aligned) header.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  /* Grow the break enough to hold the header; fail if the
	     system gave us something discontiguous again.  */
	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
Jim Blandy's avatar
Jim Blandy committed
301

302 303 304 305 306
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  ptrdiff_t excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  /* Only give memory back when there is plenty to give, and only when
     the system break is where we left it (nobody else called sbrk).  */
  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    emacs_abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  /* Shrink the last heap, keeping its end page-aligned.  */
	  excess = (char *) last_heap->end
			- (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
             the end of the last heap to reflect that.  This can occur
             if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
             It should be, for the else clause above; the other case,
             which returns the entire last heap to the system, seems
             unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    emacs_abort ();
	}
    }
}

Jim Blandy's avatar
Jim Blandy committed
363 364 365
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
366
   to that block.  */
Jim Blandy's avatar
Jim Blandy committed
367 368

static bloc_ptr
369
find_bloc (POINTER *ptr)
Jim Blandy's avatar
Jim Blandy committed
370 371 372 373 374
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
375
      /* Consistency check. Don't return inconsistent blocs.
Juanma Barranquero's avatar
Juanma Barranquero committed
376
	 Don't abort here, as callers might be expecting this, but
377 378 379
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
Jim Blandy's avatar
Jim Blandy committed
380 381 382 383 384 385 386 387 388 389
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  /* Get the bloc header from the malloc arena, and the data area at
     break_value via obtain.  On either failure, clean up and fail;
     free (NULL) is harmless when the malloc itself failed.  */
  if (! (new_bloc = malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  /* No user variable yet; the caller (r_alloc) fills this in.  */
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
439

440 441 442
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  This is purely
   a planning pass; the actual memmove of data is done by callers.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.
	     Freed blocs (variable == NIL) take no space.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.
	 A freed bloc is assigned an address but consumes no room.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
503 504 505 506 507

/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  Presumably called after blocs from
   BLOC onward have been moved to their final `data' addresses —
   NOTE(review): all callers in this file do so; confirm for new ones.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
565

566 567 568
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.
   Return 1 on success, 0 if the needed space could not be obtained
   (in which case BLOC keeps its old size and nothing has moved).  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    emacs_abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  /* Plan the new layout; on failure restore the old size and give up.  */
  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      /* Growing: later blocs move to higher addresses, so copy them
	 from the last bloc backwards to avoid overwriting sources.  */
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
            }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
            }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  /* Only old_size bytes of BLOC's data are meaningful;
	     zero the newly added tail.  */
	  if (bloc->new_data != bloc->data)
	    memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      /* Shrinking: blocs move to lower addresses, so copy them
	 from BLOC forwards.  */
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
            }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
665

666 667
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.
   While the arena is frozen, the bloc is only marked freed
   (variable = NIL) and actually collected when the arena is thawed.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;
  heap_ptr h;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  /* Shrink to zero size; this slides all following blocs down.  */
  resize_bloc (bloc, 0);

  /* Unlink BLOC from the global doubly-linked list.  */
  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Sometimes, 'heap' obtained from bloc->heap above is not really a
     'heap' structure.  It can even be beyond the current break point,
     which will cause crashes when we dereference it below (see
     bug#12242).  Evidently, the reason is bloc allocations done while
     use_relocatable_buffers was non-positive, because additional
     memory we get then is not recorded in the heaps we manage.  If
     bloc->heap records such a "heap", we cannot (and don't need to)
     update its records.  So we validate the 'heap' value by making
     sure it is one of the heaps we manage via the heaps linked list,
     and don't touch a 'heap' that isn't found there.  This avoids
     accessing memory we know nothing about.  */
  for (h = first_heap; h != NIL_HEAP; h = h->next)
    if (heap == h)
      break;

  if (h)
    {
      /* Update the records of which blocs are in HEAP.  */
      if (heap->first_bloc == bloc)
	{
	  if (bloc->next != 0 && bloc->next->heap == heap)
	    heap->first_bloc = bloc->next;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
      if (heap->last_bloc == bloc)
	{
	  if (bloc->prev != 0 && bloc->prev->heap == heap)
	    heap->last_bloc = bloc->prev;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
    }

  /* Possibly give heap space back to the system, then release
     the bloc header itself.  */
  relinquish ();
  free (bloc);
}

Jim Blandy's avatar
Jim Blandy committed
741 742
/* Interface routines.  */

743
/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

static POINTER
r_alloc_sbrk (ptrdiff_t size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  /* When relocation is disabled, behave exactly like sbrk.  */
  if (use_relocatable_buffers <= 0)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  Copy from the last bloc backwards, since blocs are
	     moving to higher addresses.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      /* sbrk-like contract: newly obtained memory reads as zero.  */
      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      /* Shrinking the malloc area.  EXCESS is the gap that would open
	 between the new break and the start of the bloc area.  */
      SIZE excess = (char *)first_heap->bloc_start
		      - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      /* Hysteresis: only slide the blocs down when the gap is large.  */
      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  /* Blocs move to lower addresses: copy front to back.  */
	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

895

Jim Blandy's avatar
Jim Blandy committed
896 897
/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
898 899
   which will use the data area.

900
   The allocation of 0 bytes is valid.
Juanma Barranquero's avatar
Juanma Barranquero committed
901 902
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.
903

904 905
   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */
Jim Blandy's avatar
Jim Blandy committed
906 907

POINTER
908
r_alloc (POINTER *ptr, SIZE size)
Jim Blandy's avatar
Jim Blandy committed
909 910 911
{
  register bloc_ptr new_bloc;

912 913 914
  if (! r_alloc_initialized)
    r_alloc_init ();

915
  new_bloc = get_bloc (MEM_ROUNDUP (size));
916 917 918 919 920 921 922
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;
Jim Blandy's avatar
Jim Blandy committed
923 924 925 926

  return *ptr;
}

927 928
/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */
Jim Blandy's avatar
Jim Blandy committed
929 930

void
931
r_alloc_free (register POINTER *ptr)
Jim Blandy's avatar
Jim Blandy committed
932 933 934
{
  register bloc_ptr dead_bloc;

935 936 937
  if (! r_alloc_initialized)
    r_alloc_init ();

Jim Blandy's avatar
Jim Blandy committed
938 939
  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
940
    emacs_abort (); /* Double free? PTR not originally used to allocate?  */
Jim Blandy's avatar
Jim Blandy committed
941 942

  free_bloc (dead_bloc);
943
  *ptr = 0;
944

945
#ifdef emacs
946
  refill_memory_reserve ();
947
#endif
Jim Blandy's avatar
Jim Blandy committed
948 949
}

950
/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  /* A null *PTR means there is nothing to resize: degenerate to a
     fresh allocation.  */
  if (!*ptr)
    return r_alloc (ptr, size);
  /* SIZE 0 is valid for r_alloc, so free the old bloc and allocate
     a zero-sized one in its place.  */
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    emacs_abort (); /* Already freed? PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      /* Only bother shrinking when at least a page would be given back
	 and no blocs are frozen in place.  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  /* Blocs may not move while frozen, so growing in place is out.
	     Allocate a new bloc and abandon the old one; its data is
	     NOT copied here despite the header comment — the old bloc
	     is merely detached from its variable.  */
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      /* Orphan the old bloc: no variable points to it any more.  */
	      bloc->variable = (POINTER *) NIL;
	    }
          else
	    return NIL;	/* Could not get more space; *PTR is unchanged.  */
	}
      else
	{
	  /* Normal case: grow in place, shifting higher blocs upward.  */
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
        }
    }

  return *ptr;
}
1019

1020 1021 1022 1023 1024 1025

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
1026
r_alloc_reinit (void)
1027 1028 1029 1030 1031 1032 1033 1034 1035
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}
1036 1037

#endif /* emacs && DOUG_LEA_MALLOC */
1038

1039
#ifdef DEBUG
1040

1041 1042
#include <assert.h>

1043
void
Andreas Schwab's avatar
Andreas Schwab committed
1044
r_alloc_check (void)
1045
{
Richard M. Stallman's avatar
Richard M. Stallman committed
1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
1063 1064 1065
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
Richard M. Stallman's avatar
Richard M. Stallman committed
1066
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
1067
#endif
Richard M. Stallman's avatar
Richard M. Stallman committed
1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);