/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2019 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <https://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#include <config.h>

#include <stddef.h>

#ifdef emacs
# include "lisp.h"
# include "blockinput.h"
# include <unistd.h>
#endif

#include "getpagesize.h"

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);


/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
void *(*real_morecore) (ptrdiff_t);

/* The break value, as seen by malloc.  */
static void *virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static void *break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define PAGE_ROUNDUP(size) (((size_t) (size) + page_size - 1) \
		       & ~((size_t) (page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((size_t) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))

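/* Illustrative examples only, assuming page_size == 4096 and
   MEM_ALIGN == 8 (both values are platform-dependent):
     PAGE_ROUNDUP (5000) == 8192, PAGE_ROUNDUP (4096) == 4096,
     MEM_ROUNDUP (13) == 16, MEM_ROUNDUP (16) == 16.
   Both macros round up to the next multiple of their boundary
   and leave exact multiples unchanged.  */
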
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifdef HAVE_MALLOC_H
# include <malloc.h>
#endif
#ifndef DOUG_LEA_MALLOC
extern void *(*__morecore) (ptrdiff_t);
#endif


/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

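/* A sketch of a single heap's layout, following the description above
   (proportions are arbitrary and illustrative only):

     start            bloc_start               free              end
       |  malloc data      |  relocatable blocs  |  unused space   |
       +-------------------+---------------------+-----------------+

   The heap itself never moves; only the blocs between bloc_start and
   free may be relocated.  */
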
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  void *start;
  /* End of memory range of this heap.  */
  void *end;
  /* Start of relocatable data in this heap.  */
  void *bloc_start;
  /* Start of unused space in this heap.  */
  void *free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.   */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NULL denotes a freed block, which has not yet
   been collected.  Such elements may appear only while r_alloc_freeze_level
   > 0, and they are freed when the arena is thawed.  Currently, while the
   arena is frozen, these blocs cannot be reused.  Very inefficient.  */

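/* For example (sizes illustrative only), three live blocs A, B, and C
   with sizes 96, 48, and 200 occupy one contiguous run:
     B->data == A->data + 96,
     C->data == B->data + 48,
   and break_value == C->data + 200.  */
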
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  void **variable;
  void *data;
  size_t size;
  void *new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;


/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (void *address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not
   in use.  It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static void *
obtain (void *address, size_t size)
{
  heap_ptr heap;
  size_t already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    emacs_abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      void *new = real_morecore (0);
      size_t get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  void *bloc_start = (void *) MEM_ROUNDUP ((void *) (new_heap + 1));

	  if (real_morecore ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) PAGE_ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if (real_morecore (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  ptrdiff_t excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && real_morecore (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  heap_ptr lh_prev;

	  /* This heap should have no blocs in it.  If it does, we
	     cannot return it to the system.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    return;

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  lh_prev = last_heap->prev;
	  /* If the system doesn't want that much memory back, leave
	     last_heap unaltered to reflect that.  This can occur if
	     break_value is still within the original data segment.  */
	  if (real_morecore (- excess) != 0)
	    {
	      last_heap = lh_prev;
	      last_heap->next = NIL_HEAP;
	    }
	}
      else
	{
	  excess = ((char *) last_heap->end
		    - (char *) PAGE_ROUNDUP ((char *) last_heap->end - excess));
	  /* If the system doesn't want that much memory back, leave
	     the end of the last heap unchanged to reflect that.  This
	     can occur if break_value is still within the original
	     data segment.  */
	  if (real_morecore (- excess) != 0)
	    last_heap->end = (char *) last_heap->end - excess;
	}
    }
}

/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (void **ptr)
{
Paul Eggert's avatar
Paul Eggert committed
353
  bloc_ptr p = first_bloc;
Jim Blandy's avatar
Jim Blandy committed
354 355 356

  while (p != NIL_BLOC)
    {
357
      /* Consistency check. Don't return inconsistent blocs.
Juanma Barranquero's avatar
Juanma Barranquero committed
358
	 Don't abort here, as callers might be expecting this, but
359 360 361
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
Jim Blandy's avatar
Jim Blandy committed
362 363 364 365 366 367 368 369 370 371
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size_t size)
{
  bloc_ptr new_bloc;
  heap_ptr heap;

  if (! (new_bloc = malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = NULL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}

/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, void *address)
{
  bloc_ptr b = bloc;

  /* This should never be called while the arena is frozen; bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  bloc_ptr tb = b;
	  size_t s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}

/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, size_t size)
{
  bloc_ptr b;
  heap_ptr heap;
  void *address;
  size_t old_size;

  /* This should never be called while the arena is frozen; bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    emacs_abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
            }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
            }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  if (bloc->new_data != bloc->data)
	    memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
            }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}

/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;
  heap_ptr h;

  if (r_alloc_freeze_level)
    {
      bloc->variable = NULL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Sometimes, 'heap' obtained from bloc->heap above is not really a
     'heap' structure.  It can even be beyond the current break point,
     which will cause crashes when we dereference it below (see
     bug#12242).  Evidently, the reason is bloc allocations done while
     use_relocatable_buffers was non-positive, because additional
     memory we get then is not recorded in the heaps we manage.  If
     bloc->heap records such a "heap", we cannot (and don't need to)
     update its records.  So we validate the 'heap' value by making
     sure it is one of the heaps we manage via the heaps linked list,
     and don't touch a 'heap' that isn't found there.  This avoids
     accessing memory we know nothing about.  */
  for (h = first_heap; h != NIL_HEAP; h = h->next)
    if (heap == h)
      break;

  if (h)
    {
      /* Update the records of which blocs are in HEAP.  */
      if (heap->first_bloc == bloc)
	{
	  if (bloc->next != 0 && bloc->next->heap == heap)
	    heap->first_bloc = bloc->next;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
      if (heap->last_bloc == bloc)
	{
	  if (bloc->prev != 0 && bloc->prev->heap == heap)
	    heap->last_bloc = bloc->prev;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
    }

  relinquish ();
  free (bloc);
}

/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

static void *
r_alloc_sbrk (ptrdiff_t size)
{
  bloc_ptr b;
  void *address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (use_relocatable_buffers <= 0)
    return real_morecore (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      void *new_bloc_start;
      heap_ptr h = first_heap;
      size_t get = PAGE_ROUNDUP (size);

      address = (void *) PAGE_ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (void *) PAGE_ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (void *) PAGE_ROUNDUP (virtual_break_value);
	  else
	    address = (void *) PAGE_ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (void *) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NULL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (char *) (h + 1) <= (char *) new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      size_t excess = ((char *) first_heap->bloc_start
		       - ((char *) virtual_break_value + size));

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (void *) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (void *) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (void *) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}


/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

void *
r_alloc (void **ptr, size_t size)
{
  bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
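
/* An illustrative usage sketch only, not interface documentation: the
   allocator records PTR itself, so the variable is updated in place
   whenever its bloc relocates, and it must be re-read after any call
   that may move blocs.

     void *buf;
     if (r_alloc (&buf, 1024) == NULL)
       ...handle allocation failure...
     ...use buf...
     if (r_re_alloc (&buf, 2048) == NULL)   (may relocate; updates buf)
       ...handle allocation failure...
     r_alloc_free (&buf);                   (frees and stores 0 in buf)
 */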

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */
Jim Blandy's avatar
Jim Blandy committed
911 912

void
Paul Eggert's avatar
Paul Eggert committed
913
r_alloc_free (void **ptr)
Jim Blandy's avatar
Jim Blandy committed
914
{
Paul Eggert's avatar
Paul Eggert committed
915
  bloc_ptr dead_bloc;
Jim Blandy's avatar
Jim Blandy committed
916

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    emacs_abort (); /* Double free? PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

void *
r_re_alloc (void **ptr, size_t size)
{
  bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    emacs_abort (); /* Already freed? PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = NULL;
	    }
          else
	    return NULL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NULL;
        }
    }
  return *ptr;
}

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
1008
r_alloc_reinit (void)
1009 1010 1011 1012 1013 1014 1015 1016 1017
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (void *) sbrk (0));
  assert ((void *) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);