/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2015 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>

#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#include <unistd.h>

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD           -2
extern int mallopt (int, int);
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>
#include <malloc.h>

#endif	/* not emacs */


#include "getpagesize.h"

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);

/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
void *(*real_morecore) (ptrdiff_t);

/* The break value, as seen by malloc.  */
static void *virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static void *break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define PAGE_ROUNDUP(size) (((size_t) (size) + page_size - 1) \
		       & ~((size_t) (page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((size_t) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))

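/* For instance, with page_size == 4096 and MEM_ALIGN == 8 (typical
   values; both are platform-dependent), the macros evaluate as:

     PAGE_ROUNDUP (1)    == 4096      PAGE_ROUNDUP (4096) == 4096
     PAGE_ROUNDUP (4097) == 8192
     MEM_ROUNDUP (13)    == 16        MEM_ROUNDUP (16)    == 16

   This is the usual (x + align - 1) & ~(align - 1) trick, which works
   because page_size and MEM_ALIGN are powers of two.  */
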
/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern void *(*__morecore) (ptrdiff_t);
#endif


/***********************************************************************
		      Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
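
/* Pictorially, a single heap looks like this (a sketch, not to scale):

     start            bloc_start                    free            end
       |  malloc data     |  bloc | bloc | ... | bloc |   unused     |

   The blocs abut one another; free == bloc_start when the heap holds
   no blocs.  */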

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  void *start;
  /* End of memory range of this heap.  */
  void *end;
  /* Start of relocatable data in this heap.  */
  void *bloc_start;
  /* Start of unused space in this heap.  */
  void *free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.   */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NULL denotes a freed block, which has not yet
   been collected.  Such blocs may only appear while r_alloc_freeze_level > 0,
   and are freed when the arena is thawed.  Currently these blocs are not
   reusable while the arena is frozen, which is very inefficient.  */
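
/* For example, with three live blocs starting at address A, the list
   looks like (a sketch):

     first_bloc -> [data == A,     size == 100]
               <-> [data == A+100, size == 50]
               <-> [data == A+150, size == 200] <- last_bloc

   and break_value == A + 350, the end of the last bloc.  */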

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  void **variable;
  void *data;
  size_t size;
  void *new_data;		/* temporarily used for relocation */
  struct heap *heap; 		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

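/* If positive, relocatable buffers are in use; if zero or negative,
   r_alloc_sbrk simply hands the request to real_morecore.  */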
static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (void *address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not;
   it just returns the address of SIZE bytes that fall within a single
   heap.  If you call obtain twice in a row with the same arguments, you
   typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static void *
obtain (void *address, size_t size)
{
  heap_ptr heap;
  size_t already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    emacs_abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      void *new = real_morecore (0);
      size_t get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  void *bloc_start = (void *) MEM_ROUNDUP ((void *) (new_heap + 1));

	  if (real_morecore ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) PAGE_ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if (real_morecore (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  ptrdiff_t excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && real_morecore (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  heap_ptr lh_prev;

	  /* This heap should have no blocs in it.  If it does, we
	     cannot return it to the system.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    return;

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  lh_prev = last_heap->prev;
	  /* If the system doesn't want that much memory back, leave
	     last_heap unaltered to reflect that.  This can occur if
	     break_value is still within the original data segment.  */
	  if (real_morecore (- excess) != 0)
	    {
	      last_heap = lh_prev;
	      last_heap->next = NIL_HEAP;
	    }
	}
      else
	{
	  excess = ((char *) last_heap->end
		    - (char *) PAGE_ROUNDUP ((char *) last_heap->end - excess));
	  /* If the system doesn't want that much memory back, leave
	     the end of the last heap unchanged to reflect that.  This
	     can occur if break_value is still within the original
	     data segment.  */
	  if (real_morecore (- excess) != 0)
	    last_heap->end = (char *) last_heap->end - excess;
	}
    }
}

/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (void **ptr)
{
  bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check. Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (size_t size)
{
  bloc_ptr new_bloc;
  heap_ptr heap;

  if (! (new_bloc = malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = NULL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, void *address)
{
  bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  bloc_ptr tb = b;
	  size_t s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, size_t size)
{
  bloc_ptr b;
  heap_ptr heap;
  void *address;
  size_t old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    emacs_abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
            }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
            }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  if (bloc->new_data != bloc->data)
	    memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
            }
	  else
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;
  heap_ptr h;

  if (r_alloc_freeze_level)
    {
      bloc->variable = NULL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Sometimes, 'heap' obtained from bloc->heap above is not really a
     'heap' structure.  It can even be beyond the current break point,
     which will cause crashes when we dereference it below (see
     bug#12242).  Evidently, the reason is bloc allocations done while
     use_relocatable_buffers was non-positive, because additional
     memory we get then is not recorded in the heaps we manage.  If
     bloc->heap records such a "heap", we cannot (and don't need to)
     update its records.  So we validate the 'heap' value by making
     sure it is one of the heaps we manage via the heaps linked list,
     and don't touch a 'heap' that isn't found there.  This avoids
     accessing memory we know nothing about.  */
  for (h = first_heap; h != NIL_HEAP; h = h->next)
    if (heap == h)
      break;

  if (h)
    {
      /* Update the records of which blocs are in HEAP.  */
      if (heap->first_bloc == bloc)
	{
	  if (bloc->next != 0 && bloc->next->heap == heap)
	    heap->first_bloc = bloc->next;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
      if (heap->last_bloc == bloc)
	{
	  if (bloc->prev != 0 && bloc->prev->heap == heap)
	    heap->last_bloc = bloc->prev;
	  else
	    heap->first_bloc = heap->last_bloc = NIL_BLOC;
	}
    }

  relinquish ();
  free (bloc);
}

/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

static void *
r_alloc_sbrk (ptrdiff_t size)
{
  bloc_ptr b;
  void *address;
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (use_relocatable_buffers <= 0)
    return real_morecore (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      void *new_bloc_start;
      heap_ptr h = first_heap;
      size_t get = PAGE_ROUNDUP (size);

      address = (void *) PAGE_ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (void *) PAGE_ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (void *) PAGE_ROUNDUP (virtual_break_value);
	  else
	    address = (void *) PAGE_ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (void *) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NULL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (char *) (h + 1) <= (char *) new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
Paul Eggert's avatar
Paul Eggert committed
852 853
      size_t excess = ((char *) first_heap->bloc_start
		       - ((char *) virtual_break_value + size));
854 855 856 857 858 859 860

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (void *) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      if (b->new_data != b->data)
		memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (void *) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (void *) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

void *
r_alloc (void **ptr, size_t size)
{
  bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no bloc allocated.  */

void
r_alloc_free (void **ptr)
{
  bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    emacs_abort (); /* Double free? PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocs above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */
void *
r_re_alloc (void **ptr, size_t size)
{
  bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    emacs_abort (); /* Already freed? PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = NULL;
	    }
          else
	    return NULL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NULL;
        }
    }
  return *ptr;
}