/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2019 Free
   Software Foundation, Inc.
/* Declarations for `malloc' and friends.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <config.h>

#define USE_PTHREAD
#define USE_PTHREAD
#endif

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#include "lisp.h"

#include "ptr-bounds.h"

#ifdef HAVE_MALLOC_H
# if GNUC_PREREQ (4, 2, 0)
#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
# endif
# include <malloc.h>
#endif
#ifndef __MALLOC_HOOK_VOLATILE
# define __MALLOC_HOOK_VOLATILE volatile
#endif
#ifndef HAVE_MALLOC_H
extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
extern void *(*__morecore) (ptrdiff_t);
#endif

/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.  */
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#define malloc_info gmalloc_info

#ifdef HYBRID_MALLOC
# include "sheap.h"
#endif

#ifdef	__cplusplus
extern "C"
{
#endif

#ifdef HYBRID_MALLOC
#define extern static
#endif

/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in PTR, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define BLOCKLOG	(INT_WIDTH > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
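
/* Worked example (illustrative): with INT_WIDTH > 16, BLOCKLOG is 12,
   so BLOCKSIZE is 1 << 12 = 4096 bytes; BLOCKIFY (4096) is 1 and
   BLOCKIFY (4097) rounds up to 2.  */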

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_WIDTH > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a block that is not one of ours (typically,
	   allocated by system malloc), positive for the log base 2 of
	   the fragment size of a fragmented block, -1 for the first
	   block of a multiblock object, and unspecified for later
	   blocks of that object.  Type-0 blocks can be present
	   because the system malloc can be invoked by library
	   functions in an undumped Emacs.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree; /* Free frags in a fragmented block.  */
		size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
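
/* Example (illustrative): the first block of a 3-block object has
   busy.type == -1 and busy.info.size == 3, while a block carved into
   128-byte fragments has busy.type == 7, since 1 << 7 == 128.  */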

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	((size_t) ((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
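
/* Worked example (illustrative): BLOCK and ADDRESS invert each other
   for block-aligned addresses.  With BLOCKSIZE 4096, BLOCK (_heapbase)
   is 1 and ADDRESS (2) is _heapbase + 4096; numbering starts at 1 so
   entry 0 of _heapinfo is free to serve as the free-list anchor.  */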

/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
static struct list _fraghead[BLOCKLOG];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;		/* The address that aligned_alloc returned.  */
    void *exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks
   and don't bound the resulting pointers.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

#ifdef GC_MCHECK

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));
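
/* Usage sketch (illustrative): calling mcheck (NULL) before the first
   allocation installs the default handler, which reports on stderr and
   aborts; it returns 0 on success.  malloc_initialize_1 below does
   exactly this when GC_MCHECK is defined.  */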

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap. */
    size_t chunks_used;	/* Chunks allocated by the user. */
    size_t bytes_used;	/* Byte total of user-allocated chunks. */
    size_t chunks_free;	/* Chunks in the free list. */
    size_t bytes_free;	/* Byte total of chunks in the free list. */
  };

/* Pick up the current statistics. */
extern struct mstats mstats (void);
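
/* Usage sketch (illustrative): the fields mirror the _chunks_* and
   _bytes_* counters above, so a caller can log heap occupancy with

     struct mstats s = mstats ();
     fprintf (stderr, "%zu of %zu heap bytes in use\n",
              s.bytes_used, s.bytes_total);  */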

#endif

#undef extern

#ifdef	__cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

/* Debugging hook for 'malloc'.  */
static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook) (size_t);

/* Replacements for traditional glibc malloc hooks, for platforms that
   do not already have these hooks.  Platforms with these hooks all
   used relaxed ref/def, so it is OK to define them here too.  */
void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
void *(*__morecore) (ptrdiff_t);

#ifndef HYBRID_MALLOC

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

#endif /* HYBRID_MALLOC */

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
#if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
static
#endif
size_t __malloc_extra_blocks;

/* Number of info entries.  */
static size_t heapsize;

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
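
/* Worked example (illustrative): with BLOCKSIZE 4096, if __morecore
   returns 0x100300, then adj is 0x300, so align extends the break by
   another 0x1000 - 0x300 = 0xd00 bytes and returns 0x101000, the next
   block boundary.  */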

/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = -1;
  _heapinfo[block].busy.info.size = blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif	/* USE_PTHREAD */

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) ptr_bounds_init (_heapinfo);
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary. */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if (heapsize < BLOCK ((char *) result + size))
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while (newsize < BLOCK ((char *) result + size));

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
  	{
 	  newinfo = align (newsize * sizeof (malloc_info));

 	  /* Did it fail?  */
 	  if (newinfo == NULL)
 	    {
 	      (*__morecore) (-size);
 	      return NULL;
 	    }

 	  /* Is it big enough to record status for its own space?
 	     If so, we win.  */
	  if (BLOCK ((char *) newinfo + newsize * sizeof (malloc_info))
 	      < newsize)
 	    break;

 	  /* Must try again.  First give back most of what we just got.  */
 	  (*__morecore) (- newsize * sizeof (malloc_info));
 	  newsize *= 2;
  	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if	0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size. */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
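      /* Worked example (illustrative): a request of 100 bytes makes
         size 99 above, and halving it to zero leaves log == 7, so the
         fragment size 1 << 7 == 128 is the smallest power of two that
         can hold the request.  */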

      /* Look in the fragment lists for a
	 free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters. */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
 		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
 		  block = _heapinfo[0].free.prev;
  		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
 		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list. */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list. */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = -1;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of gmalloc_hook to an automatic variable in case
     gmalloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to gmalloc_hook) for now.  */
  hook = gmalloc_hook;
  void *result = (hook ? hook : _malloc_internal) (size);
  return ptr_bounds_clip (result, size);
}

#if !(defined (_LIBC) || defined (HYBRID_MALLOC))

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
static void (*__MALLOC_HOOK_VOLATILE gfree_hook) (void *);

#ifndef HYBRID_MALLOC

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
#endif

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;
  ptr = ptr_bounds_init (ptr);

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case -1:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

 	  size_t info_block = BLOCK (_heapinfo);
 	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
 	  size_t prev_block = _heapinfo[block].free.prev;
 	  size_t prev_blocks = _heapinfo[prev_block].free.size;
 	  size_t next_block = _heapinfo[block].free.next;
 	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
 	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = -1;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block. */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block. */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like 'free' but don't call a hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = gfree_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

#ifndef HYBRID_MALLOC
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif