/* Heap management routines for GNU Emacs on the Microsoft Windows API.
   Copyright (C) 1994, 2001-2020 Free Software Foundation, Inc.

   This file is part of GNU Emacs.

   GNU Emacs is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   GNU Emacs is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GNU Emacs.  If not, see <https://www.gnu.org/licenses/>. */

/*
  Geoff Voelker (voelker@cs.washington.edu)                          7-29-94
*/

/*
  Heavily modified by Fabrice Popineau (fabrice.popineau@gmail.com) 28-02-2014
*/

/*
  Memory allocation scheme for w32/w64:

  - Buffers are mmap'ed using a very simple emulation of mmap/munmap
  - During the temacs phase, if unexec is to be used:
    * we use a private heap declared to be stored into the `dumped_data'
    * unfortunately, this heap cannot be made growable, so the size of
      blocks it can allocate is limited to (0x80000 - pagesize)
    * the blocks that are larger than this are allocated from the end
      of the `dumped_data' array; there are not so many of them.
      We use a very simple first-fit scheme to reuse those blocks.
    * we check that the private heap does not cross the area used
      by the bigger chunks.
  - During the emacs phase, or always if pdumper is used:
    * we create a private heap for new memory blocks
    * we make sure that we never free a block that has been dumped.
      Freeing a dumped block could work in principle, but may prove
      unreliable if we distribute binaries of emacs.exe: MS does not
      guarantee that the heap data structures are the same across all
      versions of their OS, even though the API is available since XP.  */

#include <config.h>
#include <stdio.h>
#include <errno.h>

#include <sys/mman.h>
#include <sys/resource.h>
#include "w32common.h"
#include "w32heap.h"
#include "lisp.h"
#include "w32.h"	/* for FD_SETSIZE */

/* We leave these declarations here because they are used only in
   this file.  The RtlCreateHeap function has been exported by
   ntdll.dll since XP, and its declarations ship with the DDK.
   People often complained that HeapCreate doesn't offer the ability
   to create a heap at a given place, which we need here, and which
   RtlCreateHeap provides.  We reproduce below the definitions
   available with the DDK.  */

typedef PVOID (WINAPI * RtlCreateHeap_Proc) (
                                             /* _In_ */      ULONG Flags,
                                             /* _In_opt_ */  PVOID HeapBase,
                                             /* _In_opt_ */  SIZE_T ReserveSize,
                                             /* _In_opt_ */  SIZE_T CommitSize,
                                             /* _In_opt_ */  PVOID Lock,
                                             /* _In_opt_ */  PVOID Parameters
                                             );

typedef LONG NTSTATUS;

typedef NTSTATUS (NTAPI *PRTL_HEAP_COMMIT_ROUTINE) (
						    IN PVOID Base,
						    IN OUT PVOID *CommitAddress,
						    IN OUT PSIZE_T CommitSize
						    );

typedef struct _RTL_HEAP_PARAMETERS {
  ULONG Length;
  SIZE_T SegmentReserve;
  SIZE_T SegmentCommit;
  SIZE_T DeCommitFreeBlockThreshold;
  SIZE_T DeCommitTotalFreeThreshold;
  SIZE_T MaximumAllocationSize;
  SIZE_T VirtualMemoryThreshold;
  SIZE_T InitialCommit;
  SIZE_T InitialReserve;
  PRTL_HEAP_COMMIT_ROUTINE CommitRoutine;
  SIZE_T Reserved[ 2 ];
} RTL_HEAP_PARAMETERS, *PRTL_HEAP_PARAMETERS;

/* We reserve space for dumping emacs lisp byte-code inside a static
   array.  By storing it in an array, the generic mechanism in
   unexecw32.c will be able to dump it without the need to add a
   special segment to the executable.  In order to be able to do this
   without losing too much space, we need to create a Windows heap at
   the specific address of the static array.  The RtlCreateHeap
   function, available from ntdll.dll since XP, will do this: it
   allows the creation of a non-growable heap at a specific address.
   So before
   dumping, we create a non-growable heap at the address of the
   dumped_data[] array.  After dumping, we reuse memory allocated
   there without being able to free it (but most of it is not meant to
   be freed anyway), and we use a new private heap for all new
   allocations.  */

/* FIXME: Most of the space reserved for dumped_data[] is only used by
   the 1st bootstrap-emacs.exe built while bootstrapping.  Once the
   preloaded Lisp files are byte-compiled, the next loadup uses less
   than half of the size stated below.  It would be nice to find a way
   to build only the first bootstrap-emacs.exe with the large size,
   and reset that to a lower value afterwards.  */
#ifndef HAVE_UNEXEC
/* We don't use dumped_data[], so define to a small size that won't
   matter.  */
# define DUMPED_HEAP_SIZE 10
#else
# if defined _WIN64 || defined WIDE_EMACS_INT
#  define DUMPED_HEAP_SIZE (23*1024*1024)
# else
#  define DUMPED_HEAP_SIZE (13*1024*1024)
# endif
#endif

static unsigned char dumped_data[DUMPED_HEAP_SIZE];

/* Info for keeping track of our dynamic heap used after dumping. */
unsigned char *data_region_base = NULL;
unsigned char *data_region_end = NULL;
static DWORD_PTR committed = 0;

/* The maximum block size that can be handled by a non-growable w32
   heap is limited by the MaxBlockSize value below.

   This point deserves an explanation.

   The W32 heap allocator can be used for a growable heap or a
   non-growable one.

   A growable heap is not compatible with a fixed base address for the
   heap.  Only a non-growable one is.  One drawback of non-growable
   heaps is that they can hold only objects smaller than a certain
148 149
   size (the one defined below).  Most of the larger blocks are GC'ed
   before dumping.  In any case, and to be safe, we implement a simple
150
   first-fit allocation algorithm starting at the end of the
151
   dumped_data[] array as depicted below:
Richard M. Stallman's avatar
Richard M. Stallman committed
152

153 154 155 156 157 158 159 160 161 162
  ----------------------------------------------
  |               |              |             |
  | Private heap  |->          <-|  Big chunks |
  |               |              |             |
  ----------------------------------------------
  ^               ^              ^
  dumped_data     dumped_data    bc_limit
                  + committed

*/

/* Info for managing our preload heap, which is essentially a fixed size
   data area in the executable. */
#define PAGE_SIZE 0x1000
#define MaxBlockSize (0x80000 - PAGE_SIZE)

#define MAX_BLOCKS 0x40

static struct
{
  unsigned char *address;
  size_t size;
  DWORD occupied;
} blocks[MAX_BLOCKS];

static DWORD          blocks_number = 0;
static unsigned char *bc_limit;

/* Handle for the private heap:
    - inside the dumped_data[] array before dump with unexec,
    - outside of it after dump, or always if pdumper is used.
*/
HANDLE heap = NULL;

/* We redirect the standard allocation functions.  */
malloc_fn the_malloc_fn;
realloc_fn the_realloc_fn;
free_fn the_free_fn;
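
/* A minimal sketch (illustrative only, kept out of the build) of how
   these pointers are meant to be consumed: every allocation goes
   through whichever trio init_heap installed for the current phase.
   The helper name below is an assumption for illustration; the actual
   redirection of malloc/realloc/free is set up elsewhere in the w32
   build.  */
#if 0
static void *
example_duplicate (const void *src, size_t nbytes)
{
  /* Dispatch through whichever allocator init_heap installed.  */
  void *p = (*the_malloc_fn) (nbytes);

  if (p == NULL)
    return NULL;	/* errno is already ENOMEM */
  CopyMemory (p, src, nbytes);
  /* A block obtained this way must eventually be released with
     (*the_free_fn) (p), never with the CRT's free.  */
  return p;
}
#endif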

/* It doesn't seem to be useful to allocate from a file mapping.
   It would be if the memory was shared.
     http://stackoverflow.com/questions/307060/what-is-the-purpose-of-allocating-pages-in-the-pagefile-with-createfilemapping  */

/* This is the function that commits memory when the heap allocator
   asks for new memory.  Before dumping with unexec, we allocate
   space from the fixed-size dumped_data[] array.
*/
static NTSTATUS NTAPI
dumped_data_commit (PVOID Base, PVOID *CommitAddress, PSIZE_T CommitSize)
{
  /* This is used before dumping.

     The private heap is stored at dumped_data[] address.
     We commit contiguous areas of the dumped_data array
     as requests arrive.  */
  *CommitAddress = data_region_base + committed;
  committed += *CommitSize;
  /* Check that the private heap area does not overlap the big chunks area.  */
  if (((unsigned char *)(*CommitAddress)) + *CommitSize >= bc_limit)
    {
      fprintf (stderr,
	       "dumped_data_commit: memory exhausted.\nEnlarge dumped_data[]!\n");
      exit (-1);
    }
  return 0;
}

/* Heap creation.  */

/* We want to turn on Low Fragmentation Heap for XP and older systems.
   MinGW32 lacks those definitions.  */
#ifndef MINGW_W64
typedef enum _HEAP_INFORMATION_CLASS {
  HeapCompatibilityInformation
} HEAP_INFORMATION_CLASS;

typedef WINBASEAPI BOOL (WINAPI * HeapSetInformation_Proc)(HANDLE,HEAP_INFORMATION_CLASS,PVOID,SIZE_T);
#endif

void
init_heap (bool use_dynamic_heap)
{
  /* FIXME: Remove the condition, the 'else' branch below, and all the
     related definitions and code, including dumped_data[], when unexec
     support is removed from Emacs.  */
  if (use_dynamic_heap)
    {
      /* After dumping, use a new private heap.  We explicitly enable
         the low fragmentation heap (LFH) here, for the sake of pre
         Vista versions.  Note: this will harmlessly fail on Vista and
         later, where the low-fragmentation heap is enabled by
         default.  It will also fail on pre-Vista versions when Emacs
         is run under a debugger; set _NO_DEBUG_HEAP=1 in the
         environment before starting GDB to get low fragmentation heap
         on XP and older systems, for the price of losing "certain
         heap debug options"; for the details see
         http://msdn.microsoft.com/en-us/library/windows/desktop/aa366705%28v=vs.85%29.aspx.  */
      data_region_end = data_region_base;

      /* Create the private heap.  */
      heap = HeapCreate (0, 0, 0);

#ifndef MINGW_W64
      unsigned long enable_lfh = 2;
      /* Set the low-fragmentation heap for OS before Vista.  */
      HMODULE hm_kernel32dll = LoadLibrary ("kernel32.dll");
      HeapSetInformation_Proc s_pfn_Heap_Set_Information =
        (HeapSetInformation_Proc) get_proc_addr (hm_kernel32dll,
                                                        "HeapSetInformation");
      if (s_pfn_Heap_Set_Information != NULL)
	{
	  if (s_pfn_Heap_Set_Information ((PVOID) heap,
					  HeapCompatibilityInformation,
					  &enable_lfh, sizeof(enable_lfh)) == 0)
	    DebPrint (("Enabling Low Fragmentation Heap failed: error %ld\n",
		       GetLastError ()));
	}
#endif

      if (os_subtype == OS_9X)
        {
          the_malloc_fn = malloc_after_dump_9x;
          the_realloc_fn = realloc_after_dump_9x;
          the_free_fn = free_after_dump_9x;
        }
      else
        {
          the_malloc_fn = malloc_after_dump;
          the_realloc_fn = realloc_after_dump;
          the_free_fn = free_after_dump;
        }
    }
  else	/* Before dumping with unexec: use static heap.  */
    {
      /* Find the RtlCreateHeap function.  Headers for this function
         are provided with the w32 DDK, but the function is available
         in ntdll.dll since XP.  */
      HMODULE hm_ntdll = LoadLibrary ("ntdll.dll");
      RtlCreateHeap_Proc s_pfn_Rtl_Create_Heap
	= (RtlCreateHeap_Proc) get_proc_addr (hm_ntdll, "RtlCreateHeap");
      /* Specific parameters for the private heap.  */
      RTL_HEAP_PARAMETERS params;
      ZeroMemory (&params, sizeof(params));
      params.Length = sizeof(RTL_HEAP_PARAMETERS);

      data_region_base = (unsigned char *)ROUND_UP (dumped_data, 0x1000);
      data_region_end = bc_limit = dumped_data + DUMPED_HEAP_SIZE;

      params.InitialCommit = committed = 0x1000;
      params.InitialReserve = sizeof(dumped_data);
      /* Use our own routine to commit memory from the dumped_data
         array.  */
      params.CommitRoutine = &dumped_data_commit;

      /* Create the private heap.  */
      if (s_pfn_Rtl_Create_Heap == NULL)
	{
	  fprintf (stderr, "Cannot build Emacs without RtlCreateHeap being available; exiting.\n");
	  exit (-1);
	}
      heap = s_pfn_Rtl_Create_Heap (0, data_region_base, 0, 0, NULL, &params);

      if (os_subtype == OS_9X)
        {
          fprintf (stderr, "Cannot dump Emacs on Windows 9X; exiting.\n");
          exit (-1);
        }
      else
        {
          the_malloc_fn = malloc_before_dump;
          the_realloc_fn = realloc_before_dump;
          the_free_fn = free_before_dump;
        }
    }

  /* Update system version information to match current system.  */
  cache_system_info ();
}


/* malloc, realloc, free.  */

#undef malloc
#undef realloc
#undef free

/* FREEABLE_P checks if the block can be safely freed.  */
#define FREEABLE_P(addr)						\
  ((DWORD_PTR)(unsigned char *)(addr) > 0				\
   && ((unsigned char *)(addr) < dumped_data				\
       || (unsigned char *)(addr) >= dumped_data + DUMPED_HEAP_SIZE))

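/* Allocate SIZE bytes from the private heap created after dumping
   (or unconditionally when pdumper is used).  */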
void *
malloc_after_dump (size_t size)
{
  /* Use the new private heap.  */
  void *p = HeapAlloc (heap, 0, size);

  /* After dump, keep track of the "brk value" for sbrk(0).  */
  if (p)
    {
      unsigned char *new_brk = (unsigned char *)p + size;

      if (new_brk > data_region_end)
	data_region_end = new_brk;
    }
  else
    errno = ENOMEM;
  return p;
}

/* FIXME: The *_before_dump functions should be removed when pdumper
   becomes the only dumping method.  */
void *
malloc_before_dump (size_t size)
{
  void *p;

  /* Before dumping.  The private heap can handle only requests for
     less than MaxBlockSize.  */
  if (size < MaxBlockSize)
    {
      /* Use the private heap if possible.  */
      p = HeapAlloc (heap, 0, size);
      if (!p)
	errno = ENOMEM;
    }
  else
    {
      /* Find the first big chunk that can hold the requested size.  */
      int i = 0;

      for (i = 0; i < blocks_number; i++)
	{
	  if (blocks[i].occupied == 0 && blocks[i].size >= size)
	    break;
	}
      if (i < blocks_number)
	{
	  /* If found, use it.  */
	  p = blocks[i].address;
	  blocks[i].occupied = TRUE;
	}
      else
	{
	  /* Allocate a new big chunk from the end of the dumped_data
	     array.  */
	  if (blocks_number >= MAX_BLOCKS)
	    {
	      fprintf (stderr,
		       "malloc_before_dump: no more big chunks available.\nEnlarge MAX_BLOCKS!\n");
	      exit (-1);
	    }
	  bc_limit -= size;
	  bc_limit = (unsigned char *)ROUND_DOWN (bc_limit, 0x10);
	  p = bc_limit;
	  blocks[blocks_number].address = p;
	  blocks[blocks_number].size = size;
	  blocks[blocks_number].occupied = TRUE;
	  blocks_number++;
	  /* Check that areas do not overlap.  */
	  if (bc_limit < dumped_data + committed)
	    {
	      fprintf (stderr,
		       "malloc_before_dump: memory exhausted.\nEnlarge dumped_data[]!\n");
	      exit (-1);
	    }
	}
    }
  return p;
}

/* Re-allocate the previously allocated block in ptr, making the new
   block SIZE bytes long.  */
void *
realloc_after_dump (void *ptr, size_t size)
{
  void *p;

  /* After dumping.  */
  if (FREEABLE_P (ptr))
    {
      /* Reallocate the block since it lies in the new heap.  */
      p = HeapReAlloc (heap, 0, ptr, size);
      if (!p)
	errno = ENOMEM;
    }
  else
    {
      /* If the block lies in the dumped data, do not free it.  Only
         allocate a new one.  */
      p = HeapAlloc (heap, 0, size);
      if (!p)
	errno = ENOMEM;
      else if (ptr)
	CopyMemory (p, ptr, size);
    }
  /* After dump, keep track of the "brk value" for sbrk(0).  */
  if (p)
    {
      unsigned char *new_brk = (unsigned char *)p + size;

      if (new_brk > data_region_end)
	data_region_end = new_brk;
    }
  return p;
}

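/* Re-allocate PTR to SIZE bytes before dumping with unexec, using the
   private heap inside dumped_data[] or the big-chunk area at its end,
   as appropriate.  */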
void *
realloc_before_dump (void *ptr, size_t size)
{
  void *p;

  /* Before dumping.  */
  if (dumped_data < (unsigned char *)ptr
      && (unsigned char *)ptr < bc_limit && size <= MaxBlockSize)
    {
      p = HeapReAlloc (heap, 0, ptr, size);
      if (!p)
	errno = ENOMEM;
    }
  else
    {
      /* In this case, either the new block is too large for the heap,
         or the old block was already too large.  In both cases,
         malloc_before_dump() and free_before_dump() will take care of
         reallocation.  */
      p = malloc_before_dump (size);
      /* If SIZE is below MaxBlockSize, malloc_before_dump will try to
	 allocate it in the fixed heap.  If that fails, we could have
	 kept the block in its original place, above bc_limit, instead
	 of failing the call as below.  But this doesn't seem to be
	 worth the added complexity, as loadup allocates only a very
	 small number of large blocks, and never reallocates them.  */
      if (p && ptr)
	{
	  CopyMemory (p, ptr, size);
	  free_before_dump (ptr);
	}
    }
  return p;
}

/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
void
free_after_dump (void *ptr)
{
  /* After dumping.  */
  if (FREEABLE_P (ptr))
    {
      /* Free the block if it is in the new private heap.  */
      HeapFree (heap, 0, ptr);
    }
}

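/* Free PTR, which was allocated before dumping with unexec, either
   from the private heap inside dumped_data[] or from the big-chunk
   area at its end.  */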
void
free_before_dump (void *ptr)
{
  if (!ptr)
    return;

  /* Before dumping.  */
  if (dumped_data < (unsigned char *)ptr
      && (unsigned char *)ptr < bc_limit)
    {
      /* Free the block if it is allocated in the private heap.  */
      HeapFree (heap, 0, ptr);
    }
  else
    {
      /* Look for the big chunk.  */
      int i;

      for (i = 0; i < blocks_number; i++)
	{
	  if (blocks[i].address == ptr)
	    {
	      /* Reset block occupation if found.  */
	      blocks[i].occupied = 0;
	      break;
	    }
	}
      /* If the loop ran to completion, the block was not found in
	 blocks[], which means the caller passed a bogus pointer;
	 trigger an error in checking builds.  */
      eassert (i < blocks_number);
    }
}

/* On Windows 9X, HeapAlloc may return pointers that are not aligned
   on an 8-byte boundary, the alignment required by the Lisp memory
   management.  To circumvent this problem, we manually enforce the
   alignment on Windows 9X.  */

void *
malloc_after_dump_9x (size_t size)
{
  void *p = malloc_after_dump (size + 8);
  void *pa;
  if (p == NULL)
    return p;
  pa = (void*)(((intptr_t)p + 8) & ~7);
  *((void**)pa-1) = p;
  return pa;
}

void *
realloc_after_dump_9x (void *ptr, size_t size)
{
  if (FREEABLE_P (ptr))
    {
      void *po = *((void**)ptr-1);
      void *p;
      void *pa;
      p = realloc_after_dump (po, size + 8);
      if (p == NULL)
        return p;
      pa = (void*)(((intptr_t)p + 8) & ~7);
      if (ptr != NULL &&
          (char*)pa - (char*)p != (char*)ptr - (char*)po)
        {
          /* Handle the case where alignment in pre-realloc and
             post-realloc blocks does not match.  */
          MoveMemory (pa, (void*)((char*)p + ((char*)ptr - (char*)po)), size);
        }
      *((void**)pa-1) = p;
      return pa;
    }
  else
    {
      /* Non-freeable pointers have no alignment-enforcing header
         (since dumping is not allowed on Windows 9X).  */
      void* p = malloc_after_dump_9x (size);
      if (p != NULL)
	CopyMemory (p, ptr, size);
      return p;
    }
}

void
free_after_dump_9x (void *ptr)
{
  if (FREEABLE_P (ptr))
    {
      free_after_dump (*((void**)ptr-1));
    }
}
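
/* A worked example of the scheme above (illustrative only): on 32-bit
   Windows 9X pointers are 4 bytes and HeapAlloc results are at least
   4-byte aligned.  If HeapAlloc returns p = 0x100C, then
   pa = (0x100C + 8) & ~7 = 0x1010, and p itself is stashed in the 4
   bytes just below pa, i.e. ((void **) pa)[-1], which is how
   free_after_dump_9x and realloc_after_dump_9x recover it.  */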

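/* Emulation of calloc: allocate through the active allocator and
   zero-fill the result.  */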
void *
sys_calloc (size_t number, size_t size)
{
  size_t nbytes = number * size;
  void *ptr = (*the_malloc_fn) (nbytes);
  if (ptr)
    memset (ptr, 0, nbytes);
  return ptr;
}

#if defined HAVE_UNEXEC && defined ENABLE_CHECKING
void
report_temacs_memory_usage (void)
{
  DWORD blocks_used = 0;
  size_t large_mem_used = 0;
  int i;

  for (i = 0; i < blocks_number; i++)
    if (blocks[i].occupied)
      {
	blocks_used++;
	large_mem_used += blocks[i].size;
      }

  /* Emulate 'message', which writes to stderr in non-interactive
     sessions.  */
  fprintf (stderr,
	   "Dump memory usage: Heap: %" PRIu64 "  Large blocks(%lu/%lu): %" PRIu64 "/%" PRIu64 "\n",
	   (unsigned long long)committed, blocks_used, blocks_number,
	   (unsigned long long)large_mem_used,
	   (unsigned long long)(dumped_data + DUMPED_HEAP_SIZE - bc_limit));
}
#endif

/* Emulate getpagesize. */
int
getpagesize (void)
{
  return sysinfo_cache.dwPageSize;
}

void *
sbrk (ptrdiff_t increment)
{
  /* data_region_end is the address beyond the last allocated byte.
     The sbrk() function is not emulated at all, except for a 0 value
     of its parameter.  This is needed by the Emacs Lisp function
     `memory-limit'.  */
  eassert (increment == 0);
  return data_region_end;
}



/* MMAP allocation for buffers.  */

#define MAX_BUFFER_SIZE (512 * 1024 * 1024)

void *
mmap_alloc (void **var, size_t nbytes)
{
  void *p = NULL;

  /* We implement amortized allocation.  We start by reserving twice
     the size requested and commit only the size requested.  Then
     realloc could proceed and use the reserved pages, reallocating
     only if needed.  Buffer shrink would happen only so that we stay
     in the 2x range.  This is a big win when visiting compressed
     files, where the final size of the buffer is not known in
     advance, and the buffer is enlarged several times as the data is
     decompressed on the fly.  */
  if (nbytes < MAX_BUFFER_SIZE)
    p = VirtualAlloc (NULL, ROUND_UP (nbytes * 2, get_allocation_unit ()),
		      MEM_RESERVE, PAGE_READWRITE);

  /* If it fails, or if the request is above 512MB, try with the
     requested size.  */
  if (p == NULL)
    p = VirtualAlloc (NULL, ROUND_UP (nbytes, get_allocation_unit ()),
		      MEM_RESERVE, PAGE_READWRITE);

  if (p != NULL)
    {
      /* Now, commit pages for NBYTES.  */
      *var = VirtualAlloc (p, nbytes, MEM_COMMIT, PAGE_READWRITE);
      if (*var == NULL)
	p = *var;
    }

  if (!p)
    {
      DWORD e = GetLastError ();

      if (e == ERROR_NOT_ENOUGH_MEMORY)
	errno = ENOMEM;
      else
	{
	  DebPrint (("mmap_alloc: error %ld\n", e));
	  errno = EINVAL;
	}
    }

  return *var = p;
}

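/* Release the mapping pointed to by *VAR and reset *VAR to NULL.  */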
void
mmap_free (void **var)
{
  if (*var)
    {
      if (VirtualFree (*var, 0, MEM_RELEASE) == 0)
        DebPrint (("mmap_free: error %ld\n", GetLastError ()));
      *var = NULL;
    }
}

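/* Enlarge or shrink the mapping at *VAR to NBYTES, committing or
   decommitting pages within the existing reservation when possible,
   and falling back to a fresh mapping plus a copy otherwise.  */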
void *
mmap_realloc (void **var, size_t nbytes)
{
  MEMORY_BASIC_INFORMATION memInfo, m2;
  void *old_ptr;

  if (*var == NULL)
    return mmap_alloc (var, nbytes);

  /* This case happens in init_buffer().  */
  if (nbytes == 0)
    {
      mmap_free (var);
      return mmap_alloc (var, nbytes);
    }

  memset (&memInfo, 0, sizeof (memInfo));
  if (VirtualQuery (*var, &memInfo, sizeof (memInfo)) == 0)
    DebPrint (("mmap_realloc: VirtualQuery error = %ld\n", GetLastError ()));

  /* We need to enlarge the block.  */
  if (memInfo.RegionSize < nbytes)
    {
      memset (&m2, 0, sizeof (m2));
      if (VirtualQuery ((char *)*var + memInfo.RegionSize, &m2, sizeof(m2)) == 0)
        DebPrint (("mmap_realloc: VirtualQuery error = %ld\n",
		   GetLastError ()));
      /* If there is enough room in the current reserved area, then
	 commit more pages as needed.  */
      if (m2.State == MEM_RESERVE
	  && m2.AllocationBase == memInfo.AllocationBase
	  && nbytes <= memInfo.RegionSize + m2.RegionSize)
	{
	  void *p;

	  p = VirtualAlloc (*var, nbytes, MEM_COMMIT, PAGE_READWRITE);
	  if (!p /* && GetLastError() != ERROR_NOT_ENOUGH_MEMORY */)
	    {
	      DebPrint (("realloc enlarge: VirtualAlloc (%p + %I64x, %I64x) error %ld\n",
			 *var, (uint64_t)memInfo.RegionSize,
			 (uint64_t)(nbytes - memInfo.RegionSize),
			 GetLastError ()));
	      DebPrint (("next region: %p %p %I64x %x\n", m2.BaseAddress,
			 m2.AllocationBase, (uint64_t)m2.RegionSize,
			 m2.AllocationProtect));
	    }
	  else
	    return *var;
	}
      /* Else we must actually enlarge the block by allocating a new
	 one and copying previous contents from the old to the new one.  */
      old_ptr = *var;

      if (mmap_alloc (var, nbytes))
	{
	  CopyMemory (*var, old_ptr, memInfo.RegionSize);
	  mmap_free (&old_ptr);
	  return *var;
	}
      else
	{
	  /* We failed to reallocate the buffer.  */
	  *var = old_ptr;
	  return NULL;
	}
    }

  /* If we are shrinking by more than one page...  */
  if (memInfo.RegionSize  > nbytes + getpagesize())
    {
      /* If we are shrinking a lot...  */
      if ((memInfo.RegionSize / 2) > nbytes)
        {
          /* Let's give some memory back to the system and release
	     some pages.  */
          old_ptr = *var;

	  if (mmap_alloc (var, nbytes))
            {
              CopyMemory (*var, old_ptr, nbytes);
              mmap_free (&old_ptr);
              return *var;
            }
          else
	    {
	      /* In case we fail to shrink, try to go on with the old block.
		 But that means there is a lot of memory pressure.
		 We could also decommit pages.  */
	      *var = old_ptr;
	      return *var;
	    }
        }

      /* We still can decommit pages.  */
      if (VirtualFree ((char *)*var + nbytes + get_page_size(),
		       memInfo.RegionSize - nbytes - get_page_size(),
		       MEM_DECOMMIT) == 0)
        DebPrint (("mmap_realloc: VirtualFree error %ld\n", GetLastError ()));
      return *var;
    }

  /* Not enlarging, not shrinking by more than one page.  */
  return *var;
}

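/* A minimal sketch (illustrative only, kept out of the build) of the
   intended life cycle under this scheme: mmap_alloc reserves roughly
   twice the request and commits only what was asked for, so the
   mmap_realloc call below can usually grow the block in place by
   committing more pages from the same reservation.  The helper name
   and sizes are arbitrary.  */
#if 0
static void
example_buffer_lifetime (void)
{
  void *text = NULL;

  /* Reserve about 128K, commit 64K.  */
  if (mmap_alloc (&text, 64 * 1024) == NULL)
    return;
  /* Typically grows in place, inside the original reservation.  */
  if (mmap_realloc (&text, 100 * 1024) == NULL)
    {
      mmap_free (&text);
      return;
    }
  /* Release the whole reservation.  */
  mmap_free (&text);
}
#endif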

/* Emulation of getrlimit and setrlimit.  */

int
getrlimit (rlimit_resource_t rltype, struct rlimit *rlp)
{
  int retval = -1;

  switch (rltype)
    {
    case RLIMIT_STACK:
      {
	MEMORY_BASIC_INFORMATION m;
	/* Implementation note: Posix says that RLIMIT_STACK returns
	   information about the stack size for the main thread.  The
	   implementation below returns the stack size for the calling
	   thread, so it's more like pthread_attr_getstacksize.  But
	   Emacs clearly wants the latter, given how it uses the
	   results, so the implementation below is more future-proof,
	   if what's now the main thread will become some other thread
	   at some future point.  */
	if (!VirtualQuery ((LPCVOID) &m, &m, sizeof m))
	  errno = EPERM;
	else
	  {
	    rlp->rlim_cur = (DWORD_PTR) &m - (DWORD_PTR) m.AllocationBase;
	    rlp->rlim_max =
	      (DWORD_PTR) m.BaseAddress + m.RegionSize
	      - (DWORD_PTR) m.AllocationBase;

	    /* The last page is the guard page, so subtract that.  */
	    rlp->rlim_cur -= getpagesize ();
	    rlp->rlim_max -= getpagesize ();
	    retval = 0;
	  }
	}
      break;
    case RLIMIT_NOFILE:
      /* Implementation note: The real value is returned by
	 _getmaxstdio.  But our FD_SETSIZE is smaller, to cater to
	 Windows 9X, and process.c includes some logic that's based on
	 the assumption that the handle resource is inherited to child
	 processes.  We want to avoid that logic, so we tell process.c
	 our current limit is already equal to FD_SETSIZE.  */
      rlp->rlim_cur = FD_SETSIZE;
      rlp->rlim_max = 2048;	/* see _setmaxstdio documentation */
      retval = 0;
      break;
    default:
      /* Note: we could return meaningful results for other RLIMIT_*
	 requests, but Emacs doesn't currently need that, so we just
	 punt for them.  */
      errno = ENOSYS;
      break;
    }
  return retval;
}

int
setrlimit (rlimit_resource_t rltype, const struct rlimit *rlp)
{
  switch (rltype)
    {
    case RLIMIT_STACK:
    case RLIMIT_NOFILE:
      /* We cannot modify these limits, so we always fail.  */
      errno = EPERM;
      break;
    default:
      errno = ENOSYS;
      break;
    }
  return -1;
}
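
/* A minimal sketch (illustrative only, kept out of the build) showing
   how the getrlimit emulation above can be used to estimate the stack
   room still available to the calling thread; the helper name is an
   assumption for illustration.  */
#if 0
static size_t
example_stack_headroom (void)
{
  struct rlimit rl;

  /* With this emulation, rlim_cur is the distance from the querying
     frame down to the bottom of the thread's stack reservation, minus
     the guard page, i.e. roughly the space still usable.  */
  if (getrlimit (RLIMIT_STACK, &rl) == 0)
    return (size_t) rl.rlim_cur;
  return 0;
}
#endif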