profiler.c 18.3 KB
Newer Older
1
/* Profiler implementation.
2

Paul Eggert's avatar
Paul Eggert committed
3
Copyright (C) 2012-2019 Free Software Foundation, Inc.
4 5 6 7 8

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
9 10
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
11 12 13 14 15 16 17

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
18
along with GNU Emacs.  If not, see <https://www.gnu.org/licenses/>.  */
19 20 21

#include <config.h>
#include "lisp.h"
22
#include "syssignal.h"
23
#include "systime.h"
Daniel Colascione's avatar
Daniel Colascione committed
24
#include "pdumper.h"
25 26 27 28 29 30 31 32 33

/* Return A + B, clamped to the maximum fixnum if the sum is larger.
   A and B must be nonnegative and in fixnum range, so the raw sum
   itself cannot overflow EMACS_INT.  */

static EMACS_INT
saturated_add (EMACS_INT a, EMACS_INT b)
{
  EMACS_INT sum = a + b;
  return sum < MOST_POSITIVE_FIXNUM ? sum : MOST_POSITIVE_FIXNUM;
}
34

35
/* Logs.  */

/* A profiler log is an ordinary Lisp hash table used in a special,
   pre-filled way; see make_log.  */
typedef struct Lisp_Hash_Table log_t;

static bool cmpfn_profiler (
  struct hash_table_test *, Lisp_Object, Lisp_Object);

static EMACS_UINT hashfn_profiler (
  struct hash_table_test *, Lisp_Object);

/* Hash-table test for profiler logs: keys (backtrace vectors) are
   compared element-wise with `function-equal' semantics via
   cmpfn_profiler, hashed consistently via hashfn_profiler.  */
static const struct hash_table_test hashtest_profiler =
  {
   LISPSYM_INITIALLY (Qprofiler_backtrace_equal),
   LISPSYM_INITIALLY (Qnil) /* user_hash_function */,
   LISPSYM_INITIALLY (Qnil) /* user_cmp_function */,
   cmpfn_profiler,
   hashfn_profiler,
  };
53

54
static Lisp_Object
55
make_log (void)
56 57 58 59 60
{
  /* We use a standard Elisp hash-table object, but we use it in
     a special way.  This is OK as long as the object is not exposed
     to Elisp, i.e. until it is returned by *-profiler-log, after which
     it can't be used any more.  */
61 62 63 64
  EMACS_INT heap_size
    = clip_to_bounds (0, profiler_log_size, MOST_POSITIVE_FIXNUM);
  ptrdiff_t max_stack_depth
    = clip_to_bounds (0, profiler_max_stack_depth, PTRDIFF_MAX);;
65 66
  Lisp_Object log = make_hash_table (hashtest_profiler, heap_size,
				     DEFAULT_REHASH_SIZE,
67 68
				     DEFAULT_REHASH_THRESHOLD,
				     Qnil, false);
69 70 71 72
  struct Lisp_Hash_Table *h = XHASH_TABLE (log);

  /* What is special about our hash-tables is that the keys are pre-filled
     with the vectors we'll put in them.  */
73
  ptrdiff_t i = ASIZE (h->key_and_value) >> 1;
74
  while (i > 0)
75
    set_hash_key_slot (h, --i, make_nil_vector (max_stack_depth));
76
  return log;
77 78
}

79
/* Evict the least used half of the hash_table.

   When the table is full, we have to evict someone.
   The easiest and most efficient is to evict the value we're about to add
   (i.e. once the table is full, stop sampling).

   We could also pick the element with the lowest count and evict it,
   but finding it is O(N) and for that amount of work we get very
   little in return: for the next sample, this latest sample will have
   count==1 and will hence be a prime candidate for eviction :-(

   So instead, we take O(N) time to eliminate more or less half of the
   entries (the half with the lowest counts).  So we get an amortized
   cost of O(1) and we get O(N) time for a new entry to grow larger
   than the other least counts before a new round of eviction.  */

/* Return an approximation of the median of the counts stored at
   hash-table slots [START, START+SIZE) of LOG, computed recursively
   as the median of the approximate medians of three thirds.  */
static EMACS_INT approximate_median (log_t *log,
				     ptrdiff_t start, ptrdiff_t size)
{
  eassert (size > 0);
  if (size < 2)
    return XFIXNUM (HASH_VALUE (log, start));
  if (size < 3)
    /* Not an actual median, but better for our application than
       choosing either of the two numbers.  */
    return ((XFIXNUM (HASH_VALUE (log, start))
	     + XFIXNUM (HASH_VALUE (log, start + 1)))
	    / 2);
  else
    {
      ptrdiff_t newsize = size / 3;
      ptrdiff_t start2 = start + newsize;
      EMACS_INT i1 = approximate_median (log, start, newsize);
      EMACS_INT i2 = approximate_median (log, start2, newsize);
      EMACS_INT i3 = approximate_median (log, start2 + newsize,
					 size - 2 * newsize);
      /* Median of the three sub-medians i1, i2, i3.  */
      return (i1 < i2
	      ? (i2 < i3 ? i2 : (i1 < i3 ? i3 : i1))
	      : (i1 < i3 ? i1 : (i2 < i3 ? i3 : i2)));
    }
}

121
/* Remove from LOG every entry whose count is at most the approximate
   median count, freeing roughly half of the table.  The evicted key
   vectors are cleared and put back as pre-allocated keys so future
   samples can reuse them without allocating.  */
static void evict_lower_half (log_t *log)
{
  ptrdiff_t size = ASIZE (log->key_and_value) / 2;
  EMACS_INT median = approximate_median (log, 0, size);

  for (ptrdiff_t i = 0; i < size; i++)
    /* Evict not only values smaller but also values equal to the median,
       so as to make sure we evict something no matter what.  */
    if (XFIXNUM (HASH_VALUE (log, i)) <= median)
      {
	Lisp_Object key = HASH_KEY (log, i);
	{ /* FIXME: we could make this more efficient.  */
	  Lisp_Object tmp;
	  XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
	  Fremhash (key, tmp);
	}
	/* Fremhash should have freed exactly this slot.  */
	eassert (log->next_free == i);

	/* Blank out the old backtrace before recycling its vector.  */
	eassert (VECTORP (key));
	for (ptrdiff_t j = 0; j < ASIZE (key); j++)
	  ASET (key, j, Qnil);

	set_hash_key_slot (log, i, key);
      }
}

147
/* Record the current backtrace in LOG.  COUNT is the weight of this
   current backtrace: interrupt counts for CPU, and the allocation
   size for memory.  May run from a signal handler, so it must not
   allocate (the pre-filled keys from make_log guarantee this).  */

static void
record_backtrace (log_t *log, EMACS_INT count)
{
  if (log->next_free < 0)
    /* FIXME: transfer the evicted counts to a special entry rather
       than dropping them on the floor.  */
    evict_lower_half (log);
  ptrdiff_t index = log->next_free;

  /* Get a "working memory" vector.  */
  Lisp_Object backtrace = HASH_KEY (log, index);
  get_backtrace (backtrace);

  { /* We basically do a `gethash+puthash' here, except that we have to be
       careful to avoid memory allocation since we're in a signal
       handler, and we optimize the code to try and avoid computing the
       hash+lookup twice.  See fns.c:Fputhash for reference.  */
    EMACS_UINT hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
	/* Known backtrace: bump its counter, saturating at the
	   maximum fixnum.  */
	EMACS_INT old_val = XFIXNUM (HASH_VALUE (log, j));
	EMACS_INT new_val = saturated_add (old_val, count);
	set_hash_value_slot (log, j, make_fixnum (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
	   But currently it only does that if log->next_free is -1.  */
	eassert (0 <= log->next_free);
	ptrdiff_t j = hash_put (log, backtrace, make_fixnum (count), hash);
	/* Let's make sure we've put `backtrace' right where it
	   already was to start with.  */
	eassert (index == j);

	/* FIXME: If the hash-table is almost full, we should set
	   some global flag so that some Elisp code can offload its
	   data elsewhere, so as to avoid the eviction code.
	   There are 2 ways to do that, AFAICT:
	   - Set a flag checked in maybe_quit, such that maybe_quit can then
	     call Fprofiler_cpu_log and stash the full log for later use.
	   - Set a flag check in post-gc-hook, so that Elisp code can call
	     profiler-cpu-log.  That gives us more flexibility since that
	     Elisp code can then do all kinds of fun stuff like write
	     the log to disk.  Or turn it right away into a call tree.
	   Of course, using Elisp is generally preferable, but it may
	   take longer until we get a chance to run the Elisp code, so
	   there's more risk that the table will get full before we
	   get there.  */
      }
  }
}

203
/* Sampling profiler.  */

#ifdef PROFILER_CPU_SUPPORT

/* The profiler timer and whether it was properly initialized, if
   POSIX timers are available.  */
#ifdef HAVE_ITIMERSPEC
static timer_t profiler_timer;
static bool profiler_timer_ok;
#endif

/* Status of sampling profiler: which timer mechanism, if any, is
   currently driving the SIGPROF samples.  */
static enum profiler_cpu_running
  { NOT_RUNNING,
#ifdef HAVE_ITIMERSPEC
    TIMER_SETTIME_RUNNING,
#endif
    SETITIMER_RUNNING
  }
  profiler_cpu_running;

/* Hash-table log of CPU profiler.  */
static Lisp_Object cpu_log;

/* Separate counter for the time spent in the GC.  */
static EMACS_INT cpu_gc_count;

/* The current sampling interval in nanoseconds.  */
static EMACS_INT current_sampling_interval;
232

233
/* Signal handler for sampling profiler.  Records one sample (plus any
   timer overruns) into cpu_log, or into the separate GC counter when
   the sample lands inside garbage collection.  */

static void
handle_profiler_signal (int signal)
{
  if (EQ (backtrace_top_function (), QAutomatic_GC))
    /* Special case the time-count inside GC because the hash-table
       code is not prepared to be used while the GC is running.
       More specifically it uses ASIZE at many places where it does
       not expect the ARRAY_MARK_FLAG to be set.  We could try and
       harden the hash-table code, but it doesn't seem worth the
       effort.  */
    cpu_gc_count = saturated_add (cpu_gc_count, 1);
  else
    {
      EMACS_INT count = 1;
#if defined HAVE_ITIMERSPEC && defined HAVE_TIMER_GETOVERRUN
      /* Account for ticks we missed while this signal was blocked.  */
      if (profiler_timer_ok)
	{
	  int overruns = timer_getoverrun (profiler_timer);
	  eassert (overruns >= 0);
	  count += overruns;
	}
#endif
      eassert (HASH_TABLE_P (cpu_log));
      record_backtrace (XHASH_TABLE (cpu_log), count);
    }
}

262
/* SIGPROF delivery shim: route the signal through Emacs's generic
   process-signal machinery to handle_profiler_signal.  */
static void
deliver_profiler_signal (int signal)
{
  deliver_process_signal (signal, handle_profiler_signal);
}

268
/* Arm a periodic SIGPROF timer firing every SAMPLING_INTERVAL
   nanoseconds, preferring a POSIX per-thread/process CPU-time timer
   and falling back to setitimer.  Return the profiler_cpu_running
   state achieved (NOT_RUNNING if no timer could be started), or -1
   if SAMPLING_INTERVAL is not a positive fixnum in range.  */
static int
setup_cpu_timer (Lisp_Object sampling_interval)
{
  int billion = 1000000000;

  /* The upper bound keeps the interval representable as a time_t
     number of seconds after division by a billion.  */
  if (! RANGED_FIXNUMP (1, sampling_interval,
			 (TYPE_MAXIMUM (time_t) < EMACS_INT_MAX / billion
			  ? ((EMACS_INT) TYPE_MAXIMUM (time_t) * billion
			     + (billion - 1))
			  : EMACS_INT_MAX)))
    return -1;

  current_sampling_interval = XFIXNUM (sampling_interval);
  struct timespec interval
    = make_timespec (current_sampling_interval / billion,
		     current_sampling_interval % billion);
  struct sigaction action;
  emacs_sigaction_init (&action, deliver_profiler_signal);
  sigaction (SIGPROF, &action, 0);

#ifdef HAVE_ITIMERSPEC
  if (! profiler_timer_ok)
    {
      /* System clocks to try, in decreasing order of desirability.  */
      static clockid_t const system_clock[] = {
#ifdef CLOCK_THREAD_CPUTIME_ID
	CLOCK_THREAD_CPUTIME_ID,
#endif
#ifdef CLOCK_PROCESS_CPUTIME_ID
	CLOCK_PROCESS_CPUTIME_ID,
#endif
#ifdef CLOCK_MONOTONIC
	CLOCK_MONOTONIC,
#endif
	CLOCK_REALTIME
      };
      struct sigevent sigev;
      sigev.sigev_value.sival_ptr = &profiler_timer;
      sigev.sigev_signo = SIGPROF;
      sigev.sigev_notify = SIGEV_SIGNAL;

      for (int i = 0; i < ARRAYELTS (system_clock); i++)
	if (timer_create (system_clock[i], &sigev, &profiler_timer) == 0)
	  {
	    profiler_timer_ok = true;
	    break;
	  }
    }

  if (profiler_timer_ok)
    {
      struct itimerspec ispec;
      ispec.it_value = ispec.it_interval = interval;
      if (timer_settime (profiler_timer, 0, &ispec, 0) == 0)
	return TIMER_SETTIME_RUNNING;
    }
#endif

#ifdef HAVE_SETITIMER
  struct itimerval timer;
  timer.it_value = timer.it_interval = make_timeval (interval);
  if (setitimer (ITIMER_PROF, &timer, 0) == 0)
    return SETITIMER_RUNNING;
#endif

  return NOT_RUNNING;
}

336
DEFUN ("profiler-cpu-start", Fprofiler_cpu_start, Sprofiler_cpu_start,
       1, 1, 0,
       doc: /* Start or restart the cpu profiler.
It takes call-stack samples each SAMPLING-INTERVAL nanoseconds, approximately.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (Lisp_Object sampling_interval)
{
  if (profiler_cpu_running)
    error ("CPU profiler is already running");

  /* Reuse a log left over from a previous stop; otherwise allocate a
     fresh one and reset the GC counter with it.  */
  if (NILP (cpu_log))
    {
      cpu_gc_count = 0;
      cpu_log = make_log ();
    }

  /* setup_cpu_timer returns -1 for a bad interval, otherwise the
     profiler_cpu_running state actually achieved.  */
  int status = setup_cpu_timer (sampling_interval);
  if (status < 0)
    {
      profiler_cpu_running = NOT_RUNNING;
      error ("Invalid sampling interval");
    }
  else
    {
      profiler_cpu_running = status;
      if (! profiler_cpu_running)
	error ("Unable to start profiler timer");
    }

  return Qt;
}

368
DEFUN ("profiler-cpu-stop", Fprofiler_cpu_stop, Sprofiler_cpu_stop,
       0, 0, 0,
       doc: /* Stop the cpu profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  /* Disarm whichever timer mechanism is currently driving samples.  */
  switch (profiler_cpu_running)
    {
    case NOT_RUNNING:
      return Qnil;

#ifdef HAVE_ITIMERSPEC
    case TIMER_SETTIME_RUNNING:
      {
	/* A zero it_value disarms the POSIX timer.  */
	struct itimerspec disable = { 0, };
	timer_settime (profiler_timer, 0, &disable, 0);
      }
      break;
#endif

#ifdef HAVE_SETITIMER
    case SETITIMER_RUNNING:
      {
	/* A zero itimerval disarms the interval timer.  */
	struct itimerval disable = { 0, };
	setitimer (ITIMER_PROF, &disable, 0);
      }
      break;
#endif
    }

  /* Ignore any SIGPROF still in flight after disarming.  */
  signal (SIGPROF, SIG_IGN);
  profiler_cpu_running = NOT_RUNNING;
  return Qt;
}

403 404
DEFUN ("profiler-cpu-running-p",
       Fprofiler_cpu_running_p, Sprofiler_cpu_running_p,
405
       0, 0, 0,
Glenn Morris's avatar
Glenn Morris committed
406
       doc: /* Return non-nil if cpu profiler is running.  */)
407 408
  (void)
{
409
  return profiler_cpu_running ? Qt : Qnil;
410 411
}

412
DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log,
413
       0, 0, 0,
414 415 416 417 418
       doc: /* Return the current cpu profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of time spent at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
419 420
  (void)
{
421
  Lisp_Object result = cpu_log;
422
  /* Here we're making the log visible to Elisp, so it's not safe any
423 424
     more for our use afterwards since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
425
  cpu_log = profiler_cpu_running ? make_log () : Qnil;
426
  Fputhash (make_vector (1, QAutomatic_GC),
427
	    make_fixnum (cpu_gc_count),
428 429
	    result);
  cpu_gc_count = 0;
430 431
  return result;
}
432
#endif /* PROFILER_CPU_SUPPORT */
433

434
/* Memory profiler.  */

/* True if memory profiler is running.  */
bool profiler_memory_running;

/* Hash-table log of the memory profiler; nil until first started.  */
static Lisp_Object memory_log;

441
DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start,
       0, 0, 0,
       doc: /* Start/restart the memory profiler.
The memory profiler will take samples of the call-stack whenever a new
allocation takes place.  Note that most small allocations only trigger
the profiler occasionally.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (void)
{
  if (profiler_memory_running)
    error ("Memory profiler is already running");

  /* Lazily allocate the log; it is kept across profiler-memory-stop
     so that profiler-memory-log can still retrieve it.  */
  if (NILP (memory_log))
    memory_log = make_log ();

  profiler_memory_running = true;

  return Qt;
}

461 462
DEFUN ("profiler-memory-stop",
       Fprofiler_memory_stop, Sprofiler_memory_stop,
463
       0, 0, 0,
464 465
       doc: /* Stop the memory profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
466 467
  (void)
{
468
  if (!profiler_memory_running)
469 470
    return Qnil;
  profiler_memory_running = false;
471 472 473
  return Qt;
}

474 475
DEFUN ("profiler-memory-running-p",
       Fprofiler_memory_running_p, Sprofiler_memory_running_p,
476
       0, 0, 0,
477
       doc: /* Return non-nil if memory profiler is running.  */)
478 479
  (void)
{
480
  return profiler_memory_running ? Qt : Qnil;
481 482
}

483 484
DEFUN ("profiler-memory-log",
       Fprofiler_memory_log, Sprofiler_memory_log,
485
       0, 0, 0,
486 487 488 489 490
       doc: /* Return the current memory profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of memory allocated at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
491 492
  (void)
{
493 494 495 496
  Lisp_Object result = memory_log;
  /* Here we're making the log visible to Elisp , so it's not safe any
     more for our use afterwards since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
497
  memory_log = profiler_memory_running ? make_log () : Qnil;
498 499 500 501
  return result;
}


502
/* Signals and probes.  */
503

504
/* Record that the current backtrace allocated SIZE bytes.  */
505 506 507
void
malloc_probe (size_t size)
{
508
  eassert (HASH_TABLE_P (memory_log));
509
  record_backtrace (XHASH_TABLE (memory_log), min (size, MOST_POSITIVE_FIXNUM));
510 511
}

512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537
DEFUN ("function-equal", Ffunction_equal, Sfunction_equal, 2, 2, 0,
       doc: /* Return non-nil if F1 and F2 come from the same source.
Used to determine if different closures are just different instances of
the same lambda expression, or are really unrelated function.  */)
     (Lisp_Object f1, Lisp_Object f2)
{
  if (EQ (f1, f2))
    return Qt;
  if (COMPILEDP (f1) && COMPILEDP (f2))
    /* Byte-code objects from the same source share their bytecode.  */
    return (EQ (AREF (f1, COMPILED_BYTECODE), AREF (f2, COMPILED_BYTECODE))
	    ? Qt : Qnil);
  if (CONSP (f1) && CONSP (f2) && CONSP (XCDR (f1)) && CONSP (XCDR (f2))
      && EQ (Qclosure, XCAR (f1))
      && EQ (Qclosure, XCAR (f2)))
    /* Interpreted closures from the same lambda share their body.  */
    return EQ (XCDR (XCDR (f1)), XCDR (XCDR (f2))) ? Qt : Qnil;
  return Qnil;
}

/* Equality test for profiler-log keys: two backtrace vectors are
   equal when they have the same length and pairwise
   `function-equal' elements; non-vector keys compare by identity.  */
static bool
cmpfn_profiler (struct hash_table_test *t,
		Lisp_Object bt1, Lisp_Object bt2)
{
  if (! (VECTORP (bt1) && VECTORP (bt2)))
    return EQ (bt1, bt2);

  ptrdiff_t len = ASIZE (bt1);
  if (ASIZE (bt2) != len)
    return false;
  for (ptrdiff_t i = 0; i < len; i++)
    if (NILP (Ffunction_equal (AREF (bt1, i), AREF (bt2, i))))
      return false;
  return true;
}

/* Hash function for profiler-log keys, consistent with
   cmpfn_profiler: elements that are `function-equal' hash alike
   (byte-code objects by their bytecode, closures by their body).  */
static EMACS_UINT
hashfn_profiler (struct hash_table_test *ht, Lisp_Object bt)
{
  if (! VECTORP (bt))
    return XHASH (bt);

  EMACS_UINT hash = 0;
  ptrdiff_t len = ASIZE (bt);
  for (ptrdiff_t i = 0; i < len; i++)
    {
      Lisp_Object f = AREF (bt, i);
      EMACS_UINT h1;
      if (COMPILEDP (f))
	h1 = XHASH (AREF (f, COMPILED_BYTECODE));
      else if (CONSP (f) && CONSP (XCDR (f)) && EQ (Qclosure, XCAR (f)))
	h1 = XHASH (XCDR (XCDR (f)));
      else
	h1 = XHASH (f);
      hash = sxhash_combine (hash, h1);
    }
  return SXHASH_REDUCE (hash);
}

Daniel Colascione's avatar
Daniel Colascione committed
572 573
static void syms_of_profiler_for_pdumper (void);

/* Define the profiler's Lisp variables, symbols and subroutines.  */
void
syms_of_profiler (void)
{
  DEFVAR_INT ("profiler-max-stack-depth", profiler_max_stack_depth,
	      doc: /* Number of elements from the call-stack recorded in the log.  */);
  profiler_max_stack_depth = 16;
  DEFVAR_INT ("profiler-log-size", profiler_log_size,
	      doc: /* Number of distinct call-stacks that can be recorded in a profiler log.
If the log gets full, some of the least-seen call-stacks will be evicted
to make room for new entries.  */);
  profiler_log_size = 10000;

  DEFSYM (Qprofiler_backtrace_equal, "profiler-backtrace-equal");

  defsubr (&Sfunction_equal);

#ifdef PROFILER_CPU_SUPPORT
  profiler_cpu_running = NOT_RUNNING;
  cpu_log = Qnil;
  staticpro (&cpu_log);
  defsubr (&Sprofiler_cpu_start);
  defsubr (&Sprofiler_cpu_stop);
  defsubr (&Sprofiler_cpu_running_p);
  defsubr (&Sprofiler_cpu_log);
#endif
  profiler_memory_running = false;
  memory_log = Qnil;
  staticpro (&memory_log);
  defsubr (&Sprofiler_memory_start);
  defsubr (&Sprofiler_memory_stop);
  defsubr (&Sprofiler_memory_running_p);
  defsubr (&Sprofiler_memory_log);

  pdumper_do_now_and_after_load (syms_of_profiler_for_pdumper);
}

/* Run both at startup and after loading a portable dump: clear the
   logs when running from a dump, and otherwise assert they are still
   nil as syms_of_profiler left them.  */
static void
syms_of_profiler_for_pdumper (void)
{
  if (dumped_with_pdumper_p ())
    {
#ifdef PROFILER_CPU_SUPPORT
      cpu_log = Qnil;
#endif
      memory_log = Qnil;
    }
  else
    {
#ifdef PROFILER_CPU_SUPPORT
      eassert (NILP (cpu_log));
#endif
      eassert (NILP (memory_log));
    }

}