GDB (xrefs)
/tmp/gdb-7.10/gdb/record-btrace.c
Go to the documentation of this file.
1 /* Branch trace support for GDB, the GNU debugger.
2 
3  Copyright (C) 2013-2015 Free Software Foundation, Inc.
4 
5  Contributed by Intel Corp. <markus.t.metzger@intel.com>
6 
7  This file is part of GDB.
8 
9  This program is free software; you can redistribute it and/or modify
10  it under the terms of the GNU General Public License as published by
11  the Free Software Foundation; either version 3 of the License, or
12  (at your option) any later version.
13 
14  This program is distributed in the hope that it will be useful,
15  but WITHOUT ANY WARRANTY; without even the implied warranty of
16  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  GNU General Public License for more details.
18 
19  You should have received a copy of the GNU General Public License
20  along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40 
41 /* The target_ops of record-btrace. */
43 
44 /* A new thread observer enabling branch tracing for the new thread. */
46 
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only[] = "read-only";
49 static const char replay_memory_access_read_write[] = "read-write";
50 static const char *const replay_memory_access_types[] =
51 {
54  NULL
55 };
56 
57 /* The currently allowed replay memory access type. */
59 
60 /* Command lists for "set/show record btrace". */
63 
64 /* The execution direction of the last resume we got. See record-full.c. */
66 
67 /* The async event handler for reverse/replay execution. */
69 
70 /* A flag indicating that we are currently generating a core file. */
72 
73 /* The current branch trace configuration. */
75 
76 /* Command list for "record btrace". */
78 
79 /* Command lists for "set/show record btrace bts". */
82 
83 /* Command lists for "set/show record btrace pt". */
86 
87 /* Print a record-btrace debug message. Use do ... while (0) to avoid
88  ambiguities when used in if statements. */
89 
90 #define DEBUG(msg, args...) \
91  do \
92  { \
93  if (record_debug != 0) \
94  fprintf_unfiltered (gdb_stdlog, \
95  "[record-btrace] " msg "\n", ##args); \
96  } \
97  while (0)
98 
99 
100 /* Update the branch trace for the current thread and return a pointer to its
101  thread_info.
102 
103  Throws an error if there is no thread or no trace. This function never
104  returns NULL. */
105 
106 static struct thread_info *
108 {
109  struct thread_info *tp;
110 
111  DEBUG ("require");
112 
114  if (tp == NULL)
115  error (_("No thread."));
116 
117  btrace_fetch (tp);
118 
119  if (btrace_is_empty (tp))
120  error (_("No trace."));
121 
122  return tp;
123 }
124 
125 /* Update the branch trace for the current thread and return a pointer to its
126  branch trace information struct.
127 
128  Throws an error if there is no thread or no trace. This function never
129  returns NULL. */
130 
131 static struct btrace_thread_info *
133 {
134  struct thread_info *tp;
135 
136  tp = require_btrace_thread ();
137 
138  return &tp->btrace;
139 }
140 
141 /* Enable branch tracing for one thread. Warn on errors. */
142 
143 static void
145 {
146  TRY
147  {
149  }
151  {
152  warning ("%s", error.message);
153  }
154  END_CATCH
155 }
156 
157 /* Callback function to disable branch tracing for one thread. */
158 
159 static void
161 {
162  struct thread_info *tp;
163 
164  tp = arg;
165 
166  btrace_disable (tp);
167 }
168 
169 /* Enable automatic tracing of new threads. */
170 
171 static void
173 {
174  DEBUG ("attach thread observer");
175 
176  record_btrace_thread_observer
178 }
179 
180 /* Disable automatic tracing of new threads. */
181 
182 static void
184 {
185  /* The observer may have been detached, already. */
186  if (record_btrace_thread_observer == NULL)
187  return;
188 
189  DEBUG ("detach thread observer");
190 
191  observer_detach_new_thread (record_btrace_thread_observer);
192  record_btrace_thread_observer = NULL;
193 }
194 
195 /* The record-btrace async event handler function. */
196 
197 static void
199 {
201 }
202 
203 /* The to_open method of target record-btrace. */
204 
205 static void
206 record_btrace_open (const char *args, int from_tty)
207 {
208  struct cleanup *disable_chain;
209  struct thread_info *tp;
210 
211  DEBUG ("open");
212 
213  record_preopen ();
214 
216  error (_("The program is not being run."));
217 
218  if (non_stop)
219  error (_("Record btrace can't debug inferior in non-stop mode."));
220 
221  gdb_assert (record_btrace_thread_observer == NULL);
222 
223  disable_chain = make_cleanup (null_cleanup, NULL);
225  if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
226  {
228 
230  }
231 
233 
235 
236  record_btrace_async_inferior_event_handler
238  NULL);
240 
242 
243  discard_cleanups (disable_chain);
244 }
245 
246 /* The to_stop_recording method of target record-btrace. */
247 
248 static void
250 {
251  struct thread_info *tp;
252 
253  DEBUG ("stop recording");
254 
256 
258  if (tp->btrace.target != NULL)
259  btrace_disable (tp);
260 }
261 
262 /* The to_close method of target record-btrace. */
263 
264 static void
266 {
267  struct thread_info *tp;
268 
269  if (record_btrace_async_inferior_event_handler != NULL)
270  delete_async_event_handler (&record_btrace_async_inferior_event_handler);
271 
272  /* Make sure automatic recording gets disabled even if we did not stop
273  recording before closing the record-btrace target. */
275 
276  /* We should have already stopped recording.
277  Tear down btrace in case we have not. */
279  btrace_teardown (tp);
280 }
281 
282 /* The to_async method of target record-btrace. */
283 
284 static void
286 {
287  if (enable)
288  mark_async_event_handler (record_btrace_async_inferior_event_handler);
289  else
290  clear_async_event_handler (record_btrace_async_inferior_event_handler);
291 
292  ops->beneath->to_async (ops->beneath, enable);
293 }
294 
295 /* Adjusts the size and returns a human readable size suffix. */
296 
297 static const char *
299 {
300  unsigned int sz;
301 
302  sz = *size;
303 
304  if ((sz & ((1u << 30) - 1)) == 0)
305  {
306  *size = sz >> 30;
307  return "GB";
308  }
309  else if ((sz & ((1u << 20) - 1)) == 0)
310  {
311  *size = sz >> 20;
312  return "MB";
313  }
314  else if ((sz & ((1u << 10) - 1)) == 0)
315  {
316  *size = sz >> 10;
317  return "kB";
318  }
319  else
320  return "";
321 }
322 
323 /* Print a BTS configuration. */
324 
325 static void
327 {
328  const char *suffix;
329  unsigned int size;
330 
331  size = conf->size;
332  if (size > 0)
333  {
334  suffix = record_btrace_adjust_size (&size);
335  printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
336  }
337 }
338 
339 /* Print an Intel(R) Processor Trace configuration. */
340 
341 static void
343 {
344  const char *suffix;
345  unsigned int size;
346 
347  size = conf->size;
348  if (size > 0)
349  {
350  suffix = record_btrace_adjust_size (&size);
351  printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
352  }
353 }
354 
355 /* Print a branch tracing configuration. */
356 
357 static void
359 {
360  printf_unfiltered (_("Recording format: %s.\n"),
361  btrace_format_string (conf->format));
362 
363  switch (conf->format)
364  {
365  case BTRACE_FORMAT_NONE:
366  return;
367 
368  case BTRACE_FORMAT_BTS:
370  return;
371 
372  case BTRACE_FORMAT_PT:
374  return;
375  }
376 
377  internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
378 }
379 
380 /* The to_info_record method of target record-btrace. */
381 
382 static void
384 {
385  struct btrace_thread_info *btinfo;
386  const struct btrace_config *conf;
387  struct thread_info *tp;
388  unsigned int insns, calls, gaps;
389 
390  DEBUG ("info");
391 
393  if (tp == NULL)
394  error (_("No thread."));
395 
396  btinfo = &tp->btrace;
397 
398  conf = btrace_conf (btinfo);
399  if (conf != NULL)
401 
402  btrace_fetch (tp);
403 
404  insns = 0;
405  calls = 0;
406  gaps = 0;
407 
408  if (!btrace_is_empty (tp))
409  {
410  struct btrace_call_iterator call;
411  struct btrace_insn_iterator insn;
412 
413  btrace_call_end (&call, btinfo);
414  btrace_call_prev (&call, 1);
415  calls = btrace_call_number (&call);
416 
417  btrace_insn_end (&insn, btinfo);
418 
419  insns = btrace_insn_number (&insn);
420  if (insns != 0)
421  {
422  /* The last instruction does not really belong to the trace. */
423  insns -= 1;
424  }
425  else
426  {
427  unsigned int steps;
428 
429  /* Skip gaps at the end. */
430  do
431  {
432  steps = btrace_insn_prev (&insn, 1);
433  if (steps == 0)
434  break;
435 
436  insns = btrace_insn_number (&insn);
437  }
438  while (insns == 0);
439  }
440 
441  gaps = btinfo->ngaps;
442  }
443 
444  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
445  "for thread %d (%s).\n"), insns, calls, gaps,
446  tp->num, target_pid_to_str (tp->ptid));
447 
448  if (btrace_is_replaying (tp))
449  printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
450  btrace_insn_number (btinfo->replay));
451 }
452 
453 /* Print a decode error. */
454 
455 static void
456 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
457  enum btrace_format format)
458 {
459  const char *errstr;
460  int is_error;
461 
462  errstr = _("unknown");
463  is_error = 1;
464 
465  switch (format)
466  {
467  default:
468  break;
469 
470  case BTRACE_FORMAT_BTS:
471  switch (errcode)
472  {
473  default:
474  break;
475 
476  case BDE_BTS_OVERFLOW:
477  errstr = _("instruction overflow");
478  break;
479 
480  case BDE_BTS_INSN_SIZE:
481  errstr = _("unknown instruction");
482  break;
483  }
484  break;
485 
486 #if defined (HAVE_LIBIPT)
487  case BTRACE_FORMAT_PT:
488  switch (errcode)
489  {
490  case BDE_PT_USER_QUIT:
491  is_error = 0;
492  errstr = _("trace decode cancelled");
493  break;
494 
495  case BDE_PT_DISABLED:
496  is_error = 0;
497  errstr = _("disabled");
498  break;
499 
500  case BDE_PT_OVERFLOW:
501  is_error = 0;
502  errstr = _("overflow");
503  break;
504 
505  default:
506  if (errcode < 0)
507  errstr = pt_errstr (pt_errcode (errcode));
508  break;
509  }
510  break;
511 #endif /* defined (HAVE_LIBIPT) */
512  }
513 
514  ui_out_text (uiout, _("["));
515  if (is_error)
516  {
517  ui_out_text (uiout, _("decode error ("));
518  ui_out_field_int (uiout, "errcode", errcode);
519  ui_out_text (uiout, _("): "));
520  }
521  ui_out_text (uiout, errstr);
522  ui_out_text (uiout, _("]\n"));
523 }
524 
525 /* Print an unsigned int. */
526 
527 static void
528 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
529 {
530  ui_out_field_fmt (uiout, fld, "%u", val);
531 }
532 
533 /* Disassemble a section of the recorded instruction trace. */
534 
535 static void
537  const struct btrace_thread_info *btinfo,
538  const struct btrace_insn_iterator *begin,
539  const struct btrace_insn_iterator *end, int flags)
540 {
541  struct gdbarch *gdbarch;
542  struct btrace_insn_iterator it;
543 
544  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
545  btrace_insn_number (end));
546 
547  gdbarch = target_gdbarch ();
548 
549  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
550  {
551  const struct btrace_insn *insn;
552 
553  insn = btrace_insn_get (&it);
554 
555  /* A NULL instruction indicates a gap in the trace. */
556  if (insn == NULL)
557  {
558  const struct btrace_config *conf;
559 
560  conf = btrace_conf (btinfo);
561 
562  /* We have trace so we must have a configuration. */
563  gdb_assert (conf != NULL);
564 
566  conf->format);
567  }
568  else
569  {
570  /* Print the instruction index. */
571  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
572  ui_out_text (uiout, "\t");
573 
574  /* Disassembly with '/m' flag may not produce the expected result.
575  See PR gdb/11833. */
576  gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
577  insn->pc + 1);
578  }
579  }
580 }
581 
582 /* The to_insn_history method of target record-btrace. */
583 
584 static void
586 {
587  struct btrace_thread_info *btinfo;
588  struct btrace_insn_history *history;
589  struct btrace_insn_iterator begin, end;
590  struct cleanup *uiout_cleanup;
591  struct ui_out *uiout;
592  unsigned int context, covered;
593 
594  uiout = current_uiout;
595  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
596  "insn history");
597  context = abs (size);
598  if (context == 0)
599  error (_("Bad record instruction-history-size."));
600 
601  btinfo = require_btrace ();
602  history = btinfo->insn_history;
603  if (history == NULL)
604  {
605  struct btrace_insn_iterator *replay;
606 
607  DEBUG ("insn-history (0x%x): %d", flags, size);
608 
609  /* If we're replaying, we start at the replay position. Otherwise, we
610  start at the tail of the trace. */
611  replay = btinfo->replay;
612  if (replay != NULL)
613  begin = *replay;
614  else
615  btrace_insn_end (&begin, btinfo);
616 
617  /* We start from here and expand in the requested direction. Then we
618  expand in the other direction, as well, to fill up any remaining
619  context. */
620  end = begin;
621  if (size < 0)
622  {
623  /* We want the current position covered, as well. */
624  covered = btrace_insn_next (&end, 1);
625  covered += btrace_insn_prev (&begin, context - covered);
626  covered += btrace_insn_next (&end, context - covered);
627  }
628  else
629  {
630  covered = btrace_insn_next (&end, context);
631  covered += btrace_insn_prev (&begin, context - covered);
632  }
633  }
634  else
635  {
636  begin = history->begin;
637  end = history->end;
638 
639  DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
640  btrace_insn_number (&begin), btrace_insn_number (&end));
641 
642  if (size < 0)
643  {
644  end = begin;
645  covered = btrace_insn_prev (&begin, context);
646  }
647  else
648  {
649  begin = end;
650  covered = btrace_insn_next (&end, context);
651  }
652  }
653 
654  if (covered > 0)
655  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
656  else
657  {
658  if (size < 0)
659  printf_unfiltered (_("At the start of the branch trace record.\n"));
660  else
661  printf_unfiltered (_("At the end of the branch trace record.\n"));
662  }
663 
664  btrace_set_insn_history (btinfo, &begin, &end);
665  do_cleanups (uiout_cleanup);
666 }
667 
668 /* The to_insn_history_range method of target record-btrace. */
669 
670 static void
672  ULONGEST from, ULONGEST to, int flags)
673 {
674  struct btrace_thread_info *btinfo;
675  struct btrace_insn_history *history;
676  struct btrace_insn_iterator begin, end;
677  struct cleanup *uiout_cleanup;
678  struct ui_out *uiout;
679  unsigned int low, high;
680  int found;
681 
682  uiout = current_uiout;
683  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
684  "insn history");
685  low = from;
686  high = to;
687 
688  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
689 
690  /* Check for wrap-arounds. */
691  if (low != from || high != to)
692  error (_("Bad range."));
693 
694  if (high < low)
695  error (_("Bad range."));
696 
697  btinfo = require_btrace ();
698 
699  found = btrace_find_insn_by_number (&begin, btinfo, low);
700  if (found == 0)
701  error (_("Range out of bounds."));
702 
703  found = btrace_find_insn_by_number (&end, btinfo, high);
704  if (found == 0)
705  {
706  /* Silently truncate the range. */
707  btrace_insn_end (&end, btinfo);
708  }
709  else
710  {
711  /* We want both begin and end to be inclusive. */
712  btrace_insn_next (&end, 1);
713  }
714 
715  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
716  btrace_set_insn_history (btinfo, &begin, &end);
717 
718  do_cleanups (uiout_cleanup);
719 }
720 
721 /* The to_insn_history_from method of target record-btrace. */
722 
723 static void
725  ULONGEST from, int size, int flags)
726 {
727  ULONGEST begin, end, context;
728 
729  context = abs (size);
730  if (context == 0)
731  error (_("Bad record instruction-history-size."));
732 
733  if (size < 0)
734  {
735  end = from;
736 
737  if (from < context)
738  begin = 0;
739  else
740  begin = from - context + 1;
741  }
742  else
743  {
744  begin = from;
745  end = from + context - 1;
746 
747  /* Check for wrap-around. */
748  if (end < begin)
749  end = ULONGEST_MAX;
750  }
751 
752  record_btrace_insn_history_range (self, begin, end, flags);
753 }
754 
755 /* Print the instruction number range for a function call history line. */
756 
757 static void
759  const struct btrace_function *bfun)
760 {
761  unsigned int begin, end, size;
762 
763  size = VEC_length (btrace_insn_s, bfun->insn);
764  gdb_assert (size > 0);
765 
766  begin = bfun->insn_offset;
767  end = begin + size - 1;
768 
769  ui_out_field_uint (uiout, "insn begin", begin);
770  ui_out_text (uiout, ",");
771  ui_out_field_uint (uiout, "insn end", end);
772 }
773 
774 /* Compute the lowest and highest source line for the instructions in BFUN
775  and return them in PBEGIN and PEND.
776  Ignore instructions that can't be mapped to BFUN, e.g. instructions that
777  result from inlining or macro expansion. */
778 
779 static void
781  int *pbegin, int *pend)
782 {
783  struct btrace_insn *insn;
784  struct symtab *symtab;
785  struct symbol *sym;
786  unsigned int idx;
787  int begin, end;
788 
789  begin = INT_MAX;
790  end = INT_MIN;
791 
792  sym = bfun->sym;
793  if (sym == NULL)
794  goto out;
795 
796  symtab = symbol_symtab (sym);
797 
798  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
799  {
800  struct symtab_and_line sal;
801 
802  sal = find_pc_line (insn->pc, 0);
803  if (sal.symtab != symtab || sal.line == 0)
804  continue;
805 
806  begin = min (begin, sal.line);
807  end = max (end, sal.line);
808  }
809 
810  out:
811  *pbegin = begin;
812  *pend = end;
813 }
814 
815 /* Print the source line information for a function call history line. */
816 
817 static void
819  const struct btrace_function *bfun)
820 {
821  struct symbol *sym;
822  int begin, end;
823 
824  sym = bfun->sym;
825  if (sym == NULL)
826  return;
827 
828  ui_out_field_string (uiout, "file",
830 
831  btrace_compute_src_line_range (bfun, &begin, &end);
832  if (end < begin)
833  return;
834 
835  ui_out_text (uiout, ":");
836  ui_out_field_int (uiout, "min line", begin);
837 
838  if (end == begin)
839  return;
840 
841  ui_out_text (uiout, ",");
842  ui_out_field_int (uiout, "max line", end);
843 }
844 
845 /* Get the name of a branch trace function. */
846 
847 static const char *
849 {
850  struct minimal_symbol *msym;
851  struct symbol *sym;
852 
853  if (bfun == NULL)
854  return "??";
855 
856  msym = bfun->msym;
857  sym = bfun->sym;
858 
859  if (sym != NULL)
860  return SYMBOL_PRINT_NAME (sym);
861  else if (msym != NULL)
862  return MSYMBOL_PRINT_NAME (msym);
863  else
864  return "??";
865 }
866 
867 /* Disassemble a section of the recorded function trace. */
868 
869 static void
871  const struct btrace_thread_info *btinfo,
872  const struct btrace_call_iterator *begin,
873  const struct btrace_call_iterator *end,
875 {
876  struct btrace_call_iterator it;
877 
878  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
879  btrace_call_number (end));
880 
881  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
882  {
883  const struct btrace_function *bfun;
884  struct minimal_symbol *msym;
885  struct symbol *sym;
886 
887  bfun = btrace_call_get (&it);
888  sym = bfun->sym;
889  msym = bfun->msym;
890 
891  /* Print the function index. */
892  ui_out_field_uint (uiout, "index", bfun->number);
893  ui_out_text (uiout, "\t");
894 
895  /* Indicate gaps in the trace. */
896  if (bfun->errcode != 0)
897  {
898  const struct btrace_config *conf;
899 
900  conf = btrace_conf (btinfo);
901 
902  /* We have trace so we must have a configuration. */
903  gdb_assert (conf != NULL);
904 
905  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
906 
907  continue;
908  }
909 
910  if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
911  {
912  int level = bfun->level + btinfo->level, i;
913 
914  for (i = 0; i < level; ++i)
915  ui_out_text (uiout, " ");
916  }
917 
918  if (sym != NULL)
919  ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
920  else if (msym != NULL)
921  ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
922  else if (!ui_out_is_mi_like_p (uiout))
923  ui_out_field_string (uiout, "function", "??");
924 
925  if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
926  {
927  ui_out_text (uiout, _("\tinst "));
928  btrace_call_history_insn_range (uiout, bfun);
929  }
930 
931  if ((flags & RECORD_PRINT_SRC_LINE) != 0)
932  {
933  ui_out_text (uiout, _("\tat "));
934  btrace_call_history_src_line (uiout, bfun);
935  }
936 
937  ui_out_text (uiout, "\n");
938  }
939 }
940 
941 /* The to_call_history method of target record-btrace. */
942 
943 static void
945 {
946  struct btrace_thread_info *btinfo;
947  struct btrace_call_history *history;
948  struct btrace_call_iterator begin, end;
949  struct cleanup *uiout_cleanup;
950  struct ui_out *uiout;
951  unsigned int context, covered;
952 
953  uiout = current_uiout;
954  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
955  "insn history");
956  context = abs (size);
957  if (context == 0)
958  error (_("Bad record function-call-history-size."));
959 
960  btinfo = require_btrace ();
961  history = btinfo->call_history;
962  if (history == NULL)
963  {
964  struct btrace_insn_iterator *replay;
965 
966  DEBUG ("call-history (0x%x): %d", flags, size);
967 
968  /* If we're replaying, we start at the replay position. Otherwise, we
969  start at the tail of the trace. */
970  replay = btinfo->replay;
971  if (replay != NULL)
972  {
973  begin.function = replay->function;
974  begin.btinfo = btinfo;
975  }
976  else
977  btrace_call_end (&begin, btinfo);
978 
979  /* We start from here and expand in the requested direction. Then we
980  expand in the other direction, as well, to fill up any remaining
981  context. */
982  end = begin;
983  if (size < 0)
984  {
985  /* We want the current position covered, as well. */
986  covered = btrace_call_next (&end, 1);
987  covered += btrace_call_prev (&begin, context - covered);
988  covered += btrace_call_next (&end, context - covered);
989  }
990  else
991  {
992  covered = btrace_call_next (&end, context);
993  covered += btrace_call_prev (&begin, context- covered);
994  }
995  }
996  else
997  {
998  begin = history->begin;
999  end = history->end;
1000 
1001  DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1002  btrace_call_number (&begin), btrace_call_number (&end));
1003 
1004  if (size < 0)
1005  {
1006  end = begin;
1007  covered = btrace_call_prev (&begin, context);
1008  }
1009  else
1010  {
1011  begin = end;
1012  covered = btrace_call_next (&end, context);
1013  }
1014  }
1015 
1016  if (covered > 0)
1017  btrace_call_history (uiout, btinfo, &begin, &end, flags);
1018  else
1019  {
1020  if (size < 0)
1021  printf_unfiltered (_("At the start of the branch trace record.\n"));
1022  else
1023  printf_unfiltered (_("At the end of the branch trace record.\n"));
1024  }
1025 
1026  btrace_set_call_history (btinfo, &begin, &end);
1027  do_cleanups (uiout_cleanup);
1028 }
1029 
1030 /* The to_call_history_range method of target record-btrace. */
1031 
1032 static void
1034  ULONGEST from, ULONGEST to, int flags)
1035 {
1036  struct btrace_thread_info *btinfo;
1037  struct btrace_call_history *history;
1038  struct btrace_call_iterator begin, end;
1039  struct cleanup *uiout_cleanup;
1040  struct ui_out *uiout;
1041  unsigned int low, high;
1042  int found;
1043 
1044  uiout = current_uiout;
1045  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1046  "func history");
1047  low = from;
1048  high = to;
1049 
1050  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1051 
1052  /* Check for wrap-arounds. */
1053  if (low != from || high != to)
1054  error (_("Bad range."));
1055 
1056  if (high < low)
1057  error (_("Bad range."));
1058 
1059  btinfo = require_btrace ();
1060 
1061  found = btrace_find_call_by_number (&begin, btinfo, low);
1062  if (found == 0)
1063  error (_("Range out of bounds."));
1064 
1065  found = btrace_find_call_by_number (&end, btinfo, high);
1066  if (found == 0)
1067  {
1068  /* Silently truncate the range. */
1069  btrace_call_end (&end, btinfo);
1070  }
1071  else
1072  {
1073  /* We want both begin and end to be inclusive. */
1074  btrace_call_next (&end, 1);
1075  }
1076 
1077  btrace_call_history (uiout, btinfo, &begin, &end, flags);
1078  btrace_set_call_history (btinfo, &begin, &end);
1079 
1080  do_cleanups (uiout_cleanup);
1081 }
1082 
1083 /* The to_call_history_from method of target record-btrace. */
1084 
1085 static void
1087  ULONGEST from, int size, int flags)
1088 {
1089  ULONGEST begin, end, context;
1090 
1091  context = abs (size);
1092  if (context == 0)
1093  error (_("Bad record function-call-history-size."));
1094 
1095  if (size < 0)
1096  {
1097  end = from;
1098 
1099  if (from < context)
1100  begin = 0;
1101  else
1102  begin = from - context + 1;
1103  }
1104  else
1105  {
1106  begin = from;
1107  end = from + context - 1;
1108 
1109  /* Check for wrap-around. */
1110  if (end < begin)
1111  end = ULONGEST_MAX;
1112  }
1113 
1114  record_btrace_call_history_range (self, begin, end, flags);
1115 }
1116 
1117 /* The to_record_is_replaying method of target record-btrace. */
1118 
1119 static int
1121 {
1122  struct thread_info *tp;
1123 
1125  if (btrace_is_replaying (tp))
1126  return 1;
1127 
1128  return 0;
1129 }
1130 
1131 /* The to_xfer_partial method of target record-btrace. */
1132 
1133 static enum target_xfer_status
1135  const char *annex, gdb_byte *readbuf,
1136  const gdb_byte *writebuf, ULONGEST offset,
1137  ULONGEST len, ULONGEST *xfered_len)
1138 {
1139  struct target_ops *t;
1140 
1141  /* Filter out requests that don't make sense during replay. */
1144  && record_btrace_is_replaying (ops))
1145  {
1146  switch (object)
1147  {
1148  case TARGET_OBJECT_MEMORY:
1149  {
1150  struct target_section *section;
1151 
1152  /* We do not allow writing memory in general. */
1153  if (writebuf != NULL)
1154  {
1155  *xfered_len = len;
1156  return TARGET_XFER_UNAVAILABLE;
1157  }
1158 
1159  /* We allow reading readonly memory. */
1160  section = target_section_by_addr (ops, offset);
1161  if (section != NULL)
1162  {
1163  /* Check if the section we found is readonly. */
1164  if ((bfd_get_section_flags (section->the_bfd_section->owner,
1165  section->the_bfd_section)
1166  & SEC_READONLY) != 0)
1167  {
1168  /* Truncate the request to fit into this section. */
1169  len = min (len, section->endaddr - offset);
1170  break;
1171  }
1172  }
1173 
1174  *xfered_len = len;
1175  return TARGET_XFER_UNAVAILABLE;
1176  }
1177  }
1178  }
1179 
1180  /* Forward the request. */
1181  ops = ops->beneath;
1182  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1183  offset, len, xfered_len);
1184 }
1185 
1186 /* The to_insert_breakpoint method of target record-btrace. */
1187 
1188 static int
1190  struct gdbarch *gdbarch,
1191  struct bp_target_info *bp_tgt)
1192 {
1193  const char *old;
1194  int ret;
1195 
1196  /* Inserting breakpoints requires accessing memory. Allow it for the
1197  duration of this function. */
1198  old = replay_memory_access;
1200 
1201  ret = 0;
1202  TRY
1203  {
1204  ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1205  }
1206  CATCH (except, RETURN_MASK_ALL)
1207  {
1208  replay_memory_access = old;
1209  throw_exception (except);
1210  }
1211  END_CATCH
1212  replay_memory_access = old;
1213 
1214  return ret;
1215 }
1216 
1217 /* The to_remove_breakpoint method of target record-btrace. */
1218 
1219 static int
1221  struct gdbarch *gdbarch,
1222  struct bp_target_info *bp_tgt)
1223 {
1224  const char *old;
1225  int ret;
1226 
1227  /* Removing breakpoints requires accessing memory. Allow it for the
1228  duration of this function. */
1229  old = replay_memory_access;
1231 
1232  ret = 0;
1233  TRY
1234  {
1235  ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1236  }
1237  CATCH (except, RETURN_MASK_ALL)
1238  {
1239  replay_memory_access = old;
1240  throw_exception (except);
1241  }
1242  END_CATCH
1243  replay_memory_access = old;
1244 
1245  return ret;
1246 }
1247 
1248 /* The to_fetch_registers method of target record-btrace. */
1249 
1250 static void
1252  struct regcache *regcache, int regno)
1253 {
1254  struct btrace_insn_iterator *replay;
1255  struct thread_info *tp;
1256 
1258  gdb_assert (tp != NULL);
1259 
1260  replay = tp->btrace.replay;
1261  if (replay != NULL && !record_btrace_generating_corefile)
1262  {
1263  const struct btrace_insn *insn;
1264  struct gdbarch *gdbarch;
1265  int pcreg;
1266 
1267  gdbarch = get_regcache_arch (regcache);
1268  pcreg = gdbarch_pc_regnum (gdbarch);
1269  if (pcreg < 0)
1270  return;
1271 
1272  /* We can only provide the PC register. */
1273  if (regno >= 0 && regno != pcreg)
1274  return;
1275 
1276  insn = btrace_insn_get (replay);
1277  gdb_assert (insn != NULL);
1278 
1279  regcache_raw_supply (regcache, regno, &insn->pc);
1280  }
1281  else
1282  {
1283  struct target_ops *t = ops->beneath;
1284 
1285  t->to_fetch_registers (t, regcache, regno);
1286  }
1287 }
1288 
1289 /* The to_store_registers method of target record-btrace. */
1290 
1291 static void
1293  struct regcache *regcache, int regno)
1294 {
1295  struct target_ops *t;
1296 
1298  error (_("This record target does not allow writing registers."));
1299 
1301 
1302  t = ops->beneath;
1303  t->to_store_registers (t, regcache, regno);
1304 }
1305 
1306 /* The to_prepare_to_store method of target record-btrace. */
1307 
1308 static void
1310  struct regcache *regcache)
1311 {
1312  struct target_ops *t;
1313 
1315  return;
1316 
1317  t = ops->beneath;
1318  t->to_prepare_to_store (t, regcache);
1319 }
1320 
1321 /* The branch trace frame cache. */
1322 
1324 {
1325  /* The thread. */
1326  struct thread_info *tp;
1327 
1328  /* The frame info. */
1330 
1331  /* The branch trace function segment. */
1332  const struct btrace_function *bfun;
1333 };
1334 
1335 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1336 
1337 static htab_t bfcache;
1338 
1339 /* hash_f for htab_create_alloc of bfcache. */
1340 
1341 static hashval_t
1342 bfcache_hash (const void *arg)
1343 {
1344  const struct btrace_frame_cache *cache = arg;
1345 
1346  return htab_hash_pointer (cache->frame);
1347 }
1348 
1349 /* eq_f for htab_create_alloc of bfcache. */
1350 
1351 static int
1352 bfcache_eq (const void *arg1, const void *arg2)
1353 {
1354  const struct btrace_frame_cache *cache1 = arg1;
1355  const struct btrace_frame_cache *cache2 = arg2;
1356 
1357  return cache1->frame == cache2->frame;
1358 }
1359 
1360 /* Create a new btrace frame cache. */
1361 
1362 static struct btrace_frame_cache *
1364 {
1365  struct btrace_frame_cache *cache;
1366  void **slot;
1367 
1368  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1369  cache->frame = frame;
1370 
1371  slot = htab_find_slot (bfcache, cache, INSERT);
1372  gdb_assert (*slot == NULL);
1373  *slot = cache;
1374 
1375  return cache;
1376 }
1377 
1378 /* Extract the branch trace function from a branch trace frame. */
1379 
1380 static const struct btrace_function *
1382 {
1383  const struct btrace_frame_cache *cache;
1384  const struct btrace_function *bfun;
1385  struct btrace_frame_cache pattern;
1386  void **slot;
1387 
1388  pattern.frame = frame;
1389 
1390  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1391  if (slot == NULL)
1392  return NULL;
1393 
1394  cache = *slot;
1395  return cache->bfun;
1396 }
1397 
1398 /* Implement stop_reason method for record_btrace_frame_unwind. */
1399 
1400 static enum unwind_stop_reason
1402  void **this_cache)
1403 {
1404  const struct btrace_frame_cache *cache;
1405  const struct btrace_function *bfun;
1406 
1407  cache = *this_cache;
1408  bfun = cache->bfun;
1409  gdb_assert (bfun != NULL);
1410 
1411  if (bfun->up == NULL)
1412  return UNWIND_UNAVAILABLE;
1413 
1414  return UNWIND_NO_REASON;
1415 }
1416 
1417 /* Implement this_id method for record_btrace_frame_unwind. */
1418 
1419 static void
1420 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1421  struct frame_id *this_id)
1422 {
1423  const struct btrace_frame_cache *cache;
1424  const struct btrace_function *bfun;
1425  CORE_ADDR code, special;
1426 
1427  cache = *this_cache;
1428 
1429  bfun = cache->bfun;
1430  gdb_assert (bfun != NULL);
1431 
1432  while (bfun->segment.prev != NULL)
1433  bfun = bfun->segment.prev;
1434 
1435  code = get_frame_func (this_frame);
1436  special = bfun->number;
1437 
1438  *this_id = frame_id_build_unavailable_stack_special (code, special);
1439 
1440  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1441  btrace_get_bfun_name (cache->bfun),
1442  core_addr_to_string_nz (this_id->code_addr),
1444 }
1445 
1446 /* Implement prev_register method for record_btrace_frame_unwind. */
1447 
1448 static struct value *
1450  void **this_cache,
1451  int regnum)
1452 {
1453  const struct btrace_frame_cache *cache;
1454  const struct btrace_function *bfun, *caller;
1455  const struct btrace_insn *insn;
1456  struct gdbarch *gdbarch;
1457  CORE_ADDR pc;
1458  int pcreg;
1459 
1460  gdbarch = get_frame_arch (this_frame);
1461  pcreg = gdbarch_pc_regnum (gdbarch);
1462  if (pcreg < 0 || regnum != pcreg)
1464  _("Registers are not available in btrace record history"));
1465 
1466  cache = *this_cache;
1467  bfun = cache->bfun;
1468  gdb_assert (bfun != NULL);
1469 
1470  caller = bfun->up;
1471  if (caller == NULL)
1473  _("No caller in btrace record history"));
1474 
1475  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1476  {
1477  insn = VEC_index (btrace_insn_s, caller->insn, 0);
1478  pc = insn->pc;
1479  }
1480  else
1481  {
1482  insn = VEC_last (btrace_insn_s, caller->insn);
1483  pc = insn->pc;
1484 
1485  pc += gdb_insn_length (gdbarch, pc);
1486  }
1487 
1488  DEBUG ("[frame] unwound PC in %s on level %d: %s",
1489  btrace_get_bfun_name (bfun), bfun->level,
1490  core_addr_to_string_nz (pc));
1491 
1492  return frame_unwind_got_address (this_frame, regnum, pc);
1493 }
1494 
1495 /* Implement sniffer method for record_btrace_frame_unwind. */
1496 
1497 static int
1499  struct frame_info *this_frame,
1500  void **this_cache)
1501 {
1502  const struct btrace_function *bfun;
1503  struct btrace_frame_cache *cache;
1504  struct thread_info *tp;
1505  struct frame_info *next;
1506 
1507  /* THIS_FRAME does not contain a reference to its thread. */
1509  gdb_assert (tp != NULL);
1510 
1511  bfun = NULL;
1512  next = get_next_frame (this_frame);
1513  if (next == NULL)
1514  {
1515  const struct btrace_insn_iterator *replay;
1516 
1517  replay = tp->btrace.replay;
1518  if (replay != NULL)
1519  bfun = replay->function;
1520  }
1521  else
1522  {
1523  const struct btrace_function *callee;
1524 
1525  callee = btrace_get_frame_function (next);
1526  if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1527  bfun = callee->up;
1528  }
1529 
1530  if (bfun == NULL)
1531  return 0;
1532 
1533  DEBUG ("[frame] sniffed frame for %s on level %d",
1534  btrace_get_bfun_name (bfun), bfun->level);
1535 
1536  /* This is our frame. Initialize the frame cache. */
1537  cache = bfcache_new (this_frame);
1538  cache->tp = tp;
1539  cache->bfun = bfun;
1540 
1541  *this_cache = cache;
1542  return 1;
1543 }
1544 
1545 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1546 
1547 static int
1549  struct frame_info *this_frame,
1550  void **this_cache)
1551 {
1552  const struct btrace_function *bfun, *callee;
1553  struct btrace_frame_cache *cache;
1554  struct frame_info *next;
1555 
1556  next = get_next_frame (this_frame);
1557  if (next == NULL)
1558  return 0;
1559 
1560  callee = btrace_get_frame_function (next);
1561  if (callee == NULL)
1562  return 0;
1563 
1564  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1565  return 0;
1566 
1567  bfun = callee->up;
1568  if (bfun == NULL)
1569  return 0;
1570 
1571  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1572  btrace_get_bfun_name (bfun), bfun->level);
1573 
1574  /* This is our frame. Initialize the frame cache. */
1575  cache = bfcache_new (this_frame);
1576  cache->tp = find_thread_ptid (inferior_ptid);
1577  cache->bfun = bfun;
1578 
1579  *this_cache = cache;
1580  return 1;
1581 }
1582 
1583 static void
1584 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1585 {
1586  struct btrace_frame_cache *cache;
1587  void **slot;
1588 
1589  cache = this_cache;
1590 
1591  slot = htab_find_slot (bfcache, cache, NO_INSERT);
1592  gdb_assert (slot != NULL);
1593 
1594  htab_remove_elt (bfcache, cache);
1595 }
1596 
1597 /* btrace recording does not store previous memory content, neither the stack
1598  frames content. Any unwinding would return errorneous results as the stack
1599  contents no longer matches the changed PC value restored from history.
1600  Therefore this unwinder reports any possibly unwound registers as
1601  <unavailable>. */
1602 
1603 const struct frame_unwind record_btrace_frame_unwind =
1604 {
1605  NORMAL_FRAME,
1609  NULL,
1612 };
1613 
1614 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1615 {
1620  NULL,
1623 };
1624 
1625 /* Implement the to_get_unwinder method. */
1626 
1627 static const struct frame_unwind *
1629 {
1631 }
1632 
1633 /* Implement the to_get_tailcall_unwinder method. */
1634 
1635 static const struct frame_unwind *
1637 {
1639 }
1640 
1641 /* Indicate that TP should be resumed according to FLAG. */
1642 
1643 static void
1645  enum btrace_thread_flag flag)
1646 {
1647  struct btrace_thread_info *btinfo;
1648 
1649  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1650 
1651  btinfo = &tp->btrace;
1652 
1653  if ((btinfo->flags & BTHR_MOVE) != 0)
1654  error (_("Thread already moving."));
1655 
1656  /* Fetch the latest branch trace. */
1657  btrace_fetch (tp);
1658 
1659  btinfo->flags |= flag;
1660 }
1661 
1662 /* Find the thread to resume given a PTID. */
1663 
1664 static struct thread_info *
1666 {
1667  struct thread_info *tp;
1668 
1669  /* When asked to resume everything, we pick the current thread. */
1670  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1671  ptid = inferior_ptid;
1672 
1673  return find_thread_ptid (ptid);
1674 }
1675 
1676 /* Start replaying a thread. */
1677 
1678 static struct btrace_insn_iterator *
1680 {
1681  struct btrace_insn_iterator *replay;
1682  struct btrace_thread_info *btinfo;
1683  int executing;
1684 
1685  btinfo = &tp->btrace;
1686  replay = NULL;
1687 
1688  /* We can't start replaying without trace. */
1689  if (btinfo->begin == NULL)
1690  return NULL;
1691 
1692  /* Clear the executing flag to allow changes to the current frame.
1693  We are not actually running, yet. We just started a reverse execution
1694  command or a record goto command.
1695  For the latter, EXECUTING is false and this has no effect.
1696  For the former, EXECUTING is true and we're in to_wait, about to
1697  move the thread. Since we need to recompute the stack, we temporarily
1698  set EXECUTING to flase. */
1699  executing = is_executing (tp->ptid);
1700  set_executing (tp->ptid, 0);
1701 
1702  /* GDB stores the current frame_id when stepping in order to detects steps
1703  into subroutines.
1704  Since frames are computed differently when we're replaying, we need to
1705  recompute those stored frames and fix them up so we can still detect
1706  subroutines after we started replaying. */
1707  TRY
1708  {
1709  struct frame_info *frame;
1710  struct frame_id frame_id;
1711  int upd_step_frame_id, upd_step_stack_frame_id;
1712 
1713  /* The current frame without replaying - computed via normal unwind. */
1714  frame = get_current_frame ();
1715  frame_id = get_frame_id (frame);
1716 
1717  /* Check if we need to update any stepping-related frame id's. */
1718  upd_step_frame_id = frame_id_eq (frame_id,
1719  tp->control.step_frame_id);
1720  upd_step_stack_frame_id = frame_id_eq (frame_id,
1722 
1723  /* We start replaying at the end of the branch trace. This corresponds
1724  to the current instruction. */
1725  replay = xmalloc (sizeof (*replay));
1726  btrace_insn_end (replay, btinfo);
1727 
1728  /* Skip gaps at the end of the trace. */
1729  while (btrace_insn_get (replay) == NULL)
1730  {
1731  unsigned int steps;
1732 
1733  steps = btrace_insn_prev (replay, 1);
1734  if (steps == 0)
1735  error (_("No trace."));
1736  }
1737 
1738  /* We're not replaying, yet. */
1739  gdb_assert (btinfo->replay == NULL);
1740  btinfo->replay = replay;
1741 
1742  /* Make sure we're not using any stale registers. */
1744 
1745  /* The current frame with replaying - computed via btrace unwind. */
1746  frame = get_current_frame ();
1747  frame_id = get_frame_id (frame);
1748 
1749  /* Replace stepping related frames where necessary. */
1750  if (upd_step_frame_id)
1751  tp->control.step_frame_id = frame_id;
1752  if (upd_step_stack_frame_id)
1753  tp->control.step_stack_frame_id = frame_id;
1754  }
1755  CATCH (except, RETURN_MASK_ALL)
1756  {
1757  /* Restore the previous execution state. */
1758  set_executing (tp->ptid, executing);
1759 
1760  xfree (btinfo->replay);
1761  btinfo->replay = NULL;
1762 
1764 
1765  throw_exception (except);
1766  }
1767  END_CATCH
1768 
1769  /* Restore the previous execution state. */
1770  set_executing (tp->ptid, executing);
1771 
1772  return replay;
1773 }
1774 
1775 /* Stop replaying a thread. */
1776 
1777 static void
1779 {
1780  struct btrace_thread_info *btinfo;
1781 
1782  btinfo = &tp->btrace;
1783 
1784  xfree (btinfo->replay);
1785  btinfo->replay = NULL;
1786 
1787  /* Make sure we're not leaving any stale registers. */
1789 }
1790 
1791 /* The to_resume method of target record-btrace. */
1792 
1793 static void
1794 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1795  enum gdb_signal signal)
1796 {
1797  struct thread_info *tp, *other;
1798  enum btrace_thread_flag flag;
1799 
1800  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1801 
1802  /* Store the execution direction of the last resume. */
1804 
1806  if (tp == NULL)
1807  error (_("Cannot find thread to resume."));
1808 
1809  /* Stop replaying other threads if the thread to resume is not replaying. */
1811  ALL_NON_EXITED_THREADS (other)
1813 
1814  /* As long as we're not replaying, just forward the request. */
1816  {
1817  ops = ops->beneath;
1818  return ops->to_resume (ops, ptid, step, signal);
1819  }
1820 
1821  /* Compute the btrace thread flag for the requested move. */
1822  if (step == 0)
1824  else
1826 
1827  /* At the moment, we only move a single thread. We could also move
1828  all threads in parallel by single-stepping each resumed thread
1829  until the first runs into an event.
1830  When we do that, we would want to continue all other threads.
1831  For now, just resume one thread to not confuse to_wait. */
1832  record_btrace_resume_thread (tp, flag);
1833 
1834  /* We just indicate the resume intent here. The actual stepping happens in
1835  record_btrace_wait below. */
1836 
1837  /* Async support. */
1838  if (target_can_async_p ())
1839  {
1840  target_async (1);
1841  mark_async_event_handler (record_btrace_async_inferior_event_handler);
1842  }
1843 }
1844 
1845 /* Find a thread to move. */
1846 
1847 static struct thread_info *
1849 {
1850  struct thread_info *tp;
1851 
1852  /* First check the parameter thread. */
1853  tp = find_thread_ptid (ptid);
1854  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1855  return tp;
1856 
1857  /* Otherwise, find one other thread that has been resumed. */
1859  if ((tp->btrace.flags & BTHR_MOVE) != 0)
1860  return tp;
1861 
1862  return NULL;
1863 }
1864 
1865 /* Return a target_waitstatus indicating that we ran out of history. */
1866 
1867 static struct target_waitstatus
1869 {
1870  struct target_waitstatus status;
1871 
1873 
1874  return status;
1875 }
1876 
1877 /* Return a target_waitstatus indicating that a step finished. */
1878 
1879 static struct target_waitstatus
1881 {
1882  struct target_waitstatus status;
1883 
1884  status.kind = TARGET_WAITKIND_STOPPED;
1885  status.value.sig = GDB_SIGNAL_TRAP;
1886 
1887  return status;
1888 }
1889 
1890 /* Clear the record histories. */
1891 
1892 static void
1894 {
1895  xfree (btinfo->insn_history);
1896  xfree (btinfo->call_history);
1897 
1898  btinfo->insn_history = NULL;
1899  btinfo->call_history = NULL;
1900 }
1901 
1902 /* Step a single thread. */
1903 
1904 static struct target_waitstatus
1906 {
1907  struct btrace_insn_iterator *replay, end;
1908  struct btrace_thread_info *btinfo;
1909  struct address_space *aspace;
1910  struct inferior *inf;
1912  unsigned int steps;
1913 
1914  /* We can't step without an execution history. */
1915  if (btrace_is_empty (tp))
1916  return btrace_step_no_history ();
1917 
1918  btinfo = &tp->btrace;
1919  replay = btinfo->replay;
1920 
1921  flags = btinfo->flags & BTHR_MOVE;
1922  btinfo->flags &= ~BTHR_MOVE;
1923 
1924  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1925 
1926  switch (flags)
1927  {
1928  default:
1929  internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1930 
1931  case BTHR_STEP:
1932  /* We're done if we're not replaying. */
1933  if (replay == NULL)
1934  return btrace_step_no_history ();
1935 
1936  /* Skip gaps during replay. */
1937  do
1938  {
1939  steps = btrace_insn_next (replay, 1);
1940  if (steps == 0)
1941  {
1943  return btrace_step_no_history ();
1944  }
1945  }
1946  while (btrace_insn_get (replay) == NULL);
1947 
1948  /* Determine the end of the instruction trace. */
1949  btrace_insn_end (&end, btinfo);
1950 
1951  /* We stop replaying if we reached the end of the trace. */
1952  if (btrace_insn_cmp (replay, &end) == 0)
1954 
1955  return btrace_step_stopped ();
1956 
1957  case BTHR_RSTEP:
1958  /* Start replaying if we're not already doing so. */
1959  if (replay == NULL)
1960  replay = record_btrace_start_replaying (tp);
1961 
1962  /* If we can't step any further, we reached the end of the history.
1963  Skip gaps during replay. */
1964  do
1965  {
1966  steps = btrace_insn_prev (replay, 1);
1967  if (steps == 0)
1968  return btrace_step_no_history ();
1969 
1970  }
1971  while (btrace_insn_get (replay) == NULL);
1972 
1973  return btrace_step_stopped ();
1974 
1975  case BTHR_CONT:
1976  /* We're done if we're not replaying. */
1977  if (replay == NULL)
1978  return btrace_step_no_history ();
1979 
1980  inf = find_inferior_ptid (tp->ptid);
1981  aspace = inf->aspace;
1982 
1983  /* Determine the end of the instruction trace. */
1984  btrace_insn_end (&end, btinfo);
1985 
1986  for (;;)
1987  {
1988  const struct btrace_insn *insn;
1989 
1990  /* Skip gaps during replay. */
1991  do
1992  {
1993  steps = btrace_insn_next (replay, 1);
1994  if (steps == 0)
1995  {
1997  return btrace_step_no_history ();
1998  }
1999 
2000  insn = btrace_insn_get (replay);
2001  }
2002  while (insn == NULL);
2003 
2004  /* We stop replaying if we reached the end of the trace. */
2005  if (btrace_insn_cmp (replay, &end) == 0)
2006  {
2008  return btrace_step_no_history ();
2009  }
2010 
2011  DEBUG ("stepping %d (%s) ... %s", tp->num,
2012  target_pid_to_str (tp->ptid),
2013  core_addr_to_string_nz (insn->pc));
2014 
2015  if (record_check_stopped_by_breakpoint (aspace, insn->pc,
2016  &btinfo->stop_reason))
2017  return btrace_step_stopped ();
2018  }
2019 
2020  case BTHR_RCONT:
2021  /* Start replaying if we're not already doing so. */
2022  if (replay == NULL)
2023  replay = record_btrace_start_replaying (tp);
2024 
2025  inf = find_inferior_ptid (tp->ptid);
2026  aspace = inf->aspace;
2027 
2028  for (;;)
2029  {
2030  const struct btrace_insn *insn;
2031 
2032  /* If we can't step any further, we reached the end of the history.
2033  Skip gaps during replay. */
2034  do
2035  {
2036  steps = btrace_insn_prev (replay, 1);
2037  if (steps == 0)
2038  return btrace_step_no_history ();
2039 
2040  insn = btrace_insn_get (replay);
2041  }
2042  while (insn == NULL);
2043 
2044  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
2045  target_pid_to_str (tp->ptid),
2046  core_addr_to_string_nz (insn->pc));
2047 
2048  if (record_check_stopped_by_breakpoint (aspace, insn->pc,
2049  &btinfo->stop_reason))
2050  return btrace_step_stopped ();
2051  }
2052  }
2053 }
2054 
2055 /* The to_wait method of target record-btrace. */
2056 
2057 static ptid_t
2059  struct target_waitstatus *status, int options)
2060 {
2061  struct thread_info *tp, *other;
2062 
2063  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2064 
2065  /* As long as we're not replaying, just forward the request. */
2067  {
2068  ops = ops->beneath;
2069  return ops->to_wait (ops, ptid, status, options);
2070  }
2071 
2072  /* Let's find a thread to move. */
2074  if (tp == NULL)
2075  {
2076  DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
2077 
2078  status->kind = TARGET_WAITKIND_IGNORE;
2079  return minus_one_ptid;
2080  }
2081 
2082  /* We only move a single thread. We're not able to correlate threads. */
2083  *status = record_btrace_step_thread (tp);
2084 
2085  /* Stop all other threads. */
2086  if (!non_stop)
2087  ALL_NON_EXITED_THREADS (other)
2088  other->btrace.flags &= ~BTHR_MOVE;
2089 
2090  /* Start record histories anew from the current position. */
2092 
2093  /* We moved the replay position but did not update registers. */
2095 
2096  return tp->ptid;
2097 }
2098 
2099 /* The to_can_execute_reverse method of target record-btrace. */
2100 
2101 static int
2103 {
2104  return 1;
2105 }
2106 
2107 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2108 
2109 static int
2111 {
2112  if (record_btrace_is_replaying (ops))
2113  {
2114  struct thread_info *tp = inferior_thread ();
2115 
2117  }
2118 
2119  return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2120 }
2121 
2122 /* The to_supports_stopped_by_sw_breakpoint method of target
2123  record-btrace. */
2124 
2125 static int
2127 {
2128  if (record_btrace_is_replaying (ops))
2129  return 1;
2130 
2132 }
2133 
2134 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2135 
2136 static int
2138 {
2139  if (record_btrace_is_replaying (ops))
2140  {
2141  struct thread_info *tp = inferior_thread ();
2142 
2144  }
2145 
2146  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2147 }
2148 
2149 /* The to_supports_stopped_by_hw_breakpoint method of target
2150  record-btrace. */
2151 
2152 static int
2154 {
2155  if (record_btrace_is_replaying (ops))
2156  return 1;
2157 
2159 }
2160 
2161 /* The to_update_thread_list method of target record-btrace. */
2162 
2163 static void
2165 {
2166  /* We don't add or remove threads during replay. */
2167  if (record_btrace_is_replaying (ops))
2168  return;
2169 
2170  /* Forward the request. */
2171  ops = ops->beneath;
2172  ops->to_update_thread_list (ops);
2173 }
2174 
2175 /* The to_thread_alive method of target record-btrace. */
2176 
2177 static int
2179 {
2180  /* We don't add or remove threads during replay. */
2181  if (record_btrace_is_replaying (ops))
2182  return find_thread_ptid (ptid) != NULL;
2183 
2184  /* Forward the request. */
2185  ops = ops->beneath;
2186  return ops->to_thread_alive (ops, ptid);
2187 }
2188 
2189 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2190  is stopped. */
2191 
2192 static void
2194  const struct btrace_insn_iterator *it)
2195 {
2196  struct btrace_thread_info *btinfo;
2197 
2198  btinfo = &tp->btrace;
2199 
2200  if (it == NULL || it->function == NULL)
2202  else
2203  {
2204  if (btinfo->replay == NULL)
2206  else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2207  return;
2208 
2209  *btinfo->replay = *it;
2211  }
2212 
2213  /* Start anew from the new replay position. */
2215 
2218 }
2219 
2220 /* The to_goto_record_begin method of target record-btrace. */
2221 
2222 static void
2224 {
2225  struct thread_info *tp;
2226  struct btrace_insn_iterator begin;
2227 
2228  tp = require_btrace_thread ();
2229 
2230  btrace_insn_begin (&begin, &tp->btrace);
2231  record_btrace_set_replay (tp, &begin);
2232 }
2233 
2234 /* The to_goto_record_end method of target record-btrace. */
2235 
2236 static void
2238 {
2239  struct thread_info *tp;
2240 
2241  tp = require_btrace_thread ();
2242 
2243  record_btrace_set_replay (tp, NULL);
2244 }
2245 
2246 /* The to_goto_record method of target record-btrace. */
2247 
2248 static void
2250 {
2251  struct thread_info *tp;
2252  struct btrace_insn_iterator it;
2253  unsigned int number;
2254  int found;
2255 
2256  number = insn;
2257 
2258  /* Check for wrap-arounds. */
2259  if (number != insn)
2260  error (_("Instruction number out of range."));
2261 
2262  tp = require_btrace_thread ();
2263 
2264  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2265  if (found == 0)
2266  error (_("No such instruction."));
2267 
2268  record_btrace_set_replay (tp, &it);
2269 }
2270 
2271 /* The to_execution_direction target method. */
2272 
2273 static enum exec_direction_kind
2275 {
2277 }
2278 
2279 /* The to_prepare_to_generate_core target method. */
2280 
2281 static void
2283 {
2285 }
2286 
2287 /* The to_done_generating_core target method. */
2288 
2289 static void
2291 {
2293 }
2294 
2295 /* Initialize the record-btrace target ops. */
2296 
2297 static void
2299 {
2300  struct target_ops *ops;
2301 
2302  ops = &record_btrace_ops;
2303  ops->to_shortname = "record-btrace";
2304  ops->to_longname = "Branch tracing target";
2305  ops->to_doc = "Collect control-flow trace and provide the execution history.";
2306  ops->to_open = record_btrace_open;
2309  ops->to_detach = record_detach;
2312  ops->to_kill = record_kill;
2331  ops->to_wait = record_btrace_wait;
2347  ops->to_stratum = record_stratum;
2348  ops->to_magic = OPS_MAGIC;
2349 }
2350 
2351 /* Start recording in BTS format. */
2352 
2353 static void
2354 cmd_record_btrace_bts_start (char *args, int from_tty)
2355 {
2356  if (args != NULL && *args != 0)
2357  error (_("Invalid argument."));
2358 
2360 
2361  TRY
2362  {
2363  execute_command ("target record-btrace", from_tty);
2364  }
2365  CATCH (exception, RETURN_MASK_ALL)
2366  {
2368  throw_exception (exception);
2369  }
2370  END_CATCH
2371 }
2372 
2373 /* Start recording Intel(R) Processor Trace. */
2374 
2375 static void
2376 cmd_record_btrace_pt_start (char *args, int from_tty)
2377 {
2378  if (args != NULL && *args != 0)
2379  error (_("Invalid argument."));
2380 
2382 
2383  TRY
2384  {
2385  execute_command ("target record-btrace", from_tty);
2386  }
2387  CATCH (exception, RETURN_MASK_ALL)
2388  {
2390  throw_exception (exception);
2391  }
2392  END_CATCH
2393 }
2394 
2395 /* Alias for "target record". */
2396 
2397 static void
2398 cmd_record_btrace_start (char *args, int from_tty)
2399 {
2400  if (args != NULL && *args != 0)
2401  error (_("Invalid argument."));
2402 
2404 
2405  TRY
2406  {
2407  execute_command ("target record-btrace", from_tty);
2408  }
2409  CATCH (exception, RETURN_MASK_ALL)
2410  {
2412 
2413  TRY
2414  {
2415  execute_command ("target record-btrace", from_tty);
2416  }
2417  CATCH (exception, RETURN_MASK_ALL)
2418  {
2420  throw_exception (exception);
2421  }
2422  END_CATCH
2423  }
2424  END_CATCH
2425 }
2426 
2427 /* The "set record btrace" command. */
2428 
2429 static void
2430 cmd_set_record_btrace (char *args, int from_tty)
2431 {
2432  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2433 }
2434 
2435 /* The "show record btrace" command. */
2436 
2437 static void
2438 cmd_show_record_btrace (char *args, int from_tty)
2439 {
2440  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2441 }
2442 
2443 /* The "show record btrace replay-memory-access" command. */
2444 
2445 static void
2446 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2447  struct cmd_list_element *c, const char *value)
2448 {
2449  fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2450  replay_memory_access);
2451 }
2452 
2453 /* The "set record btrace bts" command. */
2454 
2455 static void
2456 cmd_set_record_btrace_bts (char *args, int from_tty)
2457 {
2458  printf_unfiltered (_("\"set record btrace bts\" must be followed "
2459  "by an appropriate subcommand.\n"));
2460  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2461  all_commands, gdb_stdout);
2462 }
2463 
2464 /* The "show record btrace bts" command. */
2465 
2466 static void
2467 cmd_show_record_btrace_bts (char *args, int from_tty)
2468 {
2469  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2470 }
2471 
2472 /* The "set record btrace pt" command. */
2473 
2474 static void
2475 cmd_set_record_btrace_pt (char *args, int from_tty)
2476 {
2477  printf_unfiltered (_("\"set record btrace pt\" must be followed "
2478  "by an appropriate subcommand.\n"));
2479  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2480  all_commands, gdb_stdout);
2481 }
2482 
2483 /* The "show record btrace pt" command. */
2484 
2485 static void
2486 cmd_show_record_btrace_pt (char *args, int from_tty)
2487 {
2488  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2489 }
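/* Editorial usage note, not part of the original source: the "set"
   prefix handlers above print a help list, while the "show" handlers walk
   their sub-command list and print each current value.  Illustrative
   session, output paraphrased:

       (gdb) set record btrace bts
       "set record btrace bts" must be followed by an appropriate subcommand.
       List of set record btrace bts subcommands: ...
       (gdb) show record btrace bts
       buffer-size:  The record/replay bts buffer size is 65536.  */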
2490 
2491 /* The "record bts buffer-size" show value function. */
2492 
2493 static void
2494 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2495  struct cmd_list_element *c,
2496  const char *value)
2497 {
2498  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2499  value);
2500 }
2501 
2502 /* The "record pt buffer-size" show value function. */
2503 
2504 static void
2505 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2506  struct cmd_list_element *c,
2507  const char *value)
2508 {
2509  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2510  value);
2511 }
2512 
2513 void _initialize_record_btrace (void);
2514 
2515 /* Initialize btrace commands. */
2516 
2517 void
2518 _initialize_record_btrace (void)
2519 {
2520  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2521  _("Start branch trace recording."), &record_btrace_cmdlist,
2522  "record btrace ", 0, &record_cmdlist);
2523  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2524 
2525  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2526  _("\
2527 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2528 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2529 This format may not be available on all processors."),
2530  &record_btrace_cmdlist);
2531  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2532 
2533  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2534  _("\
2535 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2536 This format may not be available on all processors."),
2537  &record_btrace_cmdlist);
2538  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
2539 
2540  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2541  _("Set record options"), &set_record_btrace_cmdlist,
2542  "set record btrace ", 0, &set_record_cmdlist);
2543 
2544  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2545  _("Show record options"), &show_record_btrace_cmdlist,
2546  "show record btrace ", 0, &show_record_cmdlist);
2547 
2548  add_setshow_enum_cmd ("replay-memory-access", no_class,
2549  replay_memory_access_types, &replay_memory_access, _("\
2550 Set what memory accesses are allowed during replay."), _("\
2551 Show what memory accesses are allowed during replay."),
2552  _("Default is READ-ONLY.\n\n\
2553 The btrace record target does not trace data.\n\
2554 The memory therefore corresponds to the live target and not \
2555 to the current replay position.\n\n\
2556 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2557 When READ-WRITE, allow accesses to read-only and read-write memory during \
2558 replay."),
2559  NULL, cmd_show_replay_memory_access,
2560  &set_record_btrace_cmdlist,
2561  &show_record_btrace_cmdlist);
2562 
2563  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2564  _("Set record btrace bts options"),
2565  &set_record_btrace_bts_cmdlist,
2566  "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2567 
2568  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2569  _("Show record btrace bts options"),
2570  &show_record_btrace_bts_cmdlist,
2571  "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2572 
2573  add_setshow_uinteger_cmd ("buffer-size", no_class,
2574  &record_btrace_conf.bts.size,
2575  _("Set the record/replay bts buffer size."),
2576  _("Show the record/replay bts buffer size."), _("\
2577 When starting recording request a trace buffer of this size. \
2578 The actual buffer size may differ from the requested size. \
2579 Use \"info record\" to see the actual buffer size.\n\n\
2580 Bigger buffers allow longer recording but also take more time to process \
2581 the recorded execution trace.\n\n\
2582 The trace buffer size may not be changed while recording."), NULL,
2583  show_record_bts_buffer_size_value,
2584  &set_record_btrace_bts_cmdlist,
2585  &show_record_btrace_bts_cmdlist);
2586 
2587  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2588  _("Set record btrace pt options"),
2589  &set_record_btrace_pt_cmdlist,
2590  "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2591 
2592  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2593  _("Show record btrace pt options"),
2594  &show_record_btrace_pt_cmdlist,
2595  "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2596 
2597  add_setshow_uinteger_cmd ("buffer-size", no_class,
2598  &record_btrace_conf.pt.size,
2599  _("Set the record/replay pt buffer size."),
2600  _("Show the record/replay pt buffer size."), _("\
2601 Bigger buffers allow longer recording but also take more time to process \
2602 the recorded execution.\n\
2603 The actual buffer size may differ from the requested size. Use \"info record\" \
2604 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2605  &set_record_btrace_pt_cmdlist,
2606  &show_record_btrace_pt_cmdlist);
2607 
2608  init_record_btrace_ops ();
2609  add_target (&record_btrace_ops);
2610 
2611  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2612  xcalloc, xfree);
2613 
2614  record_btrace_conf.bts.size = 64 * 1024;
2615  record_btrace_conf.pt.size = 16 * 1024;
2616 }
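/* Editorial usage example, not part of the original source: the commands
   registered above are driven from the CLI roughly as follows; the values
   are illustrative, and the defaults set just above are 64 KiB for BTS
   and 16 KiB for PT:

       (gdb) set record btrace replay-memory-access read-write
       (gdb) show record btrace replay-memory-access
       (gdb) set record btrace pt buffer-size 32768
       (gdb) record btrace pt
       (gdb) info record
       (gdb) record goto 42  */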