/tmp/gdb-7.10/gdb/btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2 
3  Copyright (C) 2013-2015 Free Software Foundation, Inc.
4 
5  Contributed by Intel Corp. <markus.t.metzger@intel.com>
6 
7  This file is part of GDB.
8 
9  This program is free software; you can redistribute it and/or modify
10  it under the terms of the GNU General Public License as published by
11  the Free Software Foundation; either version 3 of the License, or
12  (at your option) any later version.
13 
14  This program is distributed in the hope that it will be useful,
15  but WITHOUT ANY WARRANTY; without even the implied warranty of
16  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  GNU General Public License for more details.
18 
19  You should have received a copy of the GNU General Public License
20  along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "rsp-low.h"
35 #include "gdbcmd.h"
36 #include "cli/cli-utils.h"
37 
38 #include <inttypes.h>
39 #include <ctype.h>
40 
41 /* Command lists for btrace maintenance commands. */
42 static struct cmd_list_element *maint_btrace_cmdlist;
43 static struct cmd_list_element *maint_btrace_set_cmdlist;
44 static struct cmd_list_element *maint_btrace_show_cmdlist;
45 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
47 
48 /* Control whether to skip PAD packets when computing the packet history. */
49 static int maint_btrace_pt_skip_pad = 1;
50 
51 static void btrace_add_pc (struct thread_info *tp);
52 
53 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
54  when used in if statements. */
55 
56 #define DEBUG(msg, args...) \
57  do \
58  { \
59  if (record_debug != 0) \
60  fprintf_unfiltered (gdb_stdlog, \
61  "[btrace] " msg "\n", ##args); \
62  } \
63  while (0)
64 
65 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
66 
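/* A hypothetical illustration of why the do ... while (0) wrapper above
   matters: a macro that expands to a bare if-statement changes meaning
   when used under another if/else.

     #define DEBUG_BAD(msg) \
       if (record_debug != 0) fprintf_unfiltered (gdb_stdlog, msg)

     if (done)
       DEBUG_BAD ("done");
     else
       cleanup ();   /* binds to the macro's hidden 'if', not 'if (done)' */

   Wrapping the expansion in do { ... } while (0) turns it into a single
   statement that consumes the trailing ';' and nests as expected. */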
67 /* Return the function name of a recorded function segment for printing.
68  This function never returns NULL. */
69 
70 static const char *
71 ftrace_print_function_name (const struct btrace_function *bfun)
72 {
73  struct minimal_symbol *msym;
74  struct symbol *sym;
75 
76  msym = bfun->msym;
77  sym = bfun->sym;
78 
79  if (sym != NULL)
80  return SYMBOL_PRINT_NAME (sym);
81 
82  if (msym != NULL)
83  return MSYMBOL_PRINT_NAME (msym);
84 
85  return "<unknown>";
86 }
87 
88 /* Return the file name of a recorded function segment for printing.
89  This function never returns NULL. */
90 
91 static const char *
92 ftrace_print_filename (const struct btrace_function *bfun)
93 {
94  struct symbol *sym;
95  const char *filename;
96 
97  sym = bfun->sym;
98 
99  if (sym != NULL)
100  filename = symtab_to_filename_for_display (symbol_symtab (sym));
101  else
102  filename = "<unknown>";
103 
104  return filename;
105 }
106 
107 /* Return a string representation of the address of an instruction.
108  This function never returns NULL. */
109 
110 static const char *
111 ftrace_print_insn_addr (const struct btrace_insn *insn)
112 {
113  if (insn == NULL)
114  return "<nil>";
115 
116  return core_addr_to_string_nz (insn->pc);
117 }
118 
119 /* Print an ftrace debug status message. */
120 
121 static void
122 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
123 {
124  const char *fun, *file;
125  unsigned int ibegin, iend;
126  int level;
127 
128  fun = ftrace_print_function_name (bfun);
129  file = ftrace_print_filename (bfun);
130  level = bfun->level;
131 
132  ibegin = bfun->insn_offset;
133  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
134 
135  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
136  prefix, fun, file, level, ibegin, iend);
137 }
138 
139 /* Return non-zero if BFUN does not match MFUN and FUN,
140  return zero otherwise. */
141 
142 static int
143 ftrace_function_switched (const struct btrace_function *bfun,
144  const struct minimal_symbol *mfun,
145  const struct symbol *fun)
146 {
147  struct minimal_symbol *msym;
148  struct symbol *sym;
149 
150  msym = bfun->msym;
151  sym = bfun->sym;
152 
153  /* If the minimal symbol changed, we certainly switched functions. */
154  if (mfun != NULL && msym != NULL
155  && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
156  return 1;
157 
158  /* If the symbol changed, we certainly switched functions. */
159  if (fun != NULL && sym != NULL)
160  {
161  const char *bfname, *fname;
162 
163  /* Check the function name. */
164  if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
165  return 1;
166 
167  /* Check the location of those functions, as well. */
168  bfname = symtab_to_fullname (symbol_symtab (sym));
169  fname = symtab_to_fullname (symbol_symtab (fun));
170  if (filename_cmp (fname, bfname) != 0)
171  return 1;
172  }
173 
174  /* If we lost symbol information, we switched functions. */
175  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
176  return 1;
177 
178  /* If we gained symbol information, we switched functions. */
179  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
180  return 1;
181 
182  return 0;
183 }
184 
185 /* Allocate and initialize a new branch trace function segment.
186  PREV is the chronologically preceding function segment.
187  MFUN and FUN are the symbol information we have for this function. */
188 
189 static struct btrace_function *
190 ftrace_new_function (struct btrace_function *prev,
191  struct minimal_symbol *mfun,
192  struct symbol *fun)
193 {
194  struct btrace_function *bfun;
195 
196  bfun = xzalloc (sizeof (*bfun));
197 
198  bfun->msym = mfun;
199  bfun->sym = fun;
200  bfun->flow.prev = prev;
201 
202  if (prev == NULL)
203  {
204  /* Start counting at one. */
205  bfun->number = 1;
206  bfun->insn_offset = 1;
207  }
208  else
209  {
210  gdb_assert (prev->flow.next == NULL);
211  prev->flow.next = bfun;
212 
213  bfun->number = prev->number + 1;
214  bfun->insn_offset = (prev->insn_offset
215  + VEC_length (btrace_insn_s, prev->insn));
216  bfun->level = prev->level;
217  }
218 
219  return bfun;
220 }
221 
222 /* Update the UP field of a function segment. */
223 
224 static void
225 ftrace_update_caller (struct btrace_function *bfun,
226  struct btrace_function *caller,
227  enum btrace_function_flag flags)
228 {
229  if (bfun->up != NULL)
230  ftrace_debug (bfun, "updating caller");
231 
232  bfun->up = caller;
233  bfun->flags = flags;
234 
235  ftrace_debug (bfun, "set caller");
236 }
237 
238 /* Fix up the caller for all segments of a function. */
239 
240 static void
241 ftrace_fixup_caller (struct btrace_function *bfun,
242  struct btrace_function *caller,
243  enum btrace_function_flag flags)
244 {
245  struct btrace_function *prev, *next;
246 
247  ftrace_update_caller (bfun, caller, flags);
248 
249  /* Update all function segments belonging to the same function. */
250  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
251  ftrace_update_caller (prev, caller, flags);
252 
253  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
254  ftrace_update_caller (next, caller, flags);
255 }
256 
257 /* Add a new function segment for a call.
258  CALLER is the chronologically preceding function segment.
259  MFUN and FUN are the symbol information we have for this function. */
260 
261 static struct btrace_function *
262 ftrace_new_call (struct btrace_function *caller,
263  struct minimal_symbol *mfun,
264  struct symbol *fun)
265 {
266  struct btrace_function *bfun;
267 
268  bfun = ftrace_new_function (caller, mfun, fun);
269  bfun->up = caller;
270  bfun->level += 1;
271 
272  ftrace_debug (bfun, "new call");
273 
274  return bfun;
275 }
276 
277 /* Add a new function segment for a tail call.
278  CALLER is the chronologically preceding function segment.
279  MFUN and FUN are the symbol information we have for this function. */
280 
281 static struct btrace_function *
282 ftrace_new_tailcall (struct btrace_function *caller,
283  struct minimal_symbol *mfun,
284  struct symbol *fun)
285 {
286  struct btrace_function *bfun;
287 
288  bfun = ftrace_new_function (caller, mfun, fun);
289  bfun->up = caller;
290  bfun->level += 1;
291  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
292 
293  ftrace_debug (bfun, "new tail call");
294 
295  return bfun;
296 }
297 
298 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
299  symbol information. */
300 
301 static struct btrace_function *
302 ftrace_find_caller (struct btrace_function *bfun,
303  struct minimal_symbol *mfun,
304  struct symbol *fun)
305 {
306  for (; bfun != NULL; bfun = bfun->up)
307  {
308  /* Skip functions with incompatible symbol information. */
309  if (ftrace_function_switched (bfun, mfun, fun))
310  continue;
311 
312  /* This is the function segment we're looking for. */
313  break;
314  }
315 
316  return bfun;
317 }
318 
319 /* Find the innermost caller in the back trace of BFUN, skipping all
320  function segments that do not end with a call instruction (e.g.
321  tail calls ending with a jump). */
322 
323 static struct btrace_function *
324 ftrace_find_call (struct btrace_function *bfun)
325 {
326  for (; bfun != NULL; bfun = bfun->up)
327  {
328  struct btrace_insn *last;
329 
330  /* Skip gaps. */
331  if (bfun->errcode != 0)
332  continue;
333 
334  last = VEC_last (btrace_insn_s, bfun->insn);
335 
336  if (last->iclass == BTRACE_INSN_CALL)
337  break;
338  }
339 
340  return bfun;
341 }
342 
343 /* Add a continuation segment for a function into which we return.
344  PREV is the chronologically preceding function segment.
345  MFUN and FUN are the symbol information we have for this function. */
346 
347 static struct btrace_function *
348 ftrace_new_return (struct btrace_function *prev,
349  struct minimal_symbol *mfun,
350  struct symbol *fun)
351 {
352  struct btrace_function *bfun, *caller;
353 
354  bfun = ftrace_new_function (prev, mfun, fun);
355 
356  /* It is important to start at PREV's caller. Otherwise, we might find
357  PREV itself, if PREV is a recursive function. */
358  caller = ftrace_find_caller (prev->up, mfun, fun);
359  if (caller != NULL)
360  {
361  /* The caller of PREV is the preceding btrace function segment in this
362  function instance. */
363  gdb_assert (caller->segment.next == NULL);
364 
365  caller->segment.next = bfun;
366  bfun->segment.prev = caller;
367 
368  /* Maintain the function level. */
369  bfun->level = caller->level;
370 
371  /* Maintain the call stack. */
372  bfun->up = caller->up;
373  bfun->flags = caller->flags;
374 
375  ftrace_debug (bfun, "new return");
376  }
377  else
378  {
379  /* We did not find a caller. This could mean that something went
380  wrong or that the call is simply not included in the trace. */
381 
382  /* Let's search for some actual call. */
383  caller = ftrace_find_call (prev->up);
384  if (caller == NULL)
385  {
386  /* There is no call in PREV's back trace. We assume that the
387  branch trace did not include it. */
388 
389  /* Let's find the topmost call function - this skips tail calls. */
390  while (prev->up != NULL)
391  prev = prev->up;
392 
393  /* We maintain levels for a series of returns for which we have
394  not seen the calls.
395  We start at the preceding function's level in case this has
396  already been a return for which we have not seen the call.
397  We start at level 0 otherwise, to handle tail calls correctly. */
398  bfun->level = min (0, prev->level) - 1;
399 
400  /* Fix up the call stack for PREV. */
401  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
402 
403  ftrace_debug (bfun, "new return - no caller");
404  }
405  else
406  {
407  /* There is a call in PREV's back trace to which we should have
408  returned. Let's remain at this level. */
409  bfun->level = prev->level;
410 
411  ftrace_debug (bfun, "new return - unknown caller");
412  }
413  }
414 
415  return bfun;
416 }
417 
418 /* Add a new function segment for a function switch.
419  PREV is the chronologically preceding function segment.
420  MFUN and FUN are the symbol information we have for this function. */
421 
422 static struct btrace_function *
423 ftrace_new_switch (struct btrace_function *prev,
424  struct minimal_symbol *mfun,
425  struct symbol *fun)
426 {
427  struct btrace_function *bfun;
428 
429  /* This is an unexplained function switch. The call stack will likely
430  be wrong at this point. */
431  bfun = ftrace_new_function (prev, mfun, fun);
432 
433  ftrace_debug (bfun, "new switch");
434 
435  return bfun;
436 }
437 
438 /* Add a new function segment for a gap in the trace due to a decode error.
439  PREV is the chronologically preceding function segment.
440  ERRCODE is the format-specific error code. */
441 
442 static struct btrace_function *
443 ftrace_new_gap (struct btrace_function *prev, int errcode)
444 {
445  struct btrace_function *bfun;
446 
447  /* We hijack prev if it was empty. */
448  if (prev != NULL && prev->errcode == 0
449  && VEC_empty (btrace_insn_s, prev->insn))
450  bfun = prev;
451  else
452  bfun = ftrace_new_function (prev, NULL, NULL);
453 
454  bfun->errcode = errcode;
455 
456  ftrace_debug (bfun, "new gap");
457 
458  return bfun;
459 }
460 
461 /* Update BFUN with respect to the instruction at PC. This may create new
462  function segments.
463  Return the chronologically latest function segment, never NULL. */
464 
465 static struct btrace_function *
466 ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
467 {
468  struct bound_minimal_symbol bmfun;
469  struct minimal_symbol *mfun;
470  struct symbol *fun;
471  struct btrace_insn *last;
472 
473  /* Try to determine the function we're in. We use both types of symbols
474  to avoid surprises when we sometimes get a full symbol and sometimes
475  only a minimal symbol. */
476  fun = find_pc_function (pc);
477  bmfun = lookup_minimal_symbol_by_pc (pc);
478  mfun = bmfun.minsym;
479 
480  if (fun == NULL && mfun == NULL)
481  DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
482 
483  /* If we didn't have a function or if we had a gap before, we create one. */
484  if (bfun == NULL || bfun->errcode != 0)
485  return ftrace_new_function (bfun, mfun, fun);
486 
487  /* Check the last instruction, if we have one.
488  We do this check first, since it allows us to fill in the call stack
489  links in addition to the normal flow links. */
490  last = NULL;
491  if (!VEC_empty (btrace_insn_s, bfun->insn))
492  last = VEC_last (btrace_insn_s, bfun->insn);
493 
494  if (last != NULL)
495  {
496  switch (last->iclass)
497  {
498  case BTRACE_INSN_RETURN:
499  {
500  const char *fname;
501 
502  /* On some systems, _dl_runtime_resolve returns to the resolved
503  function instead of jumping to it. From our perspective,
504  however, this is a tailcall.
505  If we treated it as return, we wouldn't be able to find the
506  resolved function in our stack back trace. Hence, we would
507  lose the current stack back trace and start anew with an empty
508  back trace. When the resolved function returns, we would then
509  create a stack back trace with the same function names but
510  different frame id's. This will confuse stepping. */
511  fname = ftrace_print_function_name (bfun);
512  if (strcmp (fname, "_dl_runtime_resolve") == 0)
513  return ftrace_new_tailcall (bfun, mfun, fun);
514 
515  return ftrace_new_return (bfun, mfun, fun);
516  }
517 
518  case BTRACE_INSN_CALL:
519  /* Ignore calls to the next instruction. They are used for PIC. */
520  if (last->pc + last->size == pc)
521  break;
522 
523  return ftrace_new_call (bfun, mfun, fun);
524 
525  case BTRACE_INSN_JUMP:
526  {
527  CORE_ADDR start;
528 
529  start = get_pc_function_start (pc);
530 
531  /* If we can't determine the function for PC, we treat a jump at
532  the end of the block as tail call. */
533  if (start == 0 || start == pc)
534  return ftrace_new_tailcall (bfun, mfun, fun);
535  }
536  }
537  }
538 
539  /* Check if we're switching functions for some other reason. */
540  if (ftrace_function_switched (bfun, mfun, fun))
541  {
542  DEBUG_FTRACE ("switching from %s in %s at %s",
543  ftrace_print_insn_addr (last),
544  ftrace_print_function_name (bfun),
545  ftrace_print_filename (bfun));
546 
547  return ftrace_new_switch (bfun, mfun, fun);
548  }
549 
550  return bfun;
551 }
552 
553 /* Add the instruction at PC to BFUN's instructions. */
554 
555 static void
556 ftrace_update_insns (struct btrace_function *bfun,
557  const struct btrace_insn *insn)
558 {
559  VEC_safe_push (btrace_insn_s, bfun->insn, insn);
560 
561  if (record_debug > 1)
562  ftrace_debug (bfun, "update insn");
563 }
564 
565 /* Classify the instruction at PC. */
566 
567 static enum btrace_insn_class
568 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
569 {
570  enum btrace_insn_class iclass;
571 
572  iclass = BTRACE_INSN_OTHER;
573  TRY
574  {
575  if (gdbarch_insn_is_call (gdbarch, pc))
576  iclass = BTRACE_INSN_CALL;
577  else if (gdbarch_insn_is_ret (gdbarch, pc))
578  iclass = BTRACE_INSN_RETURN;
579  else if (gdbarch_insn_is_jump (gdbarch, pc))
580  iclass = BTRACE_INSN_JUMP;
581  }
582  CATCH (error, RETURN_MASK_ERROR)
583  {
584  }
585  END_CATCH
586 
587  return iclass;
588 }
589 
590 /* Compute the function branch trace from BTS trace. */
591 
592 static void
593 btrace_compute_ftrace_bts (struct thread_info *tp,
594  const struct btrace_data_bts *btrace)
595 {
596  struct btrace_thread_info *btinfo;
597  struct btrace_function *begin, *end;
598  struct gdbarch *gdbarch;
599  unsigned int blk, ngaps;
600  int level;
601 
602  gdbarch = target_gdbarch ();
603  btinfo = &tp->btrace;
604  begin = btinfo->begin;
605  end = btinfo->end;
606  ngaps = btinfo->ngaps;
607  level = begin != NULL ? -btinfo->level : INT_MAX;
608  blk = VEC_length (btrace_block_s, btrace->blocks);
609 
610  while (blk != 0)
611  {
612  btrace_block_s *block;
613  CORE_ADDR pc;
614 
615  blk -= 1;
616 
617  block = VEC_index (btrace_block_s, btrace->blocks, blk);
618  pc = block->begin;
619 
620  for (;;)
621  {
622  struct btrace_insn insn;
623  int size;
624 
625  /* We should hit the end of the block. Warn if we went too far. */
626  if (block->end < pc)
627  {
628  /* Indicate the gap in the trace - unless we're at the
629  beginning. */
630  if (begin != NULL)
631  {
632  warning (_("Recorded trace may be corrupted around %s."),
633  core_addr_to_string_nz (pc));
634 
635  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
636  ngaps += 1;
637  }
638  break;
639  }
640 
641  end = ftrace_update_function (end, pc);
642  if (begin == NULL)
643  begin = end;
644 
645  /* Maintain the function level offset.
646  For all but the last block, we do it here. */
647  if (blk != 0)
648  level = min (level, end->level);
649 
650  size = 0;
651  TRY
652  {
653  size = gdb_insn_length (gdbarch, pc);
654  }
655  CATCH (error, RETURN_MASK_ERROR)
656  {
657  }
658  END_CATCH
659 
660  insn.pc = pc;
661  insn.size = size;
662  insn.iclass = ftrace_classify_insn (gdbarch, pc);
663 
664  ftrace_update_insns (end, &insn);
665 
666  /* We're done once we pushed the instruction at the end. */
667  if (block->end == pc)
668  break;
669 
670  /* We can't continue if we fail to compute the size. */
671  if (size <= 0)
672  {
673  warning (_("Recorded trace may be incomplete around %s."),
674  core_addr_to_string_nz (pc));
675 
676  /* Indicate the gap in the trace. We just added INSN so we're
677  not at the beginning. */
678  end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
679  ngaps += 1;
680 
681  break;
682  }
683 
684  pc += size;
685 
686  /* Maintain the function level offset.
687  For the last block, we do it here to not consider the last
688  instruction.
689  Since the last instruction corresponds to the current instruction
690  and is not really part of the execution history, it shouldn't
691  affect the level. */
692  if (blk == 0)
693  level = min (level, end->level);
694  }
695  }
696 
697  btinfo->begin = begin;
698  btinfo->end = end;
699  btinfo->ngaps = ngaps;
700 
701  /* LEVEL is the minimal function level of all btrace function segments.
702  Define the global level offset to -LEVEL so all function levels are
703  normalized to start at zero. */
704  btinfo->level = -level;
705 }
706 
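/* A worked example of the level normalization (hypothetical trace): if
   recording starts inside a callee (level 0) and the thread then returns
   twice to callers the trace never saw, ftrace_new_return assigns those
   outer segments levels -1 and -2.  LEVEL is then -2, btinfo->level is set
   to 2, and after adding that offset the outermost recorded frame shows at
   level 0 while the initial callee shows at level 2. */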
707 #if defined (HAVE_LIBIPT)
708 
709 static enum btrace_insn_class
710 pt_reclassify_insn (enum pt_insn_class iclass)
711 {
712  switch (iclass)
713  {
714  case ptic_call:
715  return BTRACE_INSN_CALL;
716 
717  case ptic_return:
718  return BTRACE_INSN_RETURN;
719 
720  case ptic_jump:
721  return BTRACE_INSN_JUMP;
722 
723  default:
724  return BTRACE_INSN_OTHER;
725  }
726 }
727 
728 /* Add function branch trace using DECODER. */
729 
730 static void
731 ftrace_add_pt (struct pt_insn_decoder *decoder,
732  struct btrace_function **pbegin,
733  struct btrace_function **pend, int *plevel,
734  unsigned int *ngaps)
735 {
736  struct btrace_function *begin, *end, *upd;
737  uint64_t offset;
738  int errcode, nerrors;
739 
740  begin = *pbegin;
741  end = *pend;
742  nerrors = 0;
743  for (;;)
744  {
745  struct btrace_insn btinsn;
746  struct pt_insn insn;
747 
748  errcode = pt_insn_sync_forward (decoder);
749  if (errcode < 0)
750  {
751  if (errcode != -pte_eos)
752  warning (_("Failed to synchronize onto the Intel(R) Processor "
753  "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
754  break;
755  }
756 
757  memset (&btinsn, 0, sizeof (btinsn));
758  for (;;)
759  {
760  errcode = pt_insn_next (decoder, &insn, sizeof(insn));
761  if (errcode < 0)
762  break;
763 
764  /* Look for gaps in the trace - unless we're at the beginning. */
765  if (begin != NULL)
766  {
767  /* Tracing is disabled and re-enabled each time we enter the
768  kernel. Most times, we continue from the same instruction we
769  stopped before. This is indicated via the RESUMED instruction
770  flag. The ENABLED instruction flag means that we continued
771  from some other instruction. Indicate this as a trace gap. */
772  if (insn.enabled)
773  *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
774 
775  /* Indicate trace overflows. */
776  if (insn.resynced)
777  *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
778  }
779 
780  upd = ftrace_update_function (end, insn.ip);
781  if (upd != end)
782  {
783  *pend = end = upd;
784 
785  if (begin == NULL)
786  *pbegin = begin = upd;
787  }
788 
789  /* Maintain the function level offset. */
790  *plevel = min (*plevel, end->level);
791 
792  btinsn.pc = (CORE_ADDR) insn.ip;
793  btinsn.size = (gdb_byte) insn.size;
794  btinsn.iclass = pt_reclassify_insn (insn.iclass);
795 
796  ftrace_update_insns (end, &btinsn);
797  }
798 
799  if (errcode == -pte_eos)
800  break;
801 
802  /* If the gap is at the very beginning, we ignore it - we will have
803  less trace, but we won't have any holes in the trace. */
804  if (begin == NULL)
805  continue;
806 
807  pt_insn_get_offset (decoder, &offset);
808 
809  warning (_("Failed to decode Intel(R) Processor Trace near trace "
810  "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."),
811  offset, insn.ip, pt_errstr (pt_errcode (errcode)));
812 
813  /* Indicate the gap in the trace. */
814  *pend = end = ftrace_new_gap (end, errcode);
815  *ngaps += 1;
816  }
817 
818  if (nerrors > 0)
819  warning (_("The recorded execution trace may have gaps."));
820 }
821 
822 /* A callback function to allow the trace decoder to read the inferior's
823  memory. */
824 
825 static int
826 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
827  const struct pt_asid *asid, uint64_t pc,
828  void *context)
829 {
830  int errcode;
831 
832  TRY
833  {
834  errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
835  if (errcode != 0)
836  return -pte_nomap;
837  }
838  CATCH (error, RETURN_MASK_ERROR)
839  {
840  return -pte_nomap;
841  }
842  END_CATCH
843 
844  return size;
845 }
846 
847 /* Translate the vendor from one enum to another. */
848 
849 static enum pt_cpu_vendor
850 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
851 {
852  switch (vendor)
853  {
854  default:
855  return pcv_unknown;
856 
857  case CV_INTEL:
858  return pcv_intel;
859  }
860 }
861 
862 /* Finalize the function branch trace after decode. */
863 
864 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
865  struct thread_info *tp, int level)
866 {
867  pt_insn_free_decoder (decoder);
868 
869  /* LEVEL is the minimal function level of all btrace function segments.
870  Define the global level offset to -LEVEL so all function levels are
871  normalized to start at zero. */
872  tp->btrace.level = -level;
873 
874  /* Add a single last instruction entry for the current PC.
875  This allows us to compute the backtrace at the current PC using both
876  standard unwind and btrace unwind.
877  This extra entry is ignored by all record commands. */
878  btrace_add_pc (tp);
879 }
880 
881 /* Compute the function branch trace from Intel(R) Processor Trace. */
882 
883 static void
884 btrace_compute_ftrace_pt (struct thread_info *tp,
885  const struct btrace_data_pt *btrace)
886 {
887  struct btrace_thread_info *btinfo;
888  struct pt_insn_decoder *decoder;
889  struct pt_config config;
890  int level, errcode;
891 
892  if (btrace->size == 0)
893  return;
894 
895  btinfo = &tp->btrace;
896  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
897 
898  pt_config_init(&config);
899  config.begin = btrace->data;
900  config.end = btrace->data + btrace->size;
901 
902  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
903  config.cpu.family = btrace->config.cpu.family;
904  config.cpu.model = btrace->config.cpu.model;
905  config.cpu.stepping = btrace->config.cpu.stepping;
906 
907  errcode = pt_cpu_errata (&config.errata, &config.cpu);
908  if (errcode < 0)
909  error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
910  pt_errstr (pt_errcode (errcode)));
911 
912  decoder = pt_insn_alloc_decoder (&config);
913  if (decoder == NULL)
914  error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
915 
916  TRY
917  {
918  struct pt_image *image;
919 
920  image = pt_insn_get_image(decoder);
921  if (image == NULL)
922  error (_("Failed to configure the Intel(R) Processor Trace decoder."));
923 
924  errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
925  if (errcode < 0)
926  error (_("Failed to configure the Intel(R) Processor Trace decoder: "
927  "%s."), pt_errstr (pt_errcode (errcode)));
928 
929  ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
930  &btinfo->ngaps);
931  }
932  CATCH (error, RETURN_MASK_ALL)
933  {
934  /* Indicate a gap in the trace if we quit trace processing. */
935  if (error.reason == RETURN_QUIT && btinfo->end != NULL)
936  {
937  btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
938  btinfo->ngaps++;
939  }
940 
941  btrace_finalize_ftrace_pt (decoder, tp, level);
942 
943  throw_exception (error);
944  }
945  END_CATCH
946 
947  btrace_finalize_ftrace_pt (decoder, tp, level);
948 }
949 
950 #else /* defined (HAVE_LIBIPT) */
951 
952 static void
953 btrace_compute_ftrace_pt (struct thread_info *tp,
954  const struct btrace_data_pt *btrace)
955 {
956  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
957 }
958 
959 #endif /* defined (HAVE_LIBIPT) */
960 
961 /* Compute the function branch trace from a block branch trace BTRACE for
962  a thread given by BTINFO. */
963 
964 static void
965 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
966 {
967  DEBUG ("compute ftrace");
968 
969  switch (btrace->format)
970  {
971  case BTRACE_FORMAT_NONE:
972  return;
973 
974  case BTRACE_FORMAT_BTS:
975  btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
976  return;
977 
978  case BTRACE_FORMAT_PT:
979  btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
980  return;
981  }
982 
983  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
984 }
985 
986 /* Add an entry for the current PC. */
987 
988 static void
989 btrace_add_pc (struct thread_info *tp)
990 {
991  struct btrace_data btrace;
992  struct btrace_block *block;
993  struct regcache *regcache;
994  struct cleanup *cleanup;
995  CORE_ADDR pc;
996 
997  regcache = get_thread_regcache (tp->ptid);
998  pc = regcache_read_pc (regcache);
999 
1000  btrace_data_init (&btrace);
1001  btrace.format = BTRACE_FORMAT_BTS;
1002  btrace.variant.bts.blocks = NULL;
1003 
1004  cleanup = make_cleanup_btrace_data (&btrace);
1005 
1006  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1007  block->begin = pc;
1008  block->end = pc;
1009 
1010  btrace_compute_ftrace (tp, &btrace);
1011 
1012  do_cleanups (cleanup);
1013 }
1014 
1015 /* See btrace.h. */
1016 
1017 void
1018 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1019 {
1020  if (tp->btrace.target != NULL)
1021  return;
1022 
1023  if (!target_supports_btrace (conf->format))
1024  error (_("Target does not support branch tracing."));
1025 
1026  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1027 
1028  tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1029 
1030  /* Add an entry for the current PC so we start tracing from where we
1031  enabled it. */
1032  if (tp->btrace.target != NULL)
1033  btrace_add_pc (tp);
1034 }
1035 
1036 /* See btrace.h. */
1037 
1038 const struct btrace_config *
1039 btrace_conf (const struct btrace_thread_info *btinfo)
1040 {
1041  if (btinfo->target == NULL)
1042  return NULL;
1043 
1044  return target_btrace_conf (btinfo->target);
1045 }
1046 
1047 /* See btrace.h. */
1048 
1049 void
1050 btrace_disable (struct thread_info *tp)
1051 {
1052  struct btrace_thread_info *btp = &tp->btrace;
1053  int errcode = 0;
1054 
1055  if (btp->target == NULL)
1056  return;
1057 
1058  DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1059 
1060  target_disable_btrace (btp->target);
1061  btp->target = NULL;
1062 
1063  btrace_clear (tp);
1064 }
1065 
1066 /* See btrace.h. */
1067 
1068 void
1069 btrace_teardown (struct thread_info *tp)
1070 {
1071  struct btrace_thread_info *btp = &tp->btrace;
1072  int errcode = 0;
1073 
1074  if (btp->target == NULL)
1075  return;
1076 
1077  DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1078 
1079  target_teardown_btrace (btp->target);
1080  btp->target = NULL;
1081 
1082  btrace_clear (tp);
1083 }
1084 
1085 /* Stitch branch trace in BTS format. */
1086 
1087 static int
1088 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1089 {
1090  struct btrace_thread_info *btinfo;
1091  struct btrace_function *last_bfun;
1092  struct btrace_insn *last_insn;
1093  btrace_block_s *first_new_block;
1094 
1095  btinfo = &tp->btrace;
1096  last_bfun = btinfo->end;
1097  gdb_assert (last_bfun != NULL);
1098  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1099 
1100  /* If the existing trace ends with a gap, we just glue the traces
1101  together. We need to drop the last (i.e. chronologically first) block
1102  of the new trace, though, since we can't fill in the start address.  */
1103  if (VEC_empty (btrace_insn_s, last_bfun->insn))
1104  {
1105  VEC_pop (btrace_block_s, btrace->blocks);
1106  return 0;
1107  }
1108 
1109  /* Beware that block trace starts with the most recent block, so the
1110  chronologically first block in the new trace is the last block in
1111  the new trace's block vector. */
1112  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1113  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1114 
1115  /* If the current PC at the end of the block is the same as in our current
1116  trace, there are two explanations:
1117  1. we executed the instruction and some branch brought us back.
1118  2. we have not made any progress.
1119  In the first case, the delta trace vector should contain at least two
1120  entries.
1121  In the second case, the delta trace vector should contain exactly one
1122  entry for the partial block containing the current PC. Remove it. */
1123  if (first_new_block->end == last_insn->pc
1124  && VEC_length (btrace_block_s, btrace->blocks) == 1)
1125  {
1126  VEC_pop (btrace_block_s, btrace->blocks);
1127  return 0;
1128  }
1129 
1130  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1131  core_addr_to_string_nz (first_new_block->end));
1132 
1133  /* Do a simple sanity check to make sure we don't accidentally end up
1134  with a bad block. This should not occur in practice. */
1135  if (first_new_block->end < last_insn->pc)
1136  {
1137  warning (_("Error while trying to read delta trace. Falling back to "
1138  "a full read."));
1139  return -1;
1140  }
1141 
1142  /* We adjust the last block to start at the end of our current trace. */
1143  gdb_assert (first_new_block->begin == 0);
1144  first_new_block->begin = last_insn->pc;
1145 
1146  /* We simply pop the last insn so we can insert it again as part of
1147  the normal branch trace computation.
1148  Since instruction iterators are based on indices in the instructions
1149  vector, we don't leave any pointers dangling. */
1150  DEBUG ("pruning insn at %s for stitching",
1151  ftrace_print_insn_addr (last_insn));
1152 
1153  VEC_pop (btrace_insn_s, last_bfun->insn);
1154 
1155  /* The instructions vector may become empty temporarily if this has
1156  been the only instruction in this function segment.
1157  This violates the invariant but will be remedied shortly by
1158  btrace_compute_ftrace when we add the new trace. */
1159 
1160  /* The only case where this would hurt is if the entire trace consisted
1161  of just that one instruction. If we remove it, we might turn the now
1162  empty btrace function segment into a gap. But we don't want gaps at
1163  the beginning. To avoid this, we remove the entire old trace. */
1164  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1165  btrace_clear (tp);
1166 
1167  return 0;
1168 }
1169 
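/* A hypothetical example of the stitching above: suppose the existing
   trace ends at PC 0x4005d0.  A delta read returning only the block
   [0, 0x4005d0] re-reports the current PC without progress, so that block
   is dropped.  A delta read whose chronologically first block is
   [0, 0x4005e0] means execution continued past 0x4005d0 and branched at
   0x4005e0; that block's unknown begin address is patched to 0x4005d0 and
   the instruction at 0x4005d0 is popped from the old trace so that
   btrace_compute_ftrace re-adds it as part of the new trace. */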
1170 /* Adjust the block trace in order to stitch old and new trace together.
1171  BTRACE is the new delta trace between the last and the current stop.
1172  TP is the traced thread.
1173  May modify BTRACE as well as the existing trace in TP.
1174  Return 0 on success, -1 otherwise. */
1175 
1176 static int
1177 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1178 {
1179  /* If we don't have trace, there's nothing to do. */
1180  if (btrace_data_empty (btrace))
1181  return 0;
1182 
1183  switch (btrace->format)
1184  {
1185  case BTRACE_FORMAT_NONE:
1186  return 0;
1187 
1188  case BTRACE_FORMAT_BTS:
1189  return btrace_stitch_bts (&btrace->variant.bts, tp);
1190 
1191  case BTRACE_FORMAT_PT:
1192  /* Delta reads are not supported. */
1193  return -1;
1194  }
1195 
1196  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1197 }
1198 
1199 /* Clear the branch trace histories in BTINFO. */
1200 
1201 static void
1202 btrace_clear_history (struct btrace_thread_info *btinfo)
1203 {
1204  xfree (btinfo->insn_history);
1205  xfree (btinfo->call_history);
1206  xfree (btinfo->replay);
1207 
1208  btinfo->insn_history = NULL;
1209  btinfo->call_history = NULL;
1210  btinfo->replay = NULL;
1211 }
1212 
1213 /* Clear the branch trace maintenance histories in BTINFO. */
1214 
1215 static void
1216 btrace_maint_clear (struct btrace_thread_info *btinfo)
1217 {
1218  switch (btinfo->data.format)
1219  {
1220  default:
1221  break;
1222 
1223  case BTRACE_FORMAT_BTS:
1224  btinfo->maint.variant.bts.packet_history.begin = 0;
1225  btinfo->maint.variant.bts.packet_history.end = 0;
1226  break;
1227 
1228 #if defined (HAVE_LIBIPT)
1229  case BTRACE_FORMAT_PT:
1230  xfree (btinfo->maint.variant.pt.packets);
1231 
1232  btinfo->maint.variant.pt.packets = NULL;
1233  btinfo->maint.variant.pt.packet_history.begin = 0;
1234  btinfo->maint.variant.pt.packet_history.end = 0;
1235  break;
1236 #endif /* defined (HAVE_LIBIPT) */
1237  }
1238 }
1239 
1240 /* See btrace.h. */
1241 
1242 void
1243 btrace_fetch (struct thread_info *tp)
1244 {
1245  struct btrace_thread_info *btinfo;
1246  struct btrace_target_info *tinfo;
1247  struct btrace_data btrace;
1248  struct cleanup *cleanup;
1249  int errcode;
1250 
1251  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1252 
1253  btinfo = &tp->btrace;
1254  tinfo = btinfo->target;
1255  if (tinfo == NULL)
1256  return;
1257 
1258  /* There's no way we could get new trace while replaying.
1259  On the other hand, delta trace would return a partial record with the
1260  current PC, which is the replay PC, not the last PC, as expected. */
1261  if (btinfo->replay != NULL)
1262  return;
1263 
1264  btrace_data_init (&btrace);
1265  cleanup = make_cleanup_btrace_data (&btrace);
1266 
1267  /* Let's first try to extend the trace we already have. */
1268  if (btinfo->end != NULL)
1269  {
1270  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1271  if (errcode == 0)
1272  {
1273  /* Success. Let's try to stitch the traces together. */
1274  errcode = btrace_stitch_trace (&btrace, tp);
1275  }
1276  else
1277  {
1278  /* We failed to read delta trace. Let's try to read new trace. */
1279  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1280 
1281  /* If we got any new trace, discard what we have. */
1282  if (errcode == 0 && !btrace_data_empty (&btrace))
1283  btrace_clear (tp);
1284  }
1285 
1286  /* If we were not able to read the trace, we start over. */
1287  if (errcode != 0)
1288  {
1289  btrace_clear (tp);
1290  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1291  }
1292  }
1293  else
1294  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1295 
1296  /* If we were not able to read the branch trace, signal an error. */
1297  if (errcode != 0)
1298  error (_("Failed to read branch trace."));
1299 
1300  /* Compute the trace, provided we have any. */
1301  if (!btrace_data_empty (&btrace))
1302  {
1303  /* Store the raw trace data. The stored data will be cleared in
1304  btrace_clear, so we always append the new trace. */
1305  btrace_data_append (&btinfo->data, &btrace);
1306  btrace_maint_clear (btinfo);
1307 
1308  btrace_clear_history (btinfo);
1309  btrace_compute_ftrace (tp, &btrace);
1310  }
1311 
1312  do_cleanups (cleanup);
1313 }
1314 
1315 /* See btrace.h. */
1316 
1317 void
1318 btrace_clear (struct thread_info *tp)
1319 {
1320  struct btrace_thread_info *btinfo;
1321  struct btrace_function *it, *trash;
1322 
1323  DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1324 
1325  /* Make sure btrace frames that may hold a pointer into the branch
1326  trace data are destroyed. */
1327  reinit_frame_cache ();
1328 
1329  btinfo = &tp->btrace;
1330 
1331  it = btinfo->begin;
1332  while (it != NULL)
1333  {
1334  trash = it;
1335  it = it->flow.next;
1336 
1337  xfree (trash);
1338  }
1339 
1340  btinfo->begin = NULL;
1341  btinfo->end = NULL;
1342  btinfo->ngaps = 0;
1343 
1344  /* Must clear the maint data before - it depends on BTINFO->DATA. */
1345  btrace_maint_clear (btinfo);
1346  btrace_data_clear (&btinfo->data);
1347  btrace_clear_history (btinfo);
1348 }
1349 
1350 /* See btrace.h. */
1351 
1352 void
1353 btrace_free_objfile (struct objfile *objfile)
1354 {
1355  struct thread_info *tp;
1356 
1357  DEBUG ("free objfile");
1358 
1359  ALL_NON_EXITED_THREADS (tp)
1360  btrace_clear (tp);
1361 }
1362 
1363 #if defined (HAVE_LIBEXPAT)
1364 
1365 /* Check the btrace document version. */
1366 
1367 static void
1368 check_xml_btrace_version (struct gdb_xml_parser *parser,
1369  const struct gdb_xml_element *element,
1370  void *user_data, VEC (gdb_xml_value_s) *attributes)
1371 {
1372  const char *version = xml_find_attribute (attributes, "version")->value;
1373 
1374  if (strcmp (version, "1.0") != 0)
1375  gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1376 }
1377 
1378 /* Parse a btrace "block" xml record. */
1379 
1380 static void
1381 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1382  const struct gdb_xml_element *element,
1383  void *user_data, VEC (gdb_xml_value_s) *attributes)
1384 {
1385  struct btrace_data *btrace;
1386  struct btrace_block *block;
1387  ULONGEST *begin, *end;
1388 
1389  btrace = user_data;
1390 
1391  switch (btrace->format)
1392  {
1393  case BTRACE_FORMAT_BTS:
1394  break;
1395 
1396  case BTRACE_FORMAT_NONE:
1397  btrace->format = BTRACE_FORMAT_BTS;
1398  btrace->variant.bts.blocks = NULL;
1399  break;
1400 
1401  default:
1402  gdb_xml_error (parser, _("Btrace format error."));
1403  }
1404 
1405  begin = xml_find_attribute (attributes, "begin")->value;
1406  end = xml_find_attribute (attributes, "end")->value;
1407 
1408  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1409  block->begin = *begin;
1410  block->end = *end;
1411 }
1412 
1413 /* Parse a "raw" xml record. */
1414 
1415 static void
1416 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1417  gdb_byte **pdata, unsigned long *psize)
1418 {
1419  struct cleanup *cleanup;
1420  gdb_byte *data, *bin;
1421  unsigned long size;
1422  size_t len;
1423 
1424  len = strlen (body_text);
1425  size = len / 2;
1426 
1427  if ((size_t) size * 2 != len)
1428  gdb_xml_error (parser, _("Bad raw data size."));
1429 
1430  bin = data = xmalloc (size);
1431  cleanup = make_cleanup (xfree, data);
1432 
1433  /* We use hex encoding - see common/rsp-low.h. */
1434  while (len > 0)
1435  {
1436  char hi, lo;
1437 
1438  hi = *body_text++;
1439  lo = *body_text++;
1440 
1441  if (hi == 0 || lo == 0)
1442  gdb_xml_error (parser, _("Bad hex encoding."));
1443 
1444  *bin++ = fromhex (hi) * 16 + fromhex (lo);
1445  len -= 2;
1446  }
1447 
1448  discard_cleanups (cleanup);
1449 
1450  *pdata = data;
1451  *psize = size;
1452 }
1453 
1454 /* Parse a btrace pt-config "cpu" xml record. */
1455 
1456 static void
1457 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
1458  const struct gdb_xml_element *element,
1459  void *user_data,
1460  VEC (gdb_xml_value_s) *attributes)
1461 {
1462  struct btrace_data *btrace;
1463  const char *vendor;
1464  ULONGEST *family, *model, *stepping;
1465 
1466  vendor = xml_find_attribute (attributes, "vendor")->value;
1467  family = xml_find_attribute (attributes, "family")->value;
1468  model = xml_find_attribute (attributes, "model")->value;
1469  stepping = xml_find_attribute (attributes, "stepping")->value;
1470 
1471  btrace = user_data;
1472 
1473  if (strcmp (vendor, "GenuineIntel") == 0)
1474  btrace->variant.pt.config.cpu.vendor = CV_INTEL;
1475 
1476  btrace->variant.pt.config.cpu.family = *family;
1477  btrace->variant.pt.config.cpu.model = *model;
1478  btrace->variant.pt.config.cpu.stepping = *stepping;
1479 }
1480 
1481 /* Parse a btrace pt "raw" xml record. */
1482 
1483 static void
1484 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
1485  const struct gdb_xml_element *element,
1486  void *user_data, const char *body_text)
1487 {
1488  struct btrace_data *btrace;
1489 
1490  btrace = user_data;
1491  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
1492  &btrace->variant.pt.size);
1493 }
1494 
1495 /* Parse a btrace "pt" xml record. */
1496 
1497 static void
1498 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
1499  const struct gdb_xml_element *element,
1500  void *user_data, VEC (gdb_xml_value_s) *attributes)
1501 {
1502  struct btrace_data *btrace;
1503 
1504  btrace = user_data;
1505  btrace->format = BTRACE_FORMAT_PT;
1506  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
1507  btrace->variant.pt.data = NULL;
1508  btrace->variant.pt.size = 0;
1509 }
1510 
1511 static const struct gdb_xml_attribute block_attributes[] = {
1512  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1513  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1514  { NULL, GDB_XML_AF_NONE, NULL, NULL }
1515 };
1516 
1517 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
1518  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
1519  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1520  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1521  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1522  { NULL, GDB_XML_AF_NONE, NULL, NULL }
1523 };
1524 
1525 static const struct gdb_xml_element btrace_pt_config_children[] = {
1526  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
1527  parse_xml_btrace_pt_config_cpu, NULL },
1528  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1529 };
1530 
1531 static const struct gdb_xml_element btrace_pt_children[] = {
1532  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
1533  NULL },
1534  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
1535  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1536 };
1537 
1538 static const struct gdb_xml_attribute btrace_attributes[] = {
1539  { "version", GDB_XML_AF_NONE, NULL, NULL },
1540  { NULL, GDB_XML_AF_NONE, NULL, NULL }
1541 };
1542 
1543 static const struct gdb_xml_element btrace_children[] = {
1544  { "block", block_attributes, NULL,
1545  GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
1546  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
1547  NULL },
1548  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1549 };
1550 
1551 static const struct gdb_xml_element btrace_elements[] = {
1552  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
1553  check_xml_btrace_version, NULL },
1554  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1555 };
1556 
1557 #endif /* defined (HAVE_LIBEXPAT) */
1558 
1559 /* See btrace.h. */
1560 
1561 void
1562 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
1563 {
1564  struct cleanup *cleanup;
1565  int errcode;
1566 
1567 #if defined (HAVE_LIBEXPAT)
1568 
1569  btrace->format = BTRACE_FORMAT_NONE;
1570 
1571  cleanup = make_cleanup_btrace_data (btrace);
1572  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
1573  buffer, btrace);
1574  if (errcode != 0)
1575  error (_("Error parsing branch trace."));
1576 
1577  /* Keep parse results. */
1578  discard_cleanups (cleanup);
1579 
1580 #else /* !defined (HAVE_LIBEXPAT) */
1581 
1582  error (_("Cannot process branch trace. XML parsing is not supported."));
1583 
1584 #endif /* !defined (HAVE_LIBEXPAT) */
1585 }
1586 
1587 #if defined (HAVE_LIBEXPAT)
1588 
1589 /* Parse a btrace-conf "bts" xml record. */
1590 
1591 static void
1592 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1593  const struct gdb_xml_element *element,
1594  void *user_data, VEC (gdb_xml_value_s) *attributes)
1595 {
1596  struct btrace_config *conf;
1597  struct gdb_xml_value *size;
1598 
1599  conf = user_data;
1600  conf->format = BTRACE_FORMAT_BTS;
1601  conf->bts.size = 0;
1602 
1603  size = xml_find_attribute (attributes, "size");
1604  if (size != NULL)
1605  conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
1606 }
1607 
1608 /* Parse a btrace-conf "pt" xml record. */
1609 
1610 static void
1611 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
1612  const struct gdb_xml_element *element,
1613  void *user_data, VEC (gdb_xml_value_s) *attributes)
1614 {
1615  struct btrace_config *conf;
1616  struct gdb_xml_value *size;
1617 
1618  conf = user_data;
1619  conf->format = BTRACE_FORMAT_PT;
1620  conf->pt.size = 0;
1621 
1622  size = xml_find_attribute (attributes, "size");
1623  if (size != NULL)
1624  conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
1625 }
1626 
1627 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
1628  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1629  { NULL, GDB_XML_AF_NONE, NULL, NULL }
1630 };
1631 
1632 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
1633  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1634  { NULL, GDB_XML_AF_NONE, NULL, NULL }
1635 };
1636 
1637 static const struct gdb_xml_element btrace_conf_children[] = {
1638  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
1639  parse_xml_btrace_conf_bts, NULL },
1640  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
1641  parse_xml_btrace_conf_pt, NULL },
1642  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1643 };
1644 
1645 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
1646  { "version", GDB_XML_AF_NONE, NULL, NULL },
1647  { NULL, GDB_XML_AF_NONE, NULL, NULL }
1648 };
1649 
1650 static const struct gdb_xml_element btrace_conf_elements[] = {
1651  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
1652  GDB_XML_EF_NONE, NULL, NULL },
1653  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1654 };
1655 
1656 #endif /* defined (HAVE_LIBEXPAT) */
1657 
1658 /* See btrace.h. */
1659 
1660 void
1661 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
1662 {
1663  int errcode;
1664 
1665 #if defined (HAVE_LIBEXPAT)
1666 
1667  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
1668  btrace_conf_elements, xml, conf);
1669  if (errcode != 0)
1670  error (_("Error parsing branch trace configuration."));
1671 
1672 #else /* !defined (HAVE_LIBEXPAT) */
1673 
1674  error (_("XML parsing is not supported."));
1675 
1676 #endif /* !defined (HAVE_LIBEXPAT) */
1677 }
1678 
1679 /* See btrace.h. */
1680 
1681 const struct btrace_insn *
1682 btrace_insn_get (const struct btrace_insn_iterator *it)
1683 {
1684  const struct btrace_function *bfun;
1685  unsigned int index, end;
1686 
1687  index = it->index;
1688  bfun = it->function;
1689 
1690  /* Check if the iterator points to a gap in the trace. */
1691  if (bfun->errcode != 0)
1692  return NULL;
1693 
1694  /* The index is within the bounds of this function's instruction vector. */
1695  end = VEC_length (btrace_insn_s, bfun->insn);
1696  gdb_assert (0 < end);
1697  gdb_assert (index < end);
1698 
1699  return VEC_index (btrace_insn_s, bfun->insn, index);
1700 }
1701 
1702 /* See btrace.h. */
1703 
1704 unsigned int
1705 btrace_insn_number (const struct btrace_insn_iterator *it)
1706 {
1707  const struct btrace_function *bfun;
1708 
1709  bfun = it->function;
1710 
1711  /* Return zero if the iterator points to a gap in the trace. */
1712  if (bfun->errcode != 0)
1713  return 0;
1714 
1715  return bfun->insn_offset + it->index;
1716 }
1717 
1718 /* See btrace.h. */
1719 
1720 void
1721 btrace_insn_begin (struct btrace_insn_iterator *it,
1722  const struct btrace_thread_info *btinfo)
1723 {
1724  const struct btrace_function *bfun;
1725 
1726  bfun = btinfo->begin;
1727  if (bfun == NULL)
1728  error (_("No trace."));
1729 
1730  it->function = bfun;
1731  it->index = 0;
1732 }
1733 
1734 /* See btrace.h. */
1735 
1736 void
1737 btrace_insn_end (struct btrace_insn_iterator *it,
1738  const struct btrace_thread_info *btinfo)
1739 {
1740  const struct btrace_function *bfun;
1741  unsigned int length;
1742 
1743  bfun = btinfo->end;
1744  if (bfun == NULL)
1745  error (_("No trace."));
1746 
1747  length = VEC_length (btrace_insn_s, bfun->insn);
1748 
1749  /* The last function may either be a gap or it contains the current
1750  instruction, which is one past the end of the execution trace; ignore
1751  it. */
1752  if (length > 0)
1753  length -= 1;
1754 
1755  it->function = bfun;
1756  it->index = length;
1757 }
1758 
1759 /* See btrace.h. */
1760 
1761 unsigned int
1762 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
1763 {
1764  const struct btrace_function *bfun;
1765  unsigned int index, steps;
1766 
1767  bfun = it->function;
1768  steps = 0;
1769  index = it->index;
1770 
1771  while (stride != 0)
1772  {
1773  unsigned int end, space, adv;
1774 
1775  end = VEC_length (btrace_insn_s, bfun->insn);
1776 
1777  /* An empty function segment represents a gap in the trace. We count
1778  it as one instruction. */
1779  if (end == 0)
1780  {
1781  const struct btrace_function *next;
1782 
1783  next = bfun->flow.next;
1784  if (next == NULL)
1785  break;
1786 
1787  stride -= 1;
1788  steps += 1;
1789 
1790  bfun = next;
1791  index = 0;
1792 
1793  continue;
1794  }
1795 
1796  gdb_assert (0 < end);
1797  gdb_assert (index < end);
1798 
1799  /* Compute the number of instructions remaining in this segment. */
1800  space = end - index;
1801 
1802  /* Advance the iterator as far as possible within this segment. */
1803  adv = min (space, stride);
1804  stride -= adv;
1805  index += adv;
1806  steps += adv;
1807 
1808  /* Move to the next function if we're at the end of this one. */
1809  if (index == end)
1810  {
1811  const struct btrace_function *next;
1812 
1813  next = bfun->flow.next;
1814  if (next == NULL)
1815  {
1816  /* We stepped past the last function.
1817 
1818  Let's adjust the index to point to the last instruction in
1819  the previous function. */
1820  index -= 1;
1821  steps -= 1;
1822  break;
1823  }
1824 
1825  /* We now point to the first instruction in the new function. */
1826  bfun = next;
1827  index = 0;
1828  }
1829 
1830  /* We did make progress. */
1831  gdb_assert (adv > 0);
1832  }
1833 
1834  /* Update the iterator. */
1835  it->function = bfun;
1836  it->index = index;
1837 
1838  return steps;
1839 }
1840 
1841 /* See btrace.h. */
1842 
1843 unsigned int
1844 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1845 {
1846  const struct btrace_function *bfun;
1847  unsigned int index, steps;
1848 
1849  bfun = it->function;
1850  steps = 0;
1851  index = it->index;
1852 
1853  while (stride != 0)
1854  {
1855  unsigned int adv;
1856 
1857  /* Move to the previous function if we're at the start of this one. */
1858  if (index == 0)
1859  {
1860  const struct btrace_function *prev;
1861 
1862  prev = bfun->flow.prev;
1863  if (prev == NULL)
1864  break;
1865 
1866  /* We point to one after the last instruction in the new function. */
1867  bfun = prev;
1868  index = VEC_length (btrace_insn_s, bfun->insn);
1869 
1870  /* An empty function segment represents a gap in the trace. We count
1871  it as one instruction. */
1872  if (index == 0)
1873  {
1874  stride -= 1;
1875  steps += 1;
1876 
1877  continue;
1878  }
1879  }
1880 
1881  /* Advance the iterator as far as possible within this segment. */
1882  adv = min (index, stride);
1883 
1884  stride -= adv;
1885  index -= adv;
1886  steps += adv;
1887 
1888  /* We did make progress. */
1889  gdb_assert (adv > 0);
1890  }
1891 
1892  /* Update the iterator. */
1893  it->function = bfun;
1894  it->index = index;
1895 
1896  return steps;
1897 }
1898 
1899 /* See btrace.h. */
1900 
1901 int
1902 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1903  const struct btrace_insn_iterator *rhs)
1904 {
1905  unsigned int lnum, rnum;
1906 
1907  lnum = btrace_insn_number (lhs);
1908  rnum = btrace_insn_number (rhs);
1909 
1910  /* A gap has an instruction number of zero. Things are getting more
1911  complicated if gaps are involved.
1912 
1913  We take the instruction number offset from the iterator's function.
1914  This is the number of the first instruction after the gap.
1915 
1916  This is OK as long as both lhs and rhs point to gaps. If only one of
1917  them does, we need to adjust the number based on the other's regular
1918  instruction number. Otherwise, a gap might compare equal to an
1919  instruction. */
1920 
1921  if (lnum == 0 && rnum == 0)
1922  {
1923  lnum = lhs->function->insn_offset;
1924  rnum = rhs->function->insn_offset;
1925  }
1926  else if (lnum == 0)
1927  {
1928  lnum = lhs->function->insn_offset;
1929 
1930  if (lnum == rnum)
1931  lnum -= 1;
1932  }
1933  else if (rnum == 0)
1934  {
1935  rnum = rhs->function->insn_offset;
1936 
1937  if (rnum == lnum)
1938  rnum -= 1;
1939  }
1940 
1941  return (int) (lnum - rnum);
1942 }
1943 
1944 /* See btrace.h. */
1945 
1946 int
1947 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1948  const struct btrace_thread_info *btinfo,
1949  unsigned int number)
1950 {
1951  const struct btrace_function *bfun;
1952  unsigned int end, length;
1953 
1954  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1955  {
1956  /* Skip gaps. */
1957  if (bfun->errcode != 0)
1958  continue;
1959 
1960  if (bfun->insn_offset <= number)
1961  break;
1962  }
1963 
1964  if (bfun == NULL)
1965  return 0;
1966 
1967  length = VEC_length (btrace_insn_s, bfun->insn);
1968  gdb_assert (length > 0);
1969 
1970  end = bfun->insn_offset + length;
1971  if (end <= number)
1972  return 0;
1973 
1974  it->function = bfun;
1975  it->index = number - bfun->insn_offset;
1976 
1977  return 1;
1978 }
1979 
1980 /* See btrace.h. */
1981 
1982 const struct btrace_function *
1983 btrace_call_get (const struct btrace_call_iterator *it)
1984 {
1985  return it->function;
1986 }
1987 
1988 /* See btrace.h. */
1989 
1990 unsigned int
1991 btrace_call_number (const struct btrace_call_iterator *it)
1992 {
1993  const struct btrace_thread_info *btinfo;
1994  const struct btrace_function *bfun;
1995  unsigned int insns;
1996 
1997  btinfo = it->btinfo;
1998  bfun = it->function;
1999  if (bfun != NULL)
2000  return bfun->number;
2001 
2002  /* For the end iterator, i.e. bfun == NULL, we return one more than the
2003  number of the last function. */
2004  bfun = btinfo->end;
2005  insns = VEC_length (btrace_insn_s, bfun->insn);
2006 
2007  /* If the function contains only a single instruction (i.e. the current
2008  instruction), it will be skipped and its number is already the number
2009  we seek. */
2010  if (insns == 1)
2011  return bfun->number;
2012 
2013  /* Otherwise, return one more than the number of the last function. */
2014  return bfun->number + 1;
2015 }
2016 
2017 /* See btrace.h. */
2018 
2019 void
2020 btrace_call_begin (struct btrace_call_iterator *it,
2021  const struct btrace_thread_info *btinfo)
2022 {
2023  const struct btrace_function *bfun;
2024 
2025  bfun = btinfo->begin;
2026  if (bfun == NULL)
2027  error (_("No trace."));
2028 
2029  it->btinfo = btinfo;
2030  it->function = bfun;
2031 }
2032 
2033 /* See btrace.h. */
2034 
2035 void
2036 btrace_call_end (struct btrace_call_iterator *it,
2037  const struct btrace_thread_info *btinfo)
2038 {
2039  const struct btrace_function *bfun;
2040 
2041  bfun = btinfo->end;
2042  if (bfun == NULL)
2043  error (_("No trace."));
2044 
2045  it->btinfo = btinfo;
2046  it->function = NULL;
2047 }
2048 
2049 /* See btrace.h. */
2050 
2051 unsigned int
2052 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2053 {
2054  const struct btrace_function *bfun;
2055  unsigned int steps;
2056 
2057  bfun = it->function;
2058  steps = 0;
2059  while (bfun != NULL)
2060  {
2061  const struct btrace_function *next;
2062  unsigned int insns;
2063 
2064  next = bfun->flow.next;
2065  if (next == NULL)
2066  {
2067  /* Ignore the last function if it only contains a single
2068  (i.e. the current) instruction. */
2069  insns = VEC_length (btrace_insn_s, bfun->insn);
2070  if (insns == 1)
2071  steps -= 1;
2072  }
2073 
2074  if (stride == steps)
2075  break;
2076 
2077  bfun = next;
2078  steps += 1;
2079  }
2080 
2081  it->function = bfun;
2082  return steps;
2083 }
2084 
2085 /* See btrace.h. */
2086 
2087 unsigned int
2088 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2089 {
2090  const struct btrace_thread_info *btinfo;
2091  const struct btrace_function *bfun;
2092  unsigned int steps;
2093 
2094  bfun = it->function;
2095  steps = 0;
2096 
2097  if (bfun == NULL)
2098  {
2099  unsigned int insns;
2100 
2101  btinfo = it->btinfo;
2102  bfun = btinfo->end;
2103  if (bfun == NULL)
2104  return 0;
2105 
2106  /* Ignore the last function if it only contains a single
2107  (i.e. the current) instruction. */
2108  insns = VEC_length (btrace_insn_s, bfun->insn);
2109  if (insns == 1)
2110  bfun = bfun->flow.prev;
2111 
2112  if (bfun == NULL)
2113  return 0;
2114 
2115  steps += 1;
2116  }
2117 
2118  while (steps < stride)
2119  {
2120  const struct btrace_function *prev;
2121 
2122  prev = bfun->flow.prev;
2123  if (prev == NULL)
2124  break;
2125 
2126  bfun = prev;
2127  steps += 1;
2128  }
2129 
2130  it->function = bfun;
2131  return steps;
2132 }
2133 
2134 /* See btrace.h. */
2135 
2136 int
2137 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2138  const struct btrace_call_iterator *rhs)
2139 {
2140  unsigned int lnum, rnum;
2141 
2142  lnum = btrace_call_number (lhs);
2143  rnum = btrace_call_number (rhs);
2144 
2145  return (int) (lnum - rnum);
2146 }
2147 
2148 /* See btrace.h. */
2149 
2150 int
2151 btrace_find_call_by_number (struct btrace_call_iterator *it,
2152  const struct btrace_thread_info *btinfo,
2153  unsigned int number)
2154 {
2155  const struct btrace_function *bfun;
2156 
2157  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2158  {
2159  unsigned int bnum;
2160 
2161  bnum = bfun->number;
2162  if (number == bnum)
2163  {
2164  it->btinfo = btinfo;
2165  it->function = bfun;
2166  return 1;
2167  }
2168 
2169  /* Functions are ordered and numbered consecutively. We could bail out
2170  earlier. On the other hand, it is very unlikely that we search for
2171  a nonexistent function. */
2172  }
2173 
2174  return 0;
2175 }
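
/* Illustrative sketch, not part of btrace.c: resolving a call number to its
   function segment via btrace_find_call_by_number.  The example_* name is
   invented for the example.  */

static void
example_show_call (struct thread_info *tp, unsigned int number)
{
  struct btrace_call_iterator it;
  const struct btrace_function *bfun;

  if (btrace_find_call_by_number (&it, &tp->btrace, number) == 0)
    error (_("No call with number %u."), number);

  bfun = btrace_call_get (&it);
  printf_unfiltered ("call %u contains %u instructions\n",
                     bfun->number, VEC_length (btrace_insn_s, bfun->insn));
}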
2176 
2177 /* See btrace.h. */
2178 
2179 void
2180 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2181  const struct btrace_insn_iterator *begin,
2182  const struct btrace_insn_iterator *end)
2183 {
2184  if (btinfo->insn_history == NULL)
2185  btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
2186 
2187  btinfo->insn_history->begin = *begin;
2188  btinfo->insn_history->end = *end;
2189 }
2190 
2191 /* See btrace.h. */
2192 
2193 void
2194 btrace_set_call_history (struct btrace_thread_info *btinfo,
2195  const struct btrace_call_iterator *begin,
2196  const struct btrace_call_iterator *end)
2197 {
2198  gdb_assert (begin->btinfo == end->btinfo);
2199 
2200  if (btinfo->call_history == NULL)
2201  btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
2202 
2203  btinfo->call_history->begin = *begin;
2204  btinfo->call_history->end = *end;
2205 }
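
/* Illustrative sketch, not part of btrace.c: recording a call range as the
   current history selection.  It assumes the stored END iterator is
   exclusive, like the packet history ranges above; the example_* name and
   the FIRST/LAST parameters are invented.  */

static void
example_select_calls (struct thread_info *tp, unsigned int first,
                      unsigned int last)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
  struct btrace_call_iterator begin, end;

  if (btrace_find_call_by_number (&begin, btinfo, first) == 0
      || btrace_find_call_by_number (&end, btinfo, last) == 0)
    error (_("Bad call range."));

  /* Step one call past LAST so [BEGIN, END) covers it.  */
  btrace_call_next (&end, 1);

  btrace_set_call_history (btinfo, &begin, &end);
}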
2206 
2207 /* See btrace.h. */
2208 
2209 int
2210 btrace_is_replaying (struct thread_info *tp)
2211 {
2212  return tp->btrace.replay != NULL;
2213 }
2214 
2215 /* See btrace.h. */
2216 
2217 int
2218 btrace_is_empty (struct thread_info *tp)
2219 {
2220  struct btrace_insn_iterator begin, end;
2221  struct btrace_thread_info *btinfo;
2222 
2223  btinfo = &tp->btrace;
2224 
2225  if (btinfo->begin == NULL)
2226  return 1;
2227 
2228  btrace_insn_begin (&begin, btinfo);
2229  btrace_insn_end (&end, btinfo);
2230 
2231  return btrace_insn_cmp (&begin, &end) == 0;
2232 }
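
/* Illustrative sketch, not part of btrace.c: the two predicates above are
   meant as guards before operating on the trace.  The example_* name is
   invented.  */

static void
example_require_history (struct thread_info *tp)
{
  if (btrace_is_empty (tp))
    error (_("No trace."));

  if (btrace_is_replaying (tp))
    printf_unfiltered ("thread is replaying its execution history\n");
}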
2233 
2234 /* Forward the cleanup request. */
2235 
2236 static void
2237 do_btrace_data_cleanup (void *arg)
2238 {
2239  btrace_data_fini (arg);
2240 }
2241 
2242 /* See btrace.h. */
2243 
2244 struct cleanup *
2245 make_cleanup_btrace_data (struct btrace_data *data)
2246 {
2247  return make_cleanup (do_btrace_data_cleanup, data);
2248 }
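
/* Illustrative sketch, not part of btrace.c: the usual way to pair the
   cleanup above with a locally owned btrace_data, so the data is freed on
   both the normal and the error path.  The example_* name is invented; the
   BTRACE_READ_ALL read type and BTRACE_ERR_NONE status are assumed from
   btrace-common.h.  */

static void
example_with_trace_data (struct btrace_target_info *tinfo)
{
  struct btrace_data btrace;
  struct cleanup *cleanup;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  if (target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL) == BTRACE_ERR_NONE)
    {
      /* ... use BTRACE ...  */
    }

  do_cleanups (cleanup);
}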
2249 
2250 #if defined (HAVE_LIBIPT)
2251 
2252 /* Print a single packet. */
2253 
2254 static void
2255 pt_print_packet (const struct pt_packet *packet)
2256 {
2257  switch (packet->type)
2258  {
2259  default:
2260  printf_unfiltered (("[??: %x]"), packet->type);
2261  break;
2262 
2263  case ppt_psb:
2264  printf_unfiltered (("psb"));
2265  break;
2266 
2267  case ppt_psbend:
2268  printf_unfiltered (("psbend"));
2269  break;
2270 
2271  case ppt_pad:
2272  printf_unfiltered (("pad"));
2273  break;
2274 
2275  case ppt_tip:
2276  printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2277  packet->payload.ip.ipc,
2278  packet->payload.ip.ip);
2279  break;
2280 
2281  case ppt_tip_pge:
2282  printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2283  packet->payload.ip.ipc,
2284  packet->payload.ip.ip);
2285  break;
2286 
2287  case ppt_tip_pgd:
2288  printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2289  packet->payload.ip.ipc,
2290  packet->payload.ip.ip);
2291  break;
2292 
2293  case ppt_fup:
2294  printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2295  packet->payload.ip.ipc,
2296  packet->payload.ip.ip);
2297  break;
2298 
2299  case ppt_tnt_8:
2300  printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2301  packet->payload.tnt.bit_size,
2302  packet->payload.tnt.payload);
2303  break;
2304 
2305  case ppt_tnt_64:
2306  printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2307  packet->payload.tnt.bit_size,
2308  packet->payload.tnt.payload);
2309  break;
2310 
2311  case ppt_pip:
2312  printf_unfiltered (("pip %" PRIx64 ""), packet->payload.pip.cr3);
2313  break;
2314 
2315  case ppt_tsc:
2316  printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2317  break;
2318 
2319  case ppt_cbr:
2320  printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2321  break;
2322 
2323  case ppt_mode:
2324  switch (packet->payload.mode.leaf)
2325  {
2326  default:
2327  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2328  break;
2329 
2330  case pt_mol_exec:
2331  printf_unfiltered (("mode.exec%s%s"),
2332  packet->payload.mode.bits.exec.csl
2333  ? (" cs.l") : (""),
2334  packet->payload.mode.bits.exec.csd
2335  ? (" cs.d") : (""));
2336  break;
2337 
2338  case pt_mol_tsx:
2339  printf_unfiltered (("mode.tsx%s%s"),
2340  packet->payload.mode.bits.tsx.intx
2341  ? (" intx") : (""),
2342  packet->payload.mode.bits.tsx.abrt
2343  ? (" abrt") : (""));
2344  break;
2345  }
2346  break;
2347 
2348  case ppt_ovf:
2349  printf_unfiltered (("ovf"));
2350  break;
2351 
2352  }
2353 }
2354 
2355 /* Decode packets into MAINT using DECODER. */
2356 
2357 static void
2358 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2359  struct pt_packet_decoder *decoder)
2360 {
2361  int errcode;
2362 
2363  for (;;)
2364  {
2365  struct btrace_pt_packet packet;
2366 
2367  errcode = pt_pkt_sync_forward (decoder);
2368  if (errcode < 0)
2369  break;
2370 
2371  for (;;)
2372  {
2373  pt_pkt_get_offset (decoder, &packet.offset);
2374 
2375  errcode = pt_pkt_next (decoder, &packet.packet,
2376  sizeof(packet.packet));
2377  if (errcode < 0)
2378  break;
2379 
2380  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2381  {
2382  packet.errcode = pt_errcode (errcode);
2383  VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2384  &packet);
2385  }
2386  }
2387 
2388  if (errcode == -pte_eos)
2389  break;
2390 
2391  packet.errcode = pt_errcode (errcode);
2392  VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2393  &packet);
2394 
2395  warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2396  packet.offset, pt_errstr (packet.errcode));
2397  }
2398 
2399  if (errcode != -pte_eos)
2400  warning (_("Failed to synchronize onto the Intel(R) Processor Trace "
2401  "stream: %s."), pt_errstr (pt_errcode (errcode)));
2402 }
2403 
2404 /* Update the packet history in BTINFO. */
2405 
2406 static void
2407 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2408 {
2409  volatile struct gdb_exception except;
2410  struct pt_packet_decoder *decoder;
2411  struct btrace_data_pt *pt;
2412  struct pt_config config;
2413  int errcode;
2414 
2415  pt = &btinfo->data.variant.pt;
2416 
2417  /* Nothing to do if there is no trace. */
2418  if (pt->size == 0)
2419  return;
2420 
2421  memset (&config, 0, sizeof(config));
2422 
2423  config.size = sizeof (config);
2424  config.begin = pt->data;
2425  config.end = pt->data + pt->size;
2426 
2427  config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2428  config.cpu.family = pt->config.cpu.family;
2429  config.cpu.model = pt->config.cpu.model;
2430  config.cpu.stepping = pt->config.cpu.stepping;
2431 
2432  errcode = pt_cpu_errata (&config.errata, &config.cpu);
2433  if (errcode < 0)
2434  error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
2435  pt_errstr (pt_errcode (errcode)));
2436 
2437  decoder = pt_pkt_alloc_decoder (&config);
2438  if (decoder == NULL)
2439  error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
2440 
2441  TRY
2442  {
2443  btrace_maint_decode_pt (&btinfo->maint, decoder);
2444  }
2445  CATCH (except, RETURN_MASK_ALL)
2446  {
2447  pt_pkt_free_decoder (decoder);
2448 
2449  if (except.reason < 0)
2450  throw_exception (except);
2451  }
2452  END_CATCH
2453 
2454  pt_pkt_free_decoder (decoder);
2455 }
2456 
2457 #endif /* defined (HAVE_LIBIPT) */
2458 
2459 /* Update the packet maintenance information for BTINFO and store the
2460  low and high bounds into BEGIN and END, respectively.
2461  Store the current iterator state into FROM and TO. */
2462 
2463 static void
2464 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2465  unsigned int *begin, unsigned int *end,
2466  unsigned int *from, unsigned int *to)
2467 {
2468  switch (btinfo->data.format)
2469  {
2470  default:
2471  *begin = 0;
2472  *end = 0;
2473  *from = 0;
2474  *to = 0;
2475  break;
2476 
2477  case BTRACE_FORMAT_BTS:
2478  /* Nothing to do - we operate directly on BTINFO->DATA. */
2479  *begin = 0;
2480  *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
2481  *from = btinfo->maint.variant.bts.packet_history.begin;
2482  *to = btinfo->maint.variant.bts.packet_history.end;
2483  break;
2484 
2485 #if defined (HAVE_LIBIPT)
2486  case BTRACE_FORMAT_PT:
2487  if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
2488  btrace_maint_update_pt_packets (btinfo);
2489 
2490  *begin = 0;
2491  *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
2492  *from = btinfo->maint.variant.pt.packet_history.begin;
2493  *to = btinfo->maint.variant.pt.packet_history.end;
2494  break;
2495 #endif /* defined (HAVE_LIBIPT) */
2496  }
2497 }
2498 
2499 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2500  update the current iterator position. */
2501 
2502 static void
2503 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2504  unsigned int begin, unsigned int end)
2505 {
2506  switch (btinfo->data.format)
2507  {
2508  default:
2509  break;
2510 
2511  case BTRACE_FORMAT_BTS:
2512  {
2513  VEC (btrace_block_s) *blocks;
2514  unsigned int blk;
2515 
2516  blocks = btinfo->data.variant.bts.blocks;
2517  for (blk = begin; blk < end; ++blk)
2518  {
2519  const btrace_block_s *block;
2520 
2521  block = VEC_index (btrace_block_s, blocks, blk);
2522 
2523  printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
2524  core_addr_to_string_nz (block->begin),
2525  core_addr_to_string_nz (block->end));
2526  }
2527 
2528  btinfo->maint.variant.bts.packet_history.begin = begin;
2529  btinfo->maint.variant.bts.packet_history.end = end;
2530  }
2531  break;
2532 
2533 #if defined (HAVE_LIBIPT)
2534  case BTRACE_FORMAT_PT:
2535  {
2536  VEC (btrace_pt_packet_s) *packets;
2537  unsigned int pkt;
2538 
2539  packets = btinfo->maint.variant.pt.packets;
2540  for (pkt = begin; pkt < end; ++pkt)
2541  {
2542  const struct btrace_pt_packet *packet;
2543 
2544  packet = VEC_index (btrace_pt_packet_s, packets, pkt);
2545 
2546  printf_unfiltered ("%u\t", pkt);
2547  printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
2548 
2549  if (packet->errcode == pte_ok)
2550  pt_print_packet (&packet->packet);
2551  else
2552  printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
2553 
2554  printf_unfiltered ("\n");
2555  }
2556 
2557  btinfo->maint.variant.pt.packet_history.begin = begin;
2558  btinfo->maint.variant.pt.packet_history.end = end;
2559  }
2560  break;
2561 #endif /* defined (HAVE_LIBIPT) */
2562  }
2563 }
2564 
2565 /* Read a number from an argument string. */
2566 
2567 static unsigned int
2568 get_uint (char **arg)
2569 {
2570  char *begin, *end, *pos;
2571  unsigned long number;
2572 
2573  begin = *arg;
2574  pos = skip_spaces (begin);
2575 
2576  if (!isdigit (*pos))
2577  error (_("Expected positive number, got: %s."), pos);
2578 
2579  number = strtoul (pos, &end, 10);
2580  if (number > UINT_MAX)
2581  error (_("Number too big."));
2582 
2583  *arg += (end - begin);
2584 
2585  return (unsigned int) number;
2586 }
2587 
2588 /* Read a context size from an argument string. */
2589 
2590 static int
2591 get_context_size (char **arg)
2592 {
2593  char *pos;
2594  int number;
2595 
2596  pos = skip_spaces (*arg);
2597 
2598  if (!isdigit (*pos))
2599  error (_("Expected positive number, got: %s."), pos);
2600 
2601  return strtol (pos, arg, 10);
2602 }
2603 
2604 /* Complain about junk at the end of an argument string. */
2605 
2606 static void
2607 no_chunk (char *arg)
2608 {
2609  if (*arg != 0)
2610  error (_("Junk after argument: %s."), arg);
2611 }
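
/* Illustrative sketch, not part of btrace.c: how the three helpers above
   combine to parse an argument of the form "<from>,+<size>" (only that one
   form; the real command below accepts more).  The example_* name is
   invented.  */

static void
example_parse_range (char *arg, unsigned int *from, int *size)
{
  *from = get_uint (&arg);

  arg = skip_spaces (arg);
  if (*arg == ',')
    {
      arg = skip_spaces (++arg);

      if (*arg == '+')
        {
          arg += 1;
          *size = get_context_size (&arg);
        }
    }

  no_chunk (arg);
}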
2612 
2613 /* The "maintenance btrace packet-history" command. */
2614 
2615 static void
2616 maint_btrace_packet_history_cmd (char *arg, int from_tty)
2617 {
2618  struct btrace_thread_info *btinfo;
2619  struct thread_info *tp;
2620  unsigned int size, begin, end, from, to;
2621 
2622  tp = find_thread_ptid (inferior_ptid);
2623  if (tp == NULL)
2624  error (_("No thread."));
2625 
2626  size = 10;
2627  btinfo = &tp->btrace;
2628 
2629  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2630  if (begin == end)
2631  {
2632  printf_unfiltered (_("No trace.\n"));
2633  return;
2634  }
2635 
2636  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2637  {
2638  from = to;
2639 
2640  if (end - from < size)
2641  size = end - from;
2642  to = from + size;
2643  }
2644  else if (strcmp (arg, "-") == 0)
2645  {
2646  to = from;
2647 
2648  if (to - begin < size)
2649  size = to - begin;
2650  from = to - size;
2651  }
2652  else
2653  {
2654  from = get_uint (&arg);
2655  if (end <= from)
2656  error (_("'%u' is out of range."), from);
2657 
2658  arg = skip_spaces (arg);
2659  if (*arg == ',')
2660  {
2661  arg = skip_spaces (++arg);
2662 
2663  if (*arg == '+')
2664  {
2665  arg += 1;
2666  size = get_context_size (&arg);
2667 
2668  no_chunk (arg);
2669 
2670  if (end - from < size)
2671  size = end - from;
2672  to = from + size;
2673  }
2674  else if (*arg == '-')
2675  {
2676  arg += 1;
2677  size = get_context_size (&arg);
2678 
2679  no_chunk (arg);
2680 
2681  /* Include the packet given as first argument. */
2682  from += 1;
2683  to = from;
2684 
2685  if (to - begin < size)
2686  size = to - begin;
2687  from = to - size;
2688  }
2689  else
2690  {
2691  to = get_uint (&arg);
2692 
2693  /* Include the packet at the second argument and silently
2694  truncate the range. */
2695  if (to < end)
2696  to += 1;
2697  else
2698  to = end;
2699 
2700  no_chunk (arg);
2701  }
2702  }
2703  else
2704  {
2705  no_chunk (arg);
2706 
2707  if (end - from < size)
2708  size = end - from;
2709  to = from + size;
2710  }
2711 
2712  dont_repeat ();
2713  }
2714 
2715  btrace_maint_print_packets (btinfo, from, to);
2716 }
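
/* Illustrative examples, not part of btrace.c, of invoking the command
   implemented above (derived from its help text registered in
   _initialize_btrace below):

     (gdb) maint btrace packet-history          <- next ten packets
     (gdb) maint btrace packet-history -        <- previous ten packets
     (gdb) maint btrace packet-history 100      <- ten packets starting at 100
     (gdb) maint btrace packet-history 100,110  <- packets 100 through 110
     (gdb) maint btrace packet-history 100,+20  <- twenty packets from 100 on
     (gdb) maint btrace packet-history 100,-20  <- twenty packets ending at 100  */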
2717 
2718 /* The "maintenance btrace clear-packet-history" command. */
2719 
2720 static void
2721 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2722 {
2723  struct btrace_thread_info *btinfo;
2724  struct thread_info *tp;
2725 
2726  if (args != NULL && *args != 0)
2727  error (_("Invalid argument."));
2728 
2729  tp = find_thread_ptid (inferior_ptid);
2730  if (tp == NULL)
2731  error (_("No thread."));
2732 
2733  btinfo = &tp->btrace;
2734 
2735  /* Must clear the maint data before - it depends on BTINFO->DATA. */
2736  btrace_maint_clear (btinfo);
2737  btrace_data_clear (&btinfo->data);
2738 }
2739 
2740 /* The "maintenance btrace clear" command. */
2741 
2742 static void
2743 maint_btrace_clear_cmd (char *args, int from_tty)
2744 {
2745  struct btrace_thread_info *btinfo;
2746  struct thread_info *tp;
2747 
2748  if (args != NULL && *args != 0)
2749  error (_("Invalid argument."));
2750 
2751  tp = find_thread_ptid (inferior_ptid);
2752  if (tp == NULL)
2753  error (_("No thread."));
2754 
2755  btrace_clear (tp);
2756 }
2757 
2758 /* The "maintenance btrace" command. */
2759 
2760 static void
2761 maint_btrace_cmd (char *args, int from_tty)
2762 {
2763  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2764  gdb_stdout);
2765 }
2766 
2767 /* The "maintenance set btrace" command. */
2768 
2769 static void
2770 maint_btrace_set_cmd (char *args, int from_tty)
2771 {
2772  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2773  gdb_stdout);
2774 }
2775 
2776 /* The "maintenance show btrace" command. */
2777 
2778 static void
2779 maint_btrace_show_cmd (char *args, int from_tty)
2780 {
2781  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2782  all_commands, gdb_stdout);
2783 }
2784 
2785 /* The "maintenance set btrace pt" command. */
2786 
2787 static void
2788 maint_btrace_pt_set_cmd (char *args, int from_tty)
2789 {
2790  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2791  all_commands, gdb_stdout);
2792 }
2793 
2794 /* The "maintenance show btrace pt" command. */
2795 
2796 static void
2797 maint_btrace_pt_show_cmd (char *args, int from_tty)
2798 {
2799  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2800  all_commands, gdb_stdout);
2801 }
2802 
2803 /* The "maintenance info btrace" command. */
2804 
2805 static void
2806 maint_info_btrace_cmd (char *args, int from_tty)
2807 {
2808  struct btrace_thread_info *btinfo;
2809  struct thread_info *tp;
2810  const struct btrace_config *conf;
2811 
2812  if (args != NULL && *args != 0)
2813  error (_("Invalid argument."));
2814 
2815  tp = find_thread_ptid (inferior_ptid);
2816  if (tp == NULL)
2817  error (_("No thread."));
2818 
2819  btinfo = &tp->btrace;
2820 
2821  conf = btrace_conf (btinfo);
2822  if (conf == NULL)
2823  error (_("No btrace configuration."));
2824 
2825  printf_unfiltered (_("Format: %s.\n"),
2826  btrace_format_string (conf->format));
2827 
2828  switch (conf->format)
2829  {
2830  default:
2831  break;
2832 
2833  case BTRACE_FORMAT_BTS:
2834  printf_unfiltered (_("Number of packets: %u.\n"),
2835  VEC_length (btrace_block_s,
2836  btinfo->data.variant.bts.blocks));
2837  break;
2838 
2839 #if defined (HAVE_LIBIPT)
2840  case BTRACE_FORMAT_PT:
2841  {
2842  struct pt_version version;
2843 
2844  version = pt_library_version ();
2845  printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
2846  version.minor, version.build,
2847  version.ext != NULL ? version.ext : "");
2848 
2849  btrace_maint_update_pt_packets (btinfo);
2850  printf_unfiltered (_("Number of packets: %u.\n"),
2851  VEC_length (btrace_pt_packet_s,
2852  btinfo->maint.variant.pt.packets));
2853  }
2854  break;
2855 #endif /* defined (HAVE_LIBIPT) */
2856  }
2857 }
2858 
2859 /* The "maint show btrace pt skip-pad" show value function. */
2860 
2861 static void
2862 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
2863  struct cmd_list_element *c,
2864  const char *value)
2865 {
2866  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
2867 }
2868 
2869 
2870 /* Initialize btrace maintenance commands. */
2871 
2872 void _initialize_btrace (void);
2873 void
2874 _initialize_btrace (void)
2875 {
2876  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
2877  _("Info about branch tracing data."), &maintenanceinfolist);
2878 
2879  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
2880  _("Branch tracing maintenance commands."),
2881  &maint_btrace_cmdlist, "maintenance btrace ",
2882  0, &maintenancelist);
2883 
2884  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
2885 Set branch tracing specific variables."),
2886  &maint_btrace_set_cmdlist, "maintenance set btrace ",
2887  0, &maintenance_set_cmdlist);
2888 
2889  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
2890 Set Intel(R) Processor Trace specific variables."),
2891  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2892  0, &maint_btrace_set_cmdlist);
2893 
2894  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
2895 Show branch tracing specific variables."),
2896  &maint_btrace_show_cmdlist, "maintenance show btrace ",
2897  0, &maintenance_show_cmdlist);
2898 
2899  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
2900 Show Intel(R) Processor Trace specific variables."),
2901  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2902  0, &maint_btrace_show_cmdlist);
2903 
2904  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
2905  &maint_btrace_pt_skip_pad, _("\
2906 Set whether PAD packets should be skipped in the btrace packet history."), _("\
2907 Show whether PAD packets should be skipped in the btrace packet history."),_("\
2908 When enabled, PAD packets are ignored in the btrace packet history."),
2909  NULL, show_maint_btrace_pt_skip_pad,
2910  &maint_btrace_pt_set_cmdlist,
2911  &maint_btrace_pt_show_cmdlist);
2912 
2913  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
2914  _("Print the raw branch tracing data.\n\
2915 With no argument, print ten more packets after the previous ten-line print.\n\
2916 With '-' as argument print ten packets before a previous ten-line print.\n\
2917 One argument specifies the starting packet of a ten-line print.\n\
2918 Two arguments with comma between specify starting and ending packets to \
2919 print.\n\
2920 Preceded with '+'/'-' the second argument specifies the distance from the \
2921 first.\n"),
2922  &maint_btrace_cmdlist);
2923 
2924  add_cmd ("clear-packet-history", class_maintenance,
2925  maint_btrace_clear_packet_history_cmd,
2926  _("Clears the branch tracing packet history.\n\
2927 Discards the raw branch tracing data but not the execution history data.\n\
2928 "),
2929  &maint_btrace_cmdlist);
2930 
2931  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
2932  _("Clears the branch tracing data.\n\
2933 Discards the raw branch tracing data and the execution history data.\n\
2934 The next 'record' command will fetch the branch tracing data anew.\n\
2935 "),
2936  &maint_btrace_cmdlist);
2937 
2938 }