linux-x86-low.c
/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
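/* For reference: 0xe9 is the 5-byte `jmp rel32' encoding, and the
   0x66 operand-size prefix turns it into the 4-byte `jmp rel16' form
   used for the trampoline jumps below.  The zero bytes are
   placeholders for the displacement, patched in before the
   instruction is written out.  */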

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                 /* MPX registers BND0 ... BND3.  */
  -1, -1,                         /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
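/* Note: PTRACE_GET_THREAD_AREA fills DESC in the shape of a `struct
   user_desc' from <asm/ldt.h>, whose second 32-bit word is the
   segment's base address; that is why only desc[1] is consumed
   above.  */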

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* A selector's low 3 bits are the
                                      RPL and TI fields; shift them
                                      off to get the GDT index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

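/* For reference, each initializer above fills `struct regset_info'
   from linux-low.h in field order: { get_request, set_request,
   nt_type, size, type, fill_function, store_function }.  The
   NT_X86_XSTATE entry's size starts out 0 and is set to
   X86_XSTATE_SIZE (xcr0) once the XCR0 mask is known; see
   x86_linux_read_description below.  */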
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (int pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
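/* The _pad member sizes the union so the whole struct matches the
   kernel's fixed 128-byte siginfo: 3 ints of header plus
   (128 / sizeof (int)) - 3 ints of payload.  A standalone sanity
   check of that expectation (a sketch, not part of this file) could
   read:

     #include <assert.h>
     int main (void) { assert (sizeof (compat_siginfo_t) == 128); }
*/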

/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1, si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}

static int use_xml;

/* Format of XSAVE extended state is:
     struct
     {
       fxsave_bytes[0..463]
       sw_usable_bytes[464..511]
       xstate_hdr_bytes[512..575]
       avx_bytes[576..831]
       future_state etc
     };

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes (bytes 464..471) are the OS
  enabled extended state mask, which is the same as the extended
  control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We
  can use this mask together with the mask saved in the
  xstate_hdr_bytes to determine what states the processor/OS supports
  and what state, used or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
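/* In 64-bit slots, that is index 464 / sizeof (uint64_t) == 58.  A
   sketch of the lookup as used below, where XSAVE_BUF stands for a
   buffer filled in by PTRACE_GETREGSET with NT_X86_XSTATE:

     uint64_t xcr0
       = ((uint64_t *) xsave_buf)[I386_LINUX_XSAVE_XCR0_OFFSET
                                  / sizeof (uint64_t)];
*/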

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case X86_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case X86_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case X86_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & X86_XSTATE_ALL_MASK)
            {
            case (X86_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (X86_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (X86_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
update_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update all the target description of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, update_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && startswith (query, "xmlRegisters="))
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}
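/* For example, a GDB that understands x86 XML target descriptions
   includes "xmlRegisters=i386" in its qSupported query (possibly with
   more architectures, comma-separated); that value arrives here as
   QUERY and flips use_xml on.  */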

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
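/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 (mov %rsp,%rsi) at BUF and returns 3.  */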

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction for the tracepoint site to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be
     more than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction for the tracepoint site to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");       /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");    /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)
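/* For example, EMIT_ASM (ftx_nop, "nop") assembles a `nop' bracketed
   by the labels start_ftx_nop/end_ftx_nop into this very object file,
   then copies the bytes between the two labels into the inferior at
   current_insn_ptr (`ftx_nop' is a made-up name for illustration).  */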

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}
2180 
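/* A sketch of the byte accounting behind *offset_p below, assuming
   the usual encodings: mov %rax,%rcx is 3 bytes, pop %rax is 1,
   cmp $0,%rcx is 4, and the raw jne opcode (0x0f 0x85) adds 2 more,
   so the 4-byte displacement starts 10 bytes into the sequence.
   amd64_write_goto_address patches it once the target is known.  */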
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

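/* The goto-style emitters above leave a zeroed rel32 displacement in
   the instruction stream; the expression compiler records where it
   lives via *offset_p and *size_p and later calls this function to
   patch in the real target, relative to the end of the jump.  */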
static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function may be in a shared library, and thus
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* The offset is too large for a direct call, so call through a
	 register instead.  %rdx is call-clobbered and carries no
	 argument here, so we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* We know this can't overflow here.  */

      buf[i++] = 0xe8; /* call <offset> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

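/* A sketch of the encoding emitted below, assuming the standard
   ModRM/SIB rules: 0x48 0x8d 0x64 0x24 <disp8> is
   lea disp8(%rsp),%rsp, so the single signed displacement byte
   (n * 8) is what limits how far this can adjust.  */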
static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top; %rax may have been trashed.  */
	    "pop %rax");
}

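/* For the conditional-goto emitters below, the raw 0xe9 plus zeroed
   rel32 is patched later, just as in amd64_emit_goto.  Counting the
   usual encodings (cmp %rax,(%rsp) = 4 bytes, jcc rel8 = 2,
   lea 0x8(%rsp),%rsp = 5, pop %rax = 1) puts the displacement 13
   bytes into the sequence, which is what *offset_p reports.  */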
void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

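/* In effect this table is the target-specific backend of the agent
   expression compiler: each hook emits native code for one bytecode
   operation into the jump pad, which is what lets fast tracepoint
   conditions be evaluated in the inferior without stopping it.  */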
struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

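/* 0xe8 is the 32-bit relative near call; its displacement is
   measured from the end of the 5-byte instruction, hence the
   buildaddr + 5 below.  */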
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume the function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

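/* On i386 a 64-bit stack value lives in a register pair: the top of
   stack keeps its low half in %eax and its high half in %ebx, with
   the next value's halves at (%esp) and 4(%esp).  The comparisons
   below therefore test both halves, which is why *offset_p grows to
   18 or 20 relative to the amd64 versions.  */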
void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check the low half first; it is more likely to be
		 the decider.  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check the low half first; it is more likely to be
		 the decider.  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

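/* The emit_ops table is chosen at runtime rather than compile time
   because a 64-bit gdbserver can control a 32-bit inferior;
   is_64bit_tdesc () keys off the current process's register
   description.  */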
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
};

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_avx512_linux ();
  init_registers_amd64_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx512_linux ();

  tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_avx512_linux ();
  init_registers_i386_mpx_linux ();

  tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}