GDBserver
linux-low.c
Go to the documentation of this file.
1 /* Low level interface to ptrace, for the remote server for GDB.
2  Copyright (C) 1995-2015 Free Software Foundation, Inc.
3 
4  This file is part of GDB.
5 
6  This program is free software; you can redistribute it and/or modify
7  it under the terms of the GNU General Public License as published by
8  the Free Software Foundation; either version 3 of the License, or
9  (at your option) any later version.
10 
11  This program is distributed in the hope that it will be useful,
12  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  GNU General Public License for more details.
15 
16  You should have received a copy of the GNU General Public License
17  along with this program. If not, see <http://www.gnu.org/licenses/>. */
18 
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25 
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include <sys/ptrace.h>
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #ifndef ELFMAG0
50 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51  then ELFMAG0 will have been defined. If it didn't get included by
52  gdb_proc_service.h then including it will likely introduce a duplicate
53  definition of elf_fpregset_t. */
54 #include <elf.h>
55 #endif
56 #include "nat/linux-namespaces.h"
57 
58 #ifndef SPUFS_MAGIC
59 #define SPUFS_MAGIC 0x23c9b64e
60 #endif
61 
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
66 # endif
67 #endif
68 
69 #ifndef O_LARGEFILE
70 #define O_LARGEFILE 0
71 #endif
72 
73 #ifndef W_STOPCODE
74 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
75 #endif
76 
77 /* This is the kernel's hard limit. Not to be confused with
78  SIGRTMIN. */
79 #ifndef __SIGRTMIN
80 #define __SIGRTMIN 32
81 #endif
82 
83 /* Some targets did not define these ptrace constants from the start,
84  so gdbserver defines them locally here. In the future, these may
85  be removed after they are added to asm/ptrace.h. */
86 #if !(defined(PT_TEXT_ADDR) \
87  || defined(PT_DATA_ADDR) \
88  || defined(PT_TEXT_END_ADDR))
89 #if defined(__mcoldfire__)
90 /* These are still undefined in 3.10 kernels. */
91 #define PT_TEXT_ADDR 49*4
92 #define PT_DATA_ADDR 50*4
93 #define PT_TEXT_END_ADDR 51*4
94 /* BFIN already defines these since at least 2.6.32 kernels. */
95 #elif defined(BFIN)
96 #define PT_TEXT_ADDR 220
97 #define PT_TEXT_END_ADDR 224
98 #define PT_DATA_ADDR 228
99 /* These are still undefined in 3.10 kernels. */
100 #elif defined(__TMS320C6X__)
101 #define PT_TEXT_ADDR (0x10000*4)
102 #define PT_DATA_ADDR (0x10004*4)
103 #define PT_TEXT_END_ADDR (0x10008*4)
104 #endif
105 #endif
106 
107 #ifdef HAVE_LINUX_BTRACE
108 # include "nat/linux-btrace.h"
109 # include "btrace-common.h"
110 #endif
111 
112 #ifndef HAVE_ELF32_AUXV_T
113 /* Copied from glibc's elf.h. */
114 typedef struct
115 {
116  uint32_t a_type; /* Entry type */
117  union
118  {
119  uint32_t a_val; /* Integer value */
120  /* We use to have pointer elements added here. We cannot do that,
121  though, since it does not work when using 32-bit definitions
122  on 64-bit platforms and vice versa. */
123  } a_un;
124 } Elf32_auxv_t;
125 #endif
126 
127 #ifndef HAVE_ELF64_AUXV_T
128 /* Copied from glibc's elf.h. */
129 typedef struct
130 {
131  uint64_t a_type; /* Entry type */
132  union
133  {
134  uint64_t a_val; /* Integer value */
135  /* We use to have pointer elements added here. We cannot do that,
136  though, since it does not work when using 32-bit definitions
137  on 64-bit platforms and vice versa. */
138  } a_un;
139 } Elf64_auxv_t;
140 #endif
141 
142 /* LWP accessors. */
143 
144 /* See nat/linux-nat.h. */
145 
146 ptid_t
147 ptid_of_lwp (struct lwp_info *lwp)
148 {
149  return ptid_of (get_lwp_thread (lwp));
150 }
151 
152 /* See nat/linux-nat.h. */
153 
154 void
156  struct arch_lwp_info *info)
157 {
158  lwp->arch_private = info;
159 }
160 
161 /* See nat/linux-nat.h. */
162 
163 struct arch_lwp_info *
165 {
166  return lwp->arch_private;
167 }
168 
169 /* See nat/linux-nat.h. */
170 
171 int
173 {
174  return lwp->stopped;
175 }
176 
177 /* See nat/linux-nat.h. */
178 
179 enum target_stop_reason
181 {
182  return lwp->stop_reason;
183 }
184 
185 /* A list of all unknown processes which receive stop signals. Some
186  other process will presumably claim each of these as forked
187  children momentarily. */
188 
190 {
191  /* The process ID. */
192  int pid;
193 
194  /* The status as reported by waitpid. */
195  int status;
196 
197  /* Next in chain. */
199 };
201 
202 /* Trivial list manipulation functions to keep track of a list of new
203  stopped processes. */
204 
205 static void
206 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
207 {
208  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
209 
210  new_pid->pid = pid;
211  new_pid->status = status;
212  new_pid->next = *listp;
213  *listp = new_pid;
214 }
215 
216 static int
217 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
218 {
219  struct simple_pid_list **p;
220 
221  for (p = listp; *p != NULL; p = &(*p)->next)
222  if ((*p)->pid == pid)
223  {
224  struct simple_pid_list *next = (*p)->next;
225 
226  *statusp = (*p)->status;
227  xfree (*p);
228  *p = next;
229  return 1;
230  }
231  return 0;
232 }
233 
235  {
236  /* Not stopping threads presently. */
238 
239  /* Stopping threads. */
241 
242  /* Stopping and suspending threads. */
244  };
245 
246 /* This is set while stop_all_lwps is in effect. */
248 
249 /* FIXME make into a target method? */
251 
252 /* True if we're presently stabilizing threads (moving them out of
253  jump pads). */
255 
256 static void linux_resume_one_lwp (struct lwp_info *lwp,
257  int step, int signal, siginfo_t *info);
258 static void linux_resume (struct thread_resume *resume_info, size_t n);
259 static void stop_all_lwps (int suspend, struct lwp_info *except);
260 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
261 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
262  int *wstat, int options);
263 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
264 static struct lwp_info *add_lwp (ptid_t ptid);
265 static int linux_stopped_by_watchpoint (void);
266 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
267 static int lwp_is_marked_dead (struct lwp_info *lwp);
268 static void proceed_all_lwps (void);
269 static int finish_step_over (struct lwp_info *lwp);
270 static int kill_lwp (unsigned long lwpid, int signo);
271 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
272 static void complete_ongoing_step_over (void);
273 
274 /* When the event-loop is doing a step-over, this points at the thread
275  being stepped. */
277 
278 /* True if the low target can hardware single-step. Such targets
279  don't need a BREAKPOINT_REINSERT_ADDR callback. */
280 
281 static int
283 {
284  return (the_low_target.breakpoint_reinsert_addr == NULL);
285 }
286 
287 /* True if the low target supports memory breakpoints. If so, we'll
288  have a GET_PC implementation. */
289 
290 static int
292 {
293  return (the_low_target.get_pc != NULL);
294 }
295 
296 /* Returns true if this target can support fast tracepoints. This
297  does not mean that the in-process agent has been loaded in the
298  inferior. */
299 
300 static int
302 {
304 }
305 
306 /* True if LWP is stopped in its stepping range. */
307 
308 static int
310 {
311  CORE_ADDR pc = lwp->stop_pc;
312 
313  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
314 }
315 
317 {
318  int signal;
319  siginfo_t info;
321 };
322 
323 /* The read/write ends of the pipe registered as waitable file in the
324  event loop. */
325 static int linux_event_pipe[2] = { -1, -1 };
326 
327 /* True if we're currently in async mode. */
328 #define target_is_async_p() (linux_event_pipe[0] != -1)
329 
330 static void send_sigstop (struct lwp_info *lwp);
331 static void wait_for_sigstop (void);
332 
333 /* Return non-zero if HEADER is a 64-bit ELF file. */
334 
335 static int
336 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
337 {
338  if (header->e_ident[EI_MAG0] == ELFMAG0
339  && header->e_ident[EI_MAG1] == ELFMAG1
340  && header->e_ident[EI_MAG2] == ELFMAG2
341  && header->e_ident[EI_MAG3] == ELFMAG3)
342  {
343  *machine = header->e_machine;
344  return header->e_ident[EI_CLASS] == ELFCLASS64;
345 
346  }
347  *machine = EM_NONE;
348  return -1;
349 }
350 
351 /* Return non-zero if FILE is a 64-bit ELF file,
352  zero if the file is not a 64-bit ELF file,
353  and -1 if the file is not accessible or doesn't exist. */
354 
355 static int
356 elf_64_file_p (const char *file, unsigned int *machine)
357 {
358  Elf64_Ehdr header;
359  int fd;
360 
361  fd = open (file, O_RDONLY);
362  if (fd < 0)
363  return -1;
364 
365  if (read (fd, &header, sizeof (header)) != sizeof (header))
366  {
367  close (fd);
368  return 0;
369  }
370  close (fd);
371 
372  return elf_64_header_p (&header, machine);
373 }
374 
375 /* Accepts an integer PID; Returns true if the executable PID is
376  running is a 64-bit ELF file.. */
377 
378 int
379 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
380 {
381  char file[PATH_MAX];
382 
383  sprintf (file, "/proc/%d/exe", pid);
384  return elf_64_file_p (file, machine);
385 }
386 
387 static void
388 delete_lwp (struct lwp_info *lwp)
389 {
390  struct thread_info *thr = get_lwp_thread (lwp);
391 
392  if (debug_threads)
393  debug_printf ("deleting %ld\n", lwpid_of (thr));
394 
395  remove_thread (thr);
396  free (lwp->arch_private);
397  free (lwp);
398 }
399 
400 /* Add a process to the common process list, and set its private
401  data. */
402 
403 static struct process_info *
405 {
406  struct process_info *proc;
407 
408  proc = add_process (pid, attached);
409  proc->priv = xcalloc (1, sizeof (*proc->priv));
410 
411  /* Set the arch when the first LWP stops. */
412  proc->priv->new_inferior = 1;
413 
414  if (the_low_target.new_process != NULL)
416 
417  return proc;
418 }
419 
420 static CORE_ADDR get_pc (struct lwp_info *lwp);
421 
422 /* Handle a GNU/Linux extended wait response. If we see a clone
423  event, we need to add the new LWP to our list (and return 0 so as
424  not to report the trap to higher layers). */
425 
426 static int
427 handle_extended_wait (struct lwp_info *event_lwp, int wstat)
428 {
429  int event = linux_ptrace_get_extended_event (wstat);
430  struct thread_info *event_thr = get_lwp_thread (event_lwp);
431  struct lwp_info *new_lwp;
432 
433  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
434  || (event == PTRACE_EVENT_CLONE))
435  {
436  ptid_t ptid;
437  unsigned long new_pid;
438  int ret, status;
439 
440  /* Get the pid of the new lwp. */
441  ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
442  &new_pid);
443 
444  /* If we haven't already seen the new PID stop, wait for it now. */
445  if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
446  {
447  /* The new child has a pending SIGSTOP. We can't affect it until it
448  hits the SIGSTOP, but we're already attached. */
449 
450  ret = my_waitpid (new_pid, &status, __WALL);
451 
452  if (ret == -1)
453  perror_with_name ("waiting for new child");
454  else if (ret != new_pid)
455  warning ("wait returned unexpected PID %d", ret);
456  else if (!WIFSTOPPED (status))
457  warning ("wait returned unexpected status 0x%x", status);
458  }
459 
460  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
461  {
462  struct process_info *parent_proc;
463  struct process_info *child_proc;
464  struct lwp_info *child_lwp;
465  struct thread_info *child_thr;
466  struct target_desc *tdesc;
467 
468  ptid = ptid_build (new_pid, new_pid, 0);
469 
470  if (debug_threads)
471  {
472  debug_printf ("HEW: Got fork event from LWP %ld, "
473  "new child is %d\n",
474  ptid_get_lwp (ptid_of (event_thr)),
475  ptid_get_pid (ptid));
476  }
477 
478  /* Add the new process to the tables and clone the breakpoint
479  lists of the parent. We need to do this even if the new process
480  will be detached, since we will need the process object and the
481  breakpoints to remove any breakpoints from memory when we
482  detach, and the client side will access registers. */
483  child_proc = linux_add_process (new_pid, 0);
484  gdb_assert (child_proc != NULL);
485  child_lwp = add_lwp (ptid);
486  gdb_assert (child_lwp != NULL);
487  child_lwp->stopped = 1;
488  child_lwp->must_set_ptrace_flags = 1;
489  child_lwp->status_pending_p = 0;
490  child_thr = get_lwp_thread (child_lwp);
491  child_thr->last_resume_kind = resume_stop;
492  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
493 
494  /* If we're suspending all threads, leave this one suspended
495  too. */
496  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
497  {
498  if (debug_threads)
499  debug_printf ("HEW: leaving child suspended\n");
500  child_lwp->suspended = 1;
501  }
502 
503  parent_proc = get_thread_process (event_thr);
504  child_proc->attached = parent_proc->attached;
505  clone_all_breakpoints (&child_proc->breakpoints,
506  &child_proc->raw_breakpoints,
507  parent_proc->breakpoints);
508 
509  tdesc = xmalloc (sizeof (struct target_desc));
510  copy_target_description (tdesc, parent_proc->tdesc);
511  child_proc->tdesc = tdesc;
512 
513  /* Clone arch-specific process data. */
514  if (the_low_target.new_fork != NULL)
515  the_low_target.new_fork (parent_proc, child_proc);
516 
517  /* Save fork info in the parent thread. */
518  if (event == PTRACE_EVENT_FORK)
519  event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
520  else if (event == PTRACE_EVENT_VFORK)
521  event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
522 
523  event_lwp->waitstatus.value.related_pid = ptid;
524 
525  /* The status_pending field contains bits denoting the
526  extended event, so when the pending event is handled,
527  the handler will look at lwp->waitstatus. */
528  event_lwp->status_pending_p = 1;
529  event_lwp->status_pending = wstat;
530 
531  /* Report the event. */
532  return 0;
533  }
534 
535  if (debug_threads)
536  debug_printf ("HEW: Got clone event "
537  "from LWP %ld, new child is LWP %ld\n",
538  lwpid_of (event_thr), new_pid);
539 
540  ptid = ptid_build (pid_of (event_thr), new_pid, 0);
541  new_lwp = add_lwp (ptid);
542 
543  /* Either we're going to immediately resume the new thread
544  or leave it stopped. linux_resume_one_lwp is a nop if it
545  thinks the thread is currently running, so set this first
546  before calling linux_resume_one_lwp. */
547  new_lwp->stopped = 1;
548 
549  /* If we're suspending all threads, leave this one suspended
550  too. */
551  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
552  new_lwp->suspended = 1;
553 
554  /* Normally we will get the pending SIGSTOP. But in some cases
555  we might get another signal delivered to the group first.
556  If we do get another signal, be sure not to lose it. */
557  if (WSTOPSIG (status) != SIGSTOP)
558  {
559  new_lwp->stop_expected = 1;
560  new_lwp->status_pending_p = 1;
561  new_lwp->status_pending = status;
562  }
563 
564  /* Don't report the event. */
565  return 1;
566  }
567  else if (event == PTRACE_EVENT_VFORK_DONE)
568  {
569  event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
570 
571  /* Report the event. */
572  return 0;
573  }
574 
575  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
576 }
577 
578 /* Return the PC as read from the regcache of LWP, without any
579  adjustment. */
580 
581 static CORE_ADDR
582 get_pc (struct lwp_info *lwp)
583 {
584  struct thread_info *saved_thread;
585  struct regcache *regcache;
586  CORE_ADDR pc;
587 
588  if (the_low_target.get_pc == NULL)
589  return 0;
590 
591  saved_thread = current_thread;
593 
594  regcache = get_thread_regcache (current_thread, 1);
595  pc = (*the_low_target.get_pc) (regcache);
596 
597  if (debug_threads)
598  debug_printf ("pc is 0x%lx\n", (long) pc);
599 
600  current_thread = saved_thread;
601  return pc;
602 }
603 
604 /* This function should only be called if LWP got a SIGTRAP.
605  The SIGTRAP could mean several things.
606 
607  On i386, where decr_pc_after_break is non-zero:
608 
609  If we were single-stepping this process using PTRACE_SINGLESTEP, we
610  will get only the one SIGTRAP. The value of $eip will be the next
611  instruction. If the instruction we stepped over was a breakpoint,
612  we need to decrement the PC.
613 
614  If we continue the process using PTRACE_CONT, we will get a
615  SIGTRAP when we hit a breakpoint. The value of $eip will be
616  the instruction after the breakpoint (i.e. needs to be
617  decremented). If we report the SIGTRAP to GDB, we must also
618  report the undecremented PC. If the breakpoint is removed, we
619  must resume at the decremented PC.
620 
621  On a non-decr_pc_after_break machine with hardware or kernel
622  single-step:
623 
624  If we either single-step a breakpoint instruction, or continue and
625  hit a breakpoint instruction, our PC will point at the breakpoint
626  instruction. */
627 
628 static int
630 {
631  CORE_ADDR pc;
632  CORE_ADDR sw_breakpoint_pc;
633  struct thread_info *saved_thread;
634 #if USE_SIGTRAP_SIGINFO
635  siginfo_t siginfo;
636 #endif
637 
638  if (the_low_target.get_pc == NULL)
639  return 0;
640 
641  pc = get_pc (lwp);
642  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
643 
644  /* breakpoint_at reads from the current thread. */
645  saved_thread = current_thread;
647 
648 #if USE_SIGTRAP_SIGINFO
650  (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
651  {
652  if (siginfo.si_signo == SIGTRAP)
653  {
654  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
655  {
656  if (debug_threads)
657  {
658  struct thread_info *thr = get_lwp_thread (lwp);
659 
660  debug_printf ("CSBB: %s stopped by software breakpoint\n",
661  target_pid_to_str (ptid_of (thr)));
662  }
663 
664  /* Back up the PC if necessary. */
665  if (pc != sw_breakpoint_pc)
666  {
667  struct regcache *regcache
669  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
670  }
671 
672  lwp->stop_pc = sw_breakpoint_pc;
673  lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
674  current_thread = saved_thread;
675  return 1;
676  }
677  else if (siginfo.si_code == TRAP_HWBKPT)
678  {
679  if (debug_threads)
680  {
681  struct thread_info *thr = get_lwp_thread (lwp);
682 
683  debug_printf ("CSBB: %s stopped by hardware "
684  "breakpoint/watchpoint\n",
685  target_pid_to_str (ptid_of (thr)));
686  }
687 
688  lwp->stop_pc = pc;
689  lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
690  current_thread = saved_thread;
691  return 1;
692  }
693  else if (siginfo.si_code == TRAP_TRACE)
694  {
695  if (debug_threads)
696  {
697  struct thread_info *thr = get_lwp_thread (lwp);
698 
699  debug_printf ("CSBB: %s stopped by trace\n",
700  target_pid_to_str (ptid_of (thr)));
701  }
702 
703  lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
704  }
705  }
706  }
707 #else
708  /* We may have just stepped a breakpoint instruction. E.g., in
709  non-stop mode, GDB first tells the thread A to step a range, and
710  then the user inserts a breakpoint inside the range. In that
711  case we need to report the breakpoint PC. */
712  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
713  && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
714  {
715  if (debug_threads)
716  {
717  struct thread_info *thr = get_lwp_thread (lwp);
718 
719  debug_printf ("CSBB: %s stopped by software breakpoint\n",
720  target_pid_to_str (ptid_of (thr)));
721  }
722 
723  /* Back up the PC if necessary. */
724  if (pc != sw_breakpoint_pc)
725  {
726  struct regcache *regcache
728  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
729  }
730 
731  lwp->stop_pc = sw_breakpoint_pc;
732  lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
733  current_thread = saved_thread;
734  return 1;
735  }
736 
738  {
739  if (debug_threads)
740  {
741  struct thread_info *thr = get_lwp_thread (lwp);
742 
743  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
744  target_pid_to_str (ptid_of (thr)));
745  }
746 
747  lwp->stop_pc = pc;
748  lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
749  current_thread = saved_thread;
750  return 1;
751  }
752 #endif
753 
754  current_thread = saved_thread;
755  return 0;
756 }
757 
758 static struct lwp_info *
760 {
761  struct lwp_info *lwp;
762 
763  lwp = (struct lwp_info *) xcalloc (1, sizeof (*lwp));
764 
765  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
766 
767  if (the_low_target.new_thread != NULL)
769 
770  lwp->thread = add_thread (ptid, lwp);
771 
772  return lwp;
773 }
774 
775 /* Start an inferior process and returns its pid.
776  ALLARGS is a vector of program-name and args. */
777 
778 static int
779 linux_create_inferior (char *program, char **allargs)
780 {
781  struct lwp_info *new_lwp;
782  int pid;
783  ptid_t ptid;
784  struct cleanup *restore_personality
786 
787 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
788  pid = vfork ();
789 #else
790  pid = fork ();
791 #endif
792  if (pid < 0)
793  perror_with_name ("fork");
794 
795  if (pid == 0)
796  {
797  close_most_fds ();
798  ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
799 
800 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
801  signal (__SIGRTMIN + 1, SIG_DFL);
802 #endif
803 
804  setpgid (0, 0);
805 
806  /* If gdbserver is connected to gdb via stdio, redirect the inferior's
807  stdout to stderr so that inferior i/o doesn't corrupt the connection.
808  Also, redirect stdin to /dev/null. */
810  {
811  close (0);
812  open ("/dev/null", O_RDONLY);
813  dup2 (2, 1);
814  if (write (2, "stdin/stdout redirected\n",
815  sizeof ("stdin/stdout redirected\n") - 1) < 0)
816  {
817  /* Errors ignored. */;
818  }
819  }
820 
821  execv (program, allargs);
822  if (errno == ENOENT)
823  execvp (program, allargs);
824 
825  fprintf (stderr, "Cannot exec %s: %s.\n", program,
826  strerror (errno));
827  fflush (stderr);
828  _exit (0177);
829  }
830 
831  do_cleanups (restore_personality);
832 
833  linux_add_process (pid, 0);
834 
835  ptid = ptid_build (pid, pid, 0);
836  new_lwp = add_lwp (ptid);
837  new_lwp->must_set_ptrace_flags = 1;
838 
839  return pid;
840 }
841 
842 /* Attach to an inferior process. Returns 0 on success, ERRNO on
843  error. */
844 
845 int
847 {
848  struct lwp_info *new_lwp;
849  int lwpid = ptid_get_lwp (ptid);
850 
851  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
852  != 0)
853  return errno;
854 
855  new_lwp = add_lwp (ptid);
856 
857  /* We need to wait for SIGSTOP before being able to make the next
858  ptrace call on this LWP. */
859  new_lwp->must_set_ptrace_flags = 1;
860 
861  if (linux_proc_pid_is_stopped (lwpid))
862  {
863  if (debug_threads)
864  debug_printf ("Attached to a stopped process\n");
865 
866  /* The process is definitely stopped. It is in a job control
867  stop, unless the kernel predates the TASK_STOPPED /
868  TASK_TRACED distinction, in which case it might be in a
869  ptrace stop. Make sure it is in a ptrace stop; from there we
870  can kill it, signal it, et cetera.
871 
872  First make sure there is a pending SIGSTOP. Since we are
873  already attached, the process can not transition from stopped
874  to running without a PTRACE_CONT; so we know this signal will
875  go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
876  probably already in the queue (unless this kernel is old
877  enough to use TASK_STOPPED for ptrace stops); but since
878  SIGSTOP is not an RT signal, it can only be queued once. */
879  kill_lwp (lwpid, SIGSTOP);
880 
881  /* Finally, resume the stopped process. This will deliver the
882  SIGSTOP (or a higher priority signal, just like normal
883  PTRACE_ATTACH), which we'll catch later on. */
884  ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
885  }
886 
887  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
888  brings it to a halt.
889 
890  There are several cases to consider here:
891 
892  1) gdbserver has already attached to the process and is being notified
893  of a new thread that is being created.
894  In this case we should ignore that SIGSTOP and resume the
895  process. This is handled below by setting stop_expected = 1,
896  and the fact that add_thread sets last_resume_kind ==
897  resume_continue.
898 
899  2) This is the first thread (the process thread), and we're attaching
900  to it via attach_inferior.
901  In this case we want the process thread to stop.
902  This is handled by having linux_attach set last_resume_kind ==
903  resume_stop after we return.
904 
905  If the pid we are attaching to is also the tgid, we attach to and
906  stop all the existing threads. Otherwise, we attach to pid and
907  ignore any other threads in the same group as this pid.
908 
909  3) GDB is connecting to gdbserver and is requesting an enumeration of all
910  existing threads.
911  In this case we want the thread to stop.
912  FIXME: This case is currently not properly handled.
913  We should wait for the SIGSTOP but don't. Things work apparently
914  because enough time passes between when we ptrace (ATTACH) and when
915  gdb makes the next ptrace call on the thread.
916 
917  On the other hand, if we are currently trying to stop all threads, we
918  should treat the new thread as if we had sent it a SIGSTOP. This works
919  because we are guaranteed that the add_lwp call above added us to the
920  end of the list, and so the new thread has not yet reached
921  wait_for_sigstop (but will). */
922  new_lwp->stop_expected = 1;
923 
924  return 0;
925 }
926 
927 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
928  already attached. Returns true if a new LWP is found, false
929  otherwise. */
930 
931 static int
933 {
934  /* Is this a new thread? */
935  if (find_thread_ptid (ptid) == NULL)
936  {
937  int lwpid = ptid_get_lwp (ptid);
938  int err;
939 
940  if (debug_threads)
941  debug_printf ("Found new lwp %d\n", lwpid);
942 
943  err = linux_attach_lwp (ptid);
944 
945  /* Be quiet if we simply raced with the thread exiting. EPERM
946  is returned if the thread's task still exists, and is marked
947  as exited or zombie, as well as other conditions, so in that
948  case, confirm the status in /proc/PID/status. */
949  if (err == ESRCH
950  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
951  {
952  if (debug_threads)
953  {
954  debug_printf ("Cannot attach to lwp %d: "
955  "thread is gone (%d: %s)\n",
956  lwpid, err, strerror (err));
957  }
958  }
959  else if (err != 0)
960  {
961  warning (_("Cannot attach to lwp %d: %s"),
962  lwpid,
964  }
965 
966  return 1;
967  }
968  return 0;
969 }
970 
971 /* Attach to PID. If PID is the tgid, attach to it and all
972  of its threads. */
973 
974 static int
975 linux_attach (unsigned long pid)
976 {
977  ptid_t ptid = ptid_build (pid, pid, 0);
978  int err;
979 
980  /* Attach to PID. We will check for other threads
981  soon. */
982  err = linux_attach_lwp (ptid);
983  if (err != 0)
984  error ("Cannot attach to process %ld: %s",
985  pid, linux_ptrace_attach_fail_reason_string (ptid, err));
986 
987  linux_add_process (pid, 1);
988 
989  if (!non_stop)
990  {
991  struct thread_info *thread;
992 
993  /* Don't ignore the initial SIGSTOP if we just attached to this
994  process. It will be collected by wait shortly. */
995  thread = find_thread_ptid (ptid_build (pid, pid, 0));
996  thread->last_resume_kind = resume_stop;
997  }
998 
999  /* We must attach to every LWP. If /proc is mounted, use that to
1000  find them now. On the one hand, the inferior may be using raw
1001  clone instead of using pthreads. On the other hand, even if it
1002  is using pthreads, GDB may not be connected yet (thread_db needs
1003  to do symbol lookups, through qSymbol). Also, thread_db walks
1004  structures in the inferior's address space to find the list of
1005  threads/LWPs, and those structures may well be corrupted. Note
1006  that once thread_db is loaded, we'll still use it to list threads
1007  and associate pthread info with each LWP. */
1009  return 0;
1010 }
1011 
1012 struct counter
1013 {
1014  int pid;
1015  int count;
1016 };
1017 
1018 static int
1019 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1020 {
1021  struct counter *counter = args;
1022 
1023  if (ptid_get_pid (entry->id) == counter->pid)
1024  {
1025  if (++counter->count > 1)
1026  return 1;
1027  }
1028 
1029  return 0;
1030 }
1031 
1032 static int
1034 {
1035  struct counter counter = { pid , 0 };
1036 
1037  return (find_inferior (&all_threads,
1038  second_thread_of_pid_p, &counter) == NULL);
1039 }
1040 
1041 /* Kill LWP. */
1042 
1043 static void
1045 {
1046  struct thread_info *thr = get_lwp_thread (lwp);
1047  int pid = lwpid_of (thr);
1048 
1049  /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1050  there is no signal context, and ptrace(PTRACE_KILL) (or
1051  ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1052  ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1053  alternative is to kill with SIGKILL. We only need one SIGKILL
1054  per process, not one for each thread. But since we still support
1055  linuxthreads, and we also support debugging programs using raw
1056  clone without CLONE_THREAD, we send one for each thread. For
1057  years, we used PTRACE_KILL only, so we're being a bit paranoid
1058  about some old kernels where PTRACE_KILL might work better
1059  (dubious if there are any such, but that's why it's paranoia), so
1060  we try SIGKILL first, PTRACE_KILL second, and so we're fine
1061  everywhere. */
1062 
1063  errno = 0;
1064  kill_lwp (pid, SIGKILL);
1065  if (debug_threads)
1066  {
1067  int save_errno = errno;
1068 
1069  debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1070  target_pid_to_str (ptid_of (thr)),
1071  save_errno ? strerror (save_errno) : "OK");
1072  }
1073 
1074  errno = 0;
1075  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1076  if (debug_threads)
1077  {
1078  int save_errno = errno;
1079 
1080  debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1081  target_pid_to_str (ptid_of (thr)),
1082  save_errno ? strerror (save_errno) : "OK");
1083  }
1084 }
1085 
1086 /* Kill LWP and wait for it to die. */
1087 
1088 static void
1090 {
1091  struct thread_info *thr = get_lwp_thread (lwp);
1092  int pid = ptid_get_pid (ptid_of (thr));
1093  int lwpid = ptid_get_lwp (ptid_of (thr));
1094  int wstat;
1095  int res;
1096 
1097  if (debug_threads)
1098  debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1099 
1100  do
1101  {
1102  linux_kill_one_lwp (lwp);
1103 
1104  /* Make sure it died. Notes:
1105 
1106  - The loop is most likely unnecessary.
1107 
1108  - We don't use linux_wait_for_event as that could delete lwps
1109  while we're iterating over them. We're not interested in
1110  any pending status at this point, only in making sure all
1111  wait status on the kernel side are collected until the
1112  process is reaped.
1113 
1114  - We don't use __WALL here as the __WALL emulation relies on
1115  SIGCHLD, and killing a stopped process doesn't generate
1116  one, nor an exit status.
1117  */
1118  res = my_waitpid (lwpid, &wstat, 0);
1119  if (res == -1 && errno == ECHILD)
1120  res = my_waitpid (lwpid, &wstat, __WCLONE);
1121  } while (res > 0 && WIFSTOPPED (wstat));
1122 
1123  /* Even if it was stopped, the child may have already disappeared.
1124  E.g., if it was killed by SIGKILL. */
1125  if (res < 0 && errno != ECHILD)
1126  perror_with_name ("kill_wait_lwp");
1127 }
1128 
1129 /* Callback for `find_inferior'. Kills an lwp of a given process,
1130  except the leader. */
1131 
1132 static int
1134 {
1135  struct thread_info *thread = (struct thread_info *) entry;
1136  struct lwp_info *lwp = get_thread_lwp (thread);
1137  int pid = * (int *) args;
1138 
1139  if (ptid_get_pid (entry->id) != pid)
1140  return 0;
1141 
1142  /* We avoid killing the first thread here, because of a Linux kernel (at
1143  least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1144  the children get a chance to be reaped, it will remain a zombie
1145  forever. */
1146 
1147  if (lwpid_of (thread) == pid)
1148  {
1149  if (debug_threads)
1150  debug_printf ("lkop: is last of process %s\n",
1151  target_pid_to_str (entry->id));
1152  return 0;
1153  }
1154 
1155  kill_wait_lwp (lwp);
1156  return 0;
1157 }
1158 
1159 static int
1160 linux_kill (int pid)
1161 {
1162  struct process_info *process;
1163  struct lwp_info *lwp;
1164 
1165  process = find_process_pid (pid);
1166  if (process == NULL)
1167  return -1;
1168 
1169  /* If we're killing a running inferior, make sure it is stopped
1170  first, as PTRACE_KILL will not work otherwise. */
1171  stop_all_lwps (0, NULL);
1172 
1174 
1175  /* See the comment in linux_kill_one_lwp. We did not kill the first
1176  thread in the list, so do so now. */
1177  lwp = find_lwp_pid (pid_to_ptid (pid));
1178 
1179  if (lwp == NULL)
1180  {
1181  if (debug_threads)
1182  debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1183  pid);
1184  }
1185  else
1186  kill_wait_lwp (lwp);
1187 
1188  the_target->mourn (process);
1189 
1190  /* Since we presently can only stop all lwps of all processes, we
1191  need to unstop lwps of other processes. */
1192  unstop_all_lwps (0, NULL);
1193  return 0;
1194 }
1195 
1196 /* Get pending signal of THREAD, for detaching purposes. This is the
1197  signal the thread last stopped for, which we need to deliver to the
1198  thread when detaching, otherwise, it'd be suppressed/lost. */
1199 
1200 static int
1202 {
1203  enum gdb_signal signo = GDB_SIGNAL_0;
1204  int status;
1205  struct lwp_info *lp = get_thread_lwp (thread);
1206 
1207  if (lp->status_pending_p)
1208  status = lp->status_pending;
1209  else
1210  {
1211  /* If the thread had been suspended by gdbserver, and it stopped
1212  cleanly, then it'll have stopped with SIGSTOP. But we don't
1213  want to deliver that SIGSTOP. */
1214  if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1215  || thread->last_status.value.sig == GDB_SIGNAL_0)
1216  return 0;
1217 
1218  /* Otherwise, we may need to deliver the signal we
1219  intercepted. */
1220  status = lp->last_status;
1221  }
1222 
1223  if (!WIFSTOPPED (status))
1224  {
1225  if (debug_threads)
1226  debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1227  target_pid_to_str (ptid_of (thread)));
1228  return 0;
1229  }
1230 
1231  /* Extended wait statuses aren't real SIGTRAPs. */
1232  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1233  {
1234  if (debug_threads)
1235  debug_printf ("GPS: lwp %s had stopped with extended "
1236  "status: no pending signal\n",
1237  target_pid_to_str (ptid_of (thread)));
1238  return 0;
1239  }
1240 
1241  signo = gdb_signal_from_host (WSTOPSIG (status));
1242 
1243  if (program_signals_p && !program_signals[signo])
1244  {
1245  if (debug_threads)
1246  debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1247  target_pid_to_str (ptid_of (thread)),
1248  gdb_signal_to_string (signo));
1249  return 0;
1250  }
1251  else if (!program_signals_p
1252  /* If we have no way to know which signals GDB does not
1253  want to have passed to the program, assume
1254  SIGTRAP/SIGINT, which is GDB's default. */
1255  && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1256  {
1257  if (debug_threads)
1258  debug_printf ("GPS: lwp %s had signal %s, "
1259  "but we don't know if we should pass it. "
1260  "Default to not.\n",
1261  target_pid_to_str (ptid_of (thread)),
1262  gdb_signal_to_string (signo));
1263  return 0;
1264  }
1265  else
1266  {
1267  if (debug_threads)
1268  debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1269  target_pid_to_str (ptid_of (thread)),
1270  gdb_signal_to_string (signo));
1271 
1272  return WSTOPSIG (status);
1273  }
1274 }
1275 
1276 static int
1277 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1278 {
1279  struct thread_info *thread = (struct thread_info *) entry;
1280  struct lwp_info *lwp = get_thread_lwp (thread);
1281  int pid = * (int *) args;
1282  int sig;
1283 
1284  if (ptid_get_pid (entry->id) != pid)
1285  return 0;
1286 
1287  /* If there is a pending SIGSTOP, get rid of it. */
1288  if (lwp->stop_expected)
1289  {
1290  if (debug_threads)
1291  debug_printf ("Sending SIGCONT to %s\n",
1292  target_pid_to_str (ptid_of (thread)));
1293 
1294  kill_lwp (lwpid_of (thread), SIGCONT);
1295  lwp->stop_expected = 0;
1296  }
1297 
1298  /* Flush any pending changes to the process's registers. */
1299  regcache_invalidate_thread (thread);
1300 
1301  /* Pass on any pending signal for this thread. */
1302  sig = get_detach_signal (thread);
1303 
1304  /* Finally, let it resume. */
1305  if (the_low_target.prepare_to_resume != NULL)
1307  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1308  (PTRACE_TYPE_ARG4) (long) sig) < 0)
1309  error (_("Can't detach %s: %s"),
1310  target_pid_to_str (ptid_of (thread)),
1311  strerror (errno));
1312 
1313  delete_lwp (lwp);
1314  return 0;
1315 }
1316 
1317 static int
1318 linux_detach (int pid)
1319 {
1320  struct process_info *process;
1321 
1322  process = find_process_pid (pid);
1323  if (process == NULL)
1324  return -1;
1325 
1326  /* As there's a step over already in progress, let it finish first,
1327  otherwise nesting a stabilize_threads operation on top gets real
1328  messy. */
1330 
1331  /* Stop all threads before detaching. First, ptrace requires that
1332  the thread is stopped to sucessfully detach. Second, thread_db
1333  may need to uninstall thread event breakpoints from memory, which
1334  only works with a stopped process anyway. */
1335  stop_all_lwps (0, NULL);
1336 
1337 #ifdef USE_THREAD_DB
1338  thread_db_detach (process);
1339 #endif
1340 
1341  /* Stabilize threads (move out of jump pads). */
1342  stabilize_threads ();
1343 
1345 
1346  the_target->mourn (process);
1347 
1348  /* Since we presently can only stop all lwps of all processes, we
1349  need to unstop lwps of other processes. */
1350  unstop_all_lwps (0, NULL);
1351  return 0;
1352 }
1353 
1354 /* Remove all LWPs that belong to process PROC from the lwp list. */
1355 
1356 static int
1358 {
1359  struct thread_info *thread = (struct thread_info *) entry;
1360  struct lwp_info *lwp = get_thread_lwp (thread);
1361  struct process_info *process = proc;
1362 
1363  if (pid_of (thread) == pid_of (process))
1364  delete_lwp (lwp);
1365 
1366  return 0;
1367 }
1368 
1369 static void
1370 linux_mourn (struct process_info *process)
1371 {
1372  struct process_info_private *priv;
1373 
1374 #ifdef USE_THREAD_DB
1375  thread_db_mourn (process);
1376 #endif
1377 
1379 
1380  /* Freeing all private data. */
1381  priv = process->priv;
1382  free (priv->arch_private);
1383  free (priv);
1384  process->priv = NULL;
1385 
1386  remove_process (process);
1387 }
1388 
1389 static void
1390 linux_join (int pid)
1391 {
1392  int status, ret;
1393 
1394  do {
1395  ret = my_waitpid (pid, &status, 0);
1396  if (WIFEXITED (status) || WIFSIGNALED (status))
1397  break;
1398  } while (ret != -1 || errno != ECHILD);
1399 }
1400 
1401 /* Return nonzero if the given thread is still alive. */
1402 static int
1404 {
1405  struct lwp_info *lwp = find_lwp_pid (ptid);
1406 
1407  /* We assume we always know if a thread exits. If a whole process
1408  exited but we still haven't been able to report it to GDB, we'll
1409  hold on to the last lwp of the dead process. */
1410  if (lwp != NULL)
1411  return !lwp_is_marked_dead (lwp);
1412  else
1413  return 0;
1414 }
1415 
1416 /* Return 1 if this lwp still has an interesting status pending. If
1417  not (e.g., it had stopped for a breakpoint that is gone), return
1418  false. */
1419 
1420 static int
1422 {
1423  struct lwp_info *lp = get_thread_lwp (thread);
1424 
1425  if (!lp->status_pending_p)
1426  return 0;
1427 
1428  /* If we got a `vCont;t', but we haven't reported a stop yet, do
1429  report any status pending the LWP may have. */
1430  if (thread->last_resume_kind == resume_stop
1431  && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1432  return 0;
1433 
1434  if (thread->last_resume_kind != resume_stop
1435  && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1436  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1437  {
1438  struct thread_info *saved_thread;
1439  CORE_ADDR pc;
1440  int discard = 0;
1441 
1442  gdb_assert (lp->last_status != 0);
1443 
1444  pc = get_pc (lp);
1445 
1446  saved_thread = current_thread;
1447  current_thread = thread;
1448 
1449  if (pc != lp->stop_pc)
1450  {
1451  if (debug_threads)
1452  debug_printf ("PC of %ld changed\n",
1453  lwpid_of (thread));
1454  discard = 1;
1455  }
1456 
1457 #if !USE_SIGTRAP_SIGINFO
1458  else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1459  && !(*the_low_target.breakpoint_at) (pc))
1460  {
1461  if (debug_threads)
1462  debug_printf ("previous SW breakpoint of %ld gone\n",
1463  lwpid_of (thread));
1464  discard = 1;
1465  }
1466  else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1468  {
1469  if (debug_threads)
1470  debug_printf ("previous HW breakpoint of %ld gone\n",
1471  lwpid_of (thread));
1472  discard = 1;
1473  }
1474 #endif
1475 
1476  current_thread = saved_thread;
1477 
1478  if (discard)
1479  {
1480  if (debug_threads)
1481  debug_printf ("discarding pending breakpoint status\n");
1482  lp->status_pending_p = 0;
1483  return 0;
1484  }
1485  }
1486 
1487  return 1;
1488 }
1489 
1490 /* Return 1 if this lwp has an interesting status pending. */
1491 static int
1493 {
1494  struct thread_info *thread = (struct thread_info *) entry;
1495  struct lwp_info *lp = get_thread_lwp (thread);
1496  ptid_t ptid = * (ptid_t *) arg;
1497 
1498  /* Check if we're only interested in events from a specific process
1499  or a specific LWP. */
1500  if (!ptid_match (ptid_of (thread), ptid))
1501  return 0;
1502 
1503  if (lp->status_pending_p
1504  && !thread_still_has_status_pending_p (thread))
1505  {
1506  linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1507  return 0;
1508  }
1509 
1510  return lp->status_pending_p;
1511 }
1512 
1513 static int
1514 same_lwp (struct inferior_list_entry *entry, void *data)
1515 {
1516  ptid_t ptid = *(ptid_t *) data;
1517  int lwp;
1518 
1519  if (ptid_get_lwp (ptid) != 0)
1520  lwp = ptid_get_lwp (ptid);
1521  else
1522  lwp = ptid_get_pid (ptid);
1523 
1524  if (ptid_get_lwp (entry->id) == lwp)
1525  return 1;
1526 
1527  return 0;
1528 }
1529 
1530 struct lwp_info *
1532 {
1533  struct inferior_list_entry *thread
1534  = find_inferior (&all_threads, same_lwp, &ptid);
1535 
1536  if (thread == NULL)
1537  return NULL;
1538 
1539  return get_thread_lwp ((struct thread_info *) thread);
1540 }
1541 
1542 /* Return the number of known LWPs in the tgid given by PID. */
1543 
1544 static int
1545 num_lwps (int pid)
1546 {
1547  struct inferior_list_entry *inf, *tmp;
1548  int count = 0;
1549 
1550  ALL_INFERIORS (&all_threads, inf, tmp)
1551  {
1552  if (ptid_get_pid (inf->id) == pid)
1553  count++;
1554  }
1555 
1556  return count;
1557 }
1558 
1559 /* The arguments passed to iterate_over_lwps. */
1560 
1562 {
1563  /* The FILTER argument passed to iterate_over_lwps. */
1565 
1566  /* The CALLBACK argument passed to iterate_over_lwps. */
1568 
1569  /* The DATA argument passed to iterate_over_lwps. */
1570  void *data;
1571 };
1572 
1573 /* Callback for find_inferior used by iterate_over_lwps to filter
1574  calls to the callback supplied to that function. Returning a
1575  nonzero value causes find_inferiors to stop iterating and return
1576  the current inferior_list_entry. Returning zero indicates that
1577  find_inferiors should continue iterating. */
1578 
1579 static int
1580 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1581 {
1582  struct iterate_over_lwps_args *args
1583  = (struct iterate_over_lwps_args *) args_p;
1584 
1585  if (ptid_match (entry->id, args->filter))
1586  {
1587  struct thread_info *thr = (struct thread_info *) entry;
1588  struct lwp_info *lwp = get_thread_lwp (thr);
1589 
1590  return (*args->callback) (lwp, args->data);
1591  }
1592 
1593  return 0;
1594 }
1595 
1596 /* See nat/linux-nat.h. */
1597 
1598 struct lwp_info *
1600  iterate_over_lwps_ftype callback,
1601  void *data)
1602 {
1603  struct iterate_over_lwps_args args = {filter, callback, data};
1604  struct inferior_list_entry *entry;
1605 
1607  if (entry == NULL)
1608  return NULL;
1609 
1610  return get_thread_lwp ((struct thread_info *) entry);
1611 }
1612 
1613 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1614  their exits until all other threads in the group have exited. */
1615 
1616 static void
1618 {
1619  struct process_info *proc, *tmp;
1620 
1621  ALL_PROCESSES (proc, tmp)
1622  {
1623  pid_t leader_pid = pid_of (proc);
1624  struct lwp_info *leader_lp;
1625 
1626  leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1627 
1628  if (debug_threads)
1629  debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1630  "num_lwps=%d, zombie=%d\n",
1631  leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1632  linux_proc_pid_is_zombie (leader_pid));
1633 
1634  if (leader_lp != NULL
1635  /* Check if there are other threads in the group, as we may
1636  have raced with the inferior simply exiting. */
1637  && !last_thread_of_process_p (leader_pid)
1638  && linux_proc_pid_is_zombie (leader_pid))
1639  {
1640  /* A leader zombie can mean one of two things:
1641 
1642  - It exited, and there's an exit status pending
1643  available, or only the leader exited (not the whole
1644  program). In the latter case, we can't waitpid the
1645  leader's exit status until all other threads are gone.
1646 
1647  - There are 3 or more threads in the group, and a thread
1648  other than the leader exec'd. On an exec, the Linux
1649  kernel destroys all other threads (except the execing
1650  one) in the thread group, and resets the execing thread's
1651  tid to the tgid. No exit notification is sent for the
1652  execing thread -- from the ptracer's perspective, it
1653  appears as though the execing thread just vanishes.
1654  Until we reap all other threads except the leader and the
1655  execing thread, the leader will be zombie, and the
1656  execing thread will be in `D (disc sleep)'. As soon as
1657  all other threads are reaped, the execing thread changes
1658  it's tid to the tgid, and the previous (zombie) leader
1659  vanishes, giving place to the "new" leader. We could try
1660  distinguishing the exit and exec cases, by waiting once
1661  more, and seeing if something comes out, but it doesn't
1662  sound useful. The previous leader _does_ go away, and
1663  we'll re-add the new one once we see the exec event
1664  (which is just the same as what would happen if the
1665  previous leader did exit voluntarily before some other
1666  thread execs). */
1667 
1668  if (debug_threads)
1669  fprintf (stderr,
1670  "CZL: Thread group leader %d zombie "
1671  "(it exited, or another thread execd).\n",
1672  leader_pid);
1673 
1674  delete_lwp (leader_lp);
1675  }
1676  }
1677 }
1678 
1679 /* Callback for `find_inferior'. Returns the first LWP that is not
1680  stopped. ARG is a PTID filter. */
1681 
1682 static int
1683 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1684 {
1685  struct thread_info *thr = (struct thread_info *) entry;
1686  struct lwp_info *lwp;
1687  ptid_t filter = *(ptid_t *) arg;
1688 
1689  if (!ptid_match (ptid_of (thr), filter))
1690  return 0;
1691 
1692  lwp = get_thread_lwp (thr);
1693  if (!lwp->stopped)
1694  return 1;
1695 
1696  return 0;
1697 }
1698 
1699 /* Increment LWP's suspend count. */
1700 
1701 static void
1703 {
1704  lwp->suspended++;
1705 
1706  if (debug_threads && lwp->suspended > 4)
1707  {
1708  struct thread_info *thread = get_lwp_thread (lwp);
1709 
1710  debug_printf ("LWP %ld has a suspiciously high suspend count,"
1711  " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1712  }
1713 }
1714 
1715 /* Decrement LWP's suspend count. */
1716 
1717 static void
1719 {
1720  lwp->suspended--;
1721 
1722  if (lwp->suspended < 0)
1723  {
1724  struct thread_info *thread = get_lwp_thread (lwp);
1725 
1726  internal_error (__FILE__, __LINE__,
1727  "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1728  lwp->suspended);
1729  }
1730 }
1731 
1732 /* This function should only be called if the LWP got a SIGTRAP.
1733 
1734  Handle any tracepoint steps or hits. Return true if a tracepoint
1735  event was handled, 0 otherwise. */
1736 
1737 static int
1739 {
1740  struct thread_info *tinfo = get_lwp_thread (lwp);
1741  int tpoint_related_event = 0;
1742 
1743  gdb_assert (lwp->suspended == 0);
1744 
1745  /* If this tracepoint hit causes a tracing stop, we'll immediately
1746  uninsert tracepoints. To do this, we temporarily pause all
1747  threads, unpatch away, and then unpause threads. We need to make
1748  sure the unpausing doesn't resume LWP too. */
1749  lwp_suspended_inc (lwp);
1750 
1751  /* And we need to be sure that any all-threads-stopping doesn't try
1752  to move threads out of the jump pads, as it could deadlock the
1753  inferior (LWP could be in the jump pad, maybe even holding the
1754  lock.) */
1755 
1756  /* Do any necessary step collect actions. */
1757  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1758 
1759  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1760 
1761  /* See if we just hit a tracepoint and do its main collect
1762  actions. */
1763  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1764 
1765  lwp_suspended_decr (lwp);
1766 
1767  gdb_assert (lwp->suspended == 0);
1768  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1769 
1770  if (tpoint_related_event)
1771  {
1772  if (debug_threads)
1773  debug_printf ("got a tracepoint event\n");
1774  return 1;
1775  }
1776 
1777  return 0;
1778 }
1779 
1780 /* Convenience wrapper. Returns true if LWP is presently collecting a
1781  fast tracepoint. */
1782 
1783 static int
1785  struct fast_tpoint_collect_status *status)
1786 {
1787  CORE_ADDR thread_area;
1788  struct thread_info *thread = get_lwp_thread (lwp);
1789 
1790  if (the_low_target.get_thread_area == NULL)
1791  return 0;
1792 
1793  /* Get the thread area address. This is used to recognize which
1794  thread is which when tracing with the in-process agent library.
1795  We don't read anything from the address, and treat it as opaque;
1796  it's the address itself that we assume is unique per-thread. */
1797  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1798  return 0;
1799 
1800  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1801 }
1802 
1803 /* The reason we resume in the caller, is because we want to be able
1804  to pass lwp->status_pending as WSTAT, and we need to clear
1805  status_pending_p before resuming, otherwise, linux_resume_one_lwp
1806  refuses to resume. */
1807 
1808 static int
1809 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1810 {
1811  struct thread_info *saved_thread;
1812 
1813  saved_thread = current_thread;
1815 
1816  if ((wstat == NULL
1817  || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1819  && agent_loaded_p ())
1820  {
1821  struct fast_tpoint_collect_status status;
1822  int r;
1823 
1824  if (debug_threads)
1825  debug_printf ("Checking whether LWP %ld needs to move out of the "
1826  "jump pad.\n",
1828 
1829  r = linux_fast_tracepoint_collecting (lwp, &status);
1830 
1831  if (wstat == NULL
1832  || (WSTOPSIG (*wstat) != SIGILL
1833  && WSTOPSIG (*wstat) != SIGFPE
1834  && WSTOPSIG (*wstat) != SIGSEGV
1835  && WSTOPSIG (*wstat) != SIGBUS))
1836  {
1837  lwp->collecting_fast_tracepoint = r;
1838 
1839  if (r != 0)
1840  {
1841  if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1842  {
1843  /* Haven't executed the original instruction yet.
1844  Set breakpoint there, and wait till it's hit,
1845  then single-step until exiting the jump pad. */
1846  lwp->exit_jump_pad_bkpt
1847  = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1848  }
1849 
1850  if (debug_threads)
1851  debug_printf ("Checking whether LWP %ld needs to move out of "
1852  "the jump pad...it does\n",
1854  current_thread = saved_thread;
1855 
1856  return 1;
1857  }
1858  }
1859  else
1860  {
1861  /* If we get a synchronous signal while collecting, *and*
1862  while executing the (relocated) original instruction,
1863  reset the PC to point at the tpoint address, before
1864  reporting to GDB. Otherwise, it's an IPA lib bug: just
1865  report the signal to GDB, and pray for the best. */
1866 
1867  lwp->collecting_fast_tracepoint = 0;
1868 
1869  if (r != 0
1870  && (status.adjusted_insn_addr <= lwp->stop_pc
1871  && lwp->stop_pc < status.adjusted_insn_addr_end))
1872  {
1873  siginfo_t info;
1874  struct regcache *regcache;
1875 
1876  /* The si_addr on a few signals references the address
1877  of the faulting instruction. Adjust that as
1878  well. */
1879  if ((WSTOPSIG (*wstat) == SIGILL
1880  || WSTOPSIG (*wstat) == SIGFPE
1881  || WSTOPSIG (*wstat) == SIGBUS
1882  || WSTOPSIG (*wstat) == SIGSEGV)
1884  (PTRACE_TYPE_ARG3) 0, &info) == 0
1885  /* Final check just to make sure we don't clobber
1886  the siginfo of non-kernel-sent signals. */
1887  && (uintptr_t) info.si_addr == lwp->stop_pc)
1888  {
1889  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1891  (PTRACE_TYPE_ARG3) 0, &info);
1892  }
1893 
1894  regcache = get_thread_regcache (current_thread, 1);
1895  (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1896  lwp->stop_pc = status.tpoint_addr;
1897 
1898  /* Cancel any fast tracepoint lock this thread was
1899  holding. */
1901  }
1902 
1903  if (lwp->exit_jump_pad_bkpt != NULL)
1904  {
1905  if (debug_threads)
1906  debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1907  "stopping all threads momentarily.\n");
1908 
1909  stop_all_lwps (1, lwp);
1910 
1912  lwp->exit_jump_pad_bkpt = NULL;
1913 
1914  unstop_all_lwps (1, lwp);
1915 
1916  gdb_assert (lwp->suspended >= 0);
1917  }
1918  }
1919  }
1920 
1921  if (debug_threads)
1922  debug_printf ("Checking whether LWP %ld needs to move out of the "
1923  "jump pad...no\n",
1925 
1926  current_thread = saved_thread;
1927  return 0;
1928 }
1929 
1930 /* Enqueue one signal in the "signals to report later when out of the
1931  jump pad" list. */
1932 
1933 static void
1934 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1935 {
1936  struct pending_signals *p_sig;
1937  struct thread_info *thread = get_lwp_thread (lwp);
1938 
1939  if (debug_threads)
1940  debug_printf ("Deferring signal %d for LWP %ld.\n",
1941  WSTOPSIG (*wstat), lwpid_of (thread));
1942 
1943  if (debug_threads)
1944  {
1945  struct pending_signals *sig;
1946 
1947  for (sig = lwp->pending_signals_to_report;
1948  sig != NULL;
1949  sig = sig->prev)
1950  debug_printf (" Already queued %d\n",
1951  sig->signal);
1952 
1953  debug_printf (" (no more currently queued signals)\n");
1954  }
1955 
1956  /* Don't enqueue non-RT signals if they are already in the deferred
1957  queue. (SIGSTOP being the easiest signal to see ending up here
1958  twice) */
1959  if (WSTOPSIG (*wstat) < __SIGRTMIN)
1960  {
1961  struct pending_signals *sig;
1962 
1963  for (sig = lwp->pending_signals_to_report;
1964  sig != NULL;
1965  sig = sig->prev)
1966  {
1967  if (sig->signal == WSTOPSIG (*wstat))
1968  {
1969  if (debug_threads)
1970  debug_printf ("Not requeuing already queued non-RT signal %d"
1971  " for LWP %ld\n",
1972  sig->signal,
1973  lwpid_of (thread));
1974  return;
1975  }
1976  }
1977  }
1978 
1979  p_sig = xmalloc (sizeof (*p_sig));
1980  p_sig->prev = lwp->pending_signals_to_report;
1981  p_sig->signal = WSTOPSIG (*wstat);
1982  memset (&p_sig->info, 0, sizeof (siginfo_t));
1983  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1984  &p_sig->info);
1985 
1986  lwp->pending_signals_to_report = p_sig;
1987 }
1988 
1989 /* Dequeue one signal from the "signals to report later when out of
1990  the jump pad" list. */
1991 
1992 static int
1993 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1994 {
1995  struct thread_info *thread = get_lwp_thread (lwp);
1996 
1997  if (lwp->pending_signals_to_report != NULL)
1998  {
1999  struct pending_signals **p_sig;
2000 
2001  p_sig = &lwp->pending_signals_to_report;
2002  while ((*p_sig)->prev != NULL)
2003  p_sig = &(*p_sig)->prev;
2004 
2005  *wstat = W_STOPCODE ((*p_sig)->signal);
2006  if ((*p_sig)->info.si_signo != 0)
2007  ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2008  &(*p_sig)->info);
2009  free (*p_sig);
2010  *p_sig = NULL;
2011 
2012  if (debug_threads)
2013  debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2014  WSTOPSIG (*wstat), lwpid_of (thread));
2015 
2016  if (debug_threads)
2017  {
2018  struct pending_signals *sig;
2019 
2020  for (sig = lwp->pending_signals_to_report;
2021  sig != NULL;
2022  sig = sig->prev)
2023  debug_printf (" Still queued %d\n",
2024  sig->signal);
2025 
2026  debug_printf (" (no more queued signals)\n");
2027  }
2028 
2029  return 1;
2030  }
2031 
2032  return 0;
2033 }
2034 
2035 /* Fetch the possibly triggered data watchpoint info and store it in
2036  CHILD.
2037 
2038  On some archs, like x86, that use debug registers to set
2039  watchpoints, it's possible that the way to know which watched
2040  address trapped, is to check the register that is used to select
2041  which address to watch. Problem is, between setting the watchpoint
2042  and reading back which data address trapped, the user may change
2043  the set of watchpoints, and, as a consequence, GDB changes the
2044  debug registers in the inferior. To avoid reading back a stale
2045  stopped-data-address when that happens, we cache in LP the fact
2046  that a watchpoint trapped, and the corresponding data address, as
2047  soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2048  registers meanwhile, we have the cached data we can rely on. */
2049 
2050 static int
2052 {
2054  {
2055  struct thread_info *saved_thread;
2056 
2057  saved_thread = current_thread;
2058  current_thread = get_lwp_thread (child);
2059 
2061  {
2062  child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2063 
2065  child->stopped_data_address
2067  else
2068  child->stopped_data_address = 0;
2069  }
2070 
2071  current_thread = saved_thread;
2072  }
2073 
2074  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2075 }
2076 
2077 /* Return the ptrace options that we want to try to enable. */
2078 
2079 static int
2081 {
2082  int options = 0;
2083 
2084  if (!attached)
2085  options |= PTRACE_O_EXITKILL;
2086 
2087  if (report_fork_events)
2088  options |= PTRACE_O_TRACEFORK;
2089 
2090  if (report_vfork_events)
2092 
2093  return options;
2094 }
2095 
2096 /* Do low-level handling of the event, and check if we should go on
2097  and pass it to caller code. Return the affected lwp if we are, or
2098  NULL otherwise. */
2099 
2100 static struct lwp_info *
2101 linux_low_filter_event (int lwpid, int wstat)
2102 {
2103  struct lwp_info *child;
2104  struct thread_info *thread;
2105  int have_stop_pc = 0;
2106 
2107  child = find_lwp_pid (pid_to_ptid (lwpid));
2108 
2109  /* If we didn't find a process, one of two things presumably happened:
2110  - A process we started and then detached from has exited. Ignore it.
2111  - A process we are controlling has forked and the new child's stop
2112  was reported to us by the kernel. Save its PID. */
2113  if (child == NULL && WIFSTOPPED (wstat))
2114  {
2115  add_to_pid_list (&stopped_pids, lwpid, wstat);
2116  return NULL;
2117  }
2118  else if (child == NULL)
2119  return NULL;
2120 
2121  thread = get_lwp_thread (child);
2122 
2123  child->stopped = 1;
2124 
2125  child->last_status = wstat;
2126 
2127  /* Check if the thread has exited. */
2128  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2129  {
2130  if (debug_threads)
2131  debug_printf ("LLFE: %d exited.\n", lwpid);
2132  if (num_lwps (pid_of (thread)) > 1)
2133  {
2134 
2135  /* If there is at least one more LWP, then the exit signal was
2136  not the end of the debugged application and should be
2137  ignored. */
2138  delete_lwp (child);
2139  return NULL;
2140  }
2141  else
2142  {
2143  /* This was the last lwp in the process. Since events are
2144  serialized to GDB core, and we can't report this one
2145  right now, but GDB core and the other target layers will
2146  want to be notified about the exit code/signal, leave the
2147  status pending for the next time we're able to report
2148  it. */
2149  mark_lwp_dead (child, wstat);
2150  return child;
2151  }
2152  }
2153 
2154  gdb_assert (WIFSTOPPED (wstat));
2155 
2156  if (WIFSTOPPED (wstat))
2157  {
2158  struct process_info *proc;
2159 
2160  /* Architecture-specific setup after inferior is running. This
2161  needs to happen after we have attached to the inferior and it
2162  is stopped for the first time, but before we access any
2163  inferior registers. */
2164  proc = find_process_pid (pid_of (thread));
2165  if (proc->priv->new_inferior)
2166  {
2167  struct thread_info *saved_thread;
2168 
2169  saved_thread = current_thread;
2170  current_thread = thread;
2171 
2173 
2174  current_thread = saved_thread;
2175 
2176  proc->priv->new_inferior = 0;
2177  }
2178  }
2179 
2180  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2181  {
2182  struct process_info *proc = find_process_pid (pid_of (thread));
2183  int options = linux_low_ptrace_options (proc->attached);
2184 
2185  linux_enable_event_reporting (lwpid, options);
2186  child->must_set_ptrace_flags = 0;
2187  }
2188 
2189  /* Be careful to not overwrite stop_pc until
2190  check_stopped_by_breakpoint is called. */
2191  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2192  && linux_is_extended_waitstatus (wstat))
2193  {
2194  child->stop_pc = get_pc (child);
2195  if (handle_extended_wait (child, wstat))
2196  {
2197  /* The event has been handled, so just return without
2198  reporting it. */
2199  return NULL;
2200  }
2201  }
2202 
2203  /* Check first whether this was a SW/HW breakpoint before checking
2204  watchpoints, because at least s390 can't tell the data address of
2205  hardware watchpoint hits, and returns stopped-by-watchpoint as
2206  long as there's a watchpoint set. */
2207  if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2208  {
2209  if (check_stopped_by_breakpoint (child))
2210  have_stop_pc = 1;
2211  }
2212 
2213  /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2214  or hardware watchpoint. Check which is which if we got
2215  TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
2216  stepped an instruction that triggered a watchpoint. In that
2217  case, on some architectures (such as x86), instead of
2218  TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
2219  the debug registers separately. */
2220  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2221  && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
2223 
2224  if (!have_stop_pc)
2225  child->stop_pc = get_pc (child);
2226 
2227  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2228  && child->stop_expected)
2229  {
2230  if (debug_threads)
2231  debug_printf ("Expected stop.\n");
2232  child->stop_expected = 0;
2233 
2234  if (thread->last_resume_kind == resume_stop)
2235  {
2236  /* We want to report the stop to the core. Treat the
2237  SIGSTOP as a normal event. */
2238  if (debug_threads)
2239  debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2240  target_pid_to_str (ptid_of (thread)));
2241  }
2242  else if (stopping_threads != NOT_STOPPING_THREADS)
2243  {
2244  /* Stopping threads. We don't want this SIGSTOP to end up
2245  pending. */
2246  if (debug_threads)
2247  debug_printf ("LLW: SIGSTOP caught for %s "
2248  "while stopping threads.\n",
2249  target_pid_to_str (ptid_of (thread)));
2250  return NULL;
2251  }
2252  else
2253  {
2254  /* This is a delayed SIGSTOP. Filter out the event. */
2255  if (debug_threads)
2256  debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2257  child->stepping ? "step" : "continue",
2258  target_pid_to_str (ptid_of (thread)));
2259 
2260  linux_resume_one_lwp (child, child->stepping, 0, NULL);
2261  return NULL;
2262  }
2263  }
2264 
2265  child->status_pending_p = 1;
2266  child->status_pending = wstat;
2267  return child;
2268 }
2269 
2270 /* Resume LWPs that are currently stopped without any pending status
2271  to report, but are resumed from the core's perspective. */
2272 
2273 static void
2275 {
2276  struct thread_info *thread = (struct thread_info *) entry;
2277  struct lwp_info *lp = get_thread_lwp (thread);
2278 
2279  if (lp->stopped
2280  && !lp->suspended
2281  && !lp->status_pending_p
2282  && thread->last_resume_kind != resume_stop
2283  && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2284  {
2285  int step = thread->last_resume_kind == resume_step;
2286 
2287  if (debug_threads)
2288  debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2289  target_pid_to_str (ptid_of (thread)),
2290  paddress (lp->stop_pc),
2291  step);
2292 
2293  linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2294  }
2295 }
2296 
2297 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2298  match FILTER_PTID (leaving others pending). The PTIDs can be:
2299  minus_one_ptid, to specify any child; a pid PTID, specifying all
2300  lwps of a thread group; or a PTID representing a single lwp. Store
2301  the stop status through the status pointer WSTAT. OPTIONS is
2302  passed to the waitpid call. Return 0 if no event was found and
2303  OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2304  was found. Return the PID of the stopped child otherwise. */
2305 
2306 static int
2308  int *wstatp, int options)
2309 {
2310  struct thread_info *event_thread;
2311  struct lwp_info *event_child, *requested_child;
2312  sigset_t block_mask, prev_mask;
2313 
2314  retry:
2315  /* N.B. event_thread points to the thread_info struct that contains
2316  event_child. Keep them in sync. */
2317  event_thread = NULL;
2318  event_child = NULL;
2319  requested_child = NULL;
2320 
2321  /* Check for a lwp with a pending status. */
2322 
2323  if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2324  {
2325  event_thread = (struct thread_info *)
2327  if (event_thread != NULL)
2328  event_child = get_thread_lwp (event_thread);
2329  if (debug_threads && event_thread)
2330  debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2331  }
2332  else if (!ptid_equal (filter_ptid, null_ptid))
2333  {
2334  requested_child = find_lwp_pid (filter_ptid);
2335 
2336  if (stopping_threads == NOT_STOPPING_THREADS
2337  && requested_child->status_pending_p
2338  && requested_child->collecting_fast_tracepoint)
2339  {
2340  enqueue_one_deferred_signal (requested_child,
2341  &requested_child->status_pending);
2342  requested_child->status_pending_p = 0;
2343  requested_child->status_pending = 0;
2344  linux_resume_one_lwp (requested_child, 0, 0, NULL);
2345  }
2346 
2347  if (requested_child->suspended
2348  && requested_child->status_pending_p)
2349  {
2350  internal_error (__FILE__, __LINE__,
2351  "requesting an event out of a"
2352  " suspended child?");
2353  }
2354 
2355  if (requested_child->status_pending_p)
2356  {
2357  event_child = requested_child;
2358  event_thread = get_lwp_thread (event_child);
2359  }
2360  }
2361 
2362  if (event_child != NULL)
2363  {
2364  if (debug_threads)
2365  debug_printf ("Got an event from pending child %ld (%04x)\n",
2366  lwpid_of (event_thread), event_child->status_pending);
2367  *wstatp = event_child->status_pending;
2368  event_child->status_pending_p = 0;
2369  event_child->status_pending = 0;
2370  current_thread = event_thread;
2371  return lwpid_of (event_thread);
2372  }
2373 
2374  /* But if we don't find a pending event, we'll have to wait.
2375 
2376  We only enter this loop if no process has a pending wait status.
2377  Thus any action taken in response to a wait status inside this
2378  loop is responding as soon as we detect the status, not after any
2379  pending events. */
2380 
2381  /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2382  all signals while here. */
2383  sigfillset (&block_mask);
2384  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2385 
2386  /* Always pull all events out of the kernel. We'll randomly select
2387  an event LWP out of all that have events, to prevent
2388  starvation. */
2389  while (event_child == NULL)
2390  {
2391  pid_t ret = 0;
2392 
2393  /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2394  quirks:
2395 
2396  - If the thread group leader exits while other threads in the
2397  thread group still exist, waitpid(TGID, ...) hangs. That
2398  waitpid won't return an exit status until the other threads
2399  in the group are reaped.
2400 
2401  - When a non-leader thread execs, that thread just vanishes
2402  without reporting an exit (so we'd hang if we waited for it
2403  explicitly in that case). The exec event is reported to
2404  the TGID pid (although we don't currently enable exec
2405  events). */
2406  errno = 0;
2407  ret = my_waitpid (-1, wstatp, options | WNOHANG);
2408 
2409  if (debug_threads)
2410  debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2411  ret, errno ? strerror (errno) : "ERRNO-OK");
2412 
2413  if (ret > 0)
2414  {
2415  if (debug_threads)
2416  {
2417  debug_printf ("LLW: waitpid %ld received %s\n",
2418  (long) ret, status_to_str (*wstatp));
2419  }
2420 
2421  /* Filter all events. IOW, leave all events pending. We'll
2422  randomly select an event LWP out of all that have events
2423  below. */
2424  linux_low_filter_event (ret, *wstatp);
2425  /* Retry until nothing comes out of waitpid. A single
2426  SIGCHLD can indicate more than one child stopped. */
2427  continue;
2428  }
2429 
2430  /* Now that we've pulled all events out of the kernel, resume
2431  LWPs that don't have an interesting event to report. */
2432  if (stopping_threads == NOT_STOPPING_THREADS)
2434 
2435  /* ... and find an LWP with a status to report to the core, if
2436  any. */
2437  event_thread = (struct thread_info *)
2439  if (event_thread != NULL)
2440  {
2441  event_child = get_thread_lwp (event_thread);
2442  *wstatp = event_child->status_pending;
2443  event_child->status_pending_p = 0;
2444  event_child->status_pending = 0;
2445  break;
2446  }
2447 
2448  /* Check for zombie thread group leaders. Those can't be reaped
2449  until all other threads in the thread group are. */
2451 
2452  /* If there are no resumed children left in the set of LWPs we
2453  want to wait for, bail. We can't just block in
2454  waitpid/sigsuspend, because lwps might have been left stopped
2455  in trace-stop state, and we'd be stuck forever waiting for
2456  their status to change (which would only happen if we resumed
2457  them). Even if WNOHANG is set, this return code is preferred
2458  over 0 (below), as it is more detailed. */
2459  if ((find_inferior (&all_threads,
2461  &wait_ptid) == NULL))
2462  {
2463  if (debug_threads)
2464  debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2465  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2466  return -1;
2467  }
2468 
2469  /* No interesting event to report to the caller. */
2470  if ((options & WNOHANG))
2471  {
2472  if (debug_threads)
2473  debug_printf ("WNOHANG set, no event found\n");
2474 
2475  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2476  return 0;
2477  }
2478 
2479  /* Block until we get an event reported with SIGCHLD. */
2480  if (debug_threads)
2481  debug_printf ("sigsuspend'ing\n");
2482 
2483  sigsuspend (&prev_mask);
2484  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2485  goto retry;
2486  }
2487 
2488  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2489 
2490  current_thread = event_thread;
2491 
2492  /* Check for thread exit. */
2493  if (! WIFSTOPPED (*wstatp))
2494  {
2495  gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2496 
2497  if (debug_threads)
2498  debug_printf ("LWP %d is the last lwp of process. "
2499  "Process %ld exiting.\n",
2500  pid_of (event_thread), lwpid_of (event_thread));
2501  return lwpid_of (event_thread);
2502  }
2503 
2504  return lwpid_of (event_thread);
2505 }
2506 
2507 /* Wait for an event from child(ren) PTID. PTIDs can be:
2508  minus_one_ptid, to specify any child; a pid PTID, specifying all
2509  lwps of a thread group; or a PTID representing a single lwp. Store
2510  the stop status through the status pointer WSTAT. OPTIONS is
2511  passed to the waitpid call. Return 0 if no event was found and
2512  OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2513  was found. Return the PID of the stopped child otherwise. */
2514 
2515 static int
2516 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2517 {
2518  return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2519 }
2520 
2521 /* Count the LWP's that have had events. */
2522 
2523 static int
2525 {
2526  struct thread_info *thread = (struct thread_info *) entry;
2527  struct lwp_info *lp = get_thread_lwp (thread);
2528  int *count = data;
2529 
2530  gdb_assert (count != NULL);
2531 
2532  /* Count only resumed LWPs that have an event pending. */
2533  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2534  && lp->status_pending_p)
2535  (*count)++;
2536 
2537  return 0;
2538 }
2539 
2540 /* Select the LWP (if any) that is currently being single-stepped. */
2541 
2542 static int
2544 {
2545  struct thread_info *thread = (struct thread_info *) entry;
2546  struct lwp_info *lp = get_thread_lwp (thread);
2547 
2548  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2549  && thread->last_resume_kind == resume_step
2550  && lp->status_pending_p)
2551  return 1;
2552  else
2553  return 0;
2554 }
2555 
2556 /* Select the Nth LWP that has had an event. */
2557 
2558 static int
2560 {
2561  struct thread_info *thread = (struct thread_info *) entry;
2562  struct lwp_info *lp = get_thread_lwp (thread);
2563  int *selector = data;
2564 
2565  gdb_assert (selector != NULL);
2566 
2567  /* Select only resumed LWPs that have an event pending. */
2568  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2569  && lp->status_pending_p)
2570  if ((*selector)-- == 0)
2571  return 1;
2572 
2573  return 0;
2574 }
2575 
2576 /* Select one LWP out of those that have events pending. */
2577 
2578 static void
2579 select_event_lwp (struct lwp_info **orig_lp)
2580 {
2581  int num_events = 0;
2582  int random_selector;
2583  struct thread_info *event_thread = NULL;
2584 
2585  /* In all-stop, give preference to the LWP that is being
2586  single-stepped. There will be at most one, and it's the LWP that
2587  the core is most interested in. If we didn't do this, then we'd
2588  have to handle pending step SIGTRAPs somehow in case the core
2589  later continues the previously-stepped thread, otherwise we'd
2590  report the pending SIGTRAP, and the core, not having stepped the
2591  thread, wouldn't understand what the trap was for, and therefore
2592  would report it to the user as a random signal. */
2593  if (!non_stop)
2594  {
2595  event_thread
2596  = (struct thread_info *) find_inferior (&all_threads,
2598  NULL);
2599  if (event_thread != NULL)
2600  {
2601  if (debug_threads)
2602  debug_printf ("SEL: Select single-step %s\n",
2603  target_pid_to_str (ptid_of (event_thread)));
2604  }
2605  }
2606  if (event_thread == NULL)
2607  {
2608  /* No single-stepping LWP. Select one at random, out of those
2609  which have had events. */
2610 
2611  /* First see how many events we have. */
2613  gdb_assert (num_events > 0);
2614 
2615  /* Now randomly pick a LWP out of those that have had
2616  events. */
2617  random_selector = (int)
2618  ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2619 
2620  if (debug_threads && num_events > 1)
2621  debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2622  num_events, random_selector);
2623 
2624  event_thread
2625  = (struct thread_info *) find_inferior (&all_threads,
2627  &random_selector);
2628  }
2629 
2630  if (event_thread != NULL)
2631  {
2632  struct lwp_info *event_lp = get_thread_lwp (event_thread);
2633 
2634  /* Switch the event LWP. */
2635  *orig_lp = event_lp;
2636  }
2637 }
2638 
2639 /* Decrement the suspend count of an LWP. */
2640 
2641 static int
2642 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2643 {
2644  struct thread_info *thread = (struct thread_info *) entry;
2645  struct lwp_info *lwp = get_thread_lwp (thread);
2646 
2647  /* Ignore EXCEPT. */
2648  if (lwp == except)
2649  return 0;
2650 
2651  lwp_suspended_decr (lwp);
2652  return 0;
2653 }
2654 
2655 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2656  NULL. */
2657 
2658 static void
2660 {
2662 }
2663 
2664 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2665 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2666  void *data);
2667 static int lwp_running (struct inferior_list_entry *entry, void *data);
2668 static ptid_t linux_wait_1 (ptid_t ptid,
2669  struct target_waitstatus *ourstatus,
2670  int target_options);
2671 
2672 /* Stabilize threads (move out of jump pads).
2673 
2674  If a thread is midway collecting a fast tracepoint, we need to
2675  finish the collection and move it out of the jump pad before
2676  reporting the signal.
2677 
2678  This avoids recursion while collecting (when a signal arrives
2679  midway, and the signal handler itself collects), which would trash
2680  the trace buffer. In case the user set a breakpoint in a signal
2681  handler, this avoids the backtrace showing the jump pad, etc..
2682  Most importantly, there are certain things we can't do safely if
2683  threads are stopped in a jump pad (or in its callee's). For
2684  example:
2685 
2686  - starting a new trace run. A thread still collecting the
2687  previous run, could trash the trace buffer when resumed. The trace
2688  buffer control structures would have been reset but the thread had
2689  no way to tell. The thread could even midway memcpy'ing to the
2690  buffer, which would mean that when resumed, it would clobber the
2691  trace buffer that had been set for a new run.
2692 
2693  - we can't rewrite/reuse the jump pads for new tracepoints
2694  safely. Say you do tstart while a thread is stopped midway while
2695  collecting. When the thread is later resumed, it finishes the
2696  collection, and returns to the jump pad, to execute the original
2697  instruction that was under the tracepoint jump at the time the
2698  older run had been started. If the jump pad had been rewritten
2699  since for something else in the new run, the thread would now
2700  execute the wrong / random instructions. */
2701 
2702 static void
2704 {
2705  struct thread_info *saved_thread;
2706  struct thread_info *thread_stuck;
2707 
2708  thread_stuck
2709  = (struct thread_info *) find_inferior (&all_threads,
2711  NULL);
2712  if (thread_stuck != NULL)
2713  {
2714  if (debug_threads)
2715  debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2716  lwpid_of (thread_stuck));
2717  return;
2718  }
2719 
2720  saved_thread = current_thread;
2721 
2722  stabilizing_threads = 1;
2723 
2724  /* Kick 'em all. */
2726 
2727  /* Loop until all are stopped out of the jump pads. */
2728  while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2729  {
2730  struct target_waitstatus ourstatus;
2731  struct lwp_info *lwp;
2732  int wstat;
2733 
2734  /* Note that we go through the full wait even loop. While
2735  moving threads out of jump pad, we need to be able to step
2736  over internal breakpoints and such. */
2737  linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2738 
2739  if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2740  {
2742 
2743  /* Lock it. */
2744  lwp_suspended_inc (lwp);
2745 
2746  if (ourstatus.value.sig != GDB_SIGNAL_0
2747  || current_thread->last_resume_kind == resume_stop)
2748  {
2749  wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2750  enqueue_one_deferred_signal (lwp, &wstat);
2751  }
2752  }
2753  }
2754 
2756 
2757  stabilizing_threads = 0;
2758 
2759  current_thread = saved_thread;
2760 
2761  if (debug_threads)
2762  {
2763  thread_stuck
2764  = (struct thread_info *) find_inferior (&all_threads,
2766  NULL);
2767  if (thread_stuck != NULL)
2768  debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2769  lwpid_of (thread_stuck));
2770  }
2771 }
2772 
2773 static void async_file_mark (void);
2774 
2775 /* Convenience function that is called when the kernel reports an
2776  event that is not passed out to GDB. */
2777 
2778 static ptid_t
2779 ignore_event (struct target_waitstatus *ourstatus)
2780 {
2781  /* If we got an event, there may still be others, as a single
2782  SIGCHLD can indicate more than one child stopped. This forces
2783  another target_wait call. */
2784  async_file_mark ();
2785 
2786  ourstatus->kind = TARGET_WAITKIND_IGNORE;
2787  return null_ptid;
2788 }
2789 
2790 /* Wait for process, returns status. */
2791 
2792 static ptid_t
2794  struct target_waitstatus *ourstatus, int target_options)
2795 {
2796  int w;
2797  struct lwp_info *event_child;
2798  int options;
2799  int pid;
2800  int step_over_finished;
2801  int bp_explains_trap;
2802  int maybe_internal_trap;
2803  int report_to_gdb;
2804  int trace_event;
2805  int in_step_range;
2806 
2807  if (debug_threads)
2808  {
2809  debug_enter ();
2810  debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2811  }
2812 
2813  /* Translate generic target options into linux options. */
2814  options = __WALL;
2815  if (target_options & TARGET_WNOHANG)
2816  options |= WNOHANG;
2817 
2818  bp_explains_trap = 0;
2819  trace_event = 0;
2820  in_step_range = 0;
2821  ourstatus->kind = TARGET_WAITKIND_IGNORE;
2822 
2823  if (ptid_equal (step_over_bkpt, null_ptid))
2824  pid = linux_wait_for_event (ptid, &w, options);
2825  else
2826  {
2827  if (debug_threads)
2828  debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2829  target_pid_to_str (step_over_bkpt));
2830  pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2831  }
2832 
2833  if (pid == 0)
2834  {
2835  gdb_assert (target_options & TARGET_WNOHANG);
2836 
2837  if (debug_threads)
2838  {
2839  debug_printf ("linux_wait_1 ret = null_ptid, "
2840  "TARGET_WAITKIND_IGNORE\n");
2841  debug_exit ();
2842  }
2843 
2844  ourstatus->kind = TARGET_WAITKIND_IGNORE;
2845  return null_ptid;
2846  }
2847  else if (pid == -1)
2848  {
2849  if (debug_threads)
2850  {
2851  debug_printf ("linux_wait_1 ret = null_ptid, "
2852  "TARGET_WAITKIND_NO_RESUMED\n");
2853  debug_exit ();
2854  }
2855 
2856  ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2857  return null_ptid;
2858  }
2859 
2860  event_child = get_thread_lwp (current_thread);
2861 
2862  /* linux_wait_for_event only returns an exit status for the last
2863  child of a process. Report it. */
2864  if (WIFEXITED (w) || WIFSIGNALED (w))
2865  {
2866  if (WIFEXITED (w))
2867  {
2868  ourstatus->kind = TARGET_WAITKIND_EXITED;
2869  ourstatus->value.integer = WEXITSTATUS (w);
2870 
2871  if (debug_threads)
2872  {
2873  debug_printf ("linux_wait_1 ret = %s, exited with "
2874  "retcode %d\n",
2876  WEXITSTATUS (w));
2877  debug_exit ();
2878  }
2879  }
2880  else
2881  {
2882  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2883  ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2884 
2885  if (debug_threads)
2886  {
2887  debug_printf ("linux_wait_1 ret = %s, terminated with "
2888  "signal %d\n",
2890  WTERMSIG (w));
2891  debug_exit ();
2892  }
2893  }
2894 
2895  return ptid_of (current_thread);
2896  }
2897 
2898  /* If step-over executes a breakpoint instruction, it means a
2899  gdb/gdbserver breakpoint had been planted on top of a permanent
2900  breakpoint. The PC has been adjusted by
2901  check_stopped_by_breakpoint to point at the breakpoint address.
2902  Advance the PC manually past the breakpoint, otherwise the
2903  program would keep trapping the permanent breakpoint forever. */
2904  if (!ptid_equal (step_over_bkpt, null_ptid)
2905  && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2906  {
2907  unsigned int increment_pc = the_low_target.breakpoint_len;
2908 
2909  if (debug_threads)
2910  {
2911  debug_printf ("step-over for %s executed software breakpoint\n",
2913  }
2914 
2915  if (increment_pc != 0)
2916  {
2917  struct regcache *regcache
2919 
2920  event_child->stop_pc += increment_pc;
2921  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2922 
2923  if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2924  event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2925  }
2926  }
2927 
2928  /* If this event was not handled before, and is not a SIGTRAP, we
2929  report it. SIGILL and SIGSEGV are also treated as traps in case
2930  a breakpoint is inserted at the current PC. If this target does
2931  not support internal breakpoints at all, we also report the
2932  SIGTRAP without further processing; it's of no concern to us. */
2933  maybe_internal_trap
2934  = (supports_breakpoints ()
2935  && (WSTOPSIG (w) == SIGTRAP
2936  || ((WSTOPSIG (w) == SIGILL
2937  || WSTOPSIG (w) == SIGSEGV)
2938  && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2939 
2940  if (maybe_internal_trap)
2941  {
2942  /* Handle anything that requires bookkeeping before deciding to
2943  report the event or continue waiting. */
2944 
2945  /* First check if we can explain the SIGTRAP with an internal
2946  breakpoint, or if we should possibly report the event to GDB.
2947  Do this before anything that may remove or insert a
2948  breakpoint. */
2949  bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2950 
2951  /* We have a SIGTRAP, possibly a step-over dance has just
2952  finished. If so, tweak the state machine accordingly,
2953  reinsert breakpoints and delete any reinsert (software
2954  single-step) breakpoints. */
2955  step_over_finished = finish_step_over (event_child);
2956 
2957  /* Now invoke the callbacks of any internal breakpoints there. */
2958  check_breakpoints (event_child->stop_pc);
2959 
2960  /* Handle tracepoint data collecting. This may overflow the
2961  trace buffer, and cause a tracing stop, removing
2962  breakpoints. */
2963  trace_event = handle_tracepoints (event_child);
2964 
2965  if (bp_explains_trap)
2966  {
2967  /* If we stepped or ran into an internal breakpoint, we've
2968  already handled it. So next time we resume (from this
2969  PC), we should step over it. */
2970  if (debug_threads)
2971  debug_printf ("Hit a gdbserver breakpoint.\n");
2972 
2973  if (breakpoint_here (event_child->stop_pc))
2974  event_child->need_step_over = 1;
2975  }
2976  }
2977  else
2978  {
2979  /* We have some other signal, possibly a step-over dance was in
2980  progress, and it should be cancelled too. */
2981  step_over_finished = finish_step_over (event_child);
2982  }
2983 
2984  /* We have all the data we need. Either report the event to GDB, or
2985  resume threads and keep waiting for more. */
2986 
2987  /* If we're collecting a fast tracepoint, finish the collection and
2988  move out of the jump pad before delivering a signal. See
2989  linux_stabilize_threads. */
2990 
2991  if (WIFSTOPPED (w)
2992  && WSTOPSIG (w) != SIGTRAP
2994  && agent_loaded_p ())
2995  {
2996  if (debug_threads)
2997  debug_printf ("Got signal %d for LWP %ld. Check if we need "
2998  "to defer or adjust it.\n",
3000 
3001  /* Allow debugging the jump pad itself. */
3002  if (current_thread->last_resume_kind != resume_step
3003  && maybe_move_out_of_jump_pad (event_child, &w))
3004  {
3005  enqueue_one_deferred_signal (event_child, &w);
3006 
3007  if (debug_threads)
3008  debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3010 
3011  linux_resume_one_lwp (event_child, 0, 0, NULL);
3012 
3013  return ignore_event (ourstatus);
3014  }
3015  }
3016 
3017  if (event_child->collecting_fast_tracepoint)
3018  {
3019  if (debug_threads)
3020  debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3021  "Check if we're already there.\n",
3023  event_child->collecting_fast_tracepoint);
3024 
3025  trace_event = 1;
3026 
3027  event_child->collecting_fast_tracepoint
3028  = linux_fast_tracepoint_collecting (event_child, NULL);
3029 
3030  if (event_child->collecting_fast_tracepoint != 1)
3031  {
3032  /* No longer need this breakpoint. */
3033  if (event_child->exit_jump_pad_bkpt != NULL)
3034  {
3035  if (debug_threads)
3036  debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3037  "stopping all threads momentarily.\n");
3038 
3039  /* Other running threads could hit this breakpoint.
3040  We don't handle moribund locations like GDB does,
3041  instead we always pause all threads when removing
3042  breakpoints, so that any step-over or
3043  decr_pc_after_break adjustment is always taken
3044  care of while the breakpoint is still
3045  inserted. */
3046  stop_all_lwps (1, event_child);
3047 
3048  delete_breakpoint (event_child->exit_jump_pad_bkpt);
3049  event_child->exit_jump_pad_bkpt = NULL;
3050 
3051  unstop_all_lwps (1, event_child);
3052 
3053  gdb_assert (event_child->suspended >= 0);
3054  }
3055  }
3056 
3057  if (event_child->collecting_fast_tracepoint == 0)
3058  {
3059  if (debug_threads)
3060  debug_printf ("fast tracepoint finished "
3061  "collecting successfully.\n");
3062 
3063  /* We may have a deferred signal to report. */
3064  if (dequeue_one_deferred_signal (event_child, &w))
3065  {
3066  if (debug_threads)
3067  debug_printf ("dequeued one signal.\n");
3068  }
3069  else
3070  {
3071  if (debug_threads)
3072  debug_printf ("no deferred signals.\n");
3073 
3074  if (stabilizing_threads)
3075  {
3076  ourstatus->kind = TARGET_WAITKIND_STOPPED;
3077  ourstatus->value.sig = GDB_SIGNAL_0;
3078 
3079  if (debug_threads)
3080  {
3081  debug_printf ("linux_wait_1 ret = %s, stopped "
3082  "while stabilizing threads\n",
3084  debug_exit ();
3085  }
3086 
3087  return ptid_of (current_thread);
3088  }
3089  }
3090  }
3091  }
3092 
3093  /* Check whether GDB would be interested in this event. */
3094 
3095  /* If GDB is not interested in this signal, don't stop other
3096  threads, and don't report it to GDB. Just resume the inferior
3097  right away. We do this for threading-related signals as well as
3098  any that GDB specifically requested we ignore. But never ignore
3099  SIGSTOP if we sent it ourselves, and do not ignore signals when
3100  stepping - they may require special handling to skip the signal
3101  handler. Also never ignore signals that could be caused by a
3102  breakpoint. */
3103  /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3104  thread library? */
3105  if (WIFSTOPPED (w)
3106  && current_thread->last_resume_kind != resume_step
3107  && (
3108 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3109  (current_process ()->priv->thread_db != NULL
3110  && (WSTOPSIG (w) == __SIGRTMIN
3111  || WSTOPSIG (w) == __SIGRTMIN + 1))
3112  ||
3113 #endif
3115  && !(WSTOPSIG (w) == SIGSTOP
3116  && current_thread->last_resume_kind == resume_stop)
3118  {
3119  siginfo_t info, *info_p;
3120 
3121  if (debug_threads)
3122  debug_printf ("Ignored signal %d for LWP %ld.\n",
3124 
3125  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3126  (PTRACE_TYPE_ARG3) 0, &info) == 0)
3127  info_p = &info;
3128  else
3129  info_p = NULL;
3130 
3131  if (step_over_finished)
3132  {
3133  /* We cancelled this thread's step-over above. We still
3134  need to unsuspend all other LWPs, and set them back
3135  running again while the signal handler runs. */
3136  unsuspend_all_lwps (event_child);
3137 
3138  /* Enqueue the pending signal info so that proceed_all_lwps
3139  doesn't lose it. */
3140  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3141 
3142  proceed_all_lwps ();
3143  }
3144  else
3145  {
3146  linux_resume_one_lwp (event_child, event_child->stepping,
3147  WSTOPSIG (w), info_p);
3148  }
3149  return ignore_event (ourstatus);
3150  }
3151 
3152  /* Note that all addresses are always "out of the step range" when
3153  there's no range to begin with. */
3154  in_step_range = lwp_in_step_range (event_child);
3155 
3156  /* If GDB wanted this thread to single step, and the thread is out
3157  of the step range, we always want to report the SIGTRAP, and let
3158  GDB handle it. Watchpoints should always be reported. So should
3159  signals we can't explain. A SIGTRAP we can't explain could be a
3160  GDB breakpoint --- we may or not support Z0 breakpoints. If we
3161  do, we're be able to handle GDB breakpoints on top of internal
3162  breakpoints, by handling the internal breakpoint and still
3163  reporting the event to GDB. If we don't, we're out of luck, GDB
3164  won't see the breakpoint hit. If we see a single-step event but
3165  the thread should be continuing, don't pass the trap to gdb.
3166  That indicates that we had previously finished a single-step but
3167  left the single-step pending -- see
3168  complete_ongoing_step_over. */
3169  report_to_gdb = (!maybe_internal_trap
3170  || (current_thread->last_resume_kind == resume_step
3171  && !in_step_range)
3172  || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3173  || (!in_step_range
3174  && !bp_explains_trap
3175  && !trace_event
3176  && !step_over_finished
3177  && !(current_thread->last_resume_kind == resume_continue
3178  && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3179  || (gdb_breakpoint_here (event_child->stop_pc)
3180  && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3181  && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3182  || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3183 
3184  run_breakpoint_commands (event_child->stop_pc);
3185 
3186  /* We found no reason GDB would want us to stop. We either hit one
3187  of our own breakpoints, or finished an internal step GDB
3188  shouldn't know about. */
3189  if (!report_to_gdb)
3190  {
3191  if (debug_threads)
3192  {
3193  if (bp_explains_trap)
3194  debug_printf ("Hit a gdbserver breakpoint.\n");
3195  if (step_over_finished)
3196  debug_printf ("Step-over finished.\n");
3197  if (trace_event)
3198  debug_printf ("Tracepoint event.\n");
3199  if (lwp_in_step_range (event_child))
3200  debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3201  paddress (event_child->stop_pc),
3202  paddress (event_child->step_range_start),
3203  paddress (event_child->step_range_end));
3204  }
3205 
3206  /* We're not reporting this breakpoint to GDB, so apply the
3207  decr_pc_after_break adjustment to the inferior's regcache
3208  ourselves. */
3209 
3210  if (the_low_target.set_pc != NULL)
3211  {
3212  struct regcache *regcache
3214  (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3215  }
3216 
3217  /* We may have finished stepping over a breakpoint. If so,
3218  we've stopped and suspended all LWPs momentarily except the
3219  stepping one. This is where we resume them all again. We're
3220  going to keep waiting, so use proceed, which handles stepping
3221  over the next breakpoint. */
3222  if (debug_threads)
3223  debug_printf ("proceeding all threads.\n");
3224 
3225  if (step_over_finished)
3226  unsuspend_all_lwps (event_child);
3227 
3228  proceed_all_lwps ();
3229  return ignore_event (ourstatus);
3230  }
3231 
3232  if (debug_threads)
3233  {
3234  if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3235  {
3236  char *str;
3237 
3238  str = target_waitstatus_to_string (&event_child->waitstatus);
3239  debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3240  lwpid_of (get_lwp_thread (event_child)), str);
3241  xfree (str);
3242  }
3243  if (current_thread->last_resume_kind == resume_step)
3244  {
3245  if (event_child->step_range_start == event_child->step_range_end)
3246  debug_printf ("GDB wanted to single-step, reporting event.\n");
3247  else if (!lwp_in_step_range (event_child))
3248  debug_printf ("Out of step range, reporting event.\n");
3249  }
3250  if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3251  debug_printf ("Stopped by watchpoint.\n");
3252  else if (gdb_breakpoint_here (event_child->stop_pc))
3253  debug_printf ("Stopped by GDB breakpoint.\n");
3254  if (debug_threads)
3255  debug_printf ("Hit a non-gdbserver trap event.\n");
3256  }
3257 
3258  /* Alright, we're going to report a stop. */
3259 
3260  if (!stabilizing_threads)
3261  {
3262  /* In all-stop, stop all threads. */
3263  if (!non_stop)
3264  stop_all_lwps (0, NULL);
3265 
3266  /* If we're not waiting for a specific LWP, choose an event LWP
3267  from among those that have had events. Giving equal priority
3268  to all LWPs that have had events helps prevent
3269  starvation. */
3270  if (ptid_equal (ptid, minus_one_ptid))
3271  {
3272  event_child->status_pending_p = 1;
3273  event_child->status_pending = w;
3274 
3275  select_event_lwp (&event_child);
3276 
3277  /* current_thread and event_child must stay in sync. */
3278  current_thread = get_lwp_thread (event_child);
3279 
3280  event_child->status_pending_p = 0;
3281  w = event_child->status_pending;
3282  }
3283 
3284  if (step_over_finished)
3285  {
3286  if (!non_stop)
3287  {
3288  /* If we were doing a step-over, all other threads but
3289  the stepping one had been paused in start_step_over,
3290  with their suspend counts incremented. We don't want
3291  to do a full unstop/unpause, because we're in
3292  all-stop mode (so we want threads stopped), but we
3293  still need to unsuspend the other threads, to
3294  decrement their `suspended' count back. */
3295  unsuspend_all_lwps (event_child);
3296  }
3297  else
3298  {
3299  /* If we just finished a step-over, then all threads had
3300  been momentarily paused. In all-stop, that's fine,
3301  we want threads stopped by now anyway. In non-stop,
3302  we need to re-resume threads that GDB wanted to be
3303  running. */
3304  unstop_all_lwps (1, event_child);
3305  }
3306  }
3307 
3308  /* Stabilize threads (move out of jump pads). */
3309  if (!non_stop)
3310  stabilize_threads ();
3311  }
3312  else
3313  {
3314  /* If we just finished a step-over, then all threads had been
3315  momentarily paused. In all-stop, that's fine, we want
3316  threads stopped by now anyway. In non-stop, we need to
3317  re-resume threads that GDB wanted to be running. */
3318  if (step_over_finished)
3319  unstop_all_lwps (1, event_child);
3320  }
3321 
3322  if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3323  {
3324  /* If the reported event is an exit, fork, vfork or exec, let
3325  GDB know. */
3326  *ourstatus = event_child->waitstatus;
3327  /* Clear the event lwp's waitstatus since we handled it already. */
3328  event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3329  }
3330  else
3331  ourstatus->kind = TARGET_WAITKIND_STOPPED;
3332 
3333  /* Now that we've selected our final event LWP, un-adjust its PC if
3334  it was a software breakpoint, and the client doesn't know we can
3335  adjust the breakpoint ourselves. */
3336  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3337  && !swbreak_feature)
3338  {
3339  int decr_pc = the_low_target.decr_pc_after_break;
3340 
3341  if (decr_pc != 0)
3342  {
3343  struct regcache *regcache
3345  (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3346  }
3347  }
3348 
3349  if (current_thread->last_resume_kind == resume_stop
3350  && WSTOPSIG (w) == SIGSTOP)
3351  {
3352  /* A thread that has been requested to stop by GDB with vCont;t,
3353  and it stopped cleanly, so report as SIG0. The use of
3354  SIGSTOP is an implementation detail. */
3355  ourstatus->value.sig = GDB_SIGNAL_0;
3356  }
3357  else if (current_thread->last_resume_kind == resume_stop
3358  && WSTOPSIG (w) != SIGSTOP)
3359  {
3360  /* A thread that has been requested to stop by GDB with vCont;t,
3361  but, it stopped for other reasons. */
3362  ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3363  }
3364  else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3365  {
3366  ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3367  }
3368 
3369  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3370 
3371  if (debug_threads)
3372  {
3373  debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3375  ourstatus->kind, ourstatus->value.sig);
3376  debug_exit ();
3377  }
3378 
3379  return ptid_of (current_thread);
3380 }
3381 
3382 /* Get rid of any pending event in the pipe. */
3383 static void
3385 {
3386  int ret;
3387  char buf;
3388 
3389  do
3390  ret = read (linux_event_pipe[0], &buf, 1);
3391  while (ret >= 0 || (ret == -1 && errno == EINTR));
3392 }
3393 
3394 /* Put something in the pipe, so the event loop wakes up. */
3395 static void
3397 {
3398  int ret;
3399 
3400  async_file_flush ();
3401 
3402  do
3403  ret = write (linux_event_pipe[1], "+", 1);
3404  while (ret == 0 || (ret == -1 && errno == EINTR));
3405 
3406  /* Ignore EAGAIN. If the pipe is full, the event loop will already
3407  be awakened anyway. */
3408 }
3409 
3410 static ptid_t
3412  struct target_waitstatus *ourstatus, int target_options)
3413 {
3414  ptid_t event_ptid;
3415 
3416  /* Flush the async file first. */
3417  if (target_is_async_p ())
3418  async_file_flush ();
3419 
3420  do
3421  {
3422  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3423  }
3424  while ((target_options & TARGET_WNOHANG) == 0
3425  && ptid_equal (event_ptid, null_ptid)
3426  && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3427 
3428  /* If at least one stop was reported, there may be more. A single
3429  SIGCHLD can signal more than one child stop. */
3430  if (target_is_async_p ()
3431  && (target_options & TARGET_WNOHANG) != 0
3432  && !ptid_equal (event_ptid, null_ptid))
3433  async_file_mark ();
3434 
3435  return event_ptid;
3436 }
3437 
3438 /* Send a signal to an LWP. */
3439 
3440 static int
3441 kill_lwp (unsigned long lwpid, int signo)
3442 {
3443  /* Use tkill, if possible, in case we are using nptl threads. If tkill
3444  fails, then we are not using nptl threads and we should be using kill. */
3445 
3446 #ifdef __NR_tkill
3447  {
3448  static int tkill_failed;
3449 
3450  if (!tkill_failed)
3451  {
3452  int ret;
3453 
3454  errno = 0;
3455  ret = syscall (__NR_tkill, lwpid, signo);
3456  if (errno != ENOSYS)
3457  return ret;
3458  tkill_failed = 1;
3459  }
3460  }
3461 #endif
3462 
3463  return kill (lwpid, signo);
3464 }
3465 
3466 void
3468 {
3469  send_sigstop (lwp);
3470 }
3471 
3472 static void
3473 send_sigstop (struct lwp_info *lwp)
3474 {
3475  int pid;
3476 
3477  pid = lwpid_of (get_lwp_thread (lwp));
3478 
3479  /* If we already have a pending stop signal for this process, don't
3480  send another. */
3481  if (lwp->stop_expected)
3482  {
3483  if (debug_threads)
3484  debug_printf ("Have pending sigstop for lwp %d\n", pid);
3485 
3486  return;
3487  }
3488 
3489  if (debug_threads)
3490  debug_printf ("Sending sigstop to lwp %d\n", pid);
3491 
3492  lwp->stop_expected = 1;
3493  kill_lwp (pid, SIGSTOP);
3494 }
3495 
3496 static int
3497 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3498 {
3499  struct thread_info *thread = (struct thread_info *) entry;
3500  struct lwp_info *lwp = get_thread_lwp (thread);
3501 
3502  /* Ignore EXCEPT. */
3503  if (lwp == except)
3504  return 0;
3505 
3506  if (lwp->stopped)
3507  return 0;
3508 
3509  send_sigstop (lwp);
3510  return 0;
3511 }
3512 
3513 /* Increment the suspend count of an LWP, and stop it, if not stopped
3514  yet. */
3515 static int
3517  void *except)
3518 {
3519  struct thread_info *thread = (struct thread_info *) entry;
3520  struct lwp_info *lwp = get_thread_lwp (thread);
3521 
3522  /* Ignore EXCEPT. */
3523  if (lwp == except)
3524  return 0;
3525 
3526  lwp_suspended_inc (lwp);
3527 
3528  return send_sigstop_callback (entry, except);
3529 }
3530 
3531 static void
3532 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3533 {
3534  /* Store the exit status for later. */
3535  lwp->status_pending_p = 1;
3536  lwp->status_pending = wstat;
3537 
3538  /* Store in waitstatus as well, as there's nothing else to process
3539  for this event. */
3540  if (WIFEXITED (wstat))
3541  {
3542  lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3543  lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3544  }
3545  else if (WIFSIGNALED (wstat))
3546  {
3547  lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3548  lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3549  }
3550 
3551  /* Prevent trying to stop it. */
3552  lwp->stopped = 1;
3553 
3554  /* No further stops are expected from a dead lwp. */
3555  lwp->stop_expected = 0;
3556 }
3557 
3558 /* Return true if LWP has exited already, and has a pending exit event
3559  to report to GDB. */
3560 
3561 static int
3563 {
3564  return (lwp->status_pending_p
3565  && (WIFEXITED (lwp->status_pending)
3566  || WIFSIGNALED (lwp->status_pending)));
3567 }
3568 
3569 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3570 
3571 static void
3573 {
3574  struct thread_info *saved_thread;
3575  ptid_t saved_tid;
3576  int wstat;
3577  int ret;
3578 
3579  saved_thread = current_thread;
3580  if (saved_thread != NULL)
3581  saved_tid = saved_thread->entry.id;
3582  else
3583  saved_tid = null_ptid; /* avoid bogus unused warning */
3584 
3585  if (debug_threads)
3586  debug_printf ("wait_for_sigstop: pulling events\n");
3587 
3588  /* Passing NULL_PTID as filter indicates we want all events to be
3589  left pending. Eventually this returns when there are no
3590  unwaited-for children left. */
3592  &wstat, __WALL);
3593  gdb_assert (ret == -1);
3594 
3595  if (saved_thread == NULL || linux_thread_alive (saved_tid))
3596  current_thread = saved_thread;
3597  else
3598  {
3599  if (debug_threads)
3600  debug_printf ("Previously current thread died.\n");
3601 
3602  if (non_stop)
3603  {
3604  /* We can't change the current inferior behind GDB's back,
3605  otherwise, a subsequent command may apply to the wrong
3606  process. */
3607  current_thread = NULL;
3608  }
3609  else
3610  {
3611  /* Set a valid thread as current. */
3612  set_desired_thread (0);
3613  }
3614  }
3615 }
3616 
3617 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3618  move it out, because we need to report the stop event to GDB. For
3619  example, if the user puts a breakpoint in the jump pad, it's
3620  because she wants to debug it. */
3621 
3622 static int
3624 {
3625  struct thread_info *thread = (struct thread_info *) entry;
3626  struct lwp_info *lwp = get_thread_lwp (thread);
3627 
3628  if (lwp->suspended != 0)
3629  {
3630  internal_error (__FILE__, __LINE__,
3631  "LWP %ld is suspended, suspended=%d\n",
3632  lwpid_of (thread), lwp->suspended);
3633  }
3634  gdb_assert (lwp->stopped);
3635 
3636  /* Allow debugging the jump pad, gdb_collect, etc.. */
3637  return (supports_fast_tracepoints ()
3638  && agent_loaded_p ()
3639  && (gdb_breakpoint_here (lwp->stop_pc)
3640  || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3641  || thread->last_resume_kind == resume_step)
3642  && linux_fast_tracepoint_collecting (lwp, NULL));
3643 }
3644 
3645 static void
3647 {
3648  struct thread_info *thread = (struct thread_info *) entry;
3649  struct thread_info *saved_thread;
3650  struct lwp_info *lwp = get_thread_lwp (thread);
3651  int *wstat;
3652 
3653  if (lwp->suspended != 0)
3654  {
3655  internal_error (__FILE__, __LINE__,
3656  "LWP %ld is suspended, suspended=%d\n",
3657  lwpid_of (thread), lwp->suspended);
3658  }
3659  gdb_assert (lwp->stopped);
3660 
3661  /* For gdb_breakpoint_here. */
3662  saved_thread = current_thread;
3664 
3665  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3666 
3667  /* Allow debugging the jump pad, gdb_collect, etc. */
3668  if (!gdb_breakpoint_here (lwp->stop_pc)
3669  && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3670  && thread->last_resume_kind != resume_step
3671  && maybe_move_out_of_jump_pad (lwp, wstat))
3672  {
3673  if (debug_threads)
3674  debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3675  lwpid_of (thread));
3676 
3677  if (wstat)
3678  {
3679  lwp->status_pending_p = 0;
3680  enqueue_one_deferred_signal (lwp, wstat);
3681 
3682  if (debug_threads)
3683  debug_printf ("Signal %d for LWP %ld deferred "
3684  "(in jump pad)\n",
3685  WSTOPSIG (*wstat), lwpid_of (thread));
3686  }
3687 
3688  linux_resume_one_lwp (lwp, 0, 0, NULL);
3689  }
3690  else
3691  lwp_suspended_inc (lwp);
3692 
3693  current_thread = saved_thread;
3694 }
3695 
3696 static int
3697 lwp_running (struct inferior_list_entry *entry, void *data)
3698 {
3699  struct thread_info *thread = (struct thread_info *) entry;
3700  struct lwp_info *lwp = get_thread_lwp (thread);
3701 
3702  if (lwp_is_marked_dead (lwp))
3703  return 0;
3704  if (lwp->stopped)
3705  return 0;
3706  return 1;
3707 }
3708 
3709 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3710  If SUSPEND, then also increase the suspend count of every LWP,
3711  except EXCEPT. */
3712 
3713 static void
3714 stop_all_lwps (int suspend, struct lwp_info *except)
3715 {
3716  /* Should not be called recursively. */
3717  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3718 
3719  if (debug_threads)
3720  {
3721  debug_enter ();
3722  debug_printf ("stop_all_lwps (%s, except=%s)\n",
3723  suspend ? "stop-and-suspend" : "stop",
3724  except != NULL
3726  : "none");
3727  }
3728 
3729  stopping_threads = (suspend
3731  : STOPPING_THREADS);
3732 
3733  if (suspend)
3735  else
3737  wait_for_sigstop ();
3738  stopping_threads = NOT_STOPPING_THREADS;
3739 
3740  if (debug_threads)
3741  {
3742  debug_printf ("stop_all_lwps done, setting stopping_threads "
3743  "back to !stopping\n");
3744  debug_exit ();
3745  }
3746 }
3747 
3748 /* Enqueue one signal in the chain of signals which need to be
3749  delivered to this process on next resume. */
3750 
3751 static void
3752 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3753 {
3754  struct pending_signals *p_sig;
3755 
3756  p_sig = xmalloc (sizeof (*p_sig));
3757  p_sig->prev = lwp->pending_signals;
3758  p_sig->signal = signal;
3759  if (info == NULL)
3760  memset (&p_sig->info, 0, sizeof (siginfo_t));
3761  else
3762  memcpy (&p_sig->info, info, sizeof (siginfo_t));
3763  lwp->pending_signals = p_sig;
3764 }
3765 
3766 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3767  SIGNAL is nonzero, give it that signal. */
3768 
3769 static void
3771  int step, int signal, siginfo_t *info)
3772 {
3773  struct thread_info *thread = get_lwp_thread (lwp);
3774  struct thread_info *saved_thread;
3775  int fast_tp_collecting;
3776 
3777  if (lwp->stopped == 0)
3778  return;
3779 
3780  fast_tp_collecting = lwp->collecting_fast_tracepoint;
3781 
3782  gdb_assert (!stabilizing_threads || fast_tp_collecting);
3783 
3784  /* Cancel actions that rely on GDB not changing the PC (e.g., the
3785  user used the "jump" command, or "set $pc = foo"). */
3786  if (lwp->stop_pc != get_pc (lwp))
3787  {
3788  /* Collecting 'while-stepping' actions doesn't make sense
3789  anymore. */
3791  }
3792 
3793  /* If we have pending signals or status, and a new signal, enqueue the
3794  signal. Also enqueue the signal if we are waiting to reinsert a
3795  breakpoint; it will be picked up again below. */
3796  if (signal != 0
3797  && (lwp->status_pending_p
3798  || lwp->pending_signals != NULL
3799  || lwp->bp_reinsert != 0
3800  || fast_tp_collecting))
3801  {
3802  struct pending_signals *p_sig;
3803  p_sig = xmalloc (sizeof (*p_sig));
3804  p_sig->prev = lwp->pending_signals;
3805  p_sig->signal = signal;
3806  if (info == NULL)
3807  memset (&p_sig->info, 0, sizeof (siginfo_t));
3808  else
3809  memcpy (&p_sig->info, info, sizeof (siginfo_t));
3810  lwp->pending_signals = p_sig;
3811  }
3812 
3813  if (lwp->status_pending_p)
3814  {
3815  if (debug_threads)
3816  debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3817  " has pending status\n",
3818  lwpid_of (thread), step ? "step" : "continue", signal,
3819  lwp->stop_expected ? "expected" : "not expected");
3820  return;
3821  }
3822 
3823  saved_thread = current_thread;
3824  current_thread = thread;
3825 
3826  if (debug_threads)
3827  debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3828  lwpid_of (thread), step ? "step" : "continue", signal,
3829  lwp->stop_expected ? "expected" : "not expected");
3830 
3831  /* This bit needs some thinking about. If we get a signal that
3832  we must report while a single-step reinsert is still pending,
3833  we often end up resuming the thread. It might be better to
3834  (ew) allow a stack of pending events; then we could be sure that
3835  the reinsert happened right away and not lose any signals.
3836 
3837  Making this stack would also shrink the window in which breakpoints are
3838  uninserted (see comment in linux_wait_for_lwp) but not enough for
3839  complete correctness, so it won't solve that problem. It may be
3840  worthwhile just to solve this one, however. */
3841  if (lwp->bp_reinsert != 0)
3842  {
3843  if (debug_threads)
3844  debug_printf (" pending reinsert at 0x%s\n",
3845  paddress (lwp->bp_reinsert));
3846 
3847  if (can_hardware_single_step ())
3848  {
3849  if (fast_tp_collecting == 0)
3850  {
3851  if (step == 0)
3852  fprintf (stderr, "BAD - reinserting but not stepping.\n");
3853  if (lwp->suspended)
3854  fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3855  lwp->suspended);
3856  }
3857 
3858  step = 1;
3859  }
3860 
3861  /* Postpone any pending signal. It was enqueued above. */
3862  signal = 0;
3863  }
3864 
3865  if (fast_tp_collecting == 1)
3866  {
3867  if (debug_threads)
3868  debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3869  " (exit-jump-pad-bkpt)\n",
3870  lwpid_of (thread));
3871 
3872  /* Postpone any pending signal. It was enqueued above. */
3873  signal = 0;
3874  }
3875  else if (fast_tp_collecting == 2)
3876  {
3877  if (debug_threads)
3878  debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3879  " single-stepping\n",
3880  lwpid_of (thread));
3881 
3882  if (can_hardware_single_step ())
3883  step = 1;
3884  else
3885  {
3886  internal_error (__FILE__, __LINE__,
3887  "moving out of jump pad single-stepping"
3888  " not implemented on this target");
3889  }
3890 
3891  /* Postpone any pending signal. It was enqueued above. */
3892  signal = 0;
3893  }
3894 
3895  /* If we have while-stepping actions in this thread set it stepping.
3896  If we have a signal to deliver, it may or may not be set to
3897  SIG_IGN, we don't know. Assume so, and allow collecting
3898  while-stepping into a signal handler. A possible smart thing to
3899  do would be to set an internal breakpoint at the signal return
3900  address, continue, and carry on catching this while-stepping
3901  action only when that breakpoint is hit. A future
3902  enhancement. */
3903  if (thread->while_stepping != NULL
3905  {
3906  if (debug_threads)
3907  debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3908  lwpid_of (thread));
3909  step = 1;
3910  }
3911 
3912  if (the_low_target.get_pc != NULL)
3913  {
3914  struct regcache *regcache = get_thread_regcache (current_thread, 1);
3915 
3916  lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3917 
3918  if (debug_threads)
3919  {
3920  debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3921  (long) lwp->stop_pc);
3922  }
3923  }
3924 
3925  /* If we have pending signals, consume one unless we are trying to
3926  reinsert a breakpoint or we're trying to finish a fast tracepoint
3927  collect. */
3928  if (lwp->pending_signals != NULL
3929  && lwp->bp_reinsert == 0
3930  && fast_tp_collecting == 0)
3931  {
3932  struct pending_signals **p_sig;
3933 
3934  p_sig = &lwp->pending_signals;
3935  while ((*p_sig)->prev != NULL)
3936  p_sig = &(*p_sig)->prev;
3937 
3938  signal = (*p_sig)->signal;
3939  if ((*p_sig)->info.si_signo != 0)
3940  ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3941  &(*p_sig)->info);
3942 
3943  free (*p_sig);
3944  *p_sig = NULL;
3945  }
3946 
3947  if (the_low_target.prepare_to_resume != NULL)
3949 
3950  regcache_invalidate_thread (thread);
3951  errno = 0;
3952  lwp->stepping = step;
3953  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3954  (PTRACE_TYPE_ARG3) 0,
3955  /* Coerce to a uintptr_t first to avoid potential gcc warning
3956  of coercing an 8 byte integer to a 4 byte pointer. */
3957  (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3958 
3959  current_thread = saved_thread;
3960  if (errno)
3961  perror_with_name ("resuming thread");
3962 
3963  /* Successfully resumed. Clear state that no longer makes sense,
3964  and mark the LWP as running. Must not do this before resuming
3965  otherwise if that fails other code will be confused. E.g., we'd
3966  later try to stop the LWP and hang forever waiting for a stop
3967  status. Note that we must not throw after this is cleared,
3968  otherwise handle_zombie_lwp_error would get confused. */
3969  lwp->stopped = 0;
3970  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3971 }
3972 
3973 /* Called when we try to resume a stopped LWP and that errors out. If
3974  the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3975  or about to become), discard the error, clear any pending status
3976  the LWP may have, and return true (we'll collect the exit status
3977  soon enough). Otherwise, return false. */
3978 
3979 static int
3981 {
3982  struct thread_info *thread = get_lwp_thread (lp);
3983 
3984  /* If we get an error after resuming the LWP successfully, we'd
3985  confuse !T state for the LWP being gone. */
3986  gdb_assert (lp->stopped);
3987 
3988  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3989  because even if ptrace failed with ESRCH, the tracee may be "not
3990  yet fully dead", but already refusing ptrace requests. In that
3991  case the tracee has 'R (Running)' state for a little bit
3992  (observed in Linux 3.18). See also the note on ESRCH in the
3993  ptrace(2) man page. Instead, check whether the LWP has any state
3994  other than ptrace-stopped. */
3995 
3996  /* Don't assume anything if /proc/PID/status can't be read. */
3998  {
3999  lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4000  lp->status_pending_p = 0;
4001  return 1;
4002  }
4003  return 0;
4004 }
4005 
4006 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4007  disappears while we try to resume it. */
4008 
4009 static void
4011  int step, int signal, siginfo_t *info)
4012 {
4013  TRY
4014  {
4015  linux_resume_one_lwp_throw (lwp, step, signal, info);
4016  }
4017  CATCH (ex, RETURN_MASK_ERROR)
4018  {
4019  if (!check_ptrace_stopped_lwp_gone (lwp))
4020  throw_exception (ex);
4021  }
4022  END_CATCH
4023 }
4024 
4026 {
4028  size_t n;
4029 };
4030 
4031 /* This function is called once per thread via find_inferior.
4032  ARG is a pointer to a thread_resume_array struct.
4033  We look up the thread specified by ENTRY in ARG, and mark the thread
4034  with a pointer to the appropriate resume request.
4035 
4036  This algorithm is O(threads * resume elements), but resume elements
4037  is small (and will remain small at least until GDB supports thread
4038  suspension). */
4039 
4040 static int
4042 {
4043  struct thread_info *thread = (struct thread_info *) entry;
4044  struct lwp_info *lwp = get_thread_lwp (thread);
4045  int ndx;
4046  struct thread_resume_array *r;
4047 
4048  r = arg;
4049 
4050  for (ndx = 0; ndx < r->n; ndx++)
4051  {
4052  ptid_t ptid = r->resume[ndx].thread;
4053  if (ptid_equal (ptid, minus_one_ptid)
4054  || ptid_equal (ptid, entry->id)
4055  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4056  of PID'. */
4057  || (ptid_get_pid (ptid) == pid_of (thread)
4058  && (ptid_is_pid (ptid)
4059  || ptid_get_lwp (ptid) == -1)))
4060  {
4061  if (r->resume[ndx].kind == resume_stop
4062  && thread->last_resume_kind == resume_stop)
4063  {
4064  if (debug_threads)
4065  debug_printf ("already %s LWP %ld at GDB's request\n",
4066  (thread->last_status.kind
4067  == TARGET_WAITKIND_STOPPED)
4068  ? "stopped"
4069  : "stopping",
4070  lwpid_of (thread));
4071 
4072  continue;
4073  }
4074 
4075  lwp->resume = &r->resume[ndx];
4076  thread->last_resume_kind = lwp->resume->kind;
4077 
4079  lwp->step_range_end = lwp->resume->step_range_end;
4080 
4081  /* If we had a deferred signal to report, dequeue one now.
4082  This can happen if LWP gets more than one signal while
4083  trying to get out of a jump pad. */
4084  if (lwp->stopped
4085  && !lwp->status_pending_p
4087  {
4088  lwp->status_pending_p = 1;
4089 
4090  if (debug_threads)
4091  debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4092  "leaving status pending.\n",
4093  WSTOPSIG (lwp->status_pending),
4094  lwpid_of (thread));
4095  }
4096 
4097  return 0;
4098  }
4099  }
4100 
4101  /* No resume action for this thread. */
4102  lwp->resume = NULL;
4103 
4104  return 0;
4105 }
4106 
4107 /* find_inferior callback for linux_resume.
4108  Set *FLAG_P if this lwp has an interesting status pending. */
4109 
4110 static int
4111 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4112 {
4113  struct thread_info *thread = (struct thread_info *) entry;
4114  struct lwp_info *lwp = get_thread_lwp (thread);
4115 
4116  /* LWPs which will not be resumed are not interesting, because
4117  we might not wait for them next time through linux_wait. */
4118  if (lwp->resume == NULL)
4119  return 0;
4120 
4121  if (thread_still_has_status_pending_p (thread))
4122  * (int *) flag_p = 1;
4123 
4124  return 0;
4125 }
4126 
4127 /* Return 1 if this lwp that GDB wants running is stopped at an
4128  internal breakpoint that we need to step over. It assumes that any
4129  required STOP_PC adjustment has already been propagated to the
4130  inferior's regcache. */
4131 
4132 static int
4133 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4134 {
4135  struct thread_info *thread = (struct thread_info *) entry;
4136  struct lwp_info *lwp = get_thread_lwp (thread);
4137  struct thread_info *saved_thread;
4138  CORE_ADDR pc;
4139 
4140  /* LWPs which will not be resumed are not interesting, because we
4141  might not wait for them next time through linux_wait. */
4142 
4143  if (!lwp->stopped)
4144  {
4145  if (debug_threads)
4146  debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4147  lwpid_of (thread));
4148  return 0;
4149  }
4150 
4151  if (thread->last_resume_kind == resume_stop)
4152  {
4153  if (debug_threads)
4154  debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4155  " stopped\n",
4156  lwpid_of (thread));
4157  return 0;
4158  }
4159 
4160  gdb_assert (lwp->suspended >= 0);
4161 
4162  if (lwp->suspended)
4163  {
4164  if (debug_threads)
4165  debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4166  lwpid_of (thread));
4167  return 0;
4168  }
4169 
4170  if (!lwp->need_step_over)
4171  {
4172  if (debug_threads)
4173  debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4174  }
4175 
4176  if (lwp->status_pending_p)
4177  {
4178  if (debug_threads)
4179  debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4180  " status.\n",
4181  lwpid_of (thread));
4182  return 0;
4183  }
4184 
4185  /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4186  or we have. */
4187  pc = get_pc (lwp);
4188 
4189  /* If the PC has changed since we stopped, then don't do anything,
4190  and let the breakpoint/tracepoint be hit. This happens if, for
4191  instance, GDB handled the decr_pc_after_break subtraction itself,
4192  GDB is OOL stepping this thread, or the user has issued a "jump"
4193  command, or poked thread's registers herself. */
4194  if (pc != lwp->stop_pc)
4195  {
4196  if (debug_threads)
4197  debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4198  "Old stop_pc was 0x%s, PC is now 0x%s\n",
4199  lwpid_of (thread),
4200  paddress (lwp->stop_pc), paddress (pc));
4201 
4202  lwp->need_step_over = 0;
4203  return 0;
4204  }
4205 
4206  saved_thread = current_thread;
4207  current_thread = thread;
4208 
4209  /* We can only step over breakpoints we know about. */
4210  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4211  {
4212  /* Don't step over a breakpoint that GDB expects to hit
4213  though. If the condition is being evaluated on the target's side
4214  and it evaluate to false, step over this breakpoint as well. */
4215  if (gdb_breakpoint_here (pc)
4218  {
4219  if (debug_threads)
4220  debug_printf ("Need step over [LWP %ld]? yes, but found"
4221  " GDB breakpoint at 0x%s; skipping step over\n",
4222  lwpid_of (thread), paddress (pc));
4223 
4224  current_thread = saved_thread;
4225  return 0;
4226  }
4227  else
4228  {
4229  if (debug_threads)
4230  debug_printf ("Need step over [LWP %ld]? yes, "
4231  "found breakpoint at 0x%s\n",
4232  lwpid_of (thread), paddress (pc));
4233 
4234  /* We've found an lwp that needs stepping over --- return 1 so
4235  that find_inferior stops looking. */
4236  current_thread = saved_thread;
4237 
4238  /* If the step over is cancelled, this is set again. */
4239  lwp->need_step_over = 0;
4240  return 1;
4241  }
4242  }
4243 
4244  current_thread = saved_thread;
4245 
4246  if (debug_threads)
4247  debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4248  " at 0x%s\n",
4249  lwpid_of (thread), paddress (pc));
4250 
4251  return 0;
4252 }
4253 
4254 /* Start a step-over operation on LWP. When LWP stopped at a
4255  breakpoint, to make progress, we need to remove the breakpoint out
4256  of the way. If we let other threads run while we do that, they may
4257  pass by the breakpoint location and miss hitting it. To avoid
4258  that, a step-over momentarily stops all threads while LWP is
4259  single-stepped while the breakpoint is temporarily uninserted from
4260  the inferior. When the single-step finishes, we reinsert the
4261  breakpoint, and let all threads that are supposed to be running,
4262  run again.
4263 
4264  On targets that don't support hardware single-step, we don't
4265  currently support full software single-stepping. Instead, we only
4266  support stepping over the thread event breakpoint, by asking the
4267  low target where to place a reinsert breakpoint. Since this
4268  routine assumes the breakpoint being stepped over is a thread event
4269  breakpoint, it usually assumes the return address of the current
4270  function is a good enough place to set the reinsert breakpoint. */
4271 
4272 static int
4274 {
4275  struct thread_info *thread = get_lwp_thread (lwp);
4276  struct thread_info *saved_thread;
4277  CORE_ADDR pc;
4278  int step;
4279 
4280  if (debug_threads)
4281  debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4282  lwpid_of (thread));
4283 
4284  stop_all_lwps (1, lwp);
4285 
4286  if (lwp->suspended != 0)
4287  {
4288  internal_error (__FILE__, __LINE__,
4289  "LWP %ld suspended=%d\n", lwpid_of (thread),
4290  lwp->suspended);
4291  }
4292 
4293  if (debug_threads)
4294  debug_printf ("Done stopping all threads for step-over.\n");
4295 
4296  /* Note, we should always reach here with an already adjusted PC,
4297  either by GDB (if we're resuming due to GDB's request), or by our
4298  caller, if we just finished handling an internal breakpoint GDB
4299  shouldn't care about. */
4300  pc = get_pc (lwp);
4301 
4302  saved_thread = current_thread;
4303  current_thread = thread;
4304 
4305  lwp->bp_reinsert = pc;
4308 
4309  if (can_hardware_single_step ())
4310  {
4311  step = 1;
4312  }
4313  else
4314  {
4316  set_reinsert_breakpoint (raddr);
4317  step = 0;
4318  }
4319 
4320  current_thread = saved_thread;
4321 
4322  linux_resume_one_lwp (lwp, step, 0, NULL);
4323 
4324  /* Require next event from this LWP. */
4325  step_over_bkpt = thread->entry.id;
4326  return 1;
4327 }
4328 
4329 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4330  start_step_over, if still there, and delete any reinsert
4331  breakpoints we've set, on non hardware single-step targets. */
4332 
4333 static int
4335 {
4336  if (lwp->bp_reinsert != 0)
4337  {
4338  if (debug_threads)
4339  debug_printf ("Finished step over.\n");
4340 
4341  /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4342  may be no breakpoint to reinsert there by now. */
4345 
4346  lwp->bp_reinsert = 0;
4347 
4348  /* Delete any software-single-step reinsert breakpoints. No
4349  longer needed. We don't have to worry about other threads
4350  hitting this trap, and later not being able to explain it,
4351  because we were stepping over a breakpoint, and we hold all
4352  threads but LWP stopped while doing that. */
4353  if (!can_hardware_single_step ())
4355 
4356  step_over_bkpt = null_ptid;
4357  return 1;
4358  }
4359  else
4360  return 0;
4361 }
4362 
4363 /* If there's a step over in progress, wait until all threads stop
4364  (that is, until the stepping thread finishes its step), and
4365  unsuspend all lwps. The stepping thread ends with its status
4366  pending, which is processed later when we get back to processing
4367  events. */
4368 
4369 static void
4371 {
4372  if (!ptid_equal (step_over_bkpt, null_ptid))
4373  {
4374  struct lwp_info *lwp;
4375  int wstat;
4376  int ret;
4377 
4378  if (debug_threads)
4379  debug_printf ("detach: step over in progress, finish it first\n");
4380 
4381  /* Passing NULL_PTID as filter indicates we want all events to
4382  be left pending. Eventually this returns when there are no
4383  unwaited-for children left. */
4385  &wstat, __WALL);
4386  gdb_assert (ret == -1);
4387 
4388  lwp = find_lwp_pid (step_over_bkpt);
4389  if (lwp != NULL)
4390  finish_step_over (lwp);
4391  step_over_bkpt = null_ptid;
4392  unsuspend_all_lwps (lwp);
4393  }
4394 }
4395 
4396 /* This function is called once per thread. We check the thread's resume
4397  request, which will tell us whether to resume, step, or leave the thread
4398  stopped; and what signal, if any, it should be sent.
4399 
4400  For threads which we aren't explicitly told otherwise, we preserve
4401  the stepping flag; this is used for stepping over gdbserver-placed
4402  breakpoints.
4403 
4404  If pending_flags was set in any thread, we queue any needed
4405  signals, since we won't actually resume. We already have a pending
4406  event to report, so we don't need to preserve any step requests;
4407  they should be re-issued if necessary. */
4408 
4409 static int
4411 {
4412  struct thread_info *thread = (struct thread_info *) entry;
4413  struct lwp_info *lwp = get_thread_lwp (thread);
4414  int step;
4415  int leave_all_stopped = * (int *) arg;
4416  int leave_pending;
4417 
4418  if (lwp->resume == NULL)
4419  return 0;
4420 
4421  if (lwp->resume->kind == resume_stop)
4422  {
4423  if (debug_threads)
4424  debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4425 
4426  if (!lwp->stopped)
4427  {
4428  if (debug_threads)
4429  debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4430 
4431  /* Stop the thread, and wait for the event asynchronously,
4432  through the event loop. */
4433  send_sigstop (lwp);
4434  }
4435  else
4436  {
4437  if (debug_threads)
4438  debug_printf ("already stopped LWP %ld\n",
4439  lwpid_of (thread));
4440 
4441  /* The LWP may have been stopped in an internal event that
4442  was not meant to be notified back to GDB (e.g., gdbserver
4443  breakpoint), so we should be reporting a stop event in
4444  this case too. */
4445 
4446  /* If the thread already has a pending SIGSTOP, this is a
4447  no-op. Otherwise, something later will presumably resume
4448  the thread and this will cause it to cancel any pending
4449  operation, due to last_resume_kind == resume_stop. If
4450  the thread already has a pending status to report, we
4451  will still report it the next time we wait - see
4452  status_pending_p_callback. */
4453 
4454  /* If we already have a pending signal to report, then
4455  there's no need to queue a SIGSTOP, as this means we're
4456  midway through moving the LWP out of the jumppad, and we
4457  will report the pending signal as soon as that is
4458  finished. */
4459  if (lwp->pending_signals_to_report == NULL)
4460  send_sigstop (lwp);
4461  }
4462 
4463  /* For stop requests, we're done. */
4464  lwp->resume = NULL;
4465  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4466  return 0;
4467  }
4468 
4469  /* If this thread which is about to be resumed has a pending status,
4470  then don't resume it - we can just report the pending status.
4471  Likewise if it is suspended, because e.g., another thread is
4472  stepping past a breakpoint. Make sure to queue any signals that
4473  would otherwise be sent. In all-stop mode, we do this decision
4474  based on if *any* thread has a pending status. If there's a
4475  thread that needs the step-over-breakpoint dance, then don't
4476  resume any other thread but that particular one. */
4477  leave_pending = (lwp->suspended
4478  || lwp->status_pending_p
4479  || leave_all_stopped);
4480 
4481  if (!leave_pending)
4482  {
4483  if (debug_threads)
4484  debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4485 
4486  step = (lwp->resume->kind == resume_step);
4487  linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4488  }
4489  else
4490  {
4491  if (debug_threads)
4492  debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4493 
4494  /* If we have a new signal, enqueue the signal. */
4495  if (lwp->resume->sig != 0)
4496  {
4497  struct pending_signals *p_sig;
4498  p_sig = xmalloc (sizeof (*p_sig));
4499  p_sig->prev = lwp->pending_signals;
4500  p_sig->signal = lwp->resume->sig;
4501  memset (&p_sig->info, 0, sizeof (siginfo_t));
4502 
4503  /* If this is the same signal we were previously stopped by,
4504  make sure to queue its siginfo. We can ignore the return
4505  value of ptrace; if it fails, we'll skip
4506  PTRACE_SETSIGINFO. */
4507  if (WIFSTOPPED (lwp->last_status)
4508  && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4509  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4510  &p_sig->info);
4511 
4512  lwp->pending_signals = p_sig;
4513  }
4514  }
4515 
4516  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4517  lwp->resume = NULL;
4518  return 0;
4519 }
4520 
4521 static void
4522 linux_resume (struct thread_resume *resume_info, size_t n)
4523 {
4524  struct thread_resume_array array = { resume_info, n };
4525  struct thread_info *need_step_over = NULL;
4526  int any_pending;
4527  int leave_all_stopped;
4528 
4529  if (debug_threads)
4530  {
4531  debug_enter ();
4532  debug_printf ("linux_resume:\n");
4533  }
4534 
4536 
4537  /* If there is a thread which would otherwise be resumed, which has
4538  a pending status, then don't resume any threads - we can just
4539  report the pending status. Make sure to queue any signals that
4540  would otherwise be sent. In non-stop mode, we'll apply this
4541  logic to each thread individually. We consume all pending events
4542  before considering to start a step-over (in all-stop). */
4543  any_pending = 0;
4544  if (!non_stop)
4546 
4547  /* If there is a thread which would otherwise be resumed, which is
4548  stopped at a breakpoint that needs stepping over, then don't
4549  resume any threads - have it step over the breakpoint with all
4550  other threads stopped, then resume all threads again. Make sure
4551  to queue any signals that would otherwise be delivered or
4552  queued. */
4553  if (!any_pending && supports_breakpoints ())
4554  need_step_over
4555  = (struct thread_info *) find_inferior (&all_threads,
4556  need_step_over_p, NULL);
4557 
4558  leave_all_stopped = (need_step_over != NULL || any_pending);
4559 
4560  if (debug_threads)
4561  {
4562  if (need_step_over != NULL)
4563  debug_printf ("Not resuming all, need step over\n");
4564  else if (any_pending)
4565  debug_printf ("Not resuming, all-stop and found "
4566  "an LWP with pending status\n");
4567  else
4568  debug_printf ("Resuming, no pending status or step over needed\n");
4569  }
4570 
4571  /* Even if we're leaving threads stopped, queue all signals we'd
4572  otherwise deliver. */
4573  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4574 
4575  if (need_step_over)
4576  start_step_over (get_thread_lwp (need_step_over));
4577 
4578  if (debug_threads)
4579  {
4580  debug_printf ("linux_resume done\n");
4581  debug_exit ();
4582  }
4583 }
4584 
4585 /* This function is called once per thread. We check the thread's
4586  last resume request, which will tell us whether to resume, step, or
4587  leave the thread stopped. Any signal the client requested to be
4588  delivered has already been enqueued at this point.
4589 
4590  If any thread that GDB wants running is stopped at an internal
4591  breakpoint that needs stepping over, we start a step-over operation
4592  on that particular thread, and leave all others stopped. */
4593 
4594 static int
4596 {
4597  struct thread_info *thread = (struct thread_info *) entry;
4598  struct lwp_info *lwp = get_thread_lwp (thread);
4599  int step;
4600 
4601  if (lwp == except)
4602  return 0;
4603 
4604  if (debug_threads)
4605  debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4606 
4607  if (!lwp->stopped)
4608  {
4609  if (debug_threads)
4610  debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4611  return 0;
4612  }
4613 
4614  if (thread->last_resume_kind == resume_stop
4615  && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4616  {
4617  if (debug_threads)
4618  debug_printf (" client wants LWP to remain %ld stopped\n",
4619  lwpid_of (thread));
4620  return 0;
4621  }
4622 
4623  if (lwp->status_pending_p)
4624  {
4625  if (debug_threads)
4626  debug_printf (" LWP %ld has pending status, leaving stopped\n",
4627  lwpid_of (thread));
4628  return 0;
4629  }
4630 
4631  gdb_assert (lwp->suspended >= 0);
4632 
4633  if (lwp->suspended)
4634  {
4635  if (debug_threads)
4636  debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4637  return 0;
4638  }
4639 
4640  if (thread->last_resume_kind == resume_stop
4641  && lwp->pending_signals_to_report == NULL
4642  && lwp->collecting_fast_tracepoint == 0)
4643  {
4644  /* We haven't reported this LWP as stopped yet (otherwise, the
4645  last_status.kind check above would catch it, and we wouldn't
4646  reach here. This LWP may have been momentarily paused by a
4647  stop_all_lwps call while handling for example, another LWP's
4648  step-over. In that case, the pending expected SIGSTOP signal
4649  that was queued at vCont;t handling time will have already
4650  been consumed by wait_for_sigstop, and so we need to requeue
4651  another one here. Note that if the LWP already has a SIGSTOP
4652  pending, this is a no-op. */
4653 
4654  if (debug_threads)
4655  debug_printf ("Client wants LWP %ld to stop. "
4656  "Making sure it has a SIGSTOP pending\n",
4657  lwpid_of (thread));
4658 
4659  send_sigstop (lwp);
4660  }
4661 
4662  if (thread->last_resume_kind == resume_step)
4663  {
4664  if (debug_threads)
4665  debug_printf (" stepping LWP %ld, client wants it stepping\n",
4666  lwpid_of (thread));
4667  step = 1;
4668  }
4669  else if (lwp->bp_reinsert != 0)
4670  {
4671  if (debug_threads)
4672  debug_printf (" stepping LWP %ld, reinsert set\n",
4673  lwpid_of (thread));
4674  step = 1;
4675  }
4676  else
4677  step = 0;
4678 
4679  linux_resume_one_lwp (lwp, step, 0, NULL);
4680  return 0;
4681 }
4682 
4683 static int
4685 {
4686  struct thread_info *thread = (struct thread_info *) entry;
4687  struct lwp_info *lwp = get_thread_lwp (thread);
4688 
4689  if (lwp == except)
4690  return 0;
4691 
4692  lwp_suspended_decr (lwp);
4693 
4694  return proceed_one_lwp (entry, except);
4695 }
4696 
4697 /* When we finish a step-over, set threads running again. If there's
4698  another thread that may need a step-over, now's the time to start
4699  it. Eventually, we'll move all threads past their breakpoints. */
4700 
4701 static void
4703 {
4704  struct thread_info *need_step_over;
4705 
4706  /* If there is a thread which would otherwise be resumed, which is
4707  stopped at a breakpoint that needs stepping over, then don't
4708  resume any threads - have it step over the breakpoint with all
4709  other threads stopped, then resume all threads again. */
4710 
4711  if (supports_breakpoints ())
4712  {
4713  need_step_over
4714  = (struct thread_info *) find_inferior (&all_threads,
4715  need_step_over_p, NULL);
4716 
4717  if (need_step_over != NULL)
4718  {
4719  if (debug_threads)
4720  debug_printf ("proceed_all_lwps: found "
4721  "thread %ld needing a step-over\n",
4722  lwpid_of (need_step_over));
4723 
4724  start_step_over (get_thread_lwp (need_step_over));
4725  return;
4726  }
4727  }
4728 
4729  if (debug_threads)
4730  debug_printf ("Proceeding, no step-over needed\n");
4731 
4733 }
4734 
4735 /* Stopped LWPs that the client wanted to be running, that don't have
4736  pending statuses, are set to run again, except for EXCEPT, if not
4737  NULL. This undoes a stop_all_lwps call. */
4738 
4739 static void
4740 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4741 {
4742  if (debug_threads)
4743  {
4744  debug_enter ();
4745  if (except)
4746  debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4747  lwpid_of (get_lwp_thread (except)));
4748  else
4749  debug_printf ("unstopping all lwps\n");
4750  }
4751 
4752  if (unsuspend)
4754  else
4756 
4757  if (debug_threads)
4758  {
4759  debug_printf ("unstop_all_lwps done\n");
4760  debug_exit ();
4761  }
4762 }
4763 
4764 
4765 #ifdef HAVE_LINUX_REGSETS
4766 
4767 #define use_linux_regsets 1
4768 
4769 /* Returns true if REGSET has been disabled. */
4770 
4771 static int
4772 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4773 {
4774  return (info->disabled_regsets != NULL
4775  && info->disabled_regsets[regset - info->regsets]);
4776 }
4777 
4778 /* Disable REGSET. */
4779 
4780 static void
4781 disable_regset (struct regsets_info *info, struct regset_info *regset)
4782 {
4783  int dr_offset;
4784 
4785  dr_offset = regset - info->regsets;
4786  if (info->disabled_regsets == NULL)
4787  info->disabled_regsets = xcalloc (1, info->num_regsets);
4788  info->disabled_regsets[dr_offset] = 1;
4789 }
4790 
4791 static int
4792 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4793  struct regcache *regcache)
4794 {
4795  struct regset_info *regset;
4796  int saw_general_regs = 0;
4797  int pid;
4798  struct iovec iov;
4799 
4800  pid = lwpid_of (current_thread);
4801  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4802  {
4803  void *buf, *data;
4804  int nt_type, res;
4805 
4806  if (regset->size == 0 || regset_disabled (regsets_info, regset))
4807  continue;
4808 
4809  buf = xmalloc (regset->size);
4810 
4811  nt_type = regset->nt_type;
4812  if (nt_type)
4813  {
4814  iov.iov_base = buf;
4815  iov.iov_len = regset->size;
4816  data = (void *) &iov;
4817  }
4818  else
4819  data = buf;
4820 
4821 #ifndef __sparc__
4822  res = ptrace (regset->get_request, pid,
4823  (PTRACE_TYPE_ARG3) (long) nt_type, data);
4824 #else
4825  res = ptrace (regset->get_request, pid, data, nt_type);
4826 #endif
4827  if (res < 0)
4828  {
4829  if (errno == EIO)
4830  {
4831  /* If we get EIO on a regset, do not try it again for
4832  this process mode. */
4833  disable_regset (regsets_info, regset);
4834  }
4835  else if (errno == ENODATA)
4836  {
4837  /* ENODATA may be returned if the regset is currently
4838  not "active". This can happen in normal operation,
4839  so suppress the warning in this case. */
4840  }
4841  else
4842  {
4843  char s[256];
4844  sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4845  pid);
4846  perror (s);
4847  }
4848  }
4849  else
4850  {
4851  if (regset->type == GENERAL_REGS)
4852  saw_general_regs = 1;
4853  regset->store_function (regcache, buf);
4854  }
4855  free (buf);
4856  }
4857  if (saw_general_regs)
4858  return 0;
4859  else
4860  return 1;
4861 }
4862 
4863 static int
4864 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4865  struct regcache *regcache)
4866 {
4867  struct regset_info *regset;
4868  int saw_general_regs = 0;
4869  int pid;
4870  struct iovec iov;
4871 
4872  pid = lwpid_of (current_thread);
4873  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4874  {
4875  void *buf, *data;
4876  int nt_type, res;
4877 
4878  if (regset->size == 0 || regset_disabled (regsets_info, regset)
4879  || regset->fill_function == NULL)
4880  continue;
4881 
4882  buf = xmalloc (regset->size);
4883 
4884  /* First fill the buffer with the current register set contents,
4885  in case there are any items in the kernel's regset that are
4886  not in gdbserver's regcache. */
4887 
4888  nt_type = regset->nt_type;
4889  if (nt_type)
4890  {
4891  iov.iov_base = buf;
4892  iov.iov_len = regset->size;
4893  data = (void *) &iov;
4894  }
4895  else
4896  data = buf;
4897 
4898 #ifndef __sparc__
4899  res = ptrace (regset->get_request, pid,
4900  (PTRACE_TYPE_ARG3) (long) nt_type, data);
4901 #else
4902  res = ptrace (regset->get_request, pid, data, nt_type);
4903 #endif
4904 
4905  if (res == 0)
4906  {
4907  /* Then overlay our cached registers on that. */
4908  regset->fill_function (regcache, buf);
4909 
4910  /* Only now do we write the register set. */
4911 #ifndef __sparc__
4912  res = ptrace (regset->set_request, pid,
4913  (PTRACE_TYPE_ARG3) (long) nt_type, data);
4914 #else
4915  res = ptrace (regset->set_request, pid, data, nt_type);
4916 #endif
4917  }
4918 
4919  if (res < 0)
4920  {
4921  if (errno == EIO)
4922  {
4923  /* If we get EIO on a regset, do not try it again for
4924  this process mode. */
4925  disable_regset (regsets_info, regset);
4926  }
4927  else if (errno == ESRCH)
4928  {
4929  /* At this point, ESRCH should mean the process is
4930  already gone, in which case we simply ignore attempts
4931  to change its registers. See also the related
4932  comment in linux_resume_one_lwp. */
4933  free (buf);
4934  return 0;
4935  }
4936  else
4937  {
4938  perror ("Warning: ptrace(regsets_store_inferior_registers)");
4939  }
4940  }
4941  else if (regset->type == GENERAL_REGS)
4942  saw_general_regs = 1;
4943  free (buf);
4944  }
4945  if (saw_general_regs)
4946  return 0;
4947  else
4948  return 1;
4949 }
4950 
4951 #else /* !HAVE_LINUX_REGSETS */
4952 
4953 #define use_linux_regsets 0
4954 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4955 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4956 
4957 #endif
4958 
4959 /* Return 1 if register REGNO is supported by one of the regset ptrace
4960  calls or 0 if it has to be transferred individually. */
4961 
4962 static int
4963 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4964 {
4965  unsigned char mask = 1 << (regno % 8);
4966  size_t index = regno / 8;
4967 
4968  return (use_linux_regsets
4969  && (regs_info->regset_bitmap == NULL
4970  || (regs_info->regset_bitmap[index] & mask) != 0));
4971 }
4972 
4973 #ifdef HAVE_LINUX_USRREGS
4974 
4975 int
4976 register_addr (const struct usrregs_info *usrregs, int regnum)
4977 {
4978  int addr;
4979 
4980  if (regnum < 0 || regnum >= usrregs->num_regs)
4981  error ("Invalid register number %d.", regnum);
4982 
4983  addr = usrregs->regmap[regnum];
4984 
4985  return addr;
4986 }
4987 
4988 /* Fetch one register. */
4989 static void
4990 fetch_register (const struct usrregs_info *usrregs,
4991  struct regcache *regcache, int regno)
4992 {
4993  CORE_ADDR regaddr;
4994  int i, size;
4995  char *buf;
4996  int pid;
4997 
4998  if (regno >= usrregs->num_regs)
4999  return;
5000  if ((*the_low_target.cannot_fetch_register) (regno))
5001  return;
5002 
5003  regaddr = register_addr (usrregs, regno);
5004  if (regaddr == -1)
5005  return;
5006 
5007  size = ((register_size (regcache->tdesc, regno)
5008  + sizeof (PTRACE_XFER_TYPE) - 1)
5009  & -sizeof (PTRACE_XFER_TYPE));
5010  buf = alloca (size);
5011 
5012  pid = lwpid_of (current_thread);
5013  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5014  {
5015  errno = 0;
5016  *(PTRACE_XFER_TYPE *) (buf + i) =
5017  ptrace (PTRACE_PEEKUSER, pid,
5018  /* Coerce to a uintptr_t first to avoid potential gcc warning
5019  of coercing an 8 byte integer to a 4 byte pointer. */
5020  (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5021  regaddr += sizeof (PTRACE_XFER_TYPE);
5022  if (errno != 0)
5023  error ("reading register %d: %s", regno, strerror (errno));
5024  }
5025 
5027  the_low_target.supply_ptrace_register (regcache, regno, buf);
5028  else
5029  supply_register (regcache, regno, buf);
5030 }
5031 
5032 /* Store one register. */
5033 static void
5034 store_register (const struct usrregs_info *usrregs,
5035  struct regcache *regcache, int regno)
5036 {
5037  CORE_ADDR regaddr;
5038  int i, size;
5039  char *buf;
5040  int pid;
5041 
5042  if (regno >= usrregs->num_regs)
5043  return;
5044  if ((*the_low_target.cannot_store_register) (regno))
5045  return;
5046 
5047  regaddr = register_addr (usrregs, regno);
5048  if (regaddr == -1)
5049  return;
5050 
5051  size = ((register_size (regcache->tdesc, regno)
5052  + sizeof (PTRACE_XFER_TYPE) - 1)
5053  & -sizeof (PTRACE_XFER_TYPE));
5054  buf = alloca (size);
5055  memset (buf, 0, size);
5056 
5058  the_low_target.collect_ptrace_register (regcache, regno, buf);
5059  else
5060  collect_register (regcache, regno, buf);
5061 
5062  pid = lwpid_of (current_thread);
5063  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5064  {
5065  errno = 0;
5066  ptrace (PTRACE_POKEUSER, pid,
5067  /* Coerce to a uintptr_t first to avoid potential gcc warning
5068  about coercing an 8 byte integer to a 4 byte pointer. */
5069  (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5070  (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5071  if (errno != 0)
5072  {
5073  /* At this point, ESRCH should mean the process is
5074  already gone, in which case we simply ignore attempts
5075  to change its registers. See also the related
5076  comment in linux_resume_one_lwp. */
5077  if (errno == ESRCH)
5078  return;
5079 
5080  if ((*the_low_target.cannot_store_register) (regno) == 0)
5081  error ("writing register %d: %s", regno, strerror (errno));
5082  }
5083  regaddr += sizeof (PTRACE_XFER_TYPE);
5084  }
5085 }
5086 
5087 /* Fetch all registers, or just one, from the child process.
5088  If REGNO is -1, do this for all registers, skipping any that are
5089  assumed to have been retrieved by regsets_fetch_inferior_registers,
5090  unless ALL is non-zero.
5091  Otherwise, REGNO specifies which register (so we can save time). */
5092 static void
5093 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5094  struct regcache *regcache, int regno, int all)
5095 {
5096  struct usrregs_info *usr = regs_info->usrregs;
5097 
5098  if (regno == -1)
5099  {
5100  for (regno = 0; regno < usr->num_regs; regno++)
5101  if (all || !linux_register_in_regsets (regs_info, regno))
5102  fetch_register (usr, regcache, regno);
5103  }
5104  else
5105  fetch_register (usr, regcache, regno);
5106 }
5107 
5108 /* Store our register values back into the inferior.
5109  If REGNO is -1, do this for all registers, skipping any that are
5110  assumed to have been saved by regsets_store_inferior_registers,
5111  unless ALL is non-zero.
5112  Otherwise, REGNO specifies which register (so we can save time). */
5113 static void
5114 usr_store_inferior_registers (const struct regs_info *regs_info,
5115  struct regcache *regcache, int regno, int all)
5116 {
5117  struct usrregs_info *usr = regs_info->usrregs;
5118 
5119  if (regno == -1)
5120  {
5121  for (regno = 0; regno < usr->num_regs; regno++)
5122  if (all || !linux_register_in_regsets (regs_info, regno))
5123  store_register (usr, regcache, regno);
5124  }
5125  else
5126  store_register (usr, regcache, regno);
5127 }
5128 
5129 #else /* !HAVE_LINUX_USRREGS */
5130 
5131 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5132 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5133 
5134 #endif
5135 
5136 
5137 void
5138 linux_fetch_registers (struct regcache *regcache, int regno)
5139 {
5140  int use_regsets;
5141  int all = 0;
5142  const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5143 
5144  if (regno == -1)
5145  {
5146  if (the_low_target.fetch_register != NULL
5147  && regs_info->usrregs != NULL)
5148  for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5149  (*the_low_target.fetch_register) (regcache, regno);
5150 
5151  all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5152  if (regs_info->usrregs != NULL)
5153  usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5154  }
5155  else
5156  {
5157  if (the_low_target.fetch_register != NULL
5158  && (*the_low_target.fetch_register) (regcache, regno))
5159  return;
5160 
5161  use_regsets = linux_register_in_regsets (regs_info, regno);
5162  if (use_regsets)
5163  all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5164  regcache);
5165  if ((!use_regsets || all) && regs_info->usrregs != NULL)
5166  usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5167  }
5168 }
5169 
5170 void
5171 linux_store_registers (struct regcache *regcache, int regno)
5172 {
5173  int use_regsets;
5174  int all = 0;
5175  const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5176 
5177  if (regno == -1)
5178  {
5179  all = regsets_store_inferior_registers (regs_info->regsets_info,
5180  regcache);
5181  if (regs_info->usrregs != NULL)
5182  usr_store_inferior_registers (regs_info, regcache, regno, all);
5183  }
5184  else
5185  {
5186  use_regsets = linux_register_in_regsets (regs_info, regno);
5187  if (use_regsets)
5188  all = regsets_store_inferior_registers (regs_info->regsets_info,
5189  regcache);
5190  if ((!use_regsets || all) && regs_info->usrregs != NULL)
5191  usr_store_inferior_registers (regs_info, regcache, regno, 1);
5192  }
5193 }
5194 
5195 
5196 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5197  to debugger memory starting at MYADDR. */
5198 
5199 static int
5200 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5201 {
5202  int pid = lwpid_of (current_thread);
5203  register PTRACE_XFER_TYPE *buffer;
5204  register CORE_ADDR addr;
5205  register int count;
5206  char filename[64];
5207  register int i;
5208  int ret;
5209  int fd;
5210 
5211  /* Try using /proc. Don't bother for one word. */
5212  if (len >= 3 * sizeof (long))
5213  {
5214  int bytes;
5215 
5216  /* We could keep this file open and cache it - possibly one per
5217  thread. That requires some juggling, but is even faster. */
5218  sprintf (filename, "/proc/%d/mem", pid);
5219  fd = open (filename, O_RDONLY | O_LARGEFILE);
5220  if (fd == -1)
5221  goto no_proc;
5222 
5223  /* If pread64 is available, use it. It's faster if the kernel
5224  supports it (only one syscall), and it's 64-bit safe even on
5225  32-bit platforms (for instance, SPARC debugging a SPARC64
5226  application). */
5227 #ifdef HAVE_PREAD64
5228  bytes = pread64 (fd, myaddr, len, memaddr);
5229 #else
5230  bytes = -1;
5231  if (lseek (fd, memaddr, SEEK_SET) != -1)
5232  bytes = read (fd, myaddr, len);
5233 #endif
5234 
5235  close (fd);
5236  if (bytes == len)
5237  return 0;
5238 
5239  /* Some data was read, we'll try to get the rest with ptrace. */
5240  if (bytes > 0)
5241  {
5242  memaddr += bytes;
5243  myaddr += bytes;
5244  len -= bytes;
5245  }
5246  }
5247 
5248  no_proc:
5249  /* Round starting address down to longword boundary. */
5250  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5251  /* Round ending address up; get number of longwords that makes. */
5252  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5253  / sizeof (PTRACE_XFER_TYPE));
5254  /* Allocate buffer of that many longwords. */
5255  buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5256 
5257  /* Read all the longwords */
5258  errno = 0;
5259  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5260  {
5261  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5262  about coercing an 8 byte integer to a 4 byte pointer. */
5263  buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5264  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5265  (PTRACE_TYPE_ARG4) 0);
5266  if (errno)
5267  break;
5268  }
5269  ret = errno;
5270 
5271  /* Copy appropriate bytes out of the buffer. */
5272  if (i > 0)
5273  {
5274  i *= sizeof (PTRACE_XFER_TYPE);
5275  i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5276  memcpy (myaddr,
5277  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5278  i < len ? i : len);
5279  }
5280 
5281  return ret;
5282 }
5283 
5284 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5285  memory at MEMADDR. On failure (cannot write to the inferior)
5286  returns the value of errno. Always succeeds if LEN is zero. */
5287 
5288 static int
5289 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5290 {
5291  register int i;
5292  /* Round starting address down to longword boundary. */
5293  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5294  /* Round ending address up; get number of longwords that makes. */
5295  register int count
5296  = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5297  / sizeof (PTRACE_XFER_TYPE);
5298 
5299  /* Allocate buffer of that many longwords. */
5300  register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5301  alloca (count * sizeof (PTRACE_XFER_TYPE));
5302 
5303  int pid = lwpid_of (current_thread);
5304 
5305  if (len == 0)
5306  {
5307  /* Zero length write always succeeds. */
5308  return 0;
5309  }
5310 
5311  if (debug_threads)
5312  {
5313  /* Dump up to four bytes. */
5314  char str[4 * 2 + 1];
5315  char *p = str;
5316  int dump = len < 4 ? len : 4;
5317 
5318  for (i = 0; i < dump; i++)
5319  {
5320  sprintf (p, "%02x", myaddr[i]);
5321  p += 2;
5322  }
5323  *p = '\0';
5324 
5325  debug_printf ("Writing %s to 0x%08lx in process %d\n",
5326  str, (long) memaddr, pid);
5327  }
5328 
5329  /* Fill start and end extra bytes of buffer with existing memory data. */
5330 
5331  errno = 0;
5332  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5333  about coercing an 8 byte integer to a 4 byte pointer. */
5334  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5335  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5336  (PTRACE_TYPE_ARG4) 0);
5337  if (errno)
5338  return errno;
5339 
5340  if (count > 1)
5341  {
5342  errno = 0;
5343  buffer[count - 1]
5344  = ptrace (PTRACE_PEEKTEXT, pid,
5345  /* Coerce to a uintptr_t first to avoid potential gcc warning
5346  about coercing an 8 byte integer to a 4 byte pointer. */
5347  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5348  * sizeof (PTRACE_XFER_TYPE)),
5349  (PTRACE_TYPE_ARG4) 0);
5350  if (errno)
5351  return errno;
5352  }
5353 
5354  /* Copy data to be written over corresponding part of buffer. */
5355 
5356  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5357  myaddr, len);
5358 
5359  /* Write the entire buffer. */
5360 
5361  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5362  {
5363  errno = 0;
5364  ptrace (PTRACE_POKETEXT, pid,
5365  /* Coerce to a uintptr_t first to avoid potential gcc warning
5366  about coercing an 8 byte integer to a 4 byte pointer. */
5367  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5368  (PTRACE_TYPE_ARG4) buffer[i]);
5369  if (errno)
5370  return errno;
5371  }
5372 
5373  return 0;
5374 }
5375 
5376 static void
5378 {
5379 #ifdef USE_THREAD_DB
5380  struct process_info *proc = current_process ();
5381 
5382  if (proc->priv->thread_db != NULL)
5383  return;
5384 
5385  /* If the kernel supports tracing clones, then we don't need to
5386  use the magic thread event breakpoint to learn about
5387  threads. */
5389 #endif
5390 }
5391 
5392 static void
5394 {
5395  extern unsigned long signal_pid;
5396 
5397  /* Send a SIGINT to the process group. This acts just like the user
5398  typed a ^C on the controlling terminal. */
5399  kill (-signal_pid, SIGINT);
5400 }
5401 
5402 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5403  to debugger memory starting at MYADDR. */
5404 
5405 static int
5406 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5407 {
5408  char filename[PATH_MAX];
5409  int fd, n;
5410  int pid = lwpid_of (current_thread);
5411 
5412  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5413 
5414  fd = open (filename, O_RDONLY);
5415  if (fd < 0)
5416  return -1;
5417 
5418  if (offset != (CORE_ADDR) 0
5419  && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5420  n = -1;
5421  else
5422  n = read (fd, myaddr, len);
5423 
5424  close (fd);
5425 
5426  return n;
5427 }
5428 
5429 /* These breakpoint and watchpoint related wrapper functions simply
5430  pass on the function call if the target has registered a
5431  corresponding function. */
5432 
5433 static int
5435 {
5436  return (the_low_target.supports_z_point_type != NULL
5438 }
5439 
5440 static int
5442  int size, struct raw_breakpoint *bp)
5443 {
5444  if (type == raw_bkpt_type_sw)
5445  return insert_memory_breakpoint (bp);
5446  else if (the_low_target.insert_point != NULL)
5447  return the_low_target.insert_point (type, addr, size, bp);
5448  else
5449  /* Unsupported (see target.h). */
5450  return 1;
5451 }
5452 
5453 static int
5455  int size, struct raw_breakpoint *bp)
5456 {
5457  if (type == raw_bkpt_type_sw)
5458  return remove_memory_breakpoint (bp);
5459  else if (the_low_target.remove_point != NULL)
5460  return the_low_target.remove_point (type, addr, size, bp);
5461  else
5462  /* Unsupported (see target.h). */
5463  return 1;
5464 }
5465 
5466 /* Implement the to_stopped_by_sw_breakpoint target_ops
5467  method. */
5468 
5469 static int
5471 {
5472  struct lwp_info *lwp = get_thread_lwp (current_thread);
5473 
5474  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5475 }
5476 
5477 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5478  method. */
5479 
5480 static int
5482 {
5483  return USE_SIGTRAP_SIGINFO;
5484 }
5485 
5486 /* Implement the to_stopped_by_hw_breakpoint target_ops
5487  method. */
5488 
5489 static int
5491 {
5492  struct lwp_info *lwp = get_thread_lwp (current_thread);
5493 
5494  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5495 }
5496 
5497 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5498  method. */
5499 
5500 static int
5502 {
5503  return USE_SIGTRAP_SIGINFO;
5504 }
5505 
5506 /* Implement the supports_conditional_breakpoints target_ops
5507  method. */
5508 
5509 static int
5511 {
5512  /* GDBserver needs to step over the breakpoint if the condition is
5513  false. GDBserver software single step is too simple, so disable
5514  conditional breakpoints if the target doesn't have hardware single
5515  step. */
5516  return can_hardware_single_step ();
5517 }
5518 
5519 static int
5521 {
5522  struct lwp_info *lwp = get_thread_lwp (current_thread);
5523 
5524  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5525 }
5526 
5527 static CORE_ADDR
5529 {
5530  struct lwp_info *lwp = get_thread_lwp (current_thread);
5531 
5532  return lwp->stopped_data_address;
5533 }
5534 
5535 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5536  && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5537  && defined(PT_TEXT_END_ADDR)
5538 
5539 /* This is only used for targets that define PT_TEXT_ADDR,
5540  PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5541  the target has different ways of acquiring this information, like
5542  loadmaps. */
5543 
5544 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5545  to tell gdb about. */
5546 
5547 static int
5548 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5549 {
5550  unsigned long text, text_end, data;
5551  int pid = lwpid_of (current_thread);
5552 
5553  errno = 0;
5554 
5555  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5556  (PTRACE_TYPE_ARG4) 0);
5557  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5558  (PTRACE_TYPE_ARG4) 0);
5559  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5560  (PTRACE_TYPE_ARG4) 0);
5561 
5562  if (errno == 0)
5563  {
5564  /* Both text and data offsets produced at compile-time (and so
5565  used by gdb) are relative to the beginning of the program,
5566  with the data segment immediately following the text segment.
5567  However, the actual runtime layout in memory may put the data
5568  somewhere else, so when we send gdb a data base-address, we
5569  use the real data base address and subtract the compile-time
5570  data base-address from it (which is just the length of the
5571  text segment). BSS immediately follows data in both
5572  cases. */
5573  *text_p = text;
5574  *data_p = data - (text_end - text);
5575 
5576  return 1;
5577  }
5578  return 0;
5579 }
5580 #endif
5581 
5582 static int
5583 linux_qxfer_osdata (const char *annex,
5584  unsigned char *readbuf, unsigned const char *writebuf,
5585  CORE_ADDR offset, int len)
5586 {
5587  return linux_common_xfer_osdata (annex, readbuf, offset, len);
5588 }
5589 
5590 /* Convert a native/host siginfo object, into/from the siginfo in the
5591  layout of the inferiors' architecture. */
5592 
5593 static void
5594 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5595 {
5596  int done = 0;
5597 
5598  if (the_low_target.siginfo_fixup != NULL)
5599  done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5600 
5601  /* If there was no callback, or the callback didn't do anything,
5602  then just do a straight memcpy. */
5603  if (!done)
5604  {
5605  if (direction == 1)
5606  memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5607  else
5608  memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5609  }
5610 }
5611 
5612 static int
5613 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5614  unsigned const char *writebuf, CORE_ADDR offset, int len)
5615 {
5616  int pid;
5617  siginfo_t siginfo;
5618  char inf_siginfo[sizeof (siginfo_t)];
5619 
5620  if (current_thread == NULL)
5621  return -1;
5622 
5623  pid = lwpid_of (current_thread);
5624 
5625  if (debug_threads)
5626  debug_printf ("%s siginfo for lwp %d.\n",
5627  readbuf != NULL ? "Reading" : "Writing",
5628  pid);
5629 
5630  if (offset >= sizeof (siginfo))
5631  return -1;
5632 
5633  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5634  return -1;
5635 
5636  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5637  SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5638  inferior with a 64-bit GDBSERVER should look the same as debugging it
5639  with a 32-bit GDBSERVER, we need to convert it. */
5640  siginfo_fixup (&siginfo, inf_siginfo, 0);
5641 
5642  if (offset + len > sizeof (siginfo))
5643  len = sizeof (siginfo) - offset;
5644 
5645  if (readbuf != NULL)
5646  memcpy (readbuf, inf_siginfo + offset, len);
5647  else
5648  {
5649  memcpy (inf_siginfo + offset, writebuf, len);
5650 
5651  /* Convert back to ptrace layout before flushing it out. */
5652  siginfo_fixup (&siginfo, inf_siginfo, 1);
5653 
5654  if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5655  return -1;
5656  }
5657 
5658  return len;
5659 }
5660 
5661 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5662  so we notice when children change state; as the handler for the
5663  sigsuspend in my_waitpid. */
5664 
5665 static void
5666 sigchld_handler (int signo)
5667 {
5668  int old_errno = errno;
5669 
5670  if (debug_threads)
5671  {
5672  do
5673  {
5674  /* fprintf is not async-signal-safe, so call write
5675  directly. */
5676  if (write (2, "sigchld_handler\n",
5677  sizeof ("sigchld_handler\n") - 1) < 0)
5678  break; /* just ignore */
5679  } while (0);
5680  }
5681 
5682  if (target_is_async_p ())
5683  async_file_mark (); /* trigger a linux_wait */
5684 
5685  errno = old_errno;
5686 }
5687 
5688 static int
5690 {
5691  return 1;
5692 }
5693 
5694 static int
5695 linux_async (int enable)
5696 {
5697  int previous = target_is_async_p ();
5698 
5699  if (debug_threads)
5700  debug_printf ("linux_async (%d), previous=%d\n",
5701  enable, previous);
5702 
5703  if (previous != enable)
5704  {
5705  sigset_t mask;
5706  sigemptyset (&mask);
5707  sigaddset (&mask, SIGCHLD);
5708 
5709  sigprocmask (SIG_BLOCK, &mask, NULL);
5710 
5711  if (enable)
5712  {
5713  if (pipe (linux_event_pipe) == -1)
5714  {
5715  linux_event_pipe[0] = -1;
5716  linux_event_pipe[1] = -1;
5717  sigprocmask (SIG_UNBLOCK, &mask, NULL);
5718 
5719  warning ("creating event pipe failed.");
5720  return previous;
5721  }
5722 
5723  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5724  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5725 
5726  /* Register the event loop handler. */
5727  add_file_handler (linux_event_pipe[0],
5728  handle_target_event, NULL);
5729 
5730  /* Always trigger a linux_wait. */
5731  async_file_mark ();
5732  }
5733  else
5734  {
5735  delete_file_handler (linux_event_pipe[0]);
5736 
5737  close (linux_event_pipe[0]);
5738  close (linux_event_pipe[1]);
5739  linux_event_pipe[0] = -1;
5740  linux_event_pipe[1] = -1;
5741  }
5742 
5743  sigprocmask (SIG_UNBLOCK, &mask, NULL);
5744  }
5745 
5746  return previous;
5747 }
5748 
5749 static int
5751 {
5752  /* Register or unregister from event-loop accordingly. */
5753  linux_async (nonstop);
5754 
5755  if (target_is_async_p () != (nonstop != 0))
5756  return -1;
5757 
5758  return 0;
5759 }
5760 
5761 static int
5763 {
5764  return 1;
5765 }
5766 
5767 /* Check if fork events are supported. */
5768 
5769 static int
5771 {
5772  return linux_supports_tracefork ();
5773 }
5774 
5775 /* Check if vfork events are supported. */
5776 
5777 static int
5779 {
5780  return linux_supports_tracefork ();
5781 }
5782 
5783 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5784  options for the specified lwp. */
5785 
5786 static int
5788  void *args)
5789 {
5790  struct thread_info *thread = (struct thread_info *) entry;
5791  struct lwp_info *lwp = get_thread_lwp (thread);
5792 
5793  if (!lwp->stopped)
5794  {
5795  /* Stop the lwp so we can modify its ptrace options. */
5796  lwp->must_set_ptrace_flags = 1;
5797  linux_stop_lwp (lwp);
5798  }
5799  else
5800  {
5801  /* Already stopped; go ahead and set the ptrace options. */
5802  struct process_info *proc = find_process_pid (pid_of (thread));
5803  int options = linux_low_ptrace_options (proc->attached);
5804 
5805  linux_enable_event_reporting (lwpid_of (thread), options);
5806  lwp->must_set_ptrace_flags = 0;
5807  }
5808 
5809  return 0;
5810 }
5811 
5812 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5813  ptrace flags for all inferiors. This is in case the new GDB connection
5814  doesn't support the same set of events that the previous one did. */
5815 
5816 static void
5818 {
5819  pid_t pid;
5820 
5821  /* Request that all the lwps reset their ptrace options. */
5823 }
5824 
5825 static int
5827 {
5828 #ifdef HAVE_PERSONALITY
5829  return 1;
5830 #else
5831  return 0;
5832 #endif
5833 }
5834 
5835 static int
5837 {
5838  return 1;
5839 }
5840 
5841 static int
5843 {
5845  return 0;
5846 
5848 }
5849 
5850 /* Enumerate spufs IDs for process PID. */
5851 static int
5852 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5853 {
5854  int pos = 0;
5855  int written = 0;
5856  char path[128];
5857  DIR *dir;
5858  struct dirent *entry;
5859 
5860  sprintf (path, "/proc/%ld/fd", pid);
5861  dir = opendir (path);
5862  if (!dir)
5863  return -1;
5864 
5865  rewinddir (dir);
5866  while ((entry = readdir (dir)) != NULL)
5867  {
5868  struct stat st;
5869  struct statfs stfs;
5870  int fd;
5871 
5872  fd = atoi (entry->d_name);
5873  if (!fd)
5874  continue;
5875 
5876  sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5877  if (stat (path, &st) != 0)
5878  continue;
5879  if (!S_ISDIR (st.st_mode))
5880  continue;
5881 
5882  if (statfs (path, &stfs) != 0)
5883  continue;
5884  if (stfs.f_type != SPUFS_MAGIC)
5885  continue;
5886 
5887  if (pos >= offset && pos + 4 <= offset + len)
5888  {
5889  *(unsigned int *)(buf + pos - offset) = fd;
5890  written += 4;
5891  }
5892  pos += 4;
5893  }
5894 
5895  closedir (dir);
5896  return written;
5897 }
5898 
5899 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5900  object type, using the /proc file system. */
5901 static int
5902 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5903  unsigned const char *writebuf,
5904  CORE_ADDR offset, int len)
5905 {
5906  long pid = lwpid_of (current_thread);
5907  char buf[128];
5908  int fd = 0;
5909  int ret = 0;
5910 
5911  if (!writebuf && !readbuf)
5912  return -1;
5913 
5914  if (!*annex)
5915  {
5916  if (!readbuf)
5917  return -1;
5918  else
5919  return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5920  }
5921 
5922  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5923  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5924  if (fd <= 0)
5925  return -1;
5926 
5927  if (offset != 0
5928  && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5929  {
5930  close (fd);
5931  return 0;
5932  }
5933 
5934  if (writebuf)
5935  ret = write (fd, writebuf, (size_t) len);
5936  else
5937  ret = read (fd, readbuf, (size_t) len);
5938 
5939  close (fd);
5940  return ret;
5941 }
5942 
5943 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5944 struct target_loadseg
5945 {
5946  /* Core address to which the segment is mapped. */
5947  Elf32_Addr addr;
5948  /* VMA recorded in the program header. */
5949  Elf32_Addr p_vaddr;
5950  /* Size of this segment in memory. */
5951  Elf32_Word p_memsz;
5952 };
5953 
5954 # if defined PT_GETDSBT
5955 struct target_loadmap
5956 {
5957  /* Protocol version number, must be zero. */
5958  Elf32_Word version;
5959  /* Pointer to the DSBT table, its size, and the DSBT index. */
5960  unsigned *dsbt_table;
5961  unsigned dsbt_size, dsbt_index;
5962  /* Number of segments in this map. */
5963  Elf32_Word nsegs;
5964  /* The actual memory map. */
5965  struct target_loadseg segs[/*nsegs*/];
5966 };
5967 # define LINUX_LOADMAP PT_GETDSBT
5968 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5969 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5970 # else
5971 struct target_loadmap
5972 {
5973  /* Protocol version number, must be zero. */
5974  Elf32_Half version;
5975  /* Number of segments in this map. */
5976  Elf32_Half nsegs;
5977  /* The actual memory map. */
5978  struct target_loadseg segs[/*nsegs*/];
5979 };
5980 # define LINUX_LOADMAP PTRACE_GETFDPIC
5981 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5982 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5983 # endif
5984 
5985 static int
5986 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5987  unsigned char *myaddr, unsigned int len)
5988 {
5989  int pid = lwpid_of (current_thread);
5990  int addr = -1;
5991  struct target_loadmap *data = NULL;
5992  unsigned int actual_length, copy_length;
5993 
5994  if (strcmp (annex, "exec") == 0)
5995  addr = (int) LINUX_LOADMAP_EXEC;
5996  else if (strcmp (annex, "interp") == 0)
5997  addr = (int) LINUX_LOADMAP_INTERP;
5998  else
5999  return -1;
6000 
6001  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6002  return -1;
6003 
6004  if (data == NULL)
6005  return -1;
6006 
6007  actual_length = sizeof (struct target_loadmap)
6008  + sizeof (struct target_loadseg) * data->nsegs;
6009 
6010  if (offset < 0 || offset > actual_length)
6011  return -1;
6012 
6013  copy_length = actual_length - offset < len ? actual_length - offset : len;
6014  memcpy (myaddr, (char *) data + offset, copy_length);
6015  return copy_length;
6016 }
6017 #else
6018 # define linux_read_loadmap NULL
6019 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6020 
6021 static void
6022 linux_process_qsupported (const char *query)
6023 {
6024  if (the_low_target.process_qsupported != NULL)
6026 }
6027 
6028 static int
6030 {
6031  if (*the_low_target.supports_tracepoints == NULL)
6032  return 0;
6033 
6034  return (*the_low_target.supports_tracepoints) ();
6035 }
6036 
6037 static CORE_ADDR
6038 linux_read_pc (struct regcache *regcache)
6039 {
6040  if (the_low_target.get_pc == NULL)
6041  return 0;
6042 
6043  return (*the_low_target.get_pc) (regcache);
6044 }
6045 
6046 static void
6047 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6048 {
6049  gdb_assert (the_low_target.set_pc != NULL);
6050 
6051  (*the_low_target.set_pc) (regcache, pc);
6052 }
6053 
6054 static int
6056 {
6057  return get_thread_lwp (thread)->stopped;
6058 }
6059 
6060 /* This exposes stop-all-threads functionality to other modules. */
6061 
6062 static void
6063 linux_pause_all (int freeze)
6064 {
6065  stop_all_lwps (freeze, NULL);
6066 }
6067 
6068 /* This exposes unstop-all-threads functionality to other gdbserver
6069  modules. */
6070 
6071 static void
6072 linux_unpause_all (int unfreeze)
6073 {
6074  unstop_all_lwps (unfreeze, NULL);
6075 }
6076 
6077 static int
6079 {
6080  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6081  running LWP. */
6082  if (non_stop)
6083  linux_pause_all (1);
6084  return 0;
6085 }
6086 
6087 static void
6089 {
6090  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6091  running LWP. */
6092  if (non_stop)
6093  linux_unpause_all (1);
6094 }
6095 
6096 static int
6098  CORE_ADDR collector,
6099  CORE_ADDR lockaddr,
6100  ULONGEST orig_size,
6101  CORE_ADDR *jump_entry,
6102  CORE_ADDR *trampoline,
6103  ULONGEST *trampoline_size,
6104  unsigned char *jjump_pad_insn,
6105  ULONGEST *jjump_pad_insn_size,
6106  CORE_ADDR *adjusted_insn_addr,
6107  CORE_ADDR *adjusted_insn_addr_end,
6108  char *err)
6109 {
6111  (tpoint, tpaddr, collector, lockaddr, orig_size,
6112  jump_entry, trampoline, trampoline_size,
6113  jjump_pad_insn, jjump_pad_insn_size,
6114  adjusted_insn_addr, adjusted_insn_addr_end,
6115  err);
6116 }
6117 
6118 static struct emit_ops *
6120 {
6121  if (the_low_target.emit_ops != NULL)
6122  return (*the_low_target.emit_ops) ();
6123  else
6124  return NULL;
6125 }
6126 
6127 static int
6129 {
6131 }
6132 
6133 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6134 
6135 static int
6136 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6137  CORE_ADDR *phdr_memaddr, int *num_phdr)
6138 {
6139  char filename[PATH_MAX];
6140  int fd;
6141  const int auxv_size = is_elf64
6142  ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6143  char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6144 
6145  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6146 
6147  fd = open (filename, O_RDONLY);
6148  if (fd < 0)
6149  return 1;
6150 
6151  *phdr_memaddr = 0;
6152  *num_phdr = 0;
6153  while (read (fd, buf, auxv_size) == auxv_size
6154  && (*phdr_memaddr == 0 || *num_phdr == 0))
6155  {
6156  if (is_elf64)
6157  {
6158  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6159 
6160  switch (aux->a_type)
6161  {
6162  case AT_PHDR:
6163  *phdr_memaddr = aux->a_un.a_val;
6164  break;
6165  case AT_PHNUM:
6166  *num_phdr = aux->a_un.a_val;
6167  break;
6168  }
6169  }
6170  else
6171  {
6172  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6173 
6174  switch (aux->a_type)
6175  {
6176  case AT_PHDR:
6177  *phdr_memaddr = aux->a_un.a_val;
6178  break;
6179  case AT_PHNUM:
6180  *num_phdr = aux->a_un.a_val;
6181  break;
6182  }
6183  }
6184  }
6185 
6186  close (fd);
6187 
6188  if (*phdr_memaddr == 0 || *num_phdr == 0)
6189  {
6190  warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6191  "phdr_memaddr = %ld, phdr_num = %d",
6192  (long) *phdr_memaddr, *num_phdr);
6193  return 2;
6194  }
6195 
6196  return 0;
6197 }
6198 
6199 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6200 
6201 static CORE_ADDR
6202 get_dynamic (const int pid, const int is_elf64)
6203 {
6204  CORE_ADDR phdr_memaddr, relocation;
6205  int num_phdr, i;
6206  unsigned char *phdr_buf;
6207  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6208 
6209  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6210  return 0;
6211 
6212  gdb_assert (num_phdr < 100); /* Basic sanity check. */
6213  phdr_buf = alloca (num_phdr * phdr_size);
6214 
6215  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6216  return 0;
6217 
6218  /* Compute relocation: it is expected to be 0 for "regular" executables,
6219  non-zero for PIE ones. */
6220  relocation = -1;
6221  for (i = 0; relocation == -1 && i < num_phdr; i++)
6222  if (is_elf64)
6223  {
6224  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6225 
6226  if (p->p_type == PT_PHDR)
6227  relocation = phdr_memaddr - p->p_vaddr;
6228  }
6229  else
6230  {
6231  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6232 
6233  if (p->p_type == PT_PHDR)
6234  relocation = phdr_memaddr - p->p_vaddr;
6235  }
6236 
6237  if (relocation == -1)
6238  {
6239  /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6240  any real world executables, including PIE executables, have always
6241  PT_PHDR present. PT_PHDR is not present in some shared libraries or
6242  in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6243  or present DT_DEBUG anyway (fpc binaries are statically linked).
6244 
6245  Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6246 
6247  GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6248 
6249  return 0;
6250  }
6251 
6252  for (i = 0; i < num_phdr; i++)
6253  {
6254  if (is_elf64)
6255  {
6256  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6257 
6258  if (p->p_type == PT_DYNAMIC)
6259  return p->p_vaddr + relocation;
6260  }
6261  else
6262  {
6263  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6264 
6265  if (p->p_type == PT_DYNAMIC)
6266  return p->p_vaddr + relocation;
6267  }
6268  }
6269 
6270  return 0;
6271 }
6272 
6273 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6274  can be 0 if the inferior does not yet have the library list initialized.
6275  We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6276  DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6277 
6278 static CORE_ADDR
6279 get_r_debug (const int pid, const int is_elf64)
6280 {
6281  CORE_ADDR dynamic_memaddr;
6282  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6283  unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6284  CORE_ADDR map = -1;
6285 
6286  dynamic_memaddr = get_dynamic (pid, is_elf64);
6287  if (dynamic_memaddr == 0)
6288  return map;
6289 
6290  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6291  {
6292  if (is_elf64)
6293  {
6294  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6295 #ifdef DT_MIPS_RLD_MAP
6296  union
6297  {
6298  Elf64_Xword map;
6299  unsigned char buf[sizeof (Elf64_Xword)];
6300  }
6301  rld_map;
6302 
6303  if (dyn->d_tag == DT_MIPS_RLD_MAP)
6304  {
6305  if (linux_read_memory (dyn->d_un.d_val,
6306  rld_map.buf, sizeof (rld_map.buf)) == 0)
6307  return rld_map.map;
6308  else
6309  break;
6310  }
6311 #endif /* DT_MIPS_RLD_MAP */
6312 
6313  if (dyn->d_tag == DT_DEBUG && map == -1)
6314  map = dyn->d_un.d_val;
6315 
6316  if (dyn->d_tag == DT_NULL)
6317  break;
6318  }
6319  else
6320  {
6321  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6322 #ifdef DT_MIPS_RLD_MAP
6323  union
6324  {
6325  Elf32_Word map;
6326  unsigned char buf[sizeof (Elf32_Word)];
6327  }
6328  rld_map;
6329 
6330  if (dyn->d_tag == DT_MIPS_RLD_MAP)
6331  {
6332  if (linux_read_memory (dyn->d_un.d_val,
6333  rld_map.buf, sizeof (rld_map.buf)) == 0)
6334  return rld_map.map;
6335  else
6336  break;
6337  }
6338 #endif /* DT_MIPS_RLD_MAP */
6339 
6340  if (dyn->d_tag == DT_DEBUG && map == -1)
6341  map = dyn->d_un.d_val;
6342 
6343  if (dyn->d_tag == DT_NULL)
6344  break;
6345  }
6346 
6347  dynamic_memaddr += dyn_size;
6348  }
6349 
6350  return map;
6351 }
6352 
6353 /* Read one pointer from MEMADDR in the inferior. */
6354 
6355 static int
6356 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6357 {
6358  int ret;
6359 
6360  /* Go through a union so this works on either big or little endian
6361  hosts, when the inferior's pointer size is smaller than the size
6362  of CORE_ADDR. It is assumed the inferior's endianness is the
6363  same of the superior's. */
6364  union
6365  {
6366  CORE_ADDR core_addr;
6367  unsigned int ui;
6368  unsigned char uc;
6369  } addr;
6370 
6371  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6372  if (ret == 0)
6373  {
6374  if (ptr_size == sizeof (CORE_ADDR))
6375  *ptr = addr.core_addr;
6376  else if (ptr_size == sizeof (unsigned int))
6377  *ptr = addr.ui;
6378  else
6379  gdb_assert_not_reached ("unhandled pointer size");
6380  }
6381  return ret;
6382 }
6383 
6385  {
6386  /* Offset and size of r_debug.r_version. */
6388 
6389  /* Offset and size of r_debug.r_map. */
6391 
6392  /* Offset to l_addr field in struct link_map. */
6394 
6395  /* Offset to l_name field in struct link_map. */
6397 
6398  /* Offset to l_ld field in struct link_map. */
6400 
6401  /* Offset to l_next field in struct link_map. */
6403 
6404  /* Offset to l_prev field in struct link_map. */
6406  };
6407 
6408 /* Construct qXfer:libraries-svr4:read reply. */
6409 
6410 static int
6411 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6412  unsigned const char *writebuf,
6413  CORE_ADDR offset, int len)
6414 {
6415  char *document;
6416  unsigned document_len;
6417  struct process_info_private *const priv = current_process ()->priv;
6418  char filename[PATH_MAX];
6419  int pid, is_elf64;
6420 
6421  static const struct link_map_offsets lmo_32bit_offsets =
6422  {
6423  0, /* r_version offset. */
6424  4, /* r_debug.r_map offset. */
6425  0, /* l_addr offset in link_map. */
6426  4, /* l_name offset in link_map. */
6427  8, /* l_ld offset in link_map. */
6428  12, /* l_next offset in link_map. */
6429  16 /* l_prev offset in link_map. */
6430  };
6431 
6432  static const struct link_map_offsets lmo_64bit_offsets =
6433  {
6434  0, /* r_version offset. */
6435  8, /* r_debug.r_map offset. */
6436  0, /* l_addr offset in link_map. */
6437  8, /* l_name offset in link_map. */
6438  16, /* l_ld offset in link_map. */
6439  24, /* l_next offset in link_map. */
6440  32 /* l_prev offset in link_map. */
6441  };
6442  const struct link_map_offsets *lmo;
6443  unsigned int machine;
6444  int ptr_size;
6445  CORE_ADDR lm_addr = 0, lm_prev = 0;
6446  int allocated = 1024;
6447  char *p;
6448  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6449  int header_done = 0;
6450 
6451  if (writebuf != NULL)
6452  return -2;
6453  if (readbuf == NULL)
6454  return -1;
6455 
6456  pid = lwpid_of (current_thread);
6457  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6458  is_elf64 = elf_64_file_p (filename, &machine);
6459  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6460  ptr_size = is_elf64 ? 8 : 4;
6461 
6462  while (annex[0] != '\0')
6463  {
6464  const char *sep;
6465  CORE_ADDR *addrp;
6466  int len;
6467 
6468  sep = strchr (annex, '=');
6469  if (sep == NULL)
6470  break;
6471 
6472  len = sep - annex;
6473  if (len == 5 && startswith (annex, "start"))
6474  addrp = &lm_addr;
6475  else if (len == 4 && startswith (annex, "prev"))
6476  addrp = &lm_prev;
6477  else
6478  {
6479  annex = strchr (sep, ';');
6480  if (annex == NULL)
6481  break;
6482  annex++;
6483  continue;
6484  }
6485 
6486  annex = decode_address_to_semicolon (addrp, sep + 1);
6487  }
6488 
6489  if (lm_addr == 0)
6490  {
6491  int r_version = 0;
6492 
6493  if (priv->r_debug == 0)
6494  priv->r_debug = get_r_debug (pid, is_elf64);
6495 
6496  /* We failed to find DT_DEBUG. Such situation will not change
6497  for this inferior - do not retry it. Report it to GDB as
6498  E01, see for the reasons at the GDB solib-svr4.c side. */
6499  if (priv->r_debug == (CORE_ADDR) -1)
6500  return -1;
6501 
6502  if (priv->r_debug != 0)
6503  {
6504  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6505  (unsigned char *) &r_version,
6506  sizeof (r_version)) != 0
6507  || r_version != 1)
6508  {
6509  warning ("unexpected r_debug version %d", r_version);
6510  }
6511  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6512  &lm_addr, ptr_size) != 0)
6513  {
6514  warning ("unable to read r_map from 0x%lx",
6515  (long) priv->r_debug + lmo->r_map_offset);
6516  }
6517  }
6518  }
6519 
6520  document = xmalloc (allocated);
6521  strcpy (document, "<library-list-svr4 version=\"1.0\"");
6522  p = document + strlen (document);
6523 
6524  while (lm_addr
6525  && read_one_ptr (lm_addr + lmo->l_name_offset,
6526  &l_name, ptr_size) == 0
6527  && read_one_ptr (lm_addr + lmo->l_addr_offset,
6528  &l_addr, ptr_size) == 0
6529  && read_one_ptr (lm_addr + lmo->l_ld_offset,
6530  &l_ld, ptr_size) == 0
6531  && read_one_ptr (lm_addr + lmo->l_prev_offset,
6532  &l_prev, ptr_size) == 0
6533  && read_one_ptr (lm_addr + lmo->l_next_offset,
6534  &l_next, ptr_size) == 0)
6535  {
6536  unsigned char libname[PATH_MAX];
6537 
6538  if (lm_prev != l_prev)
6539  {
6540  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6541  (long) lm_prev, (long) l_prev);
6542  break;
6543  }
6544 
6545  /* Ignore the first entry even if it has valid name as the first entry
6546  corresponds to the main executable. The first entry should not be
6547  skipped if the dynamic loader was loaded late by a static executable
6548  (see solib-svr4.c parameter ignore_first). But in such case the main
6549  executable does not have PT_DYNAMIC present and this function already
6550  exited above due to failed get_r_debug. */
6551  if (lm_prev == 0)
6552  {
6553  sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6554  p = p + strlen (p);
6555  }
6556  else
6557  {
6558  /* Not checking for error because reading may stop before
6559  we've got PATH_MAX worth of characters. */
6560  libname[0] = '\0';
6561  linux_read_memory (l_name, libname, sizeof (libname) - 1);
6562  libname[sizeof (libname) - 1] = '\0';
6563  if (libname[0] != '\0')
6564  {
6565  /* 6x the size for xml_escape_text below. */
6566  size_t len = 6 * strlen ((char *) libname);
6567  char *name;
6568 
6569  if (!header_done)
6570  {
6571  /* Terminate `<library-list-svr4'. */
6572  *p++ = '>';
6573  header_done = 1;
6574  }
6575 
6576  while (allocated < p - document + len + 200)
6577  {
6578  /* Expand to guarantee sufficient storage. */
6579  uintptr_t document_len = p - document;
6580 
6581  document = xrealloc (document, 2 * allocated);
6582  allocated *= 2;
6583  p = document + document_len;
6584  }
6585 
6586  name = xml_escape_text ((char *) libname);
6587  p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6588  "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6589  name, (unsigned long) lm_addr,
6590  (unsigned long) l_addr, (unsigned long) l_ld);
6591  free (name);
6592  }
6593  }
6594 
6595  lm_prev = lm_addr;
6596  lm_addr = l_next;
6597  }
6598 
6599  if (!header_done)
6600  {
6601  /* Empty list; terminate `<library-list-svr4'. */
6602  strcpy (p, "/>");
6603  }
6604  else
6605  strcpy (p, "</library-list-svr4>");
6606 
6607  document_len = strlen (document);
6608  if (offset < document_len)
6609  document_len -= offset;
6610  else
6611  document_len = 0;
6612  if (len > document_len)
6613  len = document_len;
6614 
6615  memcpy (readbuf, document + offset, len);
6616  xfree (document);
6617 
6618  return len;
6619 }
6620 
6621 #ifdef HAVE_LINUX_BTRACE
6622 
6623 /* See to_enable_btrace target method. */
6624 
6625 static struct btrace_target_info *
6626 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6627 {
6628  struct btrace_target_info *tinfo;
6629 
6630  tinfo = linux_enable_btrace (ptid, conf);
6631 
6632  if (tinfo != NULL && tinfo->ptr_bits == 0)
6633  {
6634  struct thread_info *thread = find_thread_ptid (ptid);
6635  struct regcache *regcache = get_thread_regcache (thread, 0);
6636 
6637  tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6638  }
6639 
6640  return tinfo;
6641 }
6642 
6643 /* See to_disable_btrace target method. */
6644 
6645 static int
6646 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6647 {
6648  enum btrace_error err;
6649 
6650  err = linux_disable_btrace (tinfo);
6651  return (err == BTRACE_ERR_NONE ? 0 : -1);
6652 }
6653 
6654 /* Encode an Intel(R) Processor Trace configuration. */
6655 
6656 static void
6657 linux_low_encode_pt_config (struct buffer *buffer,
6658  const struct btrace_data_pt_config *config)
6659 {
6660  buffer_grow_str (buffer, "<pt-config>\n");
6661 
6662  switch (config->cpu.vendor)
6663  {
6664  case CV_INTEL:
6665  buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6666  "model=\"%u\" stepping=\"%u\"/>\n",
6667  config->cpu.family, config->cpu.model,
6668  config->cpu.stepping);
6669  break;
6670 
6671  default:
6672  break;
6673  }
6674 
6675  buffer_grow_str (buffer, "</pt-config>\n");
6676 }
6677 
6678 /* Encode a raw buffer. */
6679 
6680 static void
6681 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6682  unsigned int size)
6683 {
6684  if (size == 0)
6685  return;
6686 
6687  /* We use hex encoding - see common/rsp-low.h. */
6688  buffer_grow_str (buffer, "<raw>\n");
6689 
6690  while (size-- > 0)
6691  {
6692  char elem[2];
6693 
6694  elem[0] = tohex ((*data >> 4) & 0xf);
6695  elem[1] = tohex (*data++ & 0xf);
6696 
6697  buffer_grow (buffer, elem, 2);
6698  }
6699 
6700  buffer_grow_str (buffer, "</raw>\n");
6701 }
6702 
6703 /* See to_read_btrace target method. */
6704 
6705 static int
6706 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6707  int type)
6708 {
6709  struct btrace_data btrace;
6710  struct btrace_block *block;
6711  enum btrace_error err;
6712  int i;
6713 
6714  btrace_data_init (&btrace);
6715 
6716  err = linux_read_btrace (&btrace, tinfo, type);
6717  if (err != BTRACE_ERR_NONE)
6718  {
6719  if (err == BTRACE_ERR_OVERFLOW)
6720  buffer_grow_str0 (buffer, "E.Overflow.");
6721  else
6722  buffer_grow_str0 (buffer, "E.Generic Error.");
6723 
6724  goto err;
6725  }
6726 
6727  switch (btrace.format)
6728  {
6729  case BTRACE_FORMAT_NONE:
6730  buffer_grow_str0 (buffer, "E.No Trace.");
6731  goto err;
6732 
6733  case BTRACE_FORMAT_BTS:
6734  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6735  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6736 
6737  for (i = 0;
6738  VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6739  i++)
6740  buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6741  paddress (block->begin), paddress (block->end));
6742 
6743  buffer_grow_str0 (buffer, "</btrace>\n");
6744  break;
6745 
6746  case BTRACE_FORMAT_PT:
6747  buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6748  buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6749  buffer_grow_str (buffer, "<pt>\n");
6750 
6751  linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6752 
6753  linux_low_encode_raw (buffer, btrace.variant.pt.data,
6754  btrace.variant.pt.size);
6755 
6756  buffer_grow_str (buffer, "</pt>\n");
6757  buffer_grow_str0 (buffer, "</btrace>\n");
6758  break;
6759 
6760  default:
6761  buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6762  goto err;
6763  }
6764 
6765  btrace_data_fini (&btrace);
6766  return 0;
6767 
6768 err:
6769  btrace_data_fini (&btrace);
6770  return -1;
6771 }
6772 
6773 /* See to_btrace_conf target method. */
6774 
6775 static int
6776 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6777  struct buffer *buffer)
6778 {
6779  const struct btrace_config *conf;
6780 
6781  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6782  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6783 
6784  conf = linux_btrace_conf (tinfo);
6785  if (conf != NULL)
6786  {
6787  switch (conf->format)
6788  {
6789  case BTRACE_FORMAT_NONE:
6790  break;
6791 
6792  case BTRACE_FORMAT_BTS:
6793  buffer_xml_printf (buffer, "<bts");
6794  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6795  buffer_xml_printf (buffer, " />\n");
6796  break;
6797 
6798  case BTRACE_FORMAT_PT:
6799  buffer_xml_printf (buffer, "<pt");
6800  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6801  buffer_xml_printf (buffer, "/>\n");
6802  break;
6803  }
6804  }
6805 
6806  buffer_grow_str0 (buffer, "</btrace-conf>\n");
6807  return 0;
6808 }
6809 #endif /* HAVE_LINUX_BTRACE */
6810 
6811 /* See nat/linux-nat.h. */
6812 
6813 ptid_t
6815 {
6816  return ptid_of (current_thread);
6817 }
6818 
6819 static struct target_ops linux_target_ops = {
6821  linux_attach,
6822  linux_kill,
6823  linux_detach,
6824  linux_mourn,
6825  linux_join,
6827  linux_resume,
6828  linux_wait,
6848 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6849  && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6850  && defined(PT_TEXT_END_ADDR)
6851  linux_read_offsets,
6852 #else
6853  NULL,
6854 #endif
6855 #ifdef USE_THREAD_DB
6857 #else
6858  NULL,
6859 #endif
6865  linux_async,
6871 #ifdef USE_THREAD_DB
6873 #else
6874  NULL,
6875 #endif
6880  linux_read_pc,
6883  NULL,
6893 #ifdef HAVE_LINUX_BTRACE
6895  linux_low_enable_btrace,
6896  linux_low_disable_btrace,
6897  linux_low_read_btrace,
6898  linux_low_btrace_conf,
6899 #else
6900  NULL,
6901  NULL,
6902  NULL,
6903  NULL,
6904  NULL,
6905 #endif
6911 };
6912 
6913 static void
6915 {
6916  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6917  to find what the cancel signal actually is. */
6918 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6919  signal (__SIGRTMIN+1, SIG_IGN);
6920 #endif
6921 }
6922 
6923 #ifdef HAVE_LINUX_REGSETS
6924 void
6925 initialize_regsets_info (struct regsets_info *info)
6926 {
6927  for (info->num_regsets = 0;
6928  info->regsets[info->num_regsets].size >= 0;
6929  info->num_regsets++)
6930  ;
6931 }
6932 #endif
6933 
6934 void
6936 {
6937  struct sigaction sigchld_action;
6938  memset (&sigchld_action, 0, sizeof (sigchld_action));
6939  set_target_ops (&linux_target_ops);
6942  linux_init_signals ();
6944 
6945  sigchld_action.sa_handler = sigchld_handler;
6946  sigemptyset (&sigchld_action.sa_mask);
6947  sigchld_action.sa_flags = SA_RESTART;
6948  sigaction (SIGCHLD, &sigchld_action, NULL);
6949 
6951 
6953 }
int debug_threads
Definition: debug.c:24
const struct target_desc * tdesc
Definition: regcache.h:41
struct arch_lwp_info * arch_private
Definition: linux-low.h:352
static int linux_supports_stopped_by_sw_breakpoint(void)
Definition: linux-low.c:5481
static void proceed_all_lwps(void)
Definition: linux-low.c:4702
static struct process_info * linux_add_process(int pid, int attached)
Definition: linux-low.c:404
static void sigchld_handler(int signo)
Definition: linux-low.c:5666
#define regsets_fetch_inferior_registers(regsets_info, regcache)
Definition: linux-low.c:4954
struct btrace_config_bts bts
int( iterate_over_lwps_ftype)(struct lwp_info *lwp, void *arg)
Definition: linux-nat.h:41
ptid_t ptid_of_lwp(struct lwp_info *lwp)
Definition: linux-low.c:147
uint64_t a_type
Definition: linux-low.c:131
static int linux_async(int enable)
Definition: linux-low.c:5695
void initialize_low_arch(void)
static int finish_step_over(struct lwp_info *lwp)
Definition: linux-low.c:4334
struct thread_info * current_thread
Definition: inferiors.c:28
#define PTRACE_GETEVENTMSG
Definition: linux-ptrace.h:59
int handle_target_event(int err, gdb_client_data client_data)
Definition: server.c:4194
void collect_register(struct regcache *regcache, int n, void *buf)
Definition: regcache.c:414
static int iterate_over_lwps_filter(struct inferior_list_entry *entry, void *args_p)
Definition: linux-low.c:1580
#define ptid_of(inf)
Definition: inferiors.h:75
struct process_info * add_process(int pid, int attached)
Definition: inferiors.c:274
struct thread_info * find_thread_ptid(ptid_t ptid)
Definition: inferiors.c:141
#define PTRACE_TYPE_ARG4
Definition: linux-ptrace.h:38
int gdb_condition_true_at_breakpoint(CORE_ADDR where)
Definition: mem-break.c:1254
static int can_hardware_single_step(void)
Definition: linux-low.c:282
struct btrace_cpu cpu
#define WNOHANG
Definition: gdb_wait.h:102
static int linux_read_auxv(CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
Definition: linux-low.c:5406
static void linux_resume_one_lwp(struct lwp_info *lwp, int step, int signal, siginfo_t *info)
Definition: linux-low.c:4010
int ptid_is_pid(ptid_t ptid)
Definition: ptid.c:86
unsigned short family
Definition: btrace-common.h:86
struct usrregs_info * usrregs
Definition: linux-low.h:99
static void select_event_lwp(struct lwp_info **orig_lp)
Definition: linux-low.c:2579
#define linux_read_loadmap
Definition: linux-low.c:6018
static void kill_wait_lwp(struct lwp_info *lwp)
Definition: linux-low.c:1089
int ptid_equal(ptid_t ptid1, ptid_t ptid2)
Definition: ptid.c:76
static int elf_64_file_p(const char *file, unsigned int *machine)
Definition: linux-low.c:356
bfd_vma CORE_ADDR
Definition: common-types.h:41
void(* new_fork)(struct process_info *parent, struct process_info *child)
Definition: linux-low.h:191
void set_desired_thread(int use_general)
Definition: target.c:27
static int linux_insert_point(enum raw_bkpt_type type, CORE_ADDR addr, int size, struct raw_breakpoint *bp)
Definition: linux-low.c:5441
static int linux_attach(unsigned long pid)
Definition: linux-low.c:975
CORE_ADDR begin
Definition: btrace-common.h:42
int(* cannot_store_register)(int)
Definition: linux-low.h:138
int(* supports_z_point_type)(char z_type)
Definition: linux-low.h:158
void thread_db_mourn(struct process_info *)
Definition: thread-db.c:972
void clone_all_breakpoints(struct breakpoint **new_list, struct raw_breakpoint **new_raw_list, const struct breakpoint *src_list)
Definition: mem-break.c:2003
static void enqueue_one_deferred_signal(struct lwp_info *lwp, int *wstat)
Definition: linux-low.c:1934
void warning(const char *fmt,...)
Definition: errors.c:26
int using_threads
Definition: linux-low.c:250
int linux_wstatus_maybe_breakpoint(int wstat)
Definition: linux-ptrace.c:603
static int not_stopped_callback(struct inferior_list_entry *entry, void *arg)
Definition: linux-low.c:1683
static void move_out_of_jump_pad_callback(struct inferior_list_entry *entry)
Definition: linux-low.c:3646
static void linux_resume_one_lwp_throw(struct lwp_info *lwp, int step, int signal, siginfo_t *info)
Definition: linux-low.c:3770
void btrace_data_fini(struct btrace_data *data)
Definition: btrace-common.c:55
union Elf64_auxv_t::@3 a_un
unsigned long signal_pid
Definition: server.c:81
struct lwp_info * find_lwp_pid(ptid_t ptid)
Definition: linux-low.c:1531
int pid
Definition: linux-low.c:1014
struct inferior_list_entry entry
Definition: inferiors.h:47
static int linux_supports_disable_randomization(void)
Definition: linux-low.c:5826
int linux_is_extended_waitstatus(int wstat)
Definition: linux-ptrace.c:595
static int select_event_lwp_callback(struct inferior_list_entry *entry, void *data)
Definition: linux-low.c:2559
struct pending_signals * prev
Definition: linux-low.c:320
static int supports_breakpoints(void)
Definition: linux-low.c:291
static void linux_pause_all(int freeze)
Definition: linux-low.c:6063
static int lwp_in_step_range(struct lwp_info *lwp)
Definition: linux-low.c:309
unsigned int size
static int linux_stopped_by_sw_breakpoint(void)
Definition: linux-low.c:5470
CORE_ADDR end
Definition: btrace-common.h:45
static CORE_ADDR get_dynamic(const int pid, const int is_elf64)
Definition: linux-low.c:6202
static void linux_process_qsupported(const char *query)
Definition: linux-low.c:6022
CORE_ADDR adjusted_insn_addr_end
Definition: tracepoint.h:115
int ptid_match(ptid_t ptid, ptid_t filter)
Definition: ptid.c:120
void internal_error(const char *file, int line, const char *fmt,...)
Definition: errors.c:50
struct process_info * get_thread_process(struct thread_info *thread)
Definition: inferiors.c:349
CORE_ADDR step_range_end
Definition: target.h:61
struct raw_breakpoint * raw_breakpoints
Definition: inferiors.h:64
static int unsuspend_one_lwp(struct inferior_list_entry *entry, void *except)
Definition: linux-low.c:2642
int decr_pc_after_break
Definition: linux-low.h:153
static void linux_join(int pid)
Definition: linux-low.c:1390
struct process_info * find_process_pid(int pid)
Definition: inferiors.c:302
static int linux_supports_fork_events(void)
Definition: linux-low.c:5770
enum target_stop_reason lwp_stop_reason(struct lwp_info *lwp)
Definition: linux-low.c:180
static ptid_t ignore_event(struct target_waitstatus *ourstatus)
Definition: linux-low.c:2779
static int linux_supports_range_stepping(void)
Definition: linux-low.c:5842
#define __WCLONE
Definition: gdb_wait.h:110
#define usr_store_inferior_registers(regs_info, regcache, regno, all)
Definition: linux-low.c:5132
#define GDB_ARCH_TRAP_BRKPT
Definition: linux-ptrace.h:143
static ptid_t linux_wait(ptid_t ptid, struct target_waitstatus *ourstatus, int target_options)
Definition: linux-low.c:3411
static void linux_done_accessing_memory(void)
Definition: linux-low.c:6088
char * paddress(CORE_ADDR addr)
Definition: utils.c:124
static int linux_fast_tracepoint_collecting(struct lwp_info *lwp, struct fast_tpoint_collect_status *status)
Definition: linux-low.c:1784
int collecting_fast_tracepoint
Definition: linux-low.h:329
static int elf_64_header_p(const Elf64_Ehdr *header, unsigned int *machine)
Definition: linux-low.c:336
void for_each_inferior(struct inferior_list *list, void(*action)(struct inferior_list_entry *))
Definition: inferiors.c:47
void linux_check_ptrace_features(void)
Definition: linux-ptrace.c:335
ptid_t id
Definition: inferiors.h:31
const char * name
Definition: tracepoint.c:178
const struct regs_info *(* regs_info)(void)
Definition: linux-low.h:132
int(* stopped_by_watchpoint)(void)
Definition: linux-low.h:164
static void linux_stabilize_threads(void)
Definition: linux-low.c:2703
void set_breakpoint_data(const unsigned char *bp_data, int bp_len)
Definition: mem-break.c:1592
enum btrace_error linux_read_btrace(struct btrace_data *btrace, struct btrace_target_info *tinfo, enum btrace_read_type type)
regnum
int tohex(int nib)
Definition: rsp-low.c:41
struct wstep_state * while_stepping
Definition: gdbthread.h:67
void(* mourn)(struct process_info *proc)
Definition: target.h:98
struct linux_target_ops the_low_target
#define _(String)
Definition: gdb_locale.h:40
uint64_t a_val
Definition: linux-low.c:134
static int stuck_in_jump_pad_callback(struct inferior_list_entry *entry, void *data)
Definition: linux-low.c:3623
int thread_db_init(int use_events)
Definition: thread-db.c:836
int non_stop
Definition: server.c:62
int attached
Definition: inferiors.h:51
static int second_thread_of_pid_p(struct inferior_list_entry *entry, void *args)
Definition: linux-low.c:1019
#define get_thread_lwp(thr)
Definition: linux-low.h:235
struct target_ops * the_target
Definition: target.c:24
#define END_CATCH
char * xml_escape_text(const char *text)
Definition: xml-utils.c:27
const struct target_desc * tdesc
Definition: inferiors.h:69
static int maybe_move_out_of_jump_pad(struct lwp_info *lwp, int *wstat)
Definition: linux-low.c:1809
CORE_ADDR step_range_start
Definition: target.h:60
int(* breakpoint_at)(CORE_ADDR pc)
Definition: linux-low.h:154
static int resume_status_pending_p(struct inferior_list_entry *entry, void *flag_p)
Definition: linux-low.c:4111
#define debug_exit()
Definition: debug.h:50
static void wait_for_sigstop(void)
Definition: linux-low.c:3572
static void unstop_all_lwps(int unsuspend, struct lwp_info *except)
Definition: linux-low.c:4740
void perror(const char *)
Definition: wincecompat.c:24
const unsigned char * breakpoint
Definition: linux-low.h:149
int(* fetch_register)(struct regcache *regcache, int regno)
Definition: linux-low.h:145
int fast_tracepoint_jump_here(CORE_ADDR where)
Definition: mem-break.c:492
static CORE_ADDR get_pc(struct lwp_info *lwp)
Definition: linux-low.c:582
static int unsuspend_and_proceed_one_lwp(struct inferior_list_entry *entry, void *except)
Definition: linux-low.c:4684
int need_step_over
Definition: linux-low.h:342
union Elf32_auxv_t::@2 a_un
Definition: ptid.h:35
enum resume_kind last_resume_kind
Definition: gdbthread.h:36
struct inferior_list_entry entry
Definition: gdbthread.h:30
iterate_over_lwps_ftype * callback
Definition: linux-low.c:1567
#define PTRACE_EVENT_VFORK_DONE
Definition: linux-ptrace.h:75
static int get_detach_signal(struct thread_info *thread)
Definition: linux-low.c:1201
ptid_t ptid_build(int pid, long lwp, long tid)
Definition: ptid.c:31
struct target_waitstatus waitstatus
Definition: linux-low.h:276
static int linux_supports_tracepoints(void)
Definition: linux-low.c:6029
int tracepoint_was_hit(struct thread_info *tinfo, CORE_ADDR stop_pc)
Definition: tracepoint.c:4556
int must_set_ptrace_flags
Definition: linux-low.h:314
struct arch_process_info * arch_private
Definition: linux-low.h:110
int lwp_is_stopped(struct lwp_info *lwp)
Definition: linux-low.c:172
enum btrace_cpu_vendor vendor
Definition: btrace-common.h:83
#define TRY
static int check_stopped_by_breakpoint(struct lwp_info *lwp)
Definition: linux-low.c:629
static struct lwp_info * linux_low_filter_event(int lwpid, int wstat)
Definition: linux-low.c:2101
static int proceed_one_lwp(struct inferior_list_entry *entry, void *except)
Definition: linux-low.c:4595
static void async_file_mark(void)
Definition: linux-low.c:3396
#define PTRACE_EVENT_FORK
Definition: linux-ptrace.h:71
static int linux_supports_conditional_breakpoints(void)
Definition: linux-low.c:5510
void(* set_pc)(struct regcache *regcache, CORE_ADDR newpc)
Definition: linux-low.h:148
int hardware_breakpoint_inserted_here(CORE_ADDR addr)
Definition: mem-break.c:1649
static int linux_stopped_by_hw_breakpoint(void)
Definition: linux-low.c:5490
#define WSTOPSIG
Definition: gdb_wait.h:75
static struct lwp_info * add_lwp(ptid_t ptid)
Definition: linux-low.c:759
static int linux_qxfer_spu(const char *annex, unsigned char *readbuf, unsigned const char *writebuf, CORE_ADDR offset, int len)
Definition: linux-low.c:5902
static void linux_mourn(struct process_info *process)
Definition: linux-low.c:1370
#define VEC_iterate(T, V, I, P)
Definition: vec.h:165
static int linux_remove_point(enum raw_bkpt_type type, CORE_ADDR addr, int size, struct raw_breakpoint *bp)
Definition: linux-low.c:5454
int linux_supports_btrace(struct target_ops *ops, enum btrace_format format)
int handle_tracepoint_bkpts(struct thread_info *tinfo, CORE_ADDR stop_pc)
Definition: tracepoint.c:4474
void set_reinsert_breakpoint(CORE_ADDR stop_at)
Definition: mem-break.c:1393
unsigned char * regset_bitmap
Definition: linux-low.h:94
static void linux_kill_one_lwp(struct lwp_info *lwp)
Definition: linux-low.c:1044
static int linux_thread_stopped(struct thread_info *thread)
Definition: linux-low.c:6055
CORE_ADDR step_range_end
Definition: linux-low.h:310
int remote_connection_is_stdio(void)
Definition: remote-utils.c:134
void linux_ptrace_init_warnings(void)
Definition: linux-ptrace.c:573
static int linux_supports_vfork_events(void)
Definition: linux-low.c:5778
#define CATCH(EXCEPTION, MASK)
void uninsert_breakpoints_at(CORE_ADDR pc)
Definition: mem-break.c:1454
void delete_file_handler(gdb_fildes_t fd)
Definition: event-loop.c:335
raw_bkpt_type
Definition: mem-break.h:40
int fast_tracepoint_collecting(CORE_ADDR thread_area, CORE_ADDR stop_pc, struct fast_tpoint_collect_status *status)
Definition: tracepoint.c:5569
static int last_thread_of_process_p(int pid)
Definition: linux-low.c:1033
static int linux_supports_multi_process(void)
Definition: linux-low.c:5762
char * linux_ptrace_attach_fail_reason_string(ptid_t ptid, int err)
Definition: linux-ptrace.c:55
#define WTERMSIG(w)
Definition: gdb_wait.h:71
void release_while_stepping_state_list(struct thread_info *tinfo)
Definition: tracepoint.c:4351
int linux_supports_traceclone(void)
Definition: linux-ptrace.c:546
void uninsert_fast_tracepoint_jumps_at(CORE_ADDR pc)
Definition: mem-break.c:641
CORE_ADDR bp_reinsert
Definition: linux-low.h:300
static void add_to_pid_list(struct simple_pid_list **listp, int pid, int status)
Definition: linux-low.c:206
int(* get_thread_area)(int lwpid, CORE_ADDR *addrp)
Definition: linux-low.h:204
struct thread_db * thread_db
Definition: linux-low.h:114
static int start_step_over(struct lwp_info *lwp)
Definition: linux-low.c:4273
static int linux_create_inferior(char *program, char **allargs)
Definition: linux-low.c:779
int disable_randomization
Definition: server.c:68
void buffer_xml_printf(struct buffer *buffer, const char *format,...)
Definition: buffer.c:76
#define ALL_PROCESSES(cur, tmp)
Definition: inferiors.h:121
void copy_target_description(struct target_desc *dest, const struct target_desc *src)
Definition: tdesc.c:46
#define __SIGRTMIN
Definition: linux-low.c:80
siginfo_t info
Definition: linux-low.c:319
int(* cannot_fetch_register)(int)
Definition: linux-low.h:133
static int linux_install_fast_tracepoint_jump_pad(CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector, CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry, CORE_ADDR *trampoline, ULONGEST *trampoline_size, unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size, CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end, char *err)
Definition: linux-low.c:6097
#define W_STOPCODE(sig)
Definition: linux-low.c:74
enum resume_kind kind
Definition: target.h:45
int program_signals[GDB_SIGNAL_LAST]
Definition: server.c:73
#define PTRACE_O_TRACEVFORKDONE
Definition: linux-ptrace.h:67
const char * decode_address_to_semicolon(CORE_ADDR *addrp, const char *start)
Definition: remote-utils.c:438
struct simple_pid_list * next
Definition: linux-low.c:198
#define WIFEXITED(w)
Definition: gdb_wait.h:44
int report_fork_events
Definition: server.c:60
#define PTRACE_O_TRACEVFORK
Definition: linux-ptrace.h:64
const char version[]
Definition: version.c:2
int offset
Definition: tracepoint.c:179
#define regsets_store_inferior_registers(regsets_info, regcache)
Definition: linux-low.c:4955
void linux_store_registers(struct regcache *regcache, int regno)
Definition: linux-low.c:5171
int linux_ptrace_get_extended_event(int wstat)
Definition: linux-ptrace.c:587
static void linux_init_signals()
Definition: linux-low.c:6914
static int kill_one_lwp_callback(struct inferior_list_entry *entry, void *args)
Definition: linux-low.c:1133
#define PTRACE_EVENT_CLONE
Definition: linux-ptrace.h:73
#define gdb_assert_not_reached(message)
Definition: gdb_assert.h:56
static void unsuspend_all_lwps(struct lwp_info *except)
Definition: linux-low.c:2659
static int linux_read_memory(CORE_ADDR memaddr, unsigned char *myaddr, int len)
Definition: linux-low.c:5200
struct thread_resume * resume
Definition: linux-low.c:4027
static int linux_supports_stopped_by_hw_breakpoint(void)
Definition: linux-low.c:5501
const struct btrace_config * linux_btrace_conf(const struct btrace_target_info *tinfo)
ptid_t pid_to_ptid(int pid)
Definition: ptid.c:44
static void async_file_flush(void)
Definition: linux-low.c:3384
static int linux_detach_one_lwp(struct inferior_list_entry *entry, void *args)
Definition: linux-low.c:1277
static int attach_proc_task_lwp_callback(ptid_t ptid)
Definition: linux-low.c:932
static int check_stopped_by_watchpoint(struct lwp_info *child)
Definition: linux-low.c:2051
int stop_expected
Definition: linux-low.h:259
int agent_loaded_p(void)
Definition: agent.c:78
int gdb_signal_to_host(enum gdb_signal)
Definition: signals.c:631
#define get_lwp_thread(lwp)
Definition: linux-low.h:236
int register_size(const struct target_desc *tdesc, int n)
Definition: regcache.c:314
int breakpoint_here(CORE_ADDR addr)
Definition: mem-break.c:1599
CORE_ADDR step_range_start
Definition: linux-low.h:309
enum gdb_signal gdb_signal_from_host(int)
Definition: signals.c:116
int linux_proc_pid_is_stopped(pid_t pid)
Definition: linux-procfs.c:149
char * linux_proc_pid_to_exec_file(int pid)
Definition: linux-procfs.c:261
unsigned char model
Definition: btrace-common.h:89
int last_status
Definition: linux-low.h:270
static int delete_lwp_callback(struct inferior_list_entry *entry, void *proc)
Definition: linux-low.c:1357
int thread_db_get_tls_address(struct thread_info *thread, CORE_ADDR offset, CORE_ADDR load_module, CORE_ADDR *address)
Definition: thread-db.c:504
static int linux_resume_one_thread(struct inferior_list_entry *entry, void *arg)
Definition: linux-low.c:4410
static int handle_tracepoints(struct lwp_info *lwp)
Definition: linux-low.c:1738
ptid_t step_over_bkpt
Definition: linux-low.c:276
void(* collect_ptrace_register)(struct regcache *regcache, int regno, char *buf)
Definition: linux-low.h:169
#define gdb_assert(expr)
Definition: gdb_assert.h:33
int status_pending
Definition: linux-low.h:286
int linux_common_core_of_thread(ptid_t ptid)
Definition: linux-osdata.c:61
static int linux_qxfer_osdata(const char *annex, unsigned char *readbuf, unsigned const char *writebuf, CORE_ADDR offset, int len)
Definition: linux-low.c:5583
static int startswith(const char *string, const char *pattern)
Definition: common-utils.h:75
struct cleanup * maybe_disable_address_space_randomization(int disable_randomization)
static int linux_qxfer_libraries_svr4(const char *annex, unsigned char *readbuf, unsigned const char *writebuf, CORE_ADDR offset, int len)
Definition: linux-low.c:6411
static int linux_wait_for_event(ptid_t ptid, int *wstat, int options)
Definition: linux-low.c:2516
CORE_ADDR(* stopped_data_address)(void)
Definition: linux-low.h:165
int stopped
Definition: linux-low.h:267
void(* process_qsupported)(const char *)
Definition: linux-low.h:197
#define PTRACE_TYPE_ARG3
Definition: spu-low.c:38
void check_breakpoints(CORE_ADDR stop_pc)
Definition: mem-break.c:1553
static int linux_register_in_regsets(const struct regs_info *regs_info, int regno)
Definition: linux-low.c:4963
void remove_process(struct process_info *process)
Definition: inferiors.c:293
int gdb_no_commands_at_breakpoint(CORE_ADDR where)
Definition: mem-break.c:1332
btrace_error
static int num_lwps(int pid)
Definition: linux-low.c:1545
void set_target_ops(struct target_ops *target)
Definition: target.c:184
static int linux_low_ptrace_options(int attached)
Definition: linux-low.c:2080
#define PTRACE_O_TRACEFORK
Definition: linux-ptrace.h:63
struct process_info * current_process(void)
Definition: inferiors.c:356
static void complete_ongoing_step_over(void)
Definition: linux-low.c:4370
uint32_t a_val
Definition: linux-low.c:119
static int thread_still_has_status_pending_p(struct thread_info *thread)
Definition: linux-low.c:1421
#define WEXITSTATUS(w)
Definition: gdb_wait.h:67
void linux_fetch_registers(struct regcache *regcache, int regno)
Definition: linux-low.c:5138
static int same_lwp(struct inferior_list_entry *entry, void *data)
Definition: linux-low.c:1514
static void enqueue_pending_signal(struct lwp_info *lwp, int signal, siginfo_t *info)
Definition: linux-low.c:3752
static int lwp_running(struct inferior_list_entry *entry, void *data)
Definition: linux-low.c:3697
char * status_to_str(int status)
Definition: linux-waitpid.c:55
struct process_info_private * priv
Definition: inferiors.h:72
#define buffer_grow_str(BUFFER, STRING)
Definition: buffer.h:54
struct inferior_list all_threads
Definition: inferiors.c:26
int report_vfork_events
Definition: server.c:61
static int stabilizing_threads
Definition: linux-low.c:254
static int reset_lwp_ptrace_options_callback(struct inferior_list_entry *entry, void *args)
Definition: linux-low.c:5787
void thread_db_detach(struct process_info *)
Definition: thread-db.c:958
static int supports_fast_tracepoints(void)
Definition: linux-low.c:301
void close_most_fds(void)
Definition: filestuff.c:224
#define __WALL
Definition: linux-ptrace.h:96
void reinsert_fast_tracepoint_jumps_at(CORE_ADDR where)
Definition: mem-break.c:689
static int read_one_ptr(CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
Definition: linux-low.c:6356
int linux_supports_tracefork(void)
Definition: linux-ptrace.c:535
uint32_t a_type
Definition: linux-low.c:116
struct arch_process_info *(* new_process)(void)
Definition: linux-low.h:183
struct pending_signals * pending_signals
Definition: linux-low.h:318
#define errno
Definition: wincecompat.h:24
static int linux_supports_non_stop(void)
Definition: linux-low.c:5689
int(* insert_point)(enum raw_bkpt_type type, CORE_ADDR addr, int size, struct raw_breakpoint *bp)
Definition: linux-low.h:159
struct simple_pid_list * stopped_pids
Definition: linux-low.c:200
void run_breakpoint_commands(CORE_ADDR where)
Definition: mem-break.c:1374
int linux_pid_exe_is_elf_64_file(int pid, unsigned int *machine)
Definition: linux-low.c:379
int linux_mntns_unlink(pid_t pid, const char *filename)
CORE_ADDR stopped_data_address
Definition: linux-low.h:296
LONGEST linux_common_xfer_osdata(const char *annex, gdb_byte *readbuf, ULONGEST offset, ULONGEST len)
static ptid_t linux_wait_1(ptid_t ptid, struct target_waitstatus *ourstatus, int target_options)
Definition: linux-low.c:2793
static int lwp_is_marked_dead(struct lwp_info *lwp)
Definition: linux-low.c:3562
#define stabilize_threads()
Definition: target.h:531
int(* remove_point)(enum raw_bkpt_type type, CORE_ADDR addr, int size, struct raw_breakpoint *bp)
Definition: linux-low.h:161
int ptid_get_pid(ptid_t ptid)
Definition: ptid.c:52
#define PTRACE_GETSIGINFO
Definition: linux-ptrace.h:42
#define target_is_async_p()
Definition: linux-low.c:328
struct btrace_config_pt pt
#define USE_SIGTRAP_SIGINFO
Definition: linux-ptrace.h:117
struct emit_ops *(* emit_ops)(void)
Definition: linux-low.h:223
Definition: ax.h:91
static int check_ptrace_stopped_lwp_gone(struct lwp_info *lp)
Definition: linux-low.c:3980
PTR xrealloc(PTR ptr, size_t size)
Definition: common-utils.c:51
#define WIFSTOPPED(w)
Definition: gdb_wait.h:62
int linux_attach_lwp(ptid_t ptid)
Definition: linux-low.c:846
void add_file_handler(gdb_fildes_t fd, handler_func *proc, gdb_client_data client_data)
Definition: event-loop.c:325
void(* prepare_to_resume)(struct lwp_info *)
Definition: linux-low.h:194
void throw_exception(struct gdb_exception exception)
static int linux_get_min_fast_tracepoint_insn_len(void)
Definition: linux-low.c:6128
int(* supports_tracepoints)(void)
Definition: linux-low.h:200
#define buffer_grow_str0(BUFFER, STRING)
Definition: buffer.h:56
bfd_byte gdb_byte
Definition: common-types.h:38
static int linux_thread_alive(ptid_t ptid)
Definition: linux-low.c:1403
static void stop_all_lwps(int suspend, struct lwp_info *except)
Definition: linux-low.c:3714
void xfree(void *ptr)
Definition: common-utils.c:97
enum stopping_threads_kind stopping_threads
Definition: linux-low.c:247
int linux_proc_pid_is_gone(pid_t pid)
Definition: linux-procfs.c:108
static int need_step_over_p(struct inferior_list_entry *entry, void *dummy)
Definition: linux-low.c:4133
static void linux_handle_new_gdb_connection(void)
Definition: linux-low.c:5817
unsigned int size
ptid_t null_ptid
Definition: ptid.c:25
int(* siginfo_fixup)(siginfo_t *native, void *inf, int direction)
Definition: linux-low.h:178
void force_unlock_trace_buffer(void)
Definition: tracepoint.c:5553
int(* install_fast_tracepoint_jump_pad)(CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector, CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry, CORE_ADDR *trampoline, ULONGEST *trampoline_size, unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size, CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end, char *err)
Definition: linux-low.h:208
#define lwpid_of(inf)
Definition: inferiors.h:77
ptid_t current_lwp_ptid(void)
Definition: linux-low.c:6814
ptid_t thread
Definition: target.h:42
int stepping
Definition: linux-low.h:304
static int status_pending_p_callback(struct inferior_list_entry *entry, void *arg)
Definition: linux-low.c:1492
static int spu_enumerate_spu_ids(long pid, unsigned char *buf, CORE_ADDR offset, int len)
Definition: linux-low.c:5852
static void lwp_suspended_inc(struct lwp_info *lwp)
Definition: linux-low.c:1702
int count
Definition: linux-low.c:1015
void reinsert_breakpoints_at(CORE_ADDR pc)
Definition: mem-break.c:1512
int tracepoint_finished_step(struct thread_info *tinfo, CORE_ADDR stop_pc)
Definition: tracepoint.c:4369
int xsnprintf(char *str, size_t size, const char *format,...)
Definition: common-utils.c:134
void * alloca(size_t)
static int linux_stopped_by_watchpoint(void)
Definition: linux-low.c:5520
enum btrace_error linux_disable_btrace(struct btrace_target_info *tinfo)
#define PTRACE_SETSIGINFO
Definition: linux-ptrace.h:43
void linux_enable_event_reporting(pid_t pid, int options)
Definition: linux-ptrace.c:489
static void lwp_suspended_decr(struct lwp_info *lwp)
Definition: linux-low.c:1718
#define PTRACE_EVENT_VFORK
Definition: linux-ptrace.h:72
static int handle_extended_wait(struct lwp_info *event_lwp, int wstat)
Definition: linux-low.c:427
int num_regs
Definition: linux-low.h:80
static int send_sigstop_callback(struct inferior_list_entry *entry, void *except)
Definition: linux-low.c:3497
struct thread_resume * resume
Definition: linux-low.h:322
void regcache_invalidate_thread(struct thread_info *thread)
Definition: regcache.c:75
#define debug_enter()
Definition: debug.h:48
void delete_reinsert_breakpoints(void)
Definition: mem-break.c:1402
const char * target_pid_to_str(ptid_t ptid)
Definition: target.c:193
int(* supports_range_stepping)(void)
Definition: linux-low.h:230
struct pending_signals * pending_signals_to_report
Definition: linux-low.h:334
void(* new_thread)(struct lwp_info *)
Definition: linux-low.h:188
void hostio_last_error_from_errno(char *buf)
Definition: hostio-errno.c:28
static void send_sigstop(struct lwp_info *lwp)
Definition: linux-low.c:3473
static struct emit_ops * linux_emit_ops(void)
Definition: linux-low.c:6119
static void resume_stopped_resumed_lwps(struct inferior_list_entry *entry)
Definition: linux-low.c:2274
static void mark_lwp_dead(struct lwp_info *lwp, int wstat)
Definition: linux-low.c:3532
#define pid_of(inf)
Definition: inferiors.h:76
static int linux_detach(int pid)
Definition: linux-low.c:1318
struct regcache * get_thread_regcache(struct thread_info *thread, int fetch)
Definition: regcache.c:27
struct target_waitstatus last_status
Definition: gdbthread.h:39
static void linux_unpause_all(int unfreeze)
Definition: linux-low.c:6072
int thread_db_handle_monitor_command(char *)
Definition: thread-db.c:1001
int linux_proc_pid_is_zombie(pid_t pid)
Definition: linux-procfs.c:183
void perror_with_name(const char *string)
Definition: utils.c:57
void remove_thread(struct thread_info *thread)
Definition: inferiors.c:163
int insert_memory_breakpoint(struct raw_breakpoint *bp)
Definition: mem-break.c:299
static int suspend_and_send_sigstop_callback(struct inferior_list_entry *entry, void *except)
Definition: linux-low.c:3516
enum target_stop_reason stop_reason
Definition: linux-low.h:290
int breakpoint_inserted_here(CORE_ADDR addr)
Definition: mem-break.c:1614
static CORE_ADDR linux_read_pc(struct regcache *regcache)
Definition: linux-low.c:6038
#define TRAP_HWBKPT
Definition: linux-ptrace.h:147
static int dequeue_one_deferred_signal(struct lwp_info *lwp, int *wstat)
Definition: linux-low.c:1993
static int linux_kill(int pid)
Definition: linux-low.c:1160
static void linux_write_pc(struct regcache *regcache, CORE_ADDR pc)
Definition: linux-low.c:6047
unsigned char stepping
Definition: btrace-common.h:92
unsigned long long ULONGEST
Definition: common-types.h:53
PTR xmalloc(size_t size)
Definition: common-utils.c:34
static CORE_ADDR get_r_debug(const int pid, const int is_elf64)
Definition: linux-low.c:6279
int my_waitpid(int pid, int *status, int flags)
Definition: linux-waitpid.c:81
static int linux_supports_agent(void)
Definition: linux-low.c:5836
struct breakpoint * set_breakpoint_at(CORE_ADDR where, int(*handler)(CORE_ADDR))
Definition: mem-break.c:773
int linux_mntns_open_cloexec(pid_t pid, const char *filename, int flags, mode_t mode)
int status_pending_p
Definition: linux-low.h:285
static int linux_prepare_to_access_memory(void)
Definition: linux-low.c:6078
static void check_zombie_leaders(void)
Definition: linux-low.c:1617
#define WIFSIGNALED(w)
Definition: gdb_wait.h:48
long ptid_get_lwp(ptid_t ptid)
Definition: ptid.c:60
Definition: inferiors.h:29
const char * gdb_signal_to_string(enum gdb_signal)
Definition: signals.c:68
int program_signals_p
Definition: server.c:74
struct breakpoint * breakpoints
Definition: inferiors.h:61
static int linux_xfer_siginfo(const char *annex, unsigned char *readbuf, unsigned const char *writebuf, CORE_ADDR offset, int len)
Definition: linux-low.c:5613
static int linux_set_resume_request(struct inferior_list_entry *entry, void *arg)
Definition: linux-low.c:4041
static int linux_supports_z_point_type(char z_type)
Definition: linux-low.c:5434
struct breakpoint * exit_jump_pad_bkpt
Definition: linux-low.h:338
void linux_proc_attach_tgid_threads(pid_t pid, linux_proc_attach_lwp_func attach_lwp)
Definition: linux-procfs.c:191
static void delete_lwp(struct lwp_info *lwp)
Definition: linux-low.c:388
ssize_t linux_mntns_readlink(pid_t pid, const char *filename, char *buf, size_t bufsiz)
void(* supply_ptrace_register)(struct regcache *regcache, int regno, const char *buf)
Definition: linux-low.h:171
struct lwp_info * iterate_over_lwps(ptid_t filter, iterate_over_lwps_ftype callback, void *data)
Definition: linux-low.c:1599
static CORE_ADDR linux_stopped_data_address(void)
Definition: linux-low.c:5528
struct btrace_target_info * linux_enable_btrace(ptid_t ptid, const struct btrace_config *conf)
enum btrace_format format
int(* get_min_fast_tracepoint_insn_len)(void)
Definition: linux-low.h:227
void lwp_set_arch_private_info(struct lwp_info *lwp, struct arch_lwp_info *info)
Definition: linux-low.c:155
stopping_threads_kind
Definition: linux-low.c:234
static int linux_event_pipe[2]
Definition: linux-low.c:325
CORE_ADDR(* breakpoint_reinsert_addr)(void)
Definition: linux-low.h:151
void(* arch_setup)(void)
Definition: linux-low.h:130
void initialize_low(void)
Definition: linux-low.c:6935
#define S_ISDIR(m)
Definition: stat.h:431
void debug_printf(const char *fmt,...)
Definition: common-debug.c:30
#define USE_THREAD_DB
Definition: config.h:347
static void linux_request_interrupt(void)
Definition: linux-low.c:5393
static void linux_resume(struct thread_resume *resume_info, size_t n)
Definition: linux-low.c:4522
void btrace_data_init(struct btrace_data *data)
Definition: btrace-common.c:47
void linux_stop_lwp(struct lwp_info *lwp)
Definition: linux-low.c:3467
static void siginfo_fixup(siginfo_t *siginfo, void *inf_siginfo, int direction)
Definition: linux-low.c:5594
static int linux_start_non_stop(int nonstop)
Definition: linux-low.c:5750
static int linux_write_memory(CORE_ADDR memaddr, const unsigned char *myaddr, int len)
Definition: linux-low.c:5289
CORE_ADDR stop_pc
Definition: linux-low.h:281
void supply_register(struct regcache *regcache, int n, const void *buf)
Definition: regcache.c:330
#define SPUFS_MAGIC
Definition: linux-low.c:59
int * regmap
Definition: linux-low.h:83
int remove_memory_breakpoint(struct raw_breakpoint *bp)
Definition: mem-break.c:348
static int get_phdr_phnum_from_proc_auxv(const int pid, const int is_elf64, CORE_ADDR *phdr_memaddr, int *num_phdr)
Definition: linux-low.c:6136
struct thread_info * add_thread(ptid_t ptid, void *target_data)
Definition: inferiors.c:106
PTR xcalloc(size_t number, size_t size)
Definition: common-utils.c:71
static int select_singlestep_lwp_callback(struct inferior_list_entry *entry, void *data)
Definition: linux-low.c:2543
static int kill_lwp(unsigned long lwpid, int signo)
Definition: linux-low.c:3441
#define PTRACE_O_EXITKILL
Definition: linux-ptrace.h:82
#define PTRACE_XFER_TYPE
Definition: linux-low.h:30
struct arch_lwp_info * lwp_arch_private_info(struct lwp_info *lwp)
Definition: linux-low.c:164
static int count_events_callback(struct inferior_list_entry *entry, void *data)
Definition: linux-low.c:2524
void error(const char *fmt,...)
Definition: errors.c:38
int delete_breakpoint(struct breakpoint *todel)
Definition: mem-break.c:888
#define ALL_INFERIORS(list, cur, tmp)
Definition: inferiors.h:117
int suspended
Definition: linux-low.h:263
ptid_t minus_one_ptid
Definition: ptid.c:26
static int pull_pid_from_list(struct simple_pid_list **listp, int pid, int *statusp)
Definition: linux-low.c:217
#define use_linux_regsets
Definition: linux-low.c:4953
int pass_signals[GDB_SIGNAL_LAST]
Definition: server.c:72
void do_cleanups(struct cleanup *old_chain)
Definition: cleanups.c:175
char * strerror(int)
int gdb_breakpoint_here(CORE_ADDR where)
Definition: mem-break.c:1385
void buffer_grow(struct buffer *buffer, const char *data, size_t size)
Definition: buffer.c:25
struct inferior_list_entry * find_inferior(struct inferior_list *list, int(*func)(struct inferior_list_entry *, void *), void *arg)
Definition: inferiors.c:188
#define O_LARGEFILE
Definition: linux-low.c:70
struct thread_info * thread
Definition: linux-low.h:251
CORE_ADDR(* get_pc)(struct regcache *regcache)
Definition: linux-low.h:147
static void linux_look_up_symbols(void)
Definition: linux-low.c:5377
#define usr_fetch_inferior_registers(regs_info, regcache, regno, all)
Definition: linux-low.c:5131
int swbreak_feature
Definition: server.c:63
int linux_proc_pid_is_trace_stopped_nowarn(pid_t pid)
Definition: linux-procfs.c:158
static int linux_wait_for_event_filtered(ptid_t wait_ptid, ptid_t filter_ptid, int *wstat, int options)
Definition: linux-low.c:2307