/tmp/gdb-7.10/gdb/linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2 
3  Copyright (C) 2001-2015 Free Software Foundation, Inc.
4 
5  This file is part of GDB.
6 
7  This program is free software; you can redistribute it and/or modify
8  it under the terms of the GNU General Public License as published by
9  the Free Software Foundation; either version 3 of the License, or
10  (at your option) any later version.
11 
12  This program is distributed in the hope that it will be useful,
13  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  GNU General Public License for more details.
16 
17  You should have received a copy of the GNU General Public License
18  along with this program. If not, see <http://www.gnu.org/licenses/>. */
19 
20 #include "defs.h"
21 #include "inferior.h"
22 #include "infrun.h"
23 #include "target.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "nat/linux-ptrace.h"
34 #include "nat/linux-procfs.h"
35 #include "nat/linux-personality.h"
36 #include "linux-fork.h"
37 #include "gdbthread.h"
38 #include "gdbcmd.h"
39 #include "regcache.h"
40 #include "regset.h"
41 #include "inf-child.h"
42 #include "inf-ptrace.h"
43 #include "auxv.h"
44 #include <sys/procfs.h> /* for elf_gregset etc. */
45 #include "elf-bfd.h" /* for elfcore_write_* */
46 #include "gregset.h" /* for gregset */
47 #include "gdbcore.h" /* for get_exec_file */
48 #include <ctype.h> /* for isdigit */
49 #include <sys/stat.h> /* for struct stat */
50 #include <fcntl.h> /* for O_RDONLY */
51 #include "inf-loop.h"
52 #include "event-loop.h"
53 #include "event-top.h"
54 #include <pwd.h>
55 #include <sys/types.h>
56 #include <dirent.h>
57 #include "xml-support.h"
58 #include <sys/vfs.h>
59 #include "solib.h"
60 #include "nat/linux-osdata.h"
61 #include "linux-tdep.h"
62 #include "symfile.h"
63 #include "agent.h"
64 #include "tracepoint.h"
65 #include "buffer.h"
66 #include "target-descriptions.h"
67 #include "filestuff.h"
68 #include "objfiles.h"
69 #include "nat/linux-namespaces.h"
70 #include "fileio.h"
71 
72 #ifndef SPUFS_MAGIC
73 #define SPUFS_MAGIC 0x23c9b64e
74 #endif
75 
76 /* This comment documents high-level logic of this file.
77 
78 Waiting for events in sync mode
79 ===============================
80 
81 When waiting for an event in a specific thread, we just use waitpid, passing
82 the specific pid, and not passing WNOHANG.
83 
84 When waiting for an event in all threads, waitpid alone is not sufficient.
85 Prior to version 2.4, Linux can wait for events either in the main thread or
86 in secondary threads, but not in both at once (2.4 has the __WALL flag). So,
87 if we use blocking waitpid, we might miss an event. The solution is to use
88 non-blocking waitpid, together with sigsuspend. First, we use non-blocking
89 waitpid to get an event in the main process, if any. Second, we use
90 non-blocking waitpid with the __WCLONE flag to check for events in cloned
91 processes. If nothing is found, we use sigsuspend to wait for SIGCHLD. When
92 SIGCHLD arrives, it means something happened to a child process -- and SIGCHLD
93 will be delivered both for events in the main debugged process and in cloned
94 processes. As soon as we know there's an event, we go back to calling
95 non-blocking waitpid with and without __WCLONE.
96 
97 Note that SIGCHLD must be blocked between the waitpid and sigsuspend calls,
98 so that we don't miss a signal. If SIGCHLD arrives in between, while it is
99 blocked, the signal becomes pending and sigsuspend immediately
100 notices it and returns.
101 
102 Waiting for events in async mode
103 ================================
104 
105 In async mode, GDB should always be ready to handle both user input
106 and target events, so neither blocking waitpid nor sigsuspend is a
107 viable option. Instead, we should asynchronously notify the GDB main
108 event loop whenever there's an unprocessed event from the target. We
109 detect asynchronous target events by handling SIGCHLD signals. To
110 notify the event loop about target events, the self-pipe trick is used
111 --- a pipe is registered as a waitable event source in the event loop,
112 the event loop selects/polls on the read end of this pipe (as well as
113 on other event sources, e.g., stdin), and the SIGCHLD handler writes a
114 byte to this pipe. This is more portable than relying on
115 pselect/ppoll, since on kernels that lack those syscalls, libc
116 emulates them with select/poll+sigprocmask, and that is racy
117 (a.k.a. plain broken).
118 
119 Obviously, if we fail to notify the event loop when there's a target
120 event, it's bad. OTOH, if we notify the event loop when there's no
121 event from the target, linux_nat_wait will detect that there's no real
122 event to report, and return an event of type TARGET_WAITKIND_IGNORE.
123 This is mostly harmless, but it will waste time and is better avoided.
124 
125 The main design point is that every time GDB is outside linux-nat.c,
126 we have a SIGCHLD handler installed that is called when something
127 happens to the target and notifies the GDB event loop. Whenever GDB
128 core decides to handle the event, and calls into linux-nat.c, we
129 process things as in sync mode, except that we never block in
130 sigsuspend.
131 
132 While processing an event, we may end up momentarily blocked in
133 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
134 return quickly. E.g., in all-stop mode, before reporting to the core
135 that an LWP hit a breakpoint, all LWPs are stopped by sending them
136 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
137 Note that this is different from blocking indefinitely waiting for the
138 next event --- here, we're already handling an event.
139 
140 Use of signals
141 ==============
142 
143 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
144 signal is not especially significant; we just need a signal to be delivered,
145 so that we can intercept it. SIGSTOP's advantage is that it cannot be
146 blocked. A disadvantage is that it is not a real-time signal, so it can only
147 be queued once; we do not keep track of other sources of SIGSTOP.
148 
149 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
150 use them, because they have special behavior when the signal is generated -
151 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
152 kills the entire thread group.
153 
154 A delivered SIGSTOP would stop the entire thread group, not just the thread we
155 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
156 cancel it (by PTRACE_CONT without passing SIGSTOP).
157 
158 We could use a real-time signal instead. This would solve those problems; we
159 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
160 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
161 generates it, and there are races with trying to find a signal that is not
162 blocked. */
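/* Illustrative sketch only, not part of linux-nat.c: the sync-mode wait
   scheme described in the comment above, reduced to its core. It assumes
   <sys/wait.h> and <signal.h>, a Linux libc that defines __WCLONE, and
   that SIGCHLD is blocked by the caller; "main_pid" and "suspend_mask"
   are placeholders for the debugged process and the sigsuspend mask. */
static int
sync_wait_sketch (pid_t main_pid, const sigset_t *suspend_mask, int *statusp)
{
  for (;;)
    {
      /* Poll the main process without blocking. */
      pid_t pid = waitpid (main_pid, statusp, WNOHANG);
      if (pid > 0)
	return pid;

      /* Poll cloned (thread) children without blocking. */
      pid = waitpid (-1, statusp, __WCLONE | WNOHANG);
      if (pid > 0)
	return pid;

      /* Nothing pending: atomically unblock SIGCHLD and sleep until a
	 child changes state, then loop and poll again. Because SIGCHLD
	 is blocked outside this call, a signal arriving between the
	 waitpid calls and sigsuspend is not lost; it is left pending and
	 sigsuspend returns immediately. */
      sigsuspend (suspend_mask);
    }
}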
163 
164 #ifndef O_LARGEFILE
165 #define O_LARGEFILE 0
166 #endif
167 
168 /* Does the current host support PTRACE_GETREGSET? */
170 
171 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
172  the use of the multi-threaded target. */
173 static struct target_ops *linux_ops;
174 static struct target_ops linux_ops_saved;
175 
176 /* The method to call, if any, when a new thread is attached. */
177 static void (*linux_nat_new_thread) (struct lwp_info *);
178 
179 /* The method to call, if any, when a new fork is attached. */
180 static linux_nat_new_fork_ftype *linux_nat_new_fork;
181 
182 /* The method to call, if any, when a process is no longer
183  attached. */
184 static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;
185 
186 /* Hook to call prior to resuming a thread. */
187 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
188 
189 /* The method to call, if any, when the siginfo object needs to be
190  converted between the layout returned by ptrace, and the layout in
191  the architecture of the inferior. */
192 static int (*linux_nat_siginfo_fixup) (siginfo_t *,
193  gdb_byte *,
194  int);
195 
196 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
197  Called by our to_xfer_partial. */
198 static target_xfer_partial_ftype *super_xfer_partial;
199 
200 /* The saved to_close method, inherited from inf-ptrace.c.
201  Called by our to_close. */
202 static void (*super_close) (struct target_ops *);
203 
204 static unsigned int debug_linux_nat;
205 static void
206 show_debug_linux_nat (struct ui_file *file, int from_tty,
207  struct cmd_list_element *c, const char *value)
208 {
209  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
210  value);
211 }
212 
213 struct simple_pid_list
214 {
215  int pid;
216  int status;
217  struct simple_pid_list *next;
218 };
219 struct simple_pid_list *stopped_pids;
220 
221 /* Async mode support. */
222 
223 /* The read/write ends of the pipe registered as waitable file in the
224  event loop. */
225 static int linux_nat_event_pipe[2] = { -1, -1 };
226 
227 /* True if we're currently in async mode. */
228 #define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
229 
230 /* Flush the event pipe. */
231 
232 static void
233 async_file_flush (void)
234 {
235  int ret;
236  char buf;
237 
238  do
239  {
240  ret = read (linux_nat_event_pipe[0], &buf, 1);
241  }
242  while (ret >= 0 || (ret == -1 && errno == EINTR));
243 }
244 
245 /* Put something (anything, doesn't matter what, or how much) in event
246  pipe, so that the select/poll in the event-loop realizes we have
247  something to process. */
248 
249 static void
250 async_file_mark (void)
251 {
252  int ret;
253 
254  /* It doesn't really matter what the pipe contains, as long we end
255  up with something in it. Might as well flush the previous
256  left-overs. */
257  async_file_flush ();
258 
259  do
260  {
261  ret = write (linux_nat_event_pipe[1], "+", 1);
262  }
263  while (ret == -1 && errno == EINTR);
264 
265  /* Ignore EAGAIN. If the pipe is full, the event loop will already
266  be awakened anyway. */
267 }
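/* Illustrative sketch only, not part of linux-nat.c: the self-pipe trick
   described in the file comment, shown stand-alone. The signal handler
   merely writes a byte; the event loop polls the pipe's read end along
   with its other descriptors and does all real work outside signal
   context. "event_pipe_sketch" is a placeholder for a pipe whose ends
   were opened with O_NONBLOCK (as linux_nat_event_pipe is); it assumes
   <unistd.h>, <errno.h> and <sys/select.h>. */
static int event_pipe_sketch[2];

static void
sigchld_handler_sketch (int signo)
{
  int saved_errno = errno;	/* Signal handlers must preserve errno. */

  (void) signo;
  (void) write (event_pipe_sketch[1], "+", 1);	/* EAGAIN is fine: already marked. */
  errno = saved_errno;
}

static void
event_loop_iteration_sketch (void)
{
  fd_set readfds;

  FD_ZERO (&readfds);
  FD_SET (event_pipe_sketch[0], &readfds);	/* plus stdin, remote sockets, ... */

  if (select (event_pipe_sketch[0] + 1, &readfds, NULL, NULL, NULL) > 0
      && FD_ISSET (event_pipe_sketch[0], &readfds))
    {
      char buf[64];

      /* Drain the pipe (non-blocking), then collect the actual child
	 status with non-blocking waitpid, as in sync mode. */
      while (read (event_pipe_sketch[0], buf, sizeof buf) > 0)
	;
    }
}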
268 
269 static int kill_lwp (int lwpid, int signo);
270 
271 static int stop_callback (struct lwp_info *lp, void *data);
272 static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
273 
274 static void block_child_signals (sigset_t *prev_mask);
275 static void restore_child_signals_mask (sigset_t *prev_mask);
276 
277 struct lwp_info;
278 static struct lwp_info *add_lwp (ptid_t ptid);
279 static void purge_lwp_list (int pid);
280 static void delete_lwp (ptid_t ptid);
281 static struct lwp_info *find_lwp_pid (ptid_t ptid);
282 
283 static int lwp_status_pending_p (struct lwp_info *lp);
284 
285 static int check_stopped_by_breakpoint (struct lwp_info *lp);
286 static int sigtrap_is_event (int status);
288 
289 
290 /* LWP accessors. */
291 
292 /* See nat/linux-nat.h. */
293 
294 ptid_t
295 ptid_of_lwp (struct lwp_info *lwp)
296 {
297  return lwp->ptid;
298 }
299 
300 /* See nat/linux-nat.h. */
301 
302 void
303 lwp_set_arch_private_info (struct lwp_info *lwp,
304  struct arch_lwp_info *info)
305 {
306  lwp->arch_private = info;
307 }
308 
309 /* See nat/linux-nat.h. */
310 
311 struct arch_lwp_info *
312 lwp_arch_private_info (struct lwp_info *lwp)
313 {
314  return lwp->arch_private;
315 }
316 
317 /* See nat/linux-nat.h. */
318 
319 int
320 lwp_is_stopped (struct lwp_info *lwp)
321 {
322  return lwp->stopped;
323 }
324 
325 /* See nat/linux-nat.h. */
326 
327 enum target_stop_reason
328 lwp_stop_reason (struct lwp_info *lwp)
329 {
330  return lwp->stop_reason;
331 }
332 
333 
334 /* Trivial list manipulation functions to keep track of a list of
335  new stopped processes. */
336 static void
337 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
338 {
339  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
340 
341  new_pid->pid = pid;
342  new_pid->status = status;
343  new_pid->next = *listp;
344  *listp = new_pid;
345 }
346 
347 static int
348 in_pid_list_p (struct simple_pid_list *list, int pid)
349 {
350  struct simple_pid_list *p;
351 
352  for (p = list; p != NULL; p = p->next)
353  if (p->pid == pid)
354  return 1;
355  return 0;
356 }
357 
358 static int
359 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
360 {
361  struct simple_pid_list **p;
362 
363  for (p = listp; *p != NULL; p = &(*p)->next)
364  if ((*p)->pid == pid)
365  {
366  struct simple_pid_list *next = (*p)->next;
367 
368  *statusp = (*p)->status;
369  xfree (*p);
370  *p = next;
371  return 1;
372  }
373  return 0;
374 }
375 
376 /* Return the ptrace options that we want to try to enable. */
377 
378 static int
379 linux_nat_ptrace_options (int attached)
380 {
381  int options = 0;
382 
383  if (!attached)
384  options |= PTRACE_O_EXITKILL;
385 
386  options |= (PTRACE_O_TRACESYSGOOD
391 
392  return options;
393 }
394 
395 /* Initialize ptrace warnings and check for supported ptrace
396  features given PID.
397 
398  ATTACHED should be nonzero iff we attached to the inferior. */
399 
400 static void
401 linux_init_ptrace (pid_t pid, int attached)
402 {
403  int options = linux_nat_ptrace_options (attached);
404 
405  linux_enable_event_reporting (pid, options);
406  linux_ptrace_init_warnings ();
407 }
408 
409 static void
410 linux_child_post_attach (struct target_ops *self, int pid)
411 {
412  linux_init_ptrace (pid, 1);
413 }
414 
415 static void
416 linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
417 {
418  linux_init_ptrace (ptid_get_pid (ptid), 0);
419 }
420 
421 /* Return the number of known LWPs in the tgid given by PID. */
422 
423 static int
424 num_lwps (int pid)
425 {
426  int count = 0;
427  struct lwp_info *lp;
428 
429  for (lp = lwp_list; lp; lp = lp->next)
430  if (ptid_get_pid (lp->ptid) == pid)
431  count++;
432 
433  return count;
434 }
435 
436 /* Call delete_lwp with prototype compatible for make_cleanup. */
437 
438 static void
439 delete_lwp_cleanup (void *lp_voidp)
440 {
441  struct lwp_info *lp = lp_voidp;
442 
443  delete_lwp (lp->ptid);
444 }
445 
446 /* Target hook for follow_fork. On entry inferior_ptid must be the
447  ptid of the followed inferior. At return, inferior_ptid will be
448  unchanged. */
449 
450 static int
451 linux_child_follow_fork (struct target_ops *ops, int follow_child,
452  int detach_fork)
453 {
454  if (!follow_child)
455  {
456  struct lwp_info *child_lp = NULL;
457  int status = W_STOPCODE (0);
458  struct cleanup *old_chain;
459  int has_vforked;
460  ptid_t parent_ptid, child_ptid;
461  int parent_pid, child_pid;
462 
463  has_vforked = (inferior_thread ()->pending_follow.kind
464  == TARGET_WAITKIND_VFORKED);
465  parent_ptid = inferior_ptid;
466  child_ptid = inferior_thread ()->pending_follow.value.related_pid;
467  parent_pid = ptid_get_lwp (parent_ptid);
468  child_pid = ptid_get_lwp (child_ptid);
469 
470  /* We're already attached to the parent, by default. */
471  old_chain = save_inferior_ptid ();
472  inferior_ptid = child_ptid;
473  child_lp = add_lwp (inferior_ptid);
474  child_lp->stopped = 1;
475  child_lp->last_resume_kind = resume_stop;
476 
477  /* Detach new forked process? */
478  if (detach_fork)
479  {
480  make_cleanup (delete_lwp_cleanup, child_lp);
481 
482  if (linux_nat_prepare_to_resume != NULL)
483  linux_nat_prepare_to_resume (child_lp);
484 
485  /* When debugging an inferior in an architecture that supports
486  hardware single stepping on a kernel without commit
487  6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
488  process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
489  set if the parent process had them set.
490  To work around this, single step the child process
491  once before detaching to clear the flags. */
492 
493  if (!gdbarch_software_single_step_p (target_thread_architecture
494  (child_lp->ptid)))
495  {
496  linux_disable_event_reporting (child_pid);
497  if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
498  perror_with_name (_("Couldn't do single step"));
499  if (my_waitpid (child_pid, &status, 0) < 0)
500  perror_with_name (_("Couldn't wait vfork process"));
501  }
502 
503  if (WIFSTOPPED (status))
504  {
505  int signo;
506 
507  signo = WSTOPSIG (status);
508  if (signo != 0
509  && !signal_pass_state (gdb_signal_from_host (signo)))
510  signo = 0;
511  ptrace (PTRACE_DETACH, child_pid, 0, signo);
512  }
513 
514  /* Resets value of inferior_ptid to parent ptid. */
515  do_cleanups (old_chain);
516  }
517  else
518  {
519  /* Let the thread_db layer learn about this new process. */
520  check_for_thread_db ();
521  }
522 
523  do_cleanups (old_chain);
524 
525  if (has_vforked)
526  {
527  struct lwp_info *parent_lp;
528 
529  parent_lp = find_lwp_pid (parent_ptid);
530  gdb_assert (linux_supports_tracefork ());
531 
532  if (linux_supports_tracevforkdone ())
533  {
534  if (debug_linux_nat)
535  fprintf_unfiltered (gdb_stdlog,
536  "LCFF: waiting for VFORK_DONE on %d\n",
537  parent_pid);
538  parent_lp->stopped = 1;
539 
540  /* We'll handle the VFORK_DONE event like any other
541  event, in target_wait. */
542  }
543  else
544  {
545  /* We can't insert breakpoints until the child has
546  finished with the shared memory region. We need to
547  wait until that happens. Ideal would be to just
548  call:
549  - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
550  - waitpid (parent_pid, &status, __WALL);
551  However, most architectures can't handle a syscall
552  being traced on the way out if it wasn't traced on
553  the way in.
554 
555  We might also think to loop, continuing the child
556  until it exits or gets a SIGTRAP. One problem is
557  that the child might call ptrace with PTRACE_TRACEME.
558 
559  There's no simple and reliable way to figure out when
560  the vforked child will be done with its copy of the
561  shared memory. We could step it out of the syscall,
562  two instructions, let it go, and then single-step the
563  parent once. When we have hardware single-step, this
564  would work; with software single-step it could still
565  be made to work but we'd have to be able to insert
566  single-step breakpoints in the child, and we'd have
567  to insert -just- the single-step breakpoint in the
568  parent. Very awkward.
569 
570  In the end, the best we can do is to make sure it
571  runs for a little while. Hopefully it will be out of
572  range of any breakpoints we reinsert. Usually this
573  is only the single-step breakpoint at vfork's return
574  point. */
575 
576  if (debug_linux_nat)
577  fprintf_unfiltered (gdb_stdlog,
578  "LCFF: no VFORK_DONE "
579  "support, sleeping a bit\n");
580 
581  usleep (10000);
582 
583  /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
584  and leave it pending. The next linux_nat_resume call
585  will notice a pending event, and bypasses actually
586  resuming the inferior. */
587  parent_lp->status = 0;
588  parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
589  parent_lp->stopped = 1;
590 
591  /* If we're in async mode, need to tell the event loop
592  there's something here to process. */
593  if (target_is_async_p ())
594  async_file_mark ();
595  }
596  }
597  }
598  else
599  {
600  struct lwp_info *child_lp;
601 
602  child_lp = add_lwp (inferior_ptid);
603  child_lp->stopped = 1;
604  child_lp->last_resume_kind = resume_stop;
605 
606  /* Let the thread_db layer learn about this new process. */
607  check_for_thread_db ();
608  }
609 
610  return 0;
611 }
612 
613 
614 static int
615 linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
616 {
617  return !linux_supports_tracefork ();
618 }
619 
620 static int
621 linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
622 {
623  return 0;
624 }
625 
626 static int
627 linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
628 {
629  return !linux_supports_tracefork ();
630 }
631 
632 static int
633 linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
634 {
635  return 0;
636 }
637 
638 static int
639 linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
640 {
641  return !linux_supports_tracefork ();
642 }
643 
644 static int
645 linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
646 {
647  return 0;
648 }
649 
650 static int
651 linux_child_set_syscall_catchpoint (struct target_ops *self,
652  int pid, int needed, int any_count,
653  int table_size, int *table)
654 {
655  if (!linux_supports_tracesysgood ())
656  return 1;
657 
658  /* On GNU/Linux, we ignore the arguments. It means that we only
659  enable the syscall catchpoints, but do not disable them.
660 
661  Also, we do not use the `table' information because we do not
662  filter system calls here. We let GDB do the logic for us. */
663  return 0;
664 }
665 
666 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
667  are processes sharing the same VM space. A multi-threaded process
668  is basically a group of such processes. However, such a grouping
669  is almost entirely a user-space issue; the kernel doesn't enforce
670  such a grouping at all (this might change in the future). In
671  general, we'll rely on the threads library (i.e. the GNU/Linux
672  Threads library) to provide such a grouping.
673 
674  It is entirely possible to write a multi-threaded application
675  without the assistance of a threads library, by using the clone
676  system call directly. This module should be able to give some
677  rudimentary support for debugging such applications if developers
678  specify the CLONE_PTRACE flag in the clone system call, and are
679  using the Linux kernel 2.4 or above.
680 
681  Note that there are some peculiarities in GNU/Linux that affect
682  this code:
683 
684  - In general one should specify the __WCLONE flag to waitpid in
685  order to make it report events for any of the cloned processes
686  (and leave it out for the initial process). However, if a cloned
687  process has exited the exit status is only reported if the
688  __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
689  we cannot use it since GDB must work on older systems too.
690 
691  - When a traced, cloned process exits and is waited for by the
692  debugger, the kernel reassigns it to the original parent and
693  keeps it around as a "zombie". Somehow, the GNU/Linux Threads
694  library doesn't notice this, which leads to the "zombie problem":
695  When debugged, a multi-threaded process that spawns a lot of
696  threads will run out of processes, even if the threads exit,
697  because the "zombies" stay around. */
698 
699 /* List of known LWPs. */
700 struct lwp_info *lwp_list;
701 
702 
703 /* Original signal mask. */
704 static sigset_t normal_mask;
705 
706 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
707  _initialize_linux_nat. */
708 static sigset_t suspend_mask;
709 
710 /* Signals to block to make sigsuspend work. */
711 static sigset_t blocked_mask;
712 
713 /* SIGCHLD action. */
714 struct sigaction sigchld_action;
715 
716 /* Block child signals (SIGCHLD and linux threads signals), and store
717  the previous mask in PREV_MASK. */
718 
719 static void
720 block_child_signals (sigset_t *prev_mask)
721 {
722  /* Make sure SIGCHLD is blocked. */
723  if (!sigismember (&blocked_mask, SIGCHLD))
724  sigaddset (&blocked_mask, SIGCHLD);
725 
726  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
727 }
728 
729 /* Restore child signals mask, previously returned by
730  block_child_signals. */
731 
732 static void
733 restore_child_signals_mask (sigset_t *prev_mask)
734 {
735  sigprocmask (SIG_SETMASK, prev_mask, NULL);
736 }
737 
738 /* Mask of signals to pass directly to the inferior. */
739 static sigset_t pass_mask;
740 
741 /* Update signals to pass to the inferior. */
742 static void
743 linux_nat_pass_signals (struct target_ops *self,
744  int numsigs, unsigned char *pass_signals)
745 {
746  int signo;
747 
748  sigemptyset (&pass_mask);
749 
750  for (signo = 1; signo < NSIG; signo++)
751  {
752  int target_signo = gdb_signal_from_host (signo);
753  if (target_signo < numsigs && pass_signals[target_signo])
754  sigaddset (&pass_mask, signo);
755  }
756 }
757 
758 
759 
760 /* Prototypes for local functions. */
761 static int stop_wait_callback (struct lwp_info *lp, void *data);
762 static int linux_thread_alive (ptid_t ptid);
763 static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
764 static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
765 
766 
767 
768 /* Destroy and free LP. */
769 
770 static void
771 lwp_free (struct lwp_info *lp)
772 {
773  xfree (lp->arch_private);
774  xfree (lp);
775 }
776 
777 /* Remove all LWPs belonging to PID from the lwp list. */
778 
779 static void
780 purge_lwp_list (int pid)
781 {
782  struct lwp_info *lp, *lpprev, *lpnext;
783 
784  lpprev = NULL;
785 
786  for (lp = lwp_list; lp; lp = lpnext)
787  {
788  lpnext = lp->next;
789 
790  if (ptid_get_pid (lp->ptid) == pid)
791  {
792  if (lp == lwp_list)
793  lwp_list = lp->next;
794  else
795  lpprev->next = lp->next;
796 
797  lwp_free (lp);
798  }
799  else
800  lpprev = lp;
801  }
802 }
803 
804 /* Add the LWP specified by PTID to the list. PTID is the first LWP
805  in the process. Return a pointer to the structure describing the
806  new LWP.
807 
808  This differs from add_lwp in that we don't let the arch specific
809  bits know about this new thread. Current clients of this callback
810  take the opportunity to install watchpoints in the new thread, and
811  we shouldn't do that for the first thread. If we're spawning a
812  child ("run"), the thread executes the shell wrapper first, and we
813  shouldn't touch it until it execs the program we want to debug.
814  For "attach", it'd be okay to call the callback, but it's not
815  necessary, because watchpoints can't yet have been inserted into
816  the inferior. */
817 
818 static struct lwp_info *
819 add_initial_lwp (ptid_t ptid)
820 {
821  struct lwp_info *lp;
822 
823  gdb_assert (ptid_lwp_p (ptid));
824 
825  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
826 
827  memset (lp, 0, sizeof (struct lwp_info));
828 
829  lp->last_resume_kind = resume_continue;
830  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
831 
832  lp->ptid = ptid;
833  lp->core = -1;
834 
835  lp->next = lwp_list;
836  lwp_list = lp;
837 
838  return lp;
839 }
840 
841 /* Add the LWP specified by PID to the list. Return a pointer to the
842  structure describing the new LWP. The LWP should already be
843  stopped. */
844 
845 static struct lwp_info *
846 add_lwp (ptid_t ptid)
847 {
848  struct lwp_info *lp;
849 
850  lp = add_initial_lwp (ptid);
851 
852  /* Let the arch specific bits know about this new thread. Current
853  clients of this callback take the opportunity to install
854  watchpoints in the new thread. We don't do this for the first
855  thread though. See add_initial_lwp. */
856  if (linux_nat_new_thread != NULL)
857  linux_nat_new_thread (lp);
858 
859  return lp;
860 }
861 
862 /* Remove the LWP specified by PID from the list. */
863 
864 static void
865 delete_lwp (ptid_t ptid)
866 {
867  struct lwp_info *lp, *lpprev;
868 
869  lpprev = NULL;
870 
871  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
872  if (ptid_equal (lp->ptid, ptid))
873  break;
874 
875  if (!lp)
876  return;
877 
878  if (lpprev)
879  lpprev->next = lp->next;
880  else
881  lwp_list = lp->next;
882 
883  lwp_free (lp);
884 }
885 
886 /* Return a pointer to the structure describing the LWP corresponding
887  to PID. If no corresponding LWP could be found, return NULL. */
888 
889 static struct lwp_info *
890 find_lwp_pid (ptid_t ptid)
891 {
892  struct lwp_info *lp;
893  int lwp;
894 
895  if (ptid_lwp_p (ptid))
896  lwp = ptid_get_lwp (ptid);
897  else
898  lwp = ptid_get_pid (ptid);
899 
900  for (lp = lwp_list; lp; lp = lp->next)
901  if (lwp == ptid_get_lwp (lp->ptid))
902  return lp;
903 
904  return NULL;
905 }
906 
907 /* See nat/linux-nat.h. */
908 
909 struct lwp_info *
910 iterate_over_lwps (ptid_t filter,
911  iterate_over_lwps_ftype callback,
912  void *data)
913 {
914  struct lwp_info *lp, *lpnext;
915 
916  for (lp = lwp_list; lp; lp = lpnext)
917  {
918  lpnext = lp->next;
919 
920  if (ptid_match (lp->ptid, filter))
921  {
922  if ((*callback) (lp, data) != 0)
923  return lp;
924  }
925  }
926 
927  return NULL;
928 }
929 
930 /* Update our internal state when changing from one checkpoint to
931  another indicated by NEW_PTID. We can only switch single-threaded
932  applications, so we only create one new LWP, and the previous list
933  is discarded. */
934 
935 void
936 linux_nat_switch_fork (ptid_t new_ptid)
937 {
938  struct lwp_info *lp;
939 
940  purge_lwp_list (ptid_get_pid (inferior_ptid));
941 
942  lp = add_lwp (new_ptid);
943  lp->stopped = 1;
944 
945  /* This changes the thread's ptid while preserving the gdb thread
946  num. Also changes the inferior pid, while preserving the
947  inferior num. */
948  thread_change_ptid (inferior_ptid, new_ptid);
949 
950  /* We've just told GDB core that the thread changed target id, but,
951  in fact, it really is a different thread, with different register
952  contents. */
953  registers_changed ();
954 }
955 
956 /* Handle the exit of a single thread LP. */
957 
958 static void
959 exit_lwp (struct lwp_info *lp)
960 {
961  struct thread_info *th = find_thread_ptid (lp->ptid);
962 
963  if (th)
964  {
965  if (print_thread_events)
966  printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
967 
968  delete_thread (lp->ptid);
969  }
970 
971  delete_lwp (lp->ptid);
972 }
973 
974 /* Wait for the LWP specified by LP, which we have just attached to.
975  Returns a wait status for that LWP, to cache. */
976 
977 static int
978 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
979  int *signalled)
980 {
981  pid_t new_pid, pid = ptid_get_lwp (ptid);
982  int status;
983 
984  if (linux_proc_pid_is_stopped (pid))
985  {
986  if (debug_linux_nat)
988  "LNPAW: Attaching to a stopped process\n");
989 
990  /* The process is definitely stopped. It is in a job control
991  stop, unless the kernel predates the TASK_STOPPED /
992  TASK_TRACED distinction, in which case it might be in a
993  ptrace stop. Make sure it is in a ptrace stop; from there we
994  can kill it, signal it, et cetera.
995 
996  First make sure there is a pending SIGSTOP. Since we are
997  already attached, the process can not transition from stopped
998  to running without a PTRACE_CONT; so we know this signal will
999  go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1000  probably already in the queue (unless this kernel is old
1001  enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1002  is not an RT signal, it can only be queued once. */
1003  kill_lwp (pid, SIGSTOP);
1004 
1005  /* Finally, resume the stopped process. This will deliver the SIGSTOP
1006  (or a higher priority signal, just like normal PTRACE_ATTACH). */
1007  ptrace (PTRACE_CONT, pid, 0, 0);
1008  }
1009 
1010  /* Make sure the initial process is stopped. The user-level threads
1011  layer might want to poke around in the inferior, and that won't
1012  work if things haven't stabilized yet. */
1013  new_pid = my_waitpid (pid, &status, 0);
1014  if (new_pid == -1 && errno == ECHILD)
1015  {
1016  if (first)
1017  warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1018 
1019  /* Try again with __WCLONE to check cloned processes. */
1020  new_pid = my_waitpid (pid, &status, __WCLONE);
1021  *cloned = 1;
1022  }
1023 
1024  gdb_assert (pid == new_pid);
1025 
1026  if (!WIFSTOPPED (status))
1027  {
1028  /* The pid we tried to attach has apparently just exited. */
1029  if (debug_linux_nat)
1030  fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1031  pid, status_to_str (status));
1032  return status;
1033  }
1034 
1035  if (WSTOPSIG (status) != SIGSTOP)
1036  {
1037  *signalled = 1;
1038  if (debug_linux_nat)
1040  "LNPAW: Received %s after attaching\n",
1041  status_to_str (status));
1042  }
1043 
1044  return status;
1045 }
1046 
1047 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1048  the new LWP could not be attached, or 1 if we're already auto
1049  attached to this thread, but haven't processed the
1050  PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
1051  its existence, without considering it an error. */
1052 
1053 int
1054 lin_lwp_attach_lwp (ptid_t ptid)
1055 {
1056  struct lwp_info *lp;
1057  int lwpid;
1058 
1059  gdb_assert (ptid_lwp_p (ptid));
1060 
1061  lp = find_lwp_pid (ptid);
1062  lwpid = ptid_get_lwp (ptid);
1063 
1064  /* We assume that we're already attached to any LWP that is already
1065  in our list of LWPs. If we're not seeing exit events from threads
1066  and we've had PID wraparound since we last tried to stop all threads,
1067  this assumption might be wrong; fortunately, this is very unlikely
1068  to happen. */
1069  if (lp == NULL)
1070  {
1071  int status, cloned = 0, signalled = 0;
1072 
1073  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1074  {
1075  if (linux_supports_tracefork ())
1076  {
1077  /* If we haven't stopped all threads when we get here,
1078  we may have seen a thread listed in thread_db's list,
1079  but not processed the PTRACE_EVENT_CLONE yet. If
1080  that's the case, ignore this new thread, and let
1081  normal event handling discover it later. */
1082  if (in_pid_list_p (stopped_pids, lwpid))
1083  {
1084  /* We've already seen this thread stop, but we
1085  haven't seen the PTRACE_EVENT_CLONE extended
1086  event yet. */
1087  if (debug_linux_nat)
1089  "LLAL: attach failed, but already seen "
1090  "this thread %s stop\n",
1091  target_pid_to_str (ptid));
1092  return 1;
1093  }
1094  else
1095  {
1096  int new_pid;
1097  int status;
1098 
1099  if (debug_linux_nat)
1101  "LLAL: attach failed, and haven't seen "
1102  "this thread %s stop yet\n",
1103  target_pid_to_str (ptid));
1104 
1105  /* We may or may not be attached to the LWP already.
1106  Try waitpid on it. If that errors, we're not
1107  attached to the LWP yet. Otherwise, we're
1108  already attached. */
1109  gdb_assert (lwpid > 0);
1110  new_pid = my_waitpid (lwpid, &status, WNOHANG);
1111  if (new_pid == -1 && errno == ECHILD)
1112  new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1113  if (new_pid != -1)
1114  {
1115  if (new_pid == 0)
1116  {
1117  /* The child hasn't stopped for its initial
1118  SIGSTOP stop yet. */
1119  if (debug_linux_nat)
1121  "LLAL: child hasn't "
1122  "stopped yet\n");
1123  }
1124  else if (WIFSTOPPED (status))
1125  {
1126  if (debug_linux_nat)
1128  "LLAL: adding to stopped_pids\n");
1129  add_to_pid_list (&stopped_pids, lwpid, status);
1130  }
1131  return 1;
1132  }
1133  }
1134  }
1135 
1136  /* If we fail to attach to the thread, issue a warning,
1137  but continue. One way this can happen is if thread
1138  creation is interrupted; as of Linux kernel 2.6.19, a
1139  bug may place threads in the thread list and then fail
1140  to create them. */
1141  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1142  safe_strerror (errno));
1143  return -1;
1144  }
1145 
1146  if (debug_linux_nat)
1148  "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1149  target_pid_to_str (ptid));
1150 
1151  status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1152  if (!WIFSTOPPED (status))
1153  return 1;
1154 
1155  lp = add_lwp (ptid);
1156  lp->stopped = 1;
1158  lp->cloned = cloned;
1159  lp->signalled = signalled;
1160  if (WSTOPSIG (status) != SIGSTOP)
1161  {
1162  lp->resumed = 1;
1163  lp->status = status;
1164  }
1165 
1167 
1168  if (debug_linux_nat)
1169  {
1171  "LLAL: waitpid %s received %s\n",
1172  target_pid_to_str (ptid),
1173  status_to_str (status));
1174  }
1175  }
1176 
1177  return 0;
1178 }
1179 
1180 static void
1181 linux_nat_create_inferior (struct target_ops *ops,
1182  char *exec_file, char *allargs, char **env,
1183  int from_tty)
1184 {
1185  struct cleanup *restore_personality
1186  = maybe_disable_address_space_randomization (disable_randomization);
1187 
1188  /* The fork_child mechanism is synchronous and calls target_wait, so
1189  we have to mask the async mode. */
1190 
1191  /* Make sure we report all signals during startup. */
1192  linux_nat_pass_signals (ops, 0, NULL);
1193 
1194  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1195 
1196  do_cleanups (restore_personality);
1197 }
1198 
1199 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1200  already attached. Returns true if a new LWP is found, false
1201  otherwise. */
1202 
1203 static int
1204 attach_proc_task_lwp_callback (ptid_t ptid)
1205 {
1206  struct lwp_info *lp;
1207 
1208  /* Ignore LWPs we're already attached to. */
1209  lp = find_lwp_pid (ptid);
1210  if (lp == NULL)
1211  {
1212  int lwpid = ptid_get_lwp (ptid);
1213 
1214  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1215  {
1216  int err = errno;
1217 
1218  /* Be quiet if we simply raced with the thread exiting.
1219  EPERM is returned if the thread's task still exists, and
1220  is marked as exited or zombie, as well as other
1221  conditions, so in that case, confirm the status in
1222  /proc/PID/status. */
1223  if (err == ESRCH
1224  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1225  {
1226  if (debug_linux_nat)
1227  {
1229  "Cannot attach to lwp %d: "
1230  "thread is gone (%d: %s)\n",
1231  lwpid, err, safe_strerror (err));
1232  }
1233  }
1234  else
1235  {
1236  warning (_("Cannot attach to lwp %d: %s"),
1237  lwpid,
1239  err));
1240  }
1241  }
1242  else
1243  {
1244  if (debug_linux_nat)
1246  "PTRACE_ATTACH %s, 0, 0 (OK)\n",
1247  target_pid_to_str (ptid));
1248 
1249  lp = add_lwp (ptid);
1250  lp->cloned = 1;
1251 
1252  /* The next time we wait for this LWP we'll see a SIGSTOP as
1253  PTRACE_ATTACH brings it to a halt. */
1254  lp->signalled = 1;
1255 
1256  /* We need to wait for a stop before being able to make the
1257  next ptrace call on this LWP. */
1258  lp->must_set_ptrace_flags = 1;
1259  }
1260 
1261  return 1;
1262  }
1263  return 0;
1264 }
1265 
1266 static void
1267 linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
1268 {
1269  struct lwp_info *lp;
1270  int status;
1271  ptid_t ptid;
1272 
1273  /* Make sure we report all signals during attach. */
1274  linux_nat_pass_signals (ops, 0, NULL);
1275 
1276  TRY
1277  {
1278  linux_ops->to_attach (ops, args, from_tty);
1279  }
1280  CATCH (ex, RETURN_MASK_ERROR)
1281  {
1282  pid_t pid = parse_pid_to_attach (args);
1283  struct buffer buffer;
1284  char *message, *buffer_s;
1285 
1286  message = xstrdup (ex.message);
1287  make_cleanup (xfree, message);
1288 
1289  buffer_init (&buffer);
1290  linux_ptrace_attach_fail_reason (pid, &buffer);
1291 
1292  buffer_grow_str0 (&buffer, "");
1293  buffer_s = buffer_finish (&buffer);
1294  make_cleanup (xfree, buffer_s);
1295 
1296  if (*buffer_s != '\0')
1297  throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
1298  else
1299  throw_error (ex.error, "%s", message);
1300  }
1301  END_CATCH
1302 
1303  /* The ptrace base target adds the main thread with (pid,0,0)
1304  format. Decorate it with lwp info. */
1305  ptid = ptid_build (ptid_get_pid (inferior_ptid),
1306  ptid_get_pid (inferior_ptid),
1307  0);
1308  thread_change_ptid (inferior_ptid, ptid);
1309 
1310  /* Add the initial process as the first LWP to the list. */
1311  lp = add_initial_lwp (ptid);
1312 
1313  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1314  &lp->signalled);
1315  if (!WIFSTOPPED (status))
1316  {
1317  if (WIFEXITED (status))
1318  {
1319  int exit_code = WEXITSTATUS (status);
1320 
1323  if (exit_code == 0)
1324  error (_("Unable to attach: program exited normally."));
1325  else
1326  error (_("Unable to attach: program exited with code %d."),
1327  exit_code);
1328  }
1329  else if (WIFSIGNALED (status))
1330  {
1331  enum gdb_signal signo;
1332 
1335 
1336  signo = gdb_signal_from_host (WTERMSIG (status));
1337  error (_("Unable to attach: program terminated with signal "
1338  "%s, %s."),
1339  gdb_signal_to_name (signo),
1340  gdb_signal_to_string (signo));
1341  }
1342 
1343  internal_error (__FILE__, __LINE__,
1344  _("unexpected status %d for PID %ld"),
1345  status, (long) ptid_get_lwp (ptid));
1346  }
1347 
1348  lp->stopped = 1;
1349 
1350  /* Save the wait status to report later. */
1351  lp->resumed = 1;
1352  if (debug_linux_nat)
1354  "LNA: waitpid %ld, saving status %s\n",
1355  (long) ptid_get_pid (lp->ptid), status_to_str (status));
1356 
1357  lp->status = status;
1358 
1359  /* We must attach to every LWP. If /proc is mounted, use that to
1360  find them now. The inferior may be using raw clone instead of
1361  using pthreads. But even if it is using pthreads, thread_db
1362  walks structures in the inferior's address space to find the list
1363  of threads/LWPs, and those structures may well be corrupted.
1364  Note that once thread_db is loaded, we'll still use it to list
1365  threads and associate pthread info with each LWP. */
1366  linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
1367  attach_proc_task_lwp_callback);
1368 
1369  if (target_can_async_p ())
1370  target_async (1);
1371 }
1372 
1373 /* Get pending status of LP. */
1374 static int
1375 get_pending_status (struct lwp_info *lp, int *status)
1376 {
1377  enum gdb_signal signo = GDB_SIGNAL_0;
1378 
1379  /* If we paused threads momentarily, we may have stored pending
1380  events in lp->status or lp->waitstatus (see stop_wait_callback),
1381  and GDB core hasn't seen any signal for those threads.
1382  Otherwise, the last signal reported to the core is found in the
1383  thread object's stop_signal.
1384 
1385  There's a corner case that isn't handled here at present. Only
1386  if the thread stopped with a TARGET_WAITKIND_STOPPED does
1387  stop_signal make sense as a real signal to pass to the inferior.
1388  Some catchpoint related events, like
1389  TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1390  to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1391  those traps are debug API (ptrace in our case) related and
1392  induced; the inferior wouldn't see them if it wasn't being
1393  traced. Hence, we should never pass them to the inferior, even
1394  when set to pass state. Since this corner case isn't handled by
1395  infrun.c when proceeding with a signal, for consistency, neither
1396  do we handle it here (or elsewhere in the file we check for
1397  signal pass state). Normally SIGTRAP isn't set to pass state, so
1398  this is really a corner case. */
1399 
1400  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1401  signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1402  else if (lp->status)
1403  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1404  else if (non_stop && !is_executing (lp->ptid))
1405  {
1406  struct thread_info *tp = find_thread_ptid (lp->ptid);
1407 
1408  signo = tp->suspend.stop_signal;
1409  }
1410  else if (!non_stop)
1411  {
1412  struct target_waitstatus last;
1413  ptid_t last_ptid;
1414 
1415  get_last_target_status (&last_ptid, &last);
1416 
1417  if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
1418  {
1419  struct thread_info *tp = find_thread_ptid (lp->ptid);
1420 
1421  signo = tp->suspend.stop_signal;
1422  }
1423  }
1424 
1425  *status = 0;
1426 
1427  if (signo == GDB_SIGNAL_0)
1428  {
1429  if (debug_linux_nat)
1431  "GPT: lwp %s has no pending signal\n",
1432  target_pid_to_str (lp->ptid));
1433  }
1434  else if (!signal_pass_state (signo))
1435  {
1436  if (debug_linux_nat)
1438  "GPT: lwp %s had signal %s, "
1439  "but it is in no pass state\n",
1440  target_pid_to_str (lp->ptid),
1441  gdb_signal_to_string (signo));
1442  }
1443  else
1444  {
1445  *status = W_STOPCODE (gdb_signal_to_host (signo));
1446 
1447  if (debug_linux_nat)
1449  "GPT: lwp %s has pending signal %s\n",
1450  target_pid_to_str (lp->ptid),
1451  gdb_signal_to_string (signo));
1452  }
1453 
1454  return 0;
1455 }
1456 
1457 static int
1458 detach_callback (struct lwp_info *lp, void *data)
1459 {
1460  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1461 
1462  if (debug_linux_nat && lp->status)
1463  fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1464  strsignal (WSTOPSIG (lp->status)),
1465  target_pid_to_str (lp->ptid));
1466 
1467  /* If there is a pending SIGSTOP, get rid of it. */
1468  if (lp->signalled)
1469  {
1470  if (debug_linux_nat)
1472  "DC: Sending SIGCONT to %s\n",
1473  target_pid_to_str (lp->ptid));
1474 
1475  kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
1476  lp->signalled = 0;
1477  }
1478 
1479  /* We don't actually detach from the LWP that has an id equal to the
1480  overall process id just yet. */
1481  if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
1482  {
1483  int status = 0;
1484 
1485  /* Pass on any pending signal for this LWP. */
1486  get_pending_status (lp, &status);
1487 
1488  if (linux_nat_prepare_to_resume != NULL)
1489  linux_nat_prepare_to_resume (lp);
1490  errno = 0;
1491  if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
1492  WSTOPSIG (status)) < 0)
1493  error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1494  safe_strerror (errno));
1495 
1496  if (debug_linux_nat)
1498  "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1499  target_pid_to_str (lp->ptid),
1500  strsignal (WSTOPSIG (status)));
1501 
1502  delete_lwp (lp->ptid);
1503  }
1504 
1505  return 0;
1506 }
1507 
1508 static void
1509 linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
1510 {
1511  int pid;
1512  int status;
1513  struct lwp_info *main_lwp;
1514 
1515  pid = ptid_get_pid (inferior_ptid);
1516 
1517  /* Don't unregister from the event loop, as there may be other
1518  inferiors running. */
1519 
1520  /* Stop all threads before detaching. ptrace requires that the
1521  thread is stopped to successfully detach. */
1522  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1523  /* ... and wait until all of them have reported back that
1524  they're no longer running. */
1525  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1526 
1527  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1528 
1529  /* Only the initial process should be left right now. */
1530  gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);
1531 
1532  main_lwp = find_lwp_pid (pid_to_ptid (pid));
1533 
1534  /* Pass on any pending signal for the last LWP. */
1535  if ((args == NULL || *args == '\0')
1536  && get_pending_status (main_lwp, &status) != -1
1537  && WIFSTOPPED (status))
1538  {
1539  char *tem;
1540 
1541  /* Put the signal number in ARGS so that inf_ptrace_detach will
1542  pass it along with PTRACE_DETACH. */
1543  tem = alloca (8);
1544  xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
1545  args = tem;
1546  if (debug_linux_nat)
1548  "LND: Sending signal %s to %s\n",
1549  args,
1550  target_pid_to_str (main_lwp->ptid));
1551  }
1552 
1553  if (linux_nat_prepare_to_resume != NULL)
1554  linux_nat_prepare_to_resume (main_lwp);
1555  delete_lwp (main_lwp->ptid);
1556 
1557  if (forks_exist_p ())
1558  {
1559  /* Multi-fork case. The current inferior_ptid is being detached
1560  from, but there are other viable forks to debug. Detach from
1561  the current fork, and context-switch to the first
1562  available. */
1563  linux_fork_detach (args, from_tty);
1564  }
1565  else
1566  linux_ops->to_detach (ops, args, from_tty);
1567 }
1568 
1569 /* Resume execution of the inferior process. If STEP is nonzero,
1570  single-step it. If SIGNAL is nonzero, give it that signal. */
1571 
1572 static void
1573 linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1574  enum gdb_signal signo)
1575 {
1576  lp->step = step;
1577 
1578  /* stop_pc doubles as the PC the LWP had when it was last resumed.
1579  We only presently need that if the LWP is stepped though (to
1580  handle the case of stepping a breakpoint instruction). */
1581  if (step)
1582  {
1583  struct regcache *regcache = get_thread_regcache (lp->ptid);
1584 
1585  lp->stop_pc = regcache_read_pc (regcache);
1586  }
1587  else
1588  lp->stop_pc = 0;
1589 
1590  if (linux_nat_prepare_to_resume != NULL)
1591  linux_nat_prepare_to_resume (lp);
1592  linux_ops->to_resume (linux_ops, lp->ptid, step, signo);
1593 
1594  /* Successfully resumed. Clear state that no longer makes sense,
1595  and mark the LWP as running. Must not do this before resuming
1596  otherwise if that fails other code will be confused. E.g., we'd
1597  later try to stop the LWP and hang forever waiting for a stop
1598  status. Note that we must not throw after this is cleared,
1599  otherwise handle_zombie_lwp_error would get confused. */
1600  lp->stopped = 0;
1603 }
1604 
1605 /* Called when we try to resume a stopped LWP and that errors out. If
1606  the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1607  or about to become), discard the error, clear any pending status
1608  the LWP may have, and return true (we'll collect the exit status
1609  soon enough). Otherwise, return false. */
1610 
1611 static int
1612 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1613 {
1614  /* If we get an error after resuming the LWP successfully, we'd
1615  confuse !T state for the LWP being gone. */
1616  gdb_assert (lp->stopped);
1617 
1618  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1619  because even if ptrace failed with ESRCH, the tracee may be "not
1620  yet fully dead", but already refusing ptrace requests. In that
1621  case the tracee has 'R (Running)' state for a little bit
1622  (observed in Linux 3.18). See also the note on ESRCH in the
1623  ptrace(2) man page. Instead, check whether the LWP has any state
1624  other than ptrace-stopped. */
1625 
1626  /* Don't assume anything if /proc/PID/status can't be read. */
1628  {
1630  lp->status = 0;
1632  return 1;
1633  }
1634  return 0;
1635 }
1636 
1637 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1638  disappears while we try to resume it. */
1639 
1640 static void
1641 linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1642 {
1643  TRY
1644  {
1645  linux_resume_one_lwp_throw (lp, step, signo);
1646  }
1647  CATCH (ex, RETURN_MASK_ERROR)
1648  {
1649  if (!check_ptrace_stopped_lwp_gone (lp))
1650  throw_exception (ex);
1651  }
1652  END_CATCH
1653 }
1654 
1655 /* Resume LP. */
1656 
1657 static void
1658 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1659 {
1660  if (lp->stopped)
1661  {
1662  struct inferior *inf = find_inferior_ptid (lp->ptid);
1663 
1664  if (inf->vfork_child != NULL)
1665  {
1666  if (debug_linux_nat)
1668  "RC: Not resuming %s (vfork parent)\n",
1669  target_pid_to_str (lp->ptid));
1670  }
1671  else if (!lwp_status_pending_p (lp))
1672  {
1673  if (debug_linux_nat)
1675  "RC: Resuming sibling %s, %s, %s\n",
1676  target_pid_to_str (lp->ptid),
1677  (signo != GDB_SIGNAL_0
1678  ? strsignal (gdb_signal_to_host (signo))
1679  : "0"),
1680  step ? "step" : "resume");
1681 
1682  linux_resume_one_lwp (lp, step, signo);
1683  }
1684  else
1685  {
1686  if (debug_linux_nat)
1688  "RC: Not resuming sibling %s (has pending)\n",
1689  target_pid_to_str (lp->ptid));
1690  }
1691  }
1692  else
1693  {
1694  if (debug_linux_nat)
1696  "RC: Not resuming sibling %s (not stopped)\n",
1697  target_pid_to_str (lp->ptid));
1698  }
1699 }
1700 
1701 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1702  Resume LWP with the last stop signal, if it is in pass state. */
1703 
1704 static int
1705 linux_nat_resume_callback (struct lwp_info *lp, void *except)
1706 {
1707  enum gdb_signal signo = GDB_SIGNAL_0;
1708 
1709  if (lp == except)
1710  return 0;
1711 
1712  if (lp->stopped)
1713  {
1714  struct thread_info *thread;
1715 
1716  thread = find_thread_ptid (lp->ptid);
1717  if (thread != NULL)
1718  {
1719  signo = thread->suspend.stop_signal;
1720  thread->suspend.stop_signal = GDB_SIGNAL_0;
1721  }
1722  }
1723 
1724  resume_lwp (lp, 0, signo);
1725  return 0;
1726 }
1727 
1728 static int
1729 resume_clear_callback (struct lwp_info *lp, void *data)
1730 {
1731  lp->resumed = 0;
1732  lp->last_resume_kind = resume_stop;
1733  return 0;
1734 }
1735 
1736 static int
1737 resume_set_callback (struct lwp_info *lp, void *data)
1738 {
1739  lp->resumed = 1;
1740  lp->last_resume_kind = resume_continue;
1741  return 0;
1742 }
1743 
1744 static void
1745 linux_nat_resume (struct target_ops *ops,
1746  ptid_t ptid, int step, enum gdb_signal signo)
1747 {
1748  struct lwp_info *lp;
1749  int resume_many;
1750 
1751  if (debug_linux_nat)
1753  "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1754  step ? "step" : "resume",
1755  target_pid_to_str (ptid),
1756  (signo != GDB_SIGNAL_0
1757  ? strsignal (gdb_signal_to_host (signo)) : "0"),
1758  target_pid_to_str (inferior_ptid));
1759 
1760  /* A specific PTID means `step only this process id'. */
1761  resume_many = (ptid_equal (minus_one_ptid, ptid)
1762  || ptid_is_pid (ptid));
1763 
1764  /* Mark the lwps we're resuming as resumed. */
1765  iterate_over_lwps (ptid, resume_set_callback, NULL);
1766 
1767  /* See if it's the current inferior that should be handled
1768  specially. */
1769  if (resume_many)
1770  lp = find_lwp_pid (inferior_ptid);
1771  else
1772  lp = find_lwp_pid (ptid);
1773  gdb_assert (lp != NULL);
1774 
1775  /* Remember if we're stepping. */
1776  lp->last_resume_kind = step ? resume_step : resume_continue;
1777 
1778  /* If we have a pending wait status for this thread, there is no
1779  point in resuming the process. But first make sure that
1780  linux_nat_wait won't preemptively handle the event - we
1781  should never take this short-circuit if we are going to
1782  leave LP running, since we have skipped resuming all the
1783  other threads. This bit of code needs to be synchronized
1784  with linux_nat_wait. */
1785 
1786  if (lp->status && WIFSTOPPED (lp->status))
1787  {
1788  if (!lp->step
1789  && WSTOPSIG (lp->status)
1790  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1791  {
1792  if (debug_linux_nat)
1794  "LLR: Not short circuiting for ignored "
1795  "status 0x%x\n", lp->status);
1796 
1797  /* FIXME: What should we do if we are supposed to continue
1798  this thread with a signal? */
1799  gdb_assert (signo == GDB_SIGNAL_0);
1800  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1801  lp->status = 0;
1802  }
1803  }
1804 
1805  if (lwp_status_pending_p (lp))
1806  {
1807  /* FIXME: What should we do if we are supposed to continue
1808  this thread with a signal? */
1809  gdb_assert (signo == GDB_SIGNAL_0);
1810 
1811  if (debug_linux_nat)
1813  "LLR: Short circuiting for status 0x%x\n",
1814  lp->status);
1815 
1816  if (target_can_async_p ())
1817  {
1818  target_async (1);
1819  /* Tell the event loop we have something to process. */
1820  async_file_mark ();
1821  }
1822  return;
1823  }
1824 
1825  if (resume_many)
1826  iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
1827 
1828  if (debug_linux_nat)
1830  "LLR: %s %s, %s (resume event thread)\n",
1831  step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1832  target_pid_to_str (lp->ptid),
1833  (signo != GDB_SIGNAL_0
1834  ? strsignal (gdb_signal_to_host (signo)) : "0"));
1835 
1836  linux_resume_one_lwp (lp, step, signo);
1837 
1838  if (target_can_async_p ())
1839  target_async (1);
1840 }
1841 
1842 /* Send a signal to an LWP. */
1843 
1844 static int
1845 kill_lwp (int lwpid, int signo)
1846 {
1847  /* Use tkill, if possible, in case we are using nptl threads. If tkill
1848  fails, then we are not using nptl threads and we should be using kill. */
1849 
1850 #ifdef HAVE_TKILL_SYSCALL
1851  {
1852  static int tkill_failed;
1853 
1854  if (!tkill_failed)
1855  {
1856  int ret;
1857 
1858  errno = 0;
1859  ret = syscall (__NR_tkill, lwpid, signo);
1860  if (errno != ENOSYS)
1861  return ret;
1862  tkill_failed = 1;
1863  }
1864  }
1865 #endif
1866 
1867  return kill (lwpid, signo);
1868 }
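/* Illustrative sketch only, not part of linux-nat.c: the stop-and-cancel
   dance described in the "Use of signals" section of the file comment.
   A SIGSTOP is made pending with tkill (via kill_lwp above), the thread
   is waited for, and then it is resumed with PTRACE_CONT *without*
   passing the signal, so the SIGSTOP is never delivered to the rest of
   the thread group. "lwpid" is a placeholder for an already
   ptrace-attached thread; a real implementation must also cope with
   other signals arriving first and with pre-__WALL kernels. */
static void
stop_and_cancel_sketch (int lwpid)
{
  int status;

  kill_lwp (lwpid, SIGSTOP);

  /* Collect the SIGSTOP stop. */
  if (waitpid (lwpid, &status, __WALL) == lwpid
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    ptrace (PTRACE_CONT, lwpid, 0, 0);	/* Resume; the SIGSTOP is discarded. */
}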
1869 
1870 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1871  event, check if the core is interested in it: if not, ignore the
1872  event, and keep waiting; otherwise, we need to toggle the LWP's
1873  syscall entry/exit status, since the ptrace event itself doesn't
1874  indicate it, and report the trap to higher layers. */
1875 
1876 static int
1877 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1878 {
1879  struct target_waitstatus *ourstatus = &lp->waitstatus;
1880  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1881  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1882 
1883  if (stopping)
1884  {
1885  /* If we're stopping threads, there's a SIGSTOP pending, which
1886  makes it so that the LWP reports an immediate syscall return,
1887  followed by the SIGSTOP. Skip seeing that "return" using
1888  PTRACE_CONT directly, and let stop_wait_callback collect the
1889  SIGSTOP. Later, when the thread is resumed, it will report a new
1890  syscall entry event. If we didn't do this (and returned 0), we'd
1891  leave a syscall entry pending, and our caller, by using
1892  PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1893  itself. Later, when the user re-resumes this LWP, we'd see
1894  another syscall entry event and we'd mistake it for a return.
1895 
1896  If stop_wait_callback didn't force the SIGSTOP out of the LWP
1897  (leaving immediately with LWP->signalled set, without issuing
1898  a PTRACE_CONT), it would still be problematic to leave this
1899  syscall enter pending, as later when the thread is resumed,
1900  it would then see the same syscall exit mentioned above,
1901  followed by the delayed SIGSTOP, while the syscall didn't
1902  actually get to execute. It seems it would be even more
1903  confusing to the user. */
1904 
1905  if (debug_linux_nat)
1907  "LHST: ignoring syscall %d "
1908  "for LWP %ld (stopping threads), "
1909  "resuming with PTRACE_CONT for SIGSTOP\n",
1910  syscall_number,
1911  ptid_get_lwp (lp->ptid));
1912 
1913  lp->syscall_state = TARGET_WAITKIND_IGNORE;
1914  ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
1915  lp->stopped = 0;
1916  return 1;
1917  }
1918 
1919  if (catch_syscall_enabled ())
1920  {
1921  /* Always update the entry/return state, even if this particular
1922  syscall isn't interesting to the core now. In async mode,
1923  the user could install a new catchpoint for this syscall
1924  between syscall enter/return, and we'll need to know to
1925  report a syscall return if that happens. */
1926  lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1927  ? TARGET_WAITKIND_SYSCALL_RETURN
1928  : TARGET_WAITKIND_SYSCALL_ENTRY);
1929 
1930  if (catching_syscall_number (syscall_number))
1931  {
1932  /* Alright, an event to report. */
1933  ourstatus->kind = lp->syscall_state;
1934  ourstatus->value.syscall_number = syscall_number;
1935 
1936  if (debug_linux_nat)
1938  "LHST: stopping for %s of syscall %d"
1939  " for LWP %ld\n",
1940  lp->syscall_state
1941  == TARGET_WAITKIND_SYSCALL_ENTRY
1942  ? "entry" : "return",
1943  syscall_number,
1944  ptid_get_lwp (lp->ptid));
1945  return 0;
1946  }
1947 
1948  if (debug_linux_nat)
1950  "LHST: ignoring %s of syscall %d "
1951  "for LWP %ld\n",
1952  lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1953  ? "entry" : "return",
1954  syscall_number,
1955  ptid_get_lwp (lp->ptid));
1956  }
1957  else
1958  {
1959  /* If we had been syscall tracing, and hence used PT_SYSCALL
1960  before on this LWP, it could happen that the user removes all
1961  syscall catchpoints before we get to process this event.
1962  There are two noteworthy issues here:
1963 
1964  - When stopped at a syscall entry event, resuming with
1965  PT_STEP still resumes executing the syscall and reports a
1966  syscall return.
1967 
1968  - Only PT_SYSCALL catches syscall enters. If we last
1969  single-stepped this thread, then this event can't be a
1970  syscall enter; since we last single-stepped it, this
1971  has to be a syscall exit.
1972 
1973  The points above mean that the next resume, be it PT_STEP or
1974  PT_CONTINUE, can not trigger a syscall trace event. */
1975  if (debug_linux_nat)
1977  "LHST: caught syscall event "
1978  "with no syscall catchpoints."
1979  " %d for LWP %ld, ignoring\n",
1980  syscall_number,
1981  ptid_get_lwp (lp->ptid));
1983  }
1984 
1985  /* The core isn't interested in this event. For efficiency, avoid
1986  stopping all threads only to have the core resume them all again.
1987  Since we're not stopping threads, if we're still syscall tracing
1988  and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1989  subsequent syscall. Simply resume using the inf-ptrace layer,
1990  which knows when to use PT_SYSCALL or PT_CONTINUE. */
1991 
1992  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
1993  return 1;
1994 }
1995 
1996 /* Handle a GNU/Linux extended wait response. If we see a clone
1997  event, we need to add the new LWP to our list (and not report the
1998  trap to higher layers). This function returns non-zero if the
1999  event should be ignored and we should wait again. For clone
2000  events, the new LWP is added to our list in the stopped state. */
2001 
2002 static int
2003 linux_handle_extended_wait (struct lwp_info *lp, int status)
2004 {
2005  int pid = ptid_get_lwp (lp->ptid);
2006  struct target_waitstatus *ourstatus = &lp->waitstatus;
2007  int event = linux_ptrace_get_extended_event (status);
2008 
2009  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2010  || event == PTRACE_EVENT_CLONE)
2011  {
2012  unsigned long new_pid;
2013  int ret;
2014 
2015  ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2016 
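 /* For fork/vfork/clone events, PTRACE_GETEVENTMSG yields the PID of
    the new child (or new thread); that is what was just read into
    NEW_PID above. */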
2017  /* If we haven't already seen the new PID stop, wait for it now. */
2018  if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2019  {
2020  /* The new child has a pending SIGSTOP. We can't affect it until it
2021  hits the SIGSTOP, but we're already attached. */
2022  ret = my_waitpid (new_pid, &status,
2023  (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2024  if (ret == -1)
2025  perror_with_name (_("waiting for new child"));
2026  else if (ret != new_pid)
2027  internal_error (__FILE__, __LINE__,
2028  _("wait returned unexpected PID %d"), ret);
2029  else if (!WIFSTOPPED (status))
2030  internal_error (__FILE__, __LINE__,
2031  _("wait returned unexpected status 0x%x"), status);
2032  }
2033 
2034  ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2035 
2036  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
2037  {
2038  /* The arch-specific native code may need to know about new
2039  forks even if those end up never mapped to an
2040  inferior. */
2041  if (linux_nat_new_fork != NULL)
2042  linux_nat_new_fork (lp, new_pid);
2043  }
2044 
2045  if (event == PTRACE_EVENT_FORK
2046  && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2047  {
2048  /* Handle checkpointing by linux-fork.c here as a special
2049  case. We don't want the follow-fork-mode or 'catch fork'
2050  to interfere with this. */
2051 
2052  /* This won't actually modify the breakpoint list, but will
2053  physically remove the breakpoints from the child. */
2054  detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2055 
2056  /* Retain child fork in ptrace (stopped) state. */
2057  if (!find_fork_pid (new_pid))
2058  add_fork (new_pid);
2059 
2060  /* Report as spurious, so that infrun doesn't want to follow
2061  this fork. We're actually doing an infcall in
2062  linux-fork.c. */
2063  ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2064 
2065  /* Report the stop to the core. */
2066  return 0;
2067  }
2068 
2069  if (event == PTRACE_EVENT_FORK)
2070  ourstatus->kind = TARGET_WAITKIND_FORKED;
2071  else if (event == PTRACE_EVENT_VFORK)
2072  ourstatus->kind = TARGET_WAITKIND_VFORKED;
2073  else if (event == PTRACE_EVENT_CLONE)
2074  {
2075  struct lwp_info *new_lp;
2076 
2077  ourstatus->kind = TARGET_WAITKIND_IGNORE;
2078 
2079  if (debug_linux_nat)
2080  fprintf_unfiltered (gdb_stdlog,
2081  "LHEW: Got clone event "
2082  "from LWP %d, new child is LWP %ld\n",
2083  pid, new_pid);
2084 
2085  new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
2086  new_lp->cloned = 1;
2087  new_lp->stopped = 1;
2088  new_lp->resumed = 1;
2089 
2090  /* If the thread_db layer is active, let it record the user
2091  level thread id and status, and add the thread to GDB's
2092  list. */
2093  if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
2094  {
2095  /* The process is not using thread_db. Add the LWP to
2096  GDB's list. */
2097  target_post_attach (ptid_get_lwp (new_lp->ptid));
2098  add_thread (new_lp->ptid);
2099  }
2100 
2101  /* Even if we're stopping the thread for some reason
2102  internal to this module, from the perspective of infrun
2103  and the user/frontend, this new thread is running until
2104  it next reports a stop. */
2105  set_running (new_lp->ptid, 1);
2106  set_executing (new_lp->ptid, 1);
2107 
2108  if (WSTOPSIG (status) != SIGSTOP)
2109  {
2110  /* This can happen if someone starts sending the new thread
2111  signals with a lower number than SIGSTOP (e.g. SIGUSR1)
2112  before it gets a chance to run.
2113  This is an unlikely case, and harder to handle for
2114  fork / vfork than for clone, so we do not try - but
2115  we handle it for clone events here. */
2116 
2117  new_lp->signalled = 1;
2118 
2119  /* We created NEW_LP so it cannot yet contain STATUS. */
2120  gdb_assert (new_lp->status == 0);
2121 
2122  /* Save the wait status to report later. */
2123  if (debug_linux_nat)
2124  fprintf_unfiltered (gdb_stdlog,
2125  "LHEW: waitpid of new LWP %ld, "
2126  "saving status %s\n",
2127  (long) ptid_get_lwp (new_lp->ptid),
2128  status_to_str (status));
2129  new_lp->status = status;
2130  }
2131 
2132  return 1;
2133  }
2134 
2135  return 0;
2136  }
2137 
2138  if (event == PTRACE_EVENT_EXEC)
2139  {
2140  if (debug_linux_nat)
2141  fprintf_unfiltered (gdb_stdlog,
2142  "LHEW: Got exec event from LWP %ld\n",
2143  ptid_get_lwp (lp->ptid));
2144 
2145  ourstatus->kind = TARGET_WAITKIND_EXECD;
2146  ourstatus->value.execd_pathname
2147  = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
2148 
2149  /* The thread that execed must have been resumed, but, when a
2150  thread execs, it changes its tid to the tgid, and the old
2151  tgid thread might have not been resumed. */
2152  lp->resumed = 1;
2153  return 0;
2154  }
2155 
2156  if (event == PTRACE_EVENT_VFORK_DONE)
2157  {
2158  if (current_inferior ()->waiting_for_vfork_done)
2159  {
2160  if (debug_linux_nat)
2161  fprintf_unfiltered (gdb_stdlog,
2162  "LHEW: Got expected PTRACE_EVENT_"
2163  "VFORK_DONE from LWP %ld: stopping\n",
2164  ptid_get_lwp (lp->ptid));
2165 
2166  ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2167  return 0;
2168  }
2169 
2170  if (debug_linux_nat)
2171  fprintf_unfiltered (gdb_stdlog,
2172  "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2173  "from LWP %ld: ignoring\n",
2174  ptid_get_lwp (lp->ptid));
2175  return 1;
2176  }
2177 
2178  internal_error (__FILE__, __LINE__,
2179  _("unknown ptrace event %d"), event);
2180 }
2181 
2182 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2183  exited. */
2184 
2185 static int
2186 wait_lwp (struct lwp_info *lp)
2187 {
2188  pid_t pid;
2189  int status = 0;
2190  int thread_dead = 0;
2191  sigset_t prev_mask;
2192 
2193  gdb_assert (!lp->stopped);
2194  gdb_assert (lp->status == 0);
2195 
2196  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2197  block_child_signals (&prev_mask);
2198 
2199  for (;;)
2200  {
2201  /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2202  was right and we should just call sigsuspend. */
2203 
2204  pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
2205  if (pid == -1 && errno == ECHILD)
2206  pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
2207  if (pid == -1 && errno == ECHILD)
2208  {
2209  /* The thread has previously exited. We need to delete it
2210  now because, for some vendor 2.4 kernels with NPTL
2211  support backported, there won't be an exit event unless
2212  it is the main thread. 2.6 kernels will report an exit
2213  event for each thread that exits, as expected. */
2214  thread_dead = 1;
2215  if (debug_linux_nat)
2216  fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2217  target_pid_to_str (lp->ptid));
2218  }
2219  if (pid != 0)
2220  break;
2221 
2222  /* Bugs 10970, 12702.
2223  Thread group leader may have exited in which case we'll lock up in
2224  waitpid if there are other threads, even if they are all zombies too.
2225  Basically, we're not supposed to use waitpid this way.
2226  __WCLONE is not applicable for the leader so we can't use that.
2227  LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2228  process; it gets ESRCH both for the zombie and for running processes.
2229 
2230  As a workaround, check if we're waiting for the thread group leader and
2231  if it's a zombie, and avoid calling waitpid if it is.
2232 
2233  This is racy, what if the tgl becomes a zombie right after we check?
2234  Therefore always use WNOHANG with sigsuspend - it is equivalent to
2235  waiting waitpid but linux_proc_pid_is_zombie is safe this way. */
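 /* Note that linux_proc_pid_is_zombie answers this by looking at
    /proc/PID/status rather than by waiting, so it is safe to call on a
    thread group leader that cannot be waitpid'ed yet. */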
2236 
2237  if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2238  && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
2239  {
2240  thread_dead = 1;
2241  if (debug_linux_nat)
2242  fprintf_unfiltered (gdb_stdlog,
2243  "WL: Thread group leader %s vanished.\n",
2244  target_pid_to_str (lp->ptid));
2245  break;
2246  }
2247 
2248  /* Wait for the next SIGCHLD and try again. This may let SIGCHLD handlers
2249  run even though our caller intentionally blocked them with
2250  block_child_signals. That only matters to the loop in
2251  linux_nat_wait_1, and there my_waitpid gets called again before the
2252  loop reaches sigsuspend, so we can safely let the handlers run
2253  here. */
2254 
2255  if (debug_linux_nat)
2256  fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
2257  sigsuspend (&suspend_mask);
2258  }
2259 
2260  restore_child_signals_mask (&prev_mask);
2261 
2262  if (!thread_dead)
2263  {
2264  gdb_assert (pid == ptid_get_lwp (lp->ptid));
2265 
2266  if (debug_linux_nat)
2267  {
2268  fprintf_unfiltered (gdb_stdlog,
2269  "WL: waitpid %s received %s\n",
2270  target_pid_to_str (lp->ptid),
2271  status_to_str (status));
2272  }
2273 
2274  /* Check if the thread has exited. */
2275  if (WIFEXITED (status) || WIFSIGNALED (status))
2276  {
2277  if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
2278  {
2279  if (debug_linux_nat)
2280  fprintf_unfiltered (gdb_stdlog, "WL: Process %d exited.\n",
2281  ptid_get_pid (lp->ptid));
2282 
2283  /* This is the leader exiting, it means the whole
2284  process is gone. Store the status to report to the
2285  core. Store it in lp->waitstatus, because lp->status
2286  would be ambiguous (W_EXITCODE(0,0) == 0). */
2287  store_waitstatus (&lp->waitstatus, status);
2288  return 0;
2289  }
2290 
2291  thread_dead = 1;
2292  if (debug_linux_nat)
2293  fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2294  target_pid_to_str (lp->ptid));
2295  }
2296  }
2297 
2298  if (thread_dead)
2299  {
2300  exit_lwp (lp);
2301  return 0;
2302  }
2303 
2304  gdb_assert (WIFSTOPPED (status));
2305  lp->stopped = 1;
2306 
2307  if (lp->must_set_ptrace_flags)
2308  {
2309  struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2310  int options = linux_nat_ptrace_options (inf->attach_flag);
2311 
2312  linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
2313  lp->must_set_ptrace_flags = 0;
2314  }
2315 
2316  /* Handle GNU/Linux's syscall SIGTRAPs. */
2317  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2318  {
2319  /* No longer need the sysgood bit. The ptrace event ends up
2320  recorded in lp->waitstatus if we care for it. We can carry
2321  on handling the event like a regular SIGTRAP from here
2322  on. */
2323  status = W_STOPCODE (SIGTRAP);
2324  if (linux_handle_syscall_trap (lp, 1))
2325  return wait_lwp (lp);
2326  }
2327 
2328  /* Handle GNU/Linux's extended waitstatus for trace events. */
2329  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2330  && linux_is_extended_waitstatus (status))
2331  {
2332  if (debug_linux_nat)
2333  fprintf_unfiltered (gdb_stdlog,
2334  "WL: Handling extended status 0x%06x\n",
2335  status);
2336  linux_handle_extended_wait (lp, status);
2337  return 0;
2338  }
2339 
2340  return status;
2341 }
2342 
2343 /* Send a SIGSTOP to LP. */
2344 
2345 static int
2346 stop_callback (struct lwp_info *lp, void *data)
2347 {
2348  if (!lp->stopped && !lp->signalled)
2349  {
2350  int ret;
2351 
2352  if (debug_linux_nat)
2353  {
2354  fprintf_unfiltered (gdb_stdlog,
2355  "SC: kill %s **<SIGSTOP>**\n",
2356  target_pid_to_str (lp->ptid));
2357  }
2358  errno = 0;
2359  ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
2360  if (debug_linux_nat)
2361  {
2362  fprintf_unfiltered (gdb_stdlog,
2363  "SC: lwp kill %d %s\n",
2364  ret,
2365  errno ? safe_strerror (errno) : "ERRNO-OK");
2366  }
2367 
2368  lp->signalled = 1;
2369  gdb_assert (lp->status == 0);
2370  }
2371 
2372  return 0;
2373 }
2374 
2375 /* Request a stop on LWP. */
2376 
2377 void
2378 linux_stop_lwp (struct lwp_info *lwp)
2379 {
2380  stop_callback (lwp, NULL);
2381 }
2382 
2383 /* See linux-nat.h */
2384 
2385 void
2386 linux_stop_and_wait_all_lwps (void)
2387 {
2388  /* Stop all LWP's ... */
2389  iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2390 
2391  /* ... and wait until all of them have reported back that
2392  they're no longer running. */
2393  iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2394 }
2395 
2396 /* See linux-nat.h */
2397 
2398 void
2399 linux_unstop_all_lwps (void)
2400 {
2401  iterate_over_lwps (minus_one_ptid,
2402  resume_stopped_resumed_lwps, &minus_one_ptid);
2403 }
2404 
2405 /* Return non-zero if LWP PID has a pending SIGINT. */
2406 
2407 static int
2408 pid_has_pending_sigint (int pid)
2409 {
2410  sigset_t pending, blocked, ignored;
2411 
2412  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2413 
2414  if (sigismember (&pending, SIGINT)
2415  && !sigismember (&ignored, SIGINT))
2416  return 1;
2417 
2418  return 0;
2419 }
2420 
2421 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2422 
2423 static int
2424 set_ignore_sigint (struct lwp_info *lp, void *data)
2425 {
2426  /* If a thread has a pending SIGINT, consume it; otherwise, set a
2427  flag to consume the next one. */
2428  if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2429  && WSTOPSIG (lp->status) == SIGINT)
2430  lp->status = 0;
2431  else
2432  lp->ignore_sigint = 1;
2433 
2434  return 0;
2435 }
2436 
2437 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2438  This function is called after we know the LWP has stopped; if the LWP
2439  stopped before the expected SIGINT was delivered, then it will never have
2440  arrived. Also, if the signal was delivered to a shared queue and consumed
2441  by a different thread, it will never be delivered to this LWP. */
2442 
2443 static void
2444 maybe_clear_ignore_sigint (struct lwp_info *lp)
2445 {
2446  if (!lp->ignore_sigint)
2447  return;
2448 
2449  if (!pid_has_pending_sigint (ptid_get_lwp (lp->ptid)))
2450  {
2451  if (debug_linux_nat)
2452  fprintf_unfiltered (gdb_stdlog,
2453  "MCIS: Clearing bogus flag for %s\n",
2454  target_pid_to_str (lp->ptid));
2455  lp->ignore_sigint = 0;
2456  }
2457 }
2458 
2459 /* Fetch the possible triggered data watchpoint info and store it in
2460  LP.
2461 
2462  On some archs, like x86, that use debug registers to set
2463  watchpoints, it's possible that the way to know which watched
2464  address trapped, is to check the register that is used to select
2465  which address to watch. Problem is, between setting the watchpoint
2466  and reading back which data address trapped, the user may change
2467  the set of watchpoints, and, as a consequence, GDB changes the
2468  debug registers in the inferior. To avoid reading back a stale
2469  stopped-data-address when that happens, we cache in LP the fact
2470  that a watchpoint trapped, and the corresponding data address, as
2471  soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2472  registers meanwhile, we have the cached data we can rely on. */
2473 
2474 static int
2475 check_stopped_by_watchpoint (struct lwp_info *lp)
2476 {
2477  struct cleanup *old_chain;
2478 
2479  if (linux_ops->to_stopped_by_watchpoint == NULL)
2480  return 0;
2481 
2482  old_chain = save_inferior_ptid ();
2483  inferior_ptid = lp->ptid;
2484 
2485  if (linux_ops->to_stopped_by_watchpoint (linux_ops))
2486  {
2487  lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2488 
2489  if (linux_ops->to_stopped_data_address != NULL)
2490  lp->stopped_data_address_p
2491  = linux_ops->to_stopped_data_address (&current_target,
2492  &lp->stopped_data_address);
2493  else
2494  lp->stopped_data_address_p = 0;
2495  }
2496 
2497  do_cleanups (old_chain);
2498 
2499  return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2500 }
2501 
2502 /* Called when the LWP stopped for a trap that could be explained by a
2503  watchpoint or a breakpoint. */
2504 
2505 static void
2506 save_sigtrap (struct lwp_info *lp)
2507 {
2508  gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2509  gdb_assert (lp->status != 0);
2510 
2511  /* Check first if this was a SW/HW breakpoint before checking
2512  watchpoints, because at least s390 can't tell the data address of
2513  hardware watchpoint hits, and the kernel returns
2514  stopped-by-watchpoint as long as there's a watchpoint set. */
2515  if (linux_nat_status_is_event (lp->status))
2516  check_stopped_by_breakpoint (lp);
2517 
2518  /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2519  or hardware watchpoint. Check which is which if we got
2520  TARGET_STOPPED_BY_HW_BREAKPOINT. */
2521  if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON
2522  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2523  check_stopped_by_watchpoint (lp);
2524 }
2525 
2526 /* Returns true if the LWP had stopped for a watchpoint. */
2527 
2528 static int
2529 linux_nat_stopped_by_watchpoint (struct target_ops *ops)
2530 {
2531  struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2532 
2533  gdb_assert (lp != NULL);
2534 
2535  return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2536 }
2537 
2538 static int
2539 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2540 {
2541  struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2542 
2543  gdb_assert (lp != NULL);
2544 
2545  *addr_p = lp->stopped_data_address;
2546 
2547  return lp->stopped_data_address_p;
2548 }
2549 
2550 /* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2551 
2552 static int
2553 sigtrap_is_event (int status)
2554 {
2555  return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2556 }
2557 
2558 /* Set an alternative recognizer for SIGTRAP-like events. If
2559  breakpoint_inserted_here_p is true there, then gdbarch_decr_pc_after_break
2560  will be applied. */
2561 
2562 void
2563 linux_nat_set_status_is_event (struct target_ops *t,
2564  int (*status_is_event) (int status))
2565 {
2566  linux_nat_status_is_event = status_is_event;
2567 }
2568 
2569 /* Wait until LP is stopped. */
2570 
2571 static int
2572 stop_wait_callback (struct lwp_info *lp, void *data)
2573 {
2574  struct inferior *inf = find_inferior_ptid (lp->ptid);
2575 
2576  /* If this is a vfork parent, bail out, it is not going to report
2577  any SIGSTOP until the vfork is done with. */
2578  if (inf->vfork_child != NULL)
2579  return 0;
2580 
2581  if (!lp->stopped)
2582  {
2583  int status;
2584 
2585  status = wait_lwp (lp);
2586  if (status == 0)
2587  return 0;
2588 
2589  if (lp->ignore_sigint && WIFSTOPPED (status)
2590  && WSTOPSIG (status) == SIGINT)
2591  {
2592  lp->ignore_sigint = 0;
2593 
2594  errno = 0;
2595  ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2596  lp->stopped = 0;
2597  if (debug_linux_nat)
2598  fprintf_unfiltered (gdb_stdlog,
2599  "PTRACE_CONT %s, 0, 0 (%s) "
2600  "(discarding SIGINT)\n",
2601  target_pid_to_str (lp->ptid),
2602  errno ? safe_strerror (errno) : "OK");
2603 
2604  return stop_wait_callback (lp, NULL);
2605  }
2606 
2607  maybe_clear_ignore_sigint (lp);
2608 
2609  if (WSTOPSIG (status) != SIGSTOP)
2610  {
2611  /* The thread was stopped with a signal other than SIGSTOP. */
2612 
2613  if (debug_linux_nat)
2614  fprintf_unfiltered (gdb_stdlog,
2615  "SWC: Pending event %s in %s\n",
2616  status_to_str ((int) status),
2617  target_pid_to_str (lp->ptid));
2618 
2619  /* Save the sigtrap event. */
2620  lp->status = status;
2621  gdb_assert (lp->signalled);
2622  save_sigtrap (lp);
2623  }
2624  else
2625  {
2626  /* We caught the SIGSTOP that we intended to catch, so
2627  there's no SIGSTOP pending. */
2628 
2629  if (debug_linux_nat)
2630  fprintf_unfiltered (gdb_stdlog,
2631  "SWC: Expected SIGSTOP caught for %s.\n",
2632  target_pid_to_str (lp->ptid));
2633 
2634  /* Reset SIGNALLED only after the stop_wait_callback call
2635  above as it does gdb_assert on SIGNALLED. */
2636  lp->signalled = 0;
2637  }
2638  }
2639 
2640  return 0;
2641 }
2642 
2643 /* Return non-zero if LP has a wait status pending. Discard the
2644  pending event and resume the LWP if the event that originally
2645  caused the stop became uninteresting. */
2646 
2647 static int
2648 status_callback (struct lwp_info *lp, void *data)
2649 {
2650  /* Only report a pending wait status if we pretend that this has
2651  indeed been resumed. */
2652  if (!lp->resumed)
2653  return 0;
2654 
2655  if (!lwp_status_pending_p (lp))
2656  return 0;
2657 
2658  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2659  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2660  {
2661  struct regcache *regcache = get_thread_regcache (lp->ptid);
2662  struct gdbarch *gdbarch = get_regcache_arch (regcache);
2663  CORE_ADDR pc;
2664  int discard = 0;
2665 
2666  pc = regcache_read_pc (regcache);
2667 
2668  if (pc != lp->stop_pc)
2669  {
2670  if (debug_linux_nat)
2671  fprintf_unfiltered (gdb_stdlog,
2672  "SC: PC of %s changed. was=%s, now=%s\n",
2673  target_pid_to_str (lp->ptid),
2674  paddress (target_gdbarch (), lp->stop_pc),
2675  paddress (target_gdbarch (), pc));
2676  discard = 1;
2677  }
2678 
2679 #if !USE_SIGTRAP_SIGINFO
2680  else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2681  {
2682  if (debug_linux_nat)
2683  fprintf_unfiltered (gdb_stdlog,
2684  "SC: previous breakpoint of %s, at %s gone\n",
2685  target_pid_to_str (lp->ptid),
2686  paddress (target_gdbarch (), lp->stop_pc));
2687 
2688  discard = 1;
2689  }
2690 #endif
2691 
2692  if (discard)
2693  {
2694  if (debug_linux_nat)
2695  fprintf_unfiltered (gdb_stdlog,
2696  "SC: pending event of %s cancelled.\n",
2697  target_pid_to_str (lp->ptid));
2698 
2699  lp->status = 0;
2700  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2701  return 0;
2702  }
2703  }
2704 
2705  return 1;
2706 }
2707 
2708 /* Return non-zero if LP isn't stopped. */
2709 
2710 static int
2711 running_callback (struct lwp_info *lp, void *data)
2712 {
2713  return (!lp->stopped
2714  || (lwp_status_pending_p (lp) && lp->resumed));
2715 }
2716 
2717 /* Count the LWP's that have had events. */
2718 
2719 static int
2720 count_events_callback (struct lwp_info *lp, void *data)
2721 {
2722  int *count = data;
2723 
2724  gdb_assert (count != NULL);
2725 
2726  /* Select only resumed LWPs that have an event pending. */
2727  if (lp->resumed && lwp_status_pending_p (lp))
2728  (*count)++;
2729 
2730  return 0;
2731 }
2732 
2733 /* Select the LWP (if any) that is currently being single-stepped. */
2734 
2735 static int
2736 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2737 {
2738  if (lp->last_resume_kind == resume_step
2739  && lp->status != 0)
2740  return 1;
2741  else
2742  return 0;
2743 }
2744 
2745 /* Returns true if LP has a status pending. */
2746 
2747 static int
2748 lwp_status_pending_p (struct lwp_info *lp)
2749 {
2750  /* We check for lp->waitstatus in addition to lp->status, because we
2751  can have pending process exits recorded in lp->status and
2752  W_EXITCODE(0,0) happens to be 0. */
2753  return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2754 }
2755 
2756 /* Select the Nth LWP that has had an event. */
2757 
2758 static int
2759 select_event_lwp_callback (struct lwp_info *lp, void *data)
2760 {
2761  int *selector = data;
2762 
2763  gdb_assert (selector != NULL);
2764 
2765  /* Select only resumed LWPs that have an event pending. */
2766  if (lp->resumed && lwp_status_pending_p (lp))
2767  if ((*selector)-- == 0)
2768  return 1;
2769 
2770  return 0;
2771 }
2772 
2773 /* Called when the LWP got a signal/trap that could be explained by a
2774  software or hardware breakpoint. */
2775 
2776 static int
2777 check_stopped_by_breakpoint (struct lwp_info *lp)
2778 {
2779  /* Arrange for a breakpoint to be hit again later. We don't keep
2780  the SIGTRAP status and don't forward the SIGTRAP signal to the
2781  LWP. We will handle the current event, eventually we will resume
2782  this LWP, and this breakpoint will trap again.
2783 
2784  If we do not do this, then we run the risk that the user will
2785  delete or disable the breakpoint, but the LWP will have already
2786  tripped on it. */
2787 
2788  struct regcache *regcache = get_thread_regcache (lp->ptid);
2789  struct gdbarch *gdbarch = get_regcache_arch (regcache);
2790  CORE_ADDR pc;
2791  CORE_ADDR sw_bp_pc;
2792 #if USE_SIGTRAP_SIGINFO
2793  siginfo_t siginfo;
2794 #endif
2795 
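 /* On targets where the trap PC is reported past the breakpoint
    instruction (gdbarch_decr_pc_after_break != 0), SW_BP_PC below is
    the address of the breakpoint instruction itself. */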
2796  pc = regcache_read_pc (regcache);
2797  sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
2798 
2799 #if USE_SIGTRAP_SIGINFO
2800  if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2801  {
2802  if (siginfo.si_signo == SIGTRAP)
2803  {
2804  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
2805  {
2806  if (debug_linux_nat)
2807  fprintf_unfiltered (gdb_stdlog,
2808  "CSBB: %s stopped by software "
2809  "breakpoint\n",
2810  target_pid_to_str (lp->ptid));
2811 
2812  /* Back up the PC if necessary. */
2813  if (pc != sw_bp_pc)
2814  regcache_write_pc (regcache, sw_bp_pc);
2815 
2816  lp->stop_pc = sw_bp_pc;
2817  lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2818  return 1;
2819  }
2820  else if (siginfo.si_code == TRAP_HWBKPT)
2821  {
2822  if (debug_linux_nat)
2823  fprintf_unfiltered (gdb_stdlog,
2824  "CSBB: %s stopped by hardware "
2825  "breakpoint/watchpoint\n",
2826  target_pid_to_str (lp->ptid));
2827 
2828  lp->stop_pc = pc;
2829  lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2830  return 1;
2831  }
2832  else if (siginfo.si_code == TRAP_TRACE)
2833  {
2834  if (debug_linux_nat)
2835  fprintf_unfiltered (gdb_stdlog,
2836  "CSBB: %s stopped by trace\n",
2837  target_pid_to_str (lp->ptid));
2838  }
2839  }
2840  }
2841 #else
2842  if ((!lp->step || lp->stop_pc == sw_bp_pc)
2843  && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2844  sw_bp_pc))
2845  {
2846  /* The LWP was either continued, or stepped a software
2847  breakpoint instruction. */
2848  if (debug_linux_nat)
2849  fprintf_unfiltered (gdb_stdlog,
2850  "CSBB: %s stopped by software breakpoint\n",
2851  target_pid_to_str (lp->ptid));
2852 
2853  /* Back up the PC if necessary. */
2854  if (pc != sw_bp_pc)
2855  regcache_write_pc (regcache, sw_bp_pc);
2856 
2857  lp->stop_pc = sw_bp_pc;
2858  lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2859  return 1;
2860  }
2861 
2862  if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2863  {
2864  if (debug_linux_nat)
2865  fprintf_unfiltered (gdb_stdlog,
2866  "CSBB: stopped by hardware breakpoint %s\n",
2867  target_pid_to_str (lp->ptid));
2868 
2869  lp->stop_pc = pc;
2870  lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2871  return 1;
2872  }
2873 #endif
2874 
2875  return 0;
2876 }
2877 
2878 
2879 /* Returns true if the LWP had stopped for a software breakpoint. */
2880 
2881 static int
2882 linux_nat_stopped_by_sw_breakpoint (struct target_ops *ops)
2883 {
2884  struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2885 
2886  gdb_assert (lp != NULL);
2887 
2888  return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2889 }
2890 
2891 /* Implement the supports_stopped_by_sw_breakpoint method. */
2892 
2893 static int
2894 linux_nat_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2895 {
2896  return USE_SIGTRAP_SIGINFO;
2897 }
2898 
2899 /* Returns true if the LWP had stopped for a hardware
2900  breakpoint/watchpoint. */
2901 
2902 static int
2903 linux_nat_stopped_by_hw_breakpoint (struct target_ops *ops)
2904 {
2905  struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2906 
2907  gdb_assert (lp != NULL);
2908 
2909  return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2910 }
2911 
2912 /* Implement the supports_stopped_by_hw_breakpoint method. */
2913 
2914 static int
2915 linux_nat_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2916 {
2917  return USE_SIGTRAP_SIGINFO;
2918 }
2919 
2920 /* Select one LWP out of those that have events pending. */
2921 
2922 static void
2923 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
2924 {
2925  int num_events = 0;
2926  int random_selector;
2927  struct lwp_info *event_lp = NULL;
2928 
2929  /* Record the wait status for the original LWP. */
2930  (*orig_lp)->status = *status;
2931 
2932  /* In all-stop, give preference to the LWP that is being
2933  single-stepped. There will be at most one, and it will be the
2934  LWP that the core is most interested in. If we didn't do this,
2935  then we'd have to handle pending step SIGTRAPs somehow in case
2936  the core later continues the previously-stepped thread, as
2937  otherwise we'd report the pending SIGTRAP then, and the core, not
2938  having stepped the thread, wouldn't understand what the trap was
2939  for, and therefore would report it to the user as a random
2940  signal. */
2941  if (!non_stop)
2942  {
2943  event_lp = iterate_over_lwps (filter,
2944  select_singlestep_lwp_callback, NULL);
2945  if (event_lp != NULL)
2946  {
2947  if (debug_linux_nat)
2948  fprintf_unfiltered (gdb_stdlog,
2949  "SEL: Select single-step %s\n",
2950  target_pid_to_str (event_lp->ptid));
2951  }
2952  }
2953 
2954  if (event_lp == NULL)
2955  {
2956  /* Pick one at random, out of those which have had events. */
2957 
2958  /* First see how many events we have. */
2959  iterate_over_lwps (filter, count_events_callback, &num_events);
2960  gdb_assert (num_events > 0);
2961 
2962  /* Now randomly pick a LWP out of those that have had
2963  events. */
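 /* rand () / (RAND_MAX + 1.0) is uniform over [0, 1), so scaling by
    NUM_EVENTS gives an index in [0, NUM_EVENTS - 1]. */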
2964  random_selector = (int)
2965  ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2966 
2967  if (debug_linux_nat && num_events > 1)
2968  fprintf_unfiltered (gdb_stdlog,
2969  "SEL: Found %d events, selecting #%d\n",
2970  num_events, random_selector);
2971 
2972  event_lp = iterate_over_lwps (filter,
2973  select_event_lwp_callback,
2974  &random_selector);
2975  }
2976 
2977  if (event_lp != NULL)
2978  {
2979  /* Switch the event LWP. */
2980  *orig_lp = event_lp;
2981  *status = event_lp->status;
2982  }
2983 
2984  /* Flush the wait status for the event LWP. */
2985  (*orig_lp)->status = 0;
2986 }
2987 
2988 /* Return non-zero if LP has been resumed. */
2989 
2990 static int
2991 resumed_callback (struct lwp_info *lp, void *data)
2992 {
2993  return lp->resumed;
2994 }
2995 
2996 /* Stop an active thread, verify it still exists, then resume it. If
2997  the thread ends up with a pending status, then it is not resumed,
2998  and *DATA (really a pointer to int), is set. */
2999 
3000 static int
3001 stop_and_resume_callback (struct lwp_info *lp, void *data)
3002 {
3003  if (!lp->stopped)
3004  {
3005  ptid_t ptid = lp->ptid;
3006 
3007  stop_callback (lp, NULL);
3008  stop_wait_callback (lp, NULL);
3009 
3010  /* Resume if the lwp still exists, and the core wanted it
3011  running. */
3012  lp = find_lwp_pid (ptid);
3013  if (lp != NULL)
3014  {
3015  if (lp->last_resume_kind == resume_stop
3016  && !lwp_status_pending_p (lp))
3017  {
3018  /* The core wanted the LWP to stop. Even if it stopped
3019  cleanly (with SIGSTOP), leave the event pending. */
3020  if (debug_linux_nat)
3021  fprintf_unfiltered (gdb_stdlog,
3022  "SARC: core wanted LWP %ld stopped "
3023  "(leaving SIGSTOP pending)\n",
3024  ptid_get_lwp (lp->ptid));
3025  lp->status = W_STOPCODE (SIGSTOP);
3026  }
3027 
3028  if (!lwp_status_pending_p (lp))
3029  {
3030  if (debug_linux_nat)
3031  fprintf_unfiltered (gdb_stdlog,
3032  "SARC: re-resuming LWP %ld\n",
3033  ptid_get_lwp (lp->ptid));
3034  resume_lwp (lp, lp->step, GDB_SIGNAL_0);
3035  }
3036  else
3037  {
3038  if (debug_linux_nat)
3039  fprintf_unfiltered (gdb_stdlog,
3040  "SARC: not re-resuming LWP %ld "
3041  "(has pending)\n",
3042  ptid_get_lwp (lp->ptid));
3043  }
3044  }
3045  }
3046  return 0;
3047 }
3048 
3049 /* Check if we should go on and pass this event to common code.
3050  Return the affected lwp if we are, or NULL otherwise. */
3051 
3052 static struct lwp_info *
3053 linux_nat_filter_event (int lwpid, int status)
3054 {
3055  struct lwp_info *lp;
3056  int event = linux_ptrace_get_extended_event (status);
3057 
3058  lp = find_lwp_pid (pid_to_ptid (lwpid));
3059 
3060  /* Check for stop events reported by a process we didn't already
3061  know about - anything not already in our LWP list.
3062 
3063  If we're expecting to receive stopped processes after
3064  fork, vfork, and clone events, then we'll just add the
3065  new one to our list and go back to waiting for the event
3066  to be reported - the stopped process might be returned
3067  from waitpid before or after the event is.
3068 
3069  But note the case of a non-leader thread exec'ing after the
3070  leader having exited, and gone from our lists. The non-leader
3071  thread changes its tid to the tgid. */
3072 
3073  if (WIFSTOPPED (status) && lp == NULL
3074  && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
3075  {
3076  /* A multi-thread exec after we had seen the leader exiting. */
3077  if (debug_linux_nat)
3078  fprintf_unfiltered (gdb_stdlog,
3079  "LLW: Re-adding thread group leader LWP %d.\n",
3080  lwpid);
3081 
3082  lp = add_lwp (ptid_build (lwpid, lwpid, 0));
3083  lp->stopped = 1;
3084  lp->resumed = 1;
3085  add_thread (lp->ptid);
3086  }
3087 
3088  if (WIFSTOPPED (status) && !lp)
3089  {
3090  if (debug_linux_nat)
3091  fprintf_unfiltered (gdb_stdlog,
3092  "LHEW: saving LWP %ld status %s in stopped_pids list\n",
3093  (long) lwpid, status_to_str (status));
3094  add_to_pid_list (&stopped_pids, lwpid, status);
3095  return NULL;
3096  }
3097 
3098  /* Make sure we don't report an event for the exit of an LWP not in
3099  our list, i.e. not part of the current process. This can happen
3100  if we detach from a program we originally forked and then it
3101  exits. */
3102  if (!WIFSTOPPED (status) && !lp)
3103  return NULL;
3104 
3105  /* This LWP is stopped now. (And if dead, this prevents it from
3106  ever being continued.) */
3107  lp->stopped = 1;
3108 
3109  if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
3110  {
3111  struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
3112  int options = linux_nat_ptrace_options (inf->attach_flag);
3113 
3114  linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
3115  lp->must_set_ptrace_flags = 0;
3116  }
3117 
3118  /* Handle GNU/Linux's syscall SIGTRAPs. */
3119  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3120  {
3121  /* No longer need the sysgood bit. The ptrace event ends up
3122  recorded in lp->waitstatus if we care for it. We can carry
3123  on handling the event like a regular SIGTRAP from here
3124  on. */
3125  status = W_STOPCODE (SIGTRAP);
3126  if (linux_handle_syscall_trap (lp, 0))
3127  return NULL;
3128  }
3129 
3130  /* Handle GNU/Linux's extended waitstatus for trace events. */
3131  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
3132  && linux_is_extended_waitstatus (status))
3133  {
3134  if (debug_linux_nat)
3135  fprintf_unfiltered (gdb_stdlog,
3136  "LLW: Handling extended status 0x%06x\n",
3137  status);
3138  if (linux_handle_extended_wait (lp, status))
3139  return NULL;
3140  }
3141 
3142  /* Check if the thread has exited. */
3143  if (WIFEXITED (status) || WIFSIGNALED (status))
3144  {
3145  if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
3146  {
3147  /* If this is the main thread, we must stop all threads and
3148  verify if they are still alive. This is because in the
3149  nptl thread model on Linux 2.4, there is no signal issued
3150  for exiting LWPs other than the main thread. We only get
3151  the main thread exit signal once all child threads have
3152  already exited. If we stop all the threads and use the
3153  stop_wait_callback to check if they have exited we can
3154  determine whether this signal should be ignored or
3155  whether it means the end of the debugged application,
3156  regardless of which threading model is being used. */
3157  if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
3158  {
3159  iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3160  stop_and_resume_callback, NULL);
3161  }
3162 
3163  if (debug_linux_nat)
3164  fprintf_unfiltered (gdb_stdlog,
3165  "LLW: %s exited.\n",
3166  target_pid_to_str (lp->ptid));
3167 
3168  if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
3169  {
3170  /* If there is at least one more LWP, then the exit signal
3171  was not the end of the debugged application and should be
3172  ignored. */
3173  exit_lwp (lp);
3174  return NULL;
3175  }
3176  }
3177 
3178  gdb_assert (lp->resumed);
3179 
3180  if (debug_linux_nat)
3181  fprintf_unfiltered (gdb_stdlog,
3182  "Process %ld exited\n",
3183  ptid_get_lwp (lp->ptid));
3184 
3185  /* This was the last lwp in the process. Since events are
3186  serialized to GDB core, we may not be able report this one
3187  right now, but GDB core and the other target layers will want
3188  to be notified about the exit code/signal, leave the status
3189  pending for the next time we're able to report it. */
3190 
3191  /* Dead LWP's aren't expected to report a pending sigstop. */
3192  lp->signalled = 0;
3193 
3194  /* Store the pending event in the waitstatus, because
3195  W_EXITCODE(0,0) == 0. */
3196  store_waitstatus (&lp->waitstatus, status);
3197  return lp;
3198  }
3199 
3200  /* Check if the current LWP has previously exited. In the nptl
3201  thread model, LWPs other than the main thread do not issue
3202  signals when they exit so we must check whenever the thread has
3203  stopped. A similar check is made in stop_wait_callback(). */
3204  if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
3205  {
3206  ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));
3207 
3208  if (debug_linux_nat)
3209  fprintf_unfiltered (gdb_stdlog,
3210  "LLW: %s exited.\n",
3211  target_pid_to_str (lp->ptid));
3212 
3213  exit_lwp (lp);
3214 
3215  /* Make sure there is at least one thread running. */
3216  gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3217 
3218  /* Discard the event. */
3219  return NULL;
3220  }
3221 
3222  /* Make sure we don't report a SIGSTOP that we sent ourselves in
3223  an attempt to stop an LWP. */
3224  if (lp->signalled
3225  && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3226  {
3227  lp->signalled = 0;
3228 
3229  if (lp->last_resume_kind == resume_stop)
3230  {
3231  if (debug_linux_nat)
3232  fprintf_unfiltered (gdb_stdlog,
3233  "LLW: resume_stop SIGSTOP caught for %s.\n",
3234  target_pid_to_str (lp->ptid));
3235  }
3236  else
3237  {
3238  /* This is a delayed SIGSTOP. Filter out the event. */
3239 
3240  if (debug_linux_nat)
3241  fprintf_unfiltered (gdb_stdlog,
3242  "LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
3243  lp->step ?
3244  "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3245  target_pid_to_str (lp->ptid));
3246 
3247  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3248  gdb_assert (lp->resumed);
3249  return NULL;
3250  }
3251  }
3252 
3253  /* Make sure we don't report a SIGINT that we have already displayed
3254  for another thread. */
3255  if (lp->ignore_sigint
3256  && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3257  {
3258  if (debug_linux_nat)
3259  fprintf_unfiltered (gdb_stdlog,
3260  "LLW: Delayed SIGINT caught for %s.\n",
3261  target_pid_to_str (lp->ptid));
3262 
3263  /* This is a delayed SIGINT. */
3264  lp->ignore_sigint = 0;
3265 
3266  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3267  if (debug_linux_nat)
3268  fprintf_unfiltered (gdb_stdlog,
3269  "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3270  lp->step ?
3271  "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3272  target_pid_to_str (lp->ptid));
3273  gdb_assert (lp->resumed);
3274 
3275  /* Discard the event. */
3276  return NULL;
3277  }
3278 
3279  /* Don't report signals that GDB isn't interested in, such as
3280  signals that are neither printed nor stopped upon. Stopping all
3281  threads can be a bit time-consuming so if we want decent
3282  performance with heavily multi-threaded programs, especially when
3283  they're using a high frequency timer, we'd better avoid it if we
3284  can. */
3285  if (WIFSTOPPED (status))
3286  {
3287  enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3288 
3289  if (!non_stop)
3290  {
3291  /* Only do the below in all-stop, as we currently use SIGSTOP
3292  to implement target_stop (see linux_nat_stop) in
3293  non-stop. */
3294  if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3295  {
3296  /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3297  forwarded to the entire process group, that is, all LWPs
3298  will receive it - unless they're using CLONE_THREAD to
3299  share signals. Since we only want to report it once, we
3300  mark it as ignored for all LWPs except this one. */
3301  iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3302  set_ignore_sigint, NULL);
3303  lp->ignore_sigint = 0;
3304  }
3305  else
3306  maybe_clear_ignore_sigint (lp);
3307  }
3308 
3309  /* When using hardware single-step, we need to report every signal.
3310  Otherwise, signals in pass_mask may be short-circuited
3311  except signals that might be caused by a breakpoint. */
3312  if (!lp->step
3313  && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3314  && !linux_wstatus_maybe_breakpoint (status))
3315  {
3316  linux_resume_one_lwp (lp, lp->step, signo);
3317  if (debug_linux_nat)
3318  fprintf_unfiltered (gdb_stdlog,
3319  "LLW: %s %s, %s (preempt 'handle')\n",
3320  lp->step ?
3321  "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3322  target_pid_to_str (lp->ptid),
3323  (signo != GDB_SIGNAL_0
3324  ? strsignal (gdb_signal_to_host (signo))
3325  : "0"));
3326  return NULL;
3327  }
3328  }
3329 
3330  /* An interesting event. */
3331  gdb_assert (lp);
3332  lp->status = status;
3333  save_sigtrap (lp);
3334  return lp;
3335 }
3336 
3337 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3338  their exits until all other threads in the group have exited. */
3339 
3340 static void
3341 check_zombie_leaders (void)
3342 {
3343  struct inferior *inf;
3344 
3345  ALL_INFERIORS (inf)
3346  {
3347  struct lwp_info *leader_lp;
3348 
3349  if (inf->pid == 0)
3350  continue;
3351 
3352  leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3353  if (leader_lp != NULL
3354  /* Check if there are other threads in the group, as we may
3355  have raced with the inferior simply exiting. */
3356  && num_lwps (inf->pid) > 1
3357  && linux_proc_pid_is_zombie (inf->pid))
3358  {
3359  if (debug_linux_nat)
3360  fprintf_unfiltered (gdb_stdlog,
3361  "CZL: Thread group leader %d zombie "
3362  "(it exited, or another thread execd).\n",
3363  inf->pid);
3364 
3365  /* A leader zombie can mean one of two things:
3366 
3367  - It exited, and there's an exit status pending
3368  available, or only the leader exited (not the whole
3369  program). In the latter case, we can't waitpid the
3370  leader's exit status until all other threads are gone.
3371 
3372  - There are 3 or more threads in the group, and a thread
3373  other than the leader exec'd. On an exec, the Linux
3374  kernel destroys all other threads (except the execing
3375  one) in the thread group, and resets the execing thread's
3376  tid to the tgid. No exit notification is sent for the
3377  execing thread -- from the ptracer's perspective, it
3378  appears as though the execing thread just vanishes.
3379  Until we reap all other threads except the leader and the
3380  execing thread, the leader will be zombie, and the
3381  execing thread will be in `D (disc sleep)'. As soon as
3382  all other threads are reaped, the execing thread changes
3383  its tid to the tgid, and the previous (zombie) leader
3384  vanishes, giving place to the "new" leader. We could try
3385  distinguishing the exit and exec cases, by waiting once
3386  more, and seeing if something comes out, but it doesn't
3387  sound useful. The previous leader _does_ go away, and
3388  we'll re-add the new one once we see the exec event
3389  (which is just the same as what would happen if the
3390  previous leader did exit voluntarily before some other
3391  thread execs). */
3392 
3393  if (debug_linux_nat)
3394  fprintf_unfiltered (gdb_stdlog,
3395  "CZL: Thread group leader %d vanished.\n",
3396  inf->pid);
3397  exit_lwp (leader_lp);
3398  }
3399  }
3400 }
3401 
3402 static ptid_t
3403 linux_nat_wait_1 (struct target_ops *ops,
3404  ptid_t ptid, struct target_waitstatus *ourstatus,
3405  int target_options)
3406 {
3407  sigset_t prev_mask;
3408  enum resume_kind last_resume_kind;
3409  struct lwp_info *lp;
3410  int status;
3411 
3412  if (debug_linux_nat)
3413  fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3414 
3415  /* The first time we get here after starting a new inferior, we may
3416  not have added it to the LWP list yet - this is the earliest
3417  moment at which we know its PID. */
3418  if (ptid_is_pid (inferior_ptid))
3419  {
3420  /* Upgrade the main thread's ptid. */
3421  thread_change_ptid (inferior_ptid,
3422  ptid_build (ptid_get_pid (inferior_ptid),
3423  ptid_get_pid (inferior_ptid), 0));
3424 
3425  lp = add_initial_lwp (inferior_ptid);
3426  lp->resumed = 1;
3427  }
3428 
3429  /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3430  block_child_signals (&prev_mask);
3431 
3432  /* First check if there is a LWP with a wait status pending. */
3433  lp = iterate_over_lwps (ptid, status_callback, NULL);
3434  if (lp != NULL)
3435  {
3436  if (debug_linux_nat)
3437  fprintf_unfiltered (gdb_stdlog,
3438  "LLW: Using pending wait status %s for %s.\n",
3439  status_to_str (lp->status),
3440  target_pid_to_str (lp->ptid));
3441  }
3442 
3443  if (!target_is_async_p ())
3444  {
3445  /* Causes SIGINT to be passed on to the attached process. */
3446  set_sigint_trap ();
3447  }
3448 
3449  /* But if we don't find a pending event, we'll have to wait. Always
3450  pull all events out of the kernel. We'll randomly select an
3451  event LWP out of all that have events, to prevent starvation. */
3452 
3453  while (lp == NULL)
3454  {
3455  pid_t lwpid;
3456 
3457  /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3458  quirks:
3459 
3460  - If the thread group leader exits while other threads in the
3461  thread group still exist, waitpid(TGID, ...) hangs. That
3462  waitpid won't return an exit status until the other threads
3463  in the group are reaped.
3464 
3465  - When a non-leader thread execs, that thread just vanishes
3466  without reporting an exit (so we'd hang if we waited for it
3467  explicitly in that case). The exec event is reported to
3468  the TGID pid. */
3469 
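 /* Check for events in cloned children (__WCLONE) first; if that finds
    nothing (or fails with ECHILD), check the non-clone children. */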
3470  errno = 0;
3471  lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3472  if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3473  lwpid = my_waitpid (-1, &status, WNOHANG);
3474 
3475  if (debug_linux_nat)
3476  fprintf_unfiltered (gdb_stdlog,
3477  "LNW: waitpid(-1, ...) returned %d, %s\n",
3478  lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3479 
3480  if (lwpid > 0)
3481  {
3482  if (debug_linux_nat)
3483  {
3484  fprintf_unfiltered (gdb_stdlog,
3485  "LLW: waitpid %ld received %s\n",
3486  (long) lwpid, status_to_str (status));
3487  }
3488 
3489  linux_nat_filter_event (lwpid, status);
3490  /* Retry until nothing comes out of waitpid. A single
3491  SIGCHLD can indicate more than one child stopped. */
3492  continue;
3493  }
3494 
3495  /* Now that we've pulled all events out of the kernel, resume
3496  LWPs that don't have an interesting event to report. */
3497  iterate_over_lwps (minus_one_ptid,
3498  resume_stopped_resumed_lwps, &minus_one_ptid);
3499 
3500  /* ... and find an LWP with a status to report to the core, if
3501  any. */
3502  lp = iterate_over_lwps (ptid, status_callback, NULL);
3503  if (lp != NULL)
3504  break;
3505 
3506  /* Check for zombie thread group leaders. Those can't be reaped
3507  until all other threads in the thread group are. */
3508  check_zombie_leaders ();
3509 
3510  /* If there are no resumed children left, bail. We'd be stuck
3511  forever in the sigsuspend call below otherwise. */
3512  if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3513  {
3514  if (debug_linux_nat)
3515  fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3516 
3517  ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3518 
3519  if (!target_is_async_p ())
3520  clear_sigint_trap ();
3521 
3522  restore_child_signals_mask (&prev_mask);
3523  return minus_one_ptid;
3524  }
3525 
3526  /* No interesting event to report to the core. */
3527 
3528  if (target_options & TARGET_WNOHANG)
3529  {
3530  if (debug_linux_nat)
3531  fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3532 
3533  ourstatus->kind = TARGET_WAITKIND_IGNORE;
3534  restore_child_signals_mask (&prev_mask);
3535  return minus_one_ptid;
3536  }
3537 
3538  /* We shouldn't end up here unless we want to try again. */
3539  gdb_assert (lp == NULL);
3540 
3541  /* Block until we get an event reported with SIGCHLD. */
3542  if (debug_linux_nat)
3543  fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
3544  sigsuspend (&suspend_mask);
3545  }
3546 
3547  if (!target_is_async_p ())
3548  clear_sigint_trap ();
3549 
3550  gdb_assert (lp);
3551 
3552  status = lp->status;
3553  lp->status = 0;
3554 
3555  if (!non_stop)
3556  {
3557  /* Now stop all other LWP's ... */
3558  iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3559 
3560  /* ... and wait until all of them have reported back that
3561  they're no longer running. */
3562  iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3563  }
3564 
3565  /* If we're not waiting for a specific LWP, choose an event LWP from
3566  among those that have had events. Giving equal priority to all
3567  LWPs that have had events helps prevent starvation. */
3568  if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3569  select_event_lwp (ptid, &lp, &status);
3570 
3571  gdb_assert (lp != NULL);
3572 
3573  /* Now that we've selected our final event LWP, un-adjust its PC if
3574  it was a software breakpoint, and we can't reliably support the
3575  "stopped by software breakpoint" stop reason. */
3576  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3577  && !USE_SIGTRAP_SIGINFO)
3578  {
3579  struct regcache *regcache = get_thread_regcache (lp->ptid);
3580  struct gdbarch *gdbarch = get_regcache_arch (regcache);
3581  int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3582 
3583  if (decr_pc != 0)
3584  {
3585  CORE_ADDR pc;
3586 
3587  pc = regcache_read_pc (regcache);
3588  regcache_write_pc (regcache, pc + decr_pc);
3589  }
3590  }
3591 
3592  /* We'll need this to determine whether to report a SIGSTOP as
3593  GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3594  clears it. */
3595  last_resume_kind = lp->last_resume_kind;
3596 
3597  if (!non_stop)
3598  {
3599  /* In all-stop, from the core's perspective, all LWPs are now
3600  stopped until a new resume action is sent over. */
3601  iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3602  }
3603  else
3604  {
3605  resume_clear_callback (lp, NULL);
3606  }
3607 
3608  if (linux_nat_status_is_event (status))
3609  {
3610  if (debug_linux_nat)
3611  fprintf_unfiltered (gdb_stdlog,
3612  "LLW: trap ptid is %s.\n",
3613  target_pid_to_str (lp->ptid));
3614  }
3615 
3616  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3617  {
3618  *ourstatus = lp->waitstatus;
3619  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3620  }
3621  else
3622  store_waitstatus (ourstatus, status);
3623 
3624  if (debug_linux_nat)
3625  fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3626 
3627  restore_child_signals_mask (&prev_mask);
3628 
3629  if (last_resume_kind == resume_stop
3630  && ourstatus->kind == TARGET_WAITKIND_STOPPED
3631  && WSTOPSIG (status) == SIGSTOP)
3632  {
3633  /* A thread that has been requested to stop by GDB with
3634  target_stop, and it stopped cleanly, so report as SIG0. The
3635  use of SIGSTOP is an implementation detail. */
3636  ourstatus->value.sig = GDB_SIGNAL_0;
3637  }
3638 
3639  if (ourstatus->kind == TARGET_WAITKIND_EXITED
3640  || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3641  lp->core = -1;
3642  else
3643  lp->core = linux_common_core_of_thread (lp->ptid);
3644 
3645  return lp->ptid;
3646 }
3647 
3648 /* Resume LWPs that are currently stopped without any pending status
3649  to report, but are resumed from the core's perspective. */
3650 
3651 static int
3652 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3653 {
3654  ptid_t *wait_ptid_p = data;
3655 
3656  if (!lp->stopped)
3657  {
3658  if (debug_linux_nat)
3659  fprintf_unfiltered (gdb_stdlog,
3660  "RSRL: NOT resuming LWP %s, not stopped\n",
3661  target_pid_to_str (lp->ptid));
3662  }
3663  else if (!lp->resumed)
3664  {
3665  if (debug_linux_nat)
3666  fprintf_unfiltered (gdb_stdlog,
3667  "RSRL: NOT resuming LWP %s, not resumed\n",
3668  target_pid_to_str (lp->ptid));
3669  }
3670  else if (lwp_status_pending_p (lp))
3671  {
3672  if (debug_linux_nat)
3673  fprintf_unfiltered (gdb_stdlog,
3674  "RSRL: NOT resuming LWP %s, has pending status\n",
3675  target_pid_to_str (lp->ptid));
3676  }
3677  else
3678  {
3679  struct regcache *regcache = get_thread_regcache (lp->ptid);
3680  struct gdbarch *gdbarch = get_regcache_arch (regcache);
3681 
3682  TRY
3683  {
3684  CORE_ADDR pc = regcache_read_pc (regcache);
3685  int leave_stopped = 0;
3686 
3687  /* Don't bother if there's a breakpoint at PC that we'd hit
3688  immediately, and we're not waiting for this LWP. */
3689  if (!ptid_match (lp->ptid, *wait_ptid_p))
3690  {
3691  if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3692  leave_stopped = 1;
3693  }
3694 
3695  if (!leave_stopped)
3696  {
3697  if (debug_linux_nat)
3698  fprintf_unfiltered (gdb_stdlog,
3699  "RSRL: resuming stopped-resumed LWP %s at "
3700  "%s: step=%d\n",
3701  target_pid_to_str (lp->ptid),
3702  paddress (gdbarch, pc),
3703  lp->step);
3704 
3705  linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3706  }
3707  }
3708  CATCH (ex, RETURN_MASK_ERROR)
3709  {
3710  if (!check_ptrace_stopped_lwp_gone (lp))
3711  throw_exception (ex);
3712  }
3713  END_CATCH
3714  }
3715 
3716  return 0;
3717 }
3718 
3719 static ptid_t
3720 linux_nat_wait (struct target_ops *ops,
3721  ptid_t ptid, struct target_waitstatus *ourstatus,
3722  int target_options)
3723 {
3724  ptid_t event_ptid;
3725 
3726  if (debug_linux_nat)
3727  {
3728  char *options_string;
3729 
3730  options_string = target_options_to_string (target_options);
3731  fprintf_unfiltered (gdb_stdlog,
3732  "linux_nat_wait: [%s], [%s]\n",
3733  target_pid_to_str (ptid),
3734  options_string);
3735  xfree (options_string);
3736  }
3737 
3738  /* Flush the async file first. */
3739  if (target_is_async_p ())
3740  async_file_flush ();
3741 
3742  /* Resume LWPs that are currently stopped without any pending status
3743  to report, but are resumed from the core's perspective. LWPs get
3744  in this state if we find them stopping at a time we're not
3745  interested in reporting the event (target_wait on a
3746  specific_process, for example, see linux_nat_wait_1), and
3747  meanwhile the event became uninteresting. Don't bother resuming
3748  LWPs we're not going to wait for if they'd stop immediately. */
3749  if (non_stop)
3750  iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3751 
3752  event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3753 
3754  /* If we requested any event, and something came out, assume there
3755  may be more. If we requested a specific lwp or process, also
3756  assume there may be more. */
3757  if (target_is_async_p ()
3758  && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3759  && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3760  || !ptid_equal (ptid, minus_one_ptid)))
3761  async_file_mark ();
3762 
3763  return event_ptid;
3764 }
3765 
3766 static int
3767 kill_callback (struct lwp_info *lp, void *data)
3768 {
3769  /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3770 
3771  errno = 0;
3772  kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
3773  if (debug_linux_nat)
3774  {
3775  int save_errno = errno;
3776 
3777  fprintf_unfiltered (gdb_stdlog,
3778  "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3779  target_pid_to_str (lp->ptid),
3780  save_errno ? safe_strerror (save_errno) : "OK");
3781  }
3782 
3783  /* Some kernels ignore even SIGKILL for processes under ptrace. */
3784 
3785  errno = 0;
3786  ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
3787  if (debug_linux_nat)
3788  {
3789  int save_errno = errno;
3790 
3791  fprintf_unfiltered (gdb_stdlog,
3792  "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3793  target_pid_to_str (lp->ptid),
3794  save_errno ? safe_strerror (save_errno) : "OK");
3795  }
3796 
3797  return 0;
3798 }
3799 
3800 static int
3801 kill_wait_callback (struct lwp_info *lp, void *data)
3802 {
3803  pid_t pid;
3804 
3805  /* We must make sure that there are no pending events (delayed
3806  SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3807  program doesn't interfere with any following debugging session. */
3808 
3809  /* For cloned processes we must check both with __WCLONE and
3810  without, since the exit status of a cloned process isn't reported
3811  with __WCLONE. */
3812  if (lp->cloned)
3813  {
3814  do
3815  {
3816  pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
3817  if (pid != (pid_t) -1)
3818  {
3819  if (debug_linux_nat)
3820  fprintf_unfiltered (gdb_stdlog,
3821  "KWC: wait %s received unknown.\n",
3822  target_pid_to_str (lp->ptid));
3823  /* The Linux kernel sometimes fails to kill a thread
3824  completely after PTRACE_KILL; that goes from the stop
3825  point in do_fork out to the one in
3826  get_signal_to_deliver and waits again. So kill it
3827  again. */
3828  kill_callback (lp, NULL);
3829  }
3830  }
3831  while (pid == ptid_get_lwp (lp->ptid));
3832 
3833  gdb_assert (pid == -1 && errno == ECHILD);
3834  }
3835 
3836  do
3837  {
3838  pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
3839  if (pid != (pid_t) -1)
3840  {
3841  if (debug_linux_nat)
3842  fprintf_unfiltered (gdb_stdlog,
3843  "KWC: wait %s received unk.\n",
3844  target_pid_to_str (lp->ptid));
3845  /* See the call to kill_callback above. */
3846  kill_callback (lp, NULL);
3847  }
3848  }
3849  while (pid == ptid_get_lwp (lp->ptid));
3850 
3851  gdb_assert (pid == -1 && errno == ECHILD);
3852  return 0;
3853 }
3854 
3855 static void
3856 linux_nat_kill (struct target_ops *ops)
3857 {
3858  struct target_waitstatus last;
3859  ptid_t last_ptid;
3860  int status;
3861 
3862  /* If we're stopped while forking and we haven't followed yet,
3863  kill the other task. We need to do this first because the
3864  parent will be sleeping if this is a vfork. */
3865 
3866  get_last_target_status (&last_ptid, &last);
3867 
3868  if (last.kind == TARGET_WAITKIND_FORKED
3869  || last.kind == TARGET_WAITKIND_VFORKED)
3870  {
3871  ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
3872  wait (&status);
3873 
3874  /* Let the arch-specific native code know this process is
3875  gone. */
3876  linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
3877  }
3878 
3879  if (forks_exist_p ())
3880  linux_fork_killall ();
3881  else
3882  {
3883  ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
3884 
3885  /* Stop all threads before killing them, since ptrace requires
3886  that the thread is stopped to successfully PTRACE_KILL. */
3887  iterate_over_lwps (ptid, stop_callback, NULL);
3888  /* ... and wait until all of them have reported back that
3889  they're no longer running. */
3890  iterate_over_lwps (ptid, stop_wait_callback, NULL);
3891 
3892  /* Kill all LWP's ... */
3893  iterate_over_lwps (ptid, kill_callback, NULL);
3894 
3895  /* ... and wait until we've flushed all events. */
3896  iterate_over_lwps (ptid, kill_wait_callback, NULL);
3897  }
3898 
3899  target_mourn_inferior ();
3900 }
3901 
3902 static void
3903 linux_nat_mourn_inferior (struct target_ops *ops)
3904 {
3905  int pid = ptid_get_pid (inferior_ptid);
3906 
3907  purge_lwp_list (pid);
3908 
3909  if (! forks_exist_p ())
3910  /* Normal case, no other forks available. */
3911  linux_ops->to_mourn_inferior (ops);
3912  else
3913  /* Multi-fork case. The current inferior_ptid has exited, but
3914  there are other viable forks to debug. Delete the exiting
3915  one and context-switch to the first available. */
3916  linux_fork_mourn_inferior ();
3917 
3918  /* Let the arch-specific native code know this process is gone. */
3919  linux_nat_forget_process (pid);
3920 }
3921 
3922 /* Convert a native/host siginfo object, into/from the siginfo in the
3923  layout of the inferiors' architecture. */
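 /* A DIRECTION of 1 copies INF_SIGINFO (the inferior's layout) into the
    native SIGINFO; any other value copies the native SIGINFO into
    INF_SIGINFO. */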
3924 
3925 static void
3926 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3927 {
3928  int done = 0;
3929 
3930  if (linux_nat_siginfo_fixup != NULL)
3931  done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3932 
3933  /* If there was no callback, or the callback didn't do anything,
3934  then just do a straight memcpy. */
3935  if (!done)
3936  {
3937  if (direction == 1)
3938  memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3939  else
3940  memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3941  }
3942 }
3943 
3944 static enum target_xfer_status
3945 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3946  const char *annex, gdb_byte *readbuf,
3947  const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3948  ULONGEST *xfered_len)
3949 {
3950  int pid;
3951  siginfo_t siginfo;
3952  gdb_byte inf_siginfo[sizeof (siginfo_t)];
3953 
3955  gdb_assert (readbuf || writebuf);
3956 
3957  pid = ptid_get_lwp (inferior_ptid);
3958  if (pid == 0)
3959  pid = ptid_get_pid (inferior_ptid);
3960 
3961  if (offset > sizeof (siginfo))
3962  return TARGET_XFER_E_IO;
3963 
3964  errno = 0;
3965  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3966  if (errno != 0)
3967  return TARGET_XFER_E_IO;
3968 
3969  /* When GDB is built as a 64-bit application, ptrace writes into
3970  SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3971  inferior with a 64-bit GDB should look the same as debugging it
3972  with a 32-bit GDB, we need to convert it. GDB core always sees
3973  the converted layout, so any read/write will have to be done
3974  post-conversion. */
3975  siginfo_fixup (&siginfo, inf_siginfo, 0);
3976 
3977  if (offset + len > sizeof (siginfo))
3978  len = sizeof (siginfo) - offset;
3979 
3980  if (readbuf != NULL)
3981  memcpy (readbuf, inf_siginfo + offset, len);
3982  else
3983  {
3984  memcpy (inf_siginfo + offset, writebuf, len);
3985 
3986  /* Convert back to ptrace layout before flushing it out. */
3987  siginfo_fixup (&siginfo, inf_siginfo, 1);
3988 
3989  errno = 0;
3990  ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3991  if (errno != 0)
3992  return TARGET_XFER_E_IO;
3993  }
3994 
3995  *xfered_len = len;
3996  return TARGET_XFER_OK;
3997 }
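For reference, the ptrace fetch that this transfer is built around can be done on its own roughly like this (a minimal sketch for Linux, assuming TID names a ptrace-stopped thread):

#include <errno.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Fetch the pending siginfo of ptrace-stopped thread TID.
   Returns 0 on success, -1 with errno set on failure. */
static int
fetch_siginfo (pid_t tid, siginfo_t *si)
{
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, tid, (void *) 0, si);
  return errno != 0 ? -1 : 0;
}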
3998 
3999 static enum target_xfer_status
4000 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4001  const char *annex, gdb_byte *readbuf,
4002  const gdb_byte *writebuf,
4003  ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
4004 {
4005  struct cleanup *old_chain;
4006  enum target_xfer_status xfer;
4007 
4008  if (object == TARGET_OBJECT_SIGNAL_INFO)
4009  return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4010  offset, len, xfered_len);
4011 
4012  /* The target is connected but no live inferior is selected. Pass
4013  this request down to a lower stratum (e.g., the executable
4014  file). */
4016  return TARGET_XFER_EOF;
4017 
4018  old_chain = save_inferior_ptid ();
4019 
4020  if (ptid_lwp_p (inferior_ptid))
4021  inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
4022 
4023  xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4024  offset, len, xfered_len);
4025 
4026  do_cleanups (old_chain);
4027  return xfer;
4028 }
4029 
4030 static int
4031 linux_thread_alive (ptid_t ptid)
4032 {
4033  int err, tmp_errno;
4034 
4035  gdb_assert (ptid_lwp_p (ptid));
4036 
4037  /* Send signal 0 instead of using ptrace, because ptrace on a
4038  running thread errors out claiming that the thread doesn't
4039  exist. */
4040  err = kill_lwp (ptid_get_lwp (ptid), 0);
4041  tmp_errno = errno;
4042  if (debug_linux_nat)
4043  fprintf_unfiltered (gdb_stdlog,
4044  "LLTA: KILL(SIG0) %s (%s)\n",
4045  target_pid_to_str (ptid),
4046  err ? safe_strerror (tmp_errno) : "OK");
4047 
4048  if (err != 0)
4049  return 0;
4050 
4051  return 1;
4052 }
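The "signal 0" probe used above can also be written standalone (sketch; this version checks a whole process, whereas kill_lwp above targets an individual thread via tkill/tgkill):

#include <errno.h>
#include <signal.h>
#include <sys/types.h>

/* Return 1 if process PID still exists, 0 if it is gone.
   EPERM means it exists but we may not signal it. */
static int
pid_exists (pid_t pid)
{
  if (kill (pid, 0) == 0)
    return 1;
  return errno == EPERM;
}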
4053 
4054 static int
4055 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4056 {
4057  return linux_thread_alive (ptid);
4058 }
4059 
4060 /* Implement the to_update_thread_list target method for this
4061  target. */
4062 
4063 static void
4064 linux_nat_update_thread_list (struct target_ops *ops)
4065 {
4066  if (linux_supports_traceclone ())
4067  {
4068  /* With support for clone events, we add/delete threads from the
4069  list as clone/exit events are processed, so just try deleting
4070  exited threads still in the thread list. */
4071  delete_exited_threads ();
4072  }
4073  else
4074  prune_threads ();
4075 }
4076 
4077 static char *
4078 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4079 {
4080  static char buf[64];
4081 
4082  if (ptid_lwp_p (ptid)
4083  && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
4084  || num_lwps (ptid_get_pid (ptid)) > 1))
4085  {
4086  snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
4087  return buf;
4088  }
4089 
4090  return normal_pid_to_str (ptid);
4091 }
4092 
4093 static char *
4094 linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
4095 {
4096  int pid = ptid_get_pid (thr->ptid);
4097  long lwp = ptid_get_lwp (thr->ptid);
4098 #define FORMAT "/proc/%d/task/%ld/comm"
4099  char buf[sizeof (FORMAT) + 30];
4100  FILE *comm_file;
4101  char *result = NULL;
4102 
4103  snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4104  comm_file = gdb_fopen_cloexec (buf, "r");
4105  if (comm_file)
4106  {
4107  /* Not exported by the kernel, so we define it here. */
4108 #define COMM_LEN 16
4109  static char line[COMM_LEN + 1];
4110 
4111  if (fgets (line, sizeof (line), comm_file))
4112  {
4113  char *nl = strchr (line, '\n');
4114 
4115  if (nl)
4116  *nl = '\0';
4117  if (*line != '\0')
4118  result = line;
4119  }
4120 
4121  fclose (comm_file);
4122  }
4123 
4124 #undef COMM_LEN
4125 #undef FORMAT
4126 
4127  return result;
4128 }
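A self-contained sketch of the same /proc/<pid>/task/<lwp>/comm read (the 16-byte limit mirrors the kernel's TASK_COMM_LEN):

#include <stdio.h>
#include <string.h>

/* Read the name of thread LWP of process PID into BUF (at least 17
   bytes).  Returns BUF on success, NULL on failure. */
static char *
read_thread_comm (int pid, long lwp, char *buf, int len)
{
  char path[64];
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/task/%ld/comm", pid, lwp);
  f = fopen (path, "r");
  if (f == NULL)
    return NULL;
  if (fgets (buf, len, f) == NULL)
    {
      fclose (f);
      return NULL;
    }
  fclose (f);
  buf[strcspn (buf, "\n")] = '\0';
  return buf;
}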
4129 
4130 /* Accepts an integer PID; Returns a string representing a file that
4131  can be opened to get the symbols for the child process. */
4132 
4133 static char *
4134 linux_child_pid_to_exec_file (struct target_ops *self, int pid)
4135 {
4136  return linux_proc_pid_to_exec_file (pid);
4137 }
4138 
4139 /* Implement the to_xfer_partial interface for memory reads using the /proc
4140  filesystem. Because we can use a single read() call for /proc, this
4141  can be much more efficient than banging away at PTRACE_PEEKTEXT,
4142  but it doesn't support writes. */
4143 
4144 static enum target_xfer_status
4145 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4146  const char *annex, gdb_byte *readbuf,
4147  const gdb_byte *writebuf,
4148  ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
4149 {
4150  LONGEST ret;
4151  int fd;
4152  char filename[64];
4153 
4154  if (object != TARGET_OBJECT_MEMORY || !readbuf)
4155  return 0;
4156 
4157  /* Don't bother for one word. */
4158  if (len < 3 * sizeof (long))
4159  return TARGET_XFER_EOF;
4160 
4161  /* We could keep this file open and cache it - possibly one per
4162  thread. That requires some juggling, but is even faster. */
4163  xsnprintf (filename, sizeof filename, "/proc/%d/mem",
4164  ptid_get_pid (inferior_ptid));
4165  fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
4166  if (fd == -1)
4167  return TARGET_XFER_EOF;
4168 
4169  /* If pread64 is available, use it. It's faster if the kernel
4170  supports it (only one syscall), and it's 64-bit safe even on
4171  32-bit platforms (for instance, SPARC debugging a SPARC64
4172  application). */
4173 #ifdef HAVE_PREAD64
4174  if (pread64 (fd, readbuf, len, offset) != len)
4175 #else
4176  if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4177 #endif
4178  ret = 0;
4179  else
4180  ret = len;
4181 
4182  close (fd);
4183 
4184  if (ret == 0)
4185  return TARGET_XFER_EOF;
4186  else
4187  {
4188  *xfered_len = ret;
4189  return TARGET_XFER_OK;
4190  }
4191 }
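Outside GDB, the single-read idea described in the comment above looks roughly like this (sketch; the pread64/O_LARGEFILE handling used above is omitted, and PID must be accessible under the kernel's ptrace access rules):

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Read LEN bytes at address ADDR in process PID via /proc/PID/mem.
   Returns the number of bytes read, or -1 on error. */
static ssize_t
proc_mem_read (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof (path), "/proc/%d/mem", (int) pid);
  fd = open (path, O_RDONLY);
  if (fd == -1)
    return -1;
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}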
4192 
4193 
4194 /* Enumerate spufs IDs for process PID. */
4195 static LONGEST
4196 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
4197 {
4198  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
4199  LONGEST pos = 0;
4200  LONGEST written = 0;
4201  char path[128];
4202  DIR *dir;
4203  struct dirent *entry;
4204 
4205  xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4206  dir = opendir (path);
4207  if (!dir)
4208  return -1;
4209 
4210  rewinddir (dir);
4211  while ((entry = readdir (dir)) != NULL)
4212  {
4213  struct stat st;
4214  struct statfs stfs;
4215  int fd;
4216 
4217  fd = atoi (entry->d_name);
4218  if (!fd)
4219  continue;
4220 
4221  xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4222  if (stat (path, &st) != 0)
4223  continue;
4224  if (!S_ISDIR (st.st_mode))
4225  continue;
4226 
4227  if (statfs (path, &stfs) != 0)
4228  continue;
4229  if (stfs.f_type != SPUFS_MAGIC)
4230  continue;
4231 
4232  if (pos >= offset && pos + 4 <= offset + len)
4233  {
4234  store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4235  written += 4;
4236  }
4237  pos += 4;
4238  }
4239 
4240  closedir (dir);
4241  return written;
4242 }
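The spufs detection above reduces to a statfs f_type check, which in isolation can be sketched as:

#include <sys/vfs.h>

/* Return 1 if PATH is on a filesystem whose magic number is MAGIC
   (e.g. SPUFS_MAGIC), 0 if not, -1 if statfs fails. */
static int
fs_type_matches (const char *path, long magic)
{
  struct statfs stfs;

  if (statfs (path, &stfs) != 0)
    return -1;
  return stfs.f_type == magic ? 1 : 0;
}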
4243 
4244 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4245  object type, using the /proc file system. */
4246 
4247 static enum target_xfer_status
4248 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4249  const char *annex, gdb_byte *readbuf,
4250  const gdb_byte *writebuf,
4251  ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
4252 {
4253  char buf[128];
4254  int fd = 0;
4255  int ret = -1;
4256  int pid = ptid_get_pid (inferior_ptid);
4257 
4258  if (!annex)
4259  {
4260  if (!readbuf)
4261  return TARGET_XFER_E_IO;
4262  else
4263  {
4264  LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4265 
4266  if (l < 0)
4267  return TARGET_XFER_E_IO;
4268  else if (l == 0)
4269  return TARGET_XFER_EOF;
4270  else
4271  {
4272  *xfered_len = (ULONGEST) l;
4273  return TARGET_XFER_OK;
4274  }
4275  }
4276  }
4277 
4278  xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4279  fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
4280  if (fd <= 0)
4281  return TARGET_XFER_E_IO;
4282 
4283  if (offset != 0
4284  && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4285  {
4286  close (fd);
4287  return TARGET_XFER_EOF;
4288  }
4289 
4290  if (writebuf)
4291  ret = write (fd, writebuf, (size_t) len);
4292  else if (readbuf)
4293  ret = read (fd, readbuf, (size_t) len);
4294 
4295  close (fd);
4296 
4297  if (ret < 0)
4298  return TARGET_XFER_E_IO;
4299  else if (ret == 0)
4300  return TARGET_XFER_EOF;
4301  else
4302  {
4303  *xfered_len = (ULONGEST) ret;
4304  return TARGET_XFER_OK;
4305  }
4306 }
4307 
4308 
4309 /* Parse LINE as a signal set and add its set bits to SIGS. */
4310 
4311 static void
4312 add_line_to_sigset (const char *line, sigset_t *sigs)
4313 {
4314  int len = strlen (line) - 1;
4315  const char *p;
4316  int signum;
4317 
4318  if (line[len] != '\n')
4319  error (_("Could not parse signal set: %s"), line);
4320 
4321  p = line;
4322  signum = len * 4;
4323  while (len-- > 0)
4324  {
4325  int digit;
4326 
4327  if (*p >= '0' && *p <= '9')
4328  digit = *p - '0';
4329  else if (*p >= 'a' && *p <= 'f')
4330  digit = *p - 'a' + 10;
4331  else
4332  error (_("Could not parse signal set: %s"), line);
4333 
4334  signum -= 4;
4335 
4336  if (digit & 1)
4337  sigaddset (sigs, signum + 1);
4338  if (digit & 2)
4339  sigaddset (sigs, signum + 2);
4340  if (digit & 4)
4341  sigaddset (sigs, signum + 3);
4342  if (digit & 8)
4343  sigaddset (sigs, signum + 4);
4344 
4345  p++;
4346  }
4347 }
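Since Linux signal masks fit in 64 bits, the same decoding can be sketched more directly with strtoull; the parser above instead walks the hex digits and so avoids that width assumption:

#include <stdio.h>
#include <stdlib.h>

/* Print the signal numbers set in a hex mask string such as the value
   of a "SigPnd:" line, e.g. "0000000000000102" -> signals 2 and 9. */
static void
print_sigmask (const char *hex)
{
  unsigned long long mask = strtoull (hex, NULL, 16);
  int signo;

  for (signo = 1; signo <= 64; signo++)
    if (mask & (1ULL << (signo - 1)))
      printf ("%d\n", signo);
}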
4348 
4349 /* Find process PID's pending signals from /proc/pid/status and set
4350  SIGS to match. */
4351 
4352 void
4353 linux_proc_pending_signals (int pid, sigset_t *pending,
4354  sigset_t *blocked, sigset_t *ignored)
4355 {
4356  FILE *procfile;
4357  char buffer[PATH_MAX], fname[PATH_MAX];
4358  struct cleanup *cleanup;
4359 
4360  sigemptyset (pending);
4361  sigemptyset (blocked);
4362  sigemptyset (ignored);
4363  xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4364  procfile = gdb_fopen_cloexec (fname, "r");
4365  if (procfile == NULL)
4366  error (_("Could not open %s"), fname);
4367  cleanup = make_cleanup_fclose (procfile);
4368 
4369  while (fgets (buffer, PATH_MAX, procfile) != NULL)
4370  {
4371  /* Normal queued signals are on the SigPnd line in the status
4372  file. However, 2.6 kernels also have a "shared" pending
4373  queue for delivering signals to a thread group, so check for
4374  a ShdPnd line also.
4375 
4376  Unfortunately some Red Hat kernels include the shared pending
4377  queue but not the ShdPnd status field. */
4378 
4379  if (startswith (buffer, "SigPnd:\t"))
4380  add_line_to_sigset (buffer + 8, pending);
4381  else if (startswith (buffer, "ShdPnd:\t"))
4382  add_line_to_sigset (buffer + 8, pending);
4383  else if (startswith (buffer, "SigBlk:\t"))
4384  add_line_to_sigset (buffer + 8, blocked);
4385  else if (startswith (buffer, "SigIgn:\t"))
4386  add_line_to_sigset (buffer + 8, ignored);
4387  }
4388 
4389  do_cleanups (cleanup);
4390 }
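A standalone sketch of the /proc/<pid>/status scan that feeds the parser above (it only prints the raw lines instead of building sigsets):

#include <stdio.h>
#include <string.h>

/* Print the SigPnd/ShdPnd/SigBlk/SigIgn lines of /proc/PID/status,
   the same lines linux_proc_pending_signals parses above. */
static void
dump_signal_lines (int pid)
{
  char path[64], line[256];
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "SigPnd:", 7) == 0
        || strncmp (line, "ShdPnd:", 7) == 0
        || strncmp (line, "SigBlk:", 7) == 0
        || strncmp (line, "SigIgn:", 7) == 0)
      fputs (line, stdout);
  fclose (f);
}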
4391 
4392 static enum target_xfer_status
4393 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4394  const char *annex, gdb_byte *readbuf,
4395  const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4396  ULONGEST *xfered_len)
4397 {
4398  gdb_assert (object == TARGET_OBJECT_OSDATA);
4399 
4400  *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4401  if (*xfered_len == 0)
4402  return TARGET_XFER_EOF;
4403  else
4404  return TARGET_XFER_OK;
4405 }
4406 
4407 static enum target_xfer_status
4408 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4409  const char *annex, gdb_byte *readbuf,
4410  const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4411  ULONGEST *xfered_len)
4412 {
4413  enum target_xfer_status xfer;
4414 
4415  if (object == TARGET_OBJECT_AUXV)
4416  return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4417  offset, len, xfered_len);
4418 
4419  if (object == TARGET_OBJECT_OSDATA)
4420  return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4421  offset, len, xfered_len);
4422 
4423  if (object == TARGET_OBJECT_SPU)
4424  return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4425  offset, len, xfered_len);
4426 
4427  /* GDB calculates all the addresses in the possibly larger width of the
4428  address type. The address width needs to be masked before its final use - either by
4429  linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4430 
4431  Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4432 
4433  if (object == TARGET_OBJECT_MEMORY)
4434  {
4435  int addr_bit = gdbarch_addr_bit (target_gdbarch ());
4436 
4437  if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4438  offset &= ((ULONGEST) 1 << addr_bit) - 1;
4439  }
4440 
4441  xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4442  offset, len, xfered_len);
4443  if (xfer != TARGET_XFER_EOF)
4444  return xfer;
4445 
4446  return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4447  offset, len, xfered_len);
4448 }
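The masking described in the comment above, shown on its own (sketch; GDB's ULONGEST is at least 64 bits wide, hence the guard on ADDR_BIT):

/* Truncate ADDR to the low ADDR_BIT bits, e.g. a sign-extended
   0xffffffff80001000 becomes 0x80001000 when ADDR_BIT is 32. */
static unsigned long long
mask_address (unsigned long long addr, int addr_bit)
{
  if (addr_bit < 64)
    addr &= (1ULL << addr_bit) - 1;
  return addr;
}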
4449 
4450 static void
4451 cleanup_target_stop (void *arg)
4452 {
4453  ptid_t *ptid = (ptid_t *) arg;
4454 
4455  gdb_assert (arg != NULL);
4456 
4457  /* Unpause all */
4458  target_resume (*ptid, 0, GDB_SIGNAL_0);
4459 }
4460 
4461 static VEC (static_tracepoint_marker_p) *
4462 linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4463  const char *strid)
4464 {
4465  char s[IPA_CMD_BUF_SIZE];
4466  struct cleanup *old_chain;
4467  int pid = ptid_get_pid (inferior_ptid);
4468  VEC(static_tracepoint_marker_p) *markers = NULL;
4469  struct static_tracepoint_marker *marker = NULL;
4470  char *p = s;
4471  ptid_t ptid = ptid_build (pid, 0, 0);
4472 
4473  /* Pause all */
4474  target_stop (ptid);
4475 
4476  memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4477  s[sizeof ("qTfSTM")] = 0;
4478 
4479  agent_run_command (pid, s, strlen (s) + 1);
4480 
4481  old_chain = make_cleanup (free_current_marker, &marker);
4482  make_cleanup (cleanup_target_stop, &ptid);
4483 
4484  while (*p++ == 'm')
4485  {
4486  if (marker == NULL)
4487  marker = XCNEW (struct static_tracepoint_marker);
4488 
4489  do
4490  {
4491  parse_static_tracepoint_marker_definition (p, &p, marker);
4492 
4493  if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4494  {
4495  VEC_safe_push (static_tracepoint_marker_p,
4496  markers, marker);
4497  marker = NULL;
4498  }
4499  else
4500  {
4501  release_static_tracepoint_marker (marker);
4502  memset (marker, 0, sizeof (*marker));
4503  }
4504  }
4505  while (*p++ == ','); /* comma-separated list */
4506 
4507  memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4508  s[sizeof ("qTsSTM")] = 0;
4509  agent_run_command (pid, s, strlen (s) + 1);
4510  p = s;
4511  }
4512 
4513  do_cleanups (old_chain);
4514 
4515  return markers;
4516 }
4517 
4518 /* Create a prototype generic GNU/Linux target. The client can override
4519  it with local methods. */
4520 
4521 static void
4522 linux_target_install_ops (struct target_ops *t)
4523 {
4535 
4536  super_xfer_partial = t->to_xfer_partial;
4538 
4539  t->to_static_tracepoint_markers_by_strid
4540  = linux_child_static_tracepoint_markers_by_strid;
4541 }
4542 
4543 struct target_ops *
4544 linux_target (void)
4545 {
4546  struct target_ops *t;
4547 
4548  t = inf_ptrace_target ();
4549  linux_target_install_ops (t);
4550 
4551  return t;
4552 }
4553 
4554 struct target_ops *
4555 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4556 {
4557  struct target_ops *t;
4558 
4559  t = inf_ptrace_trad_target (register_u_offset);
4560  linux_target_install_ops (t);
4561 
4562  return t;
4563 }
4564 
4565 /* target_is_async_p implementation. */
4566 
4567 static int
4568 linux_nat_is_async_p (struct target_ops *ops)
4569 {
4570  return linux_is_async_p ();
4571 }
4572 
4573 /* target_can_async_p implementation. */
4574 
4575 static int
4576 linux_nat_can_async_p (struct target_ops *ops)
4577 {
4578  /* NOTE: palves 2008-03-21: We're only async when the user requests
4579  it explicitly with the "set target-async" command.
4580  Someday, linux will always be async. */
4581  return target_async_permitted;
4582 }
4583 
4584 static int
4585 linux_nat_supports_non_stop (struct target_ops *self)
4586 {
4587  return 1;
4588 }
4589 
4590 /* True if we want to support multi-process. To be removed when GDB
4591  supports multi-exec. */
4592 
4593 int linux_multi_process = 1;
4594 
4595 static int
4596 linux_nat_supports_multi_process (struct target_ops *self)
4597 {
4598  return linux_multi_process;
4599 }
4600 
4601 static int
4602 linux_nat_supports_disable_randomization (struct target_ops *self)
4603 {
4604 #ifdef HAVE_PERSONALITY
4605  return 1;
4606 #else
4607  return 0;
4608 #endif
4609 }
4610 
4611 static int async_terminal_is_ours = 1;
4612 
4613 /* target_terminal_inferior implementation.
4614 
4615  This is a wrapper around child_terminal_inferior to add async support. */
4616 
4617 static void
4618 linux_nat_terminal_inferior (struct target_ops *self)
4619 {
4620  /* Like target_terminal_inferior, use target_can_async_p, not
4621  target_is_async_p, since at this point the target is not async
4622  yet. If it can async, then we know it will become async prior to
4623  resume. */
4624  if (!target_can_async_p ())
4625  {
4626  /* Async mode is disabled. */
4627  child_terminal_inferior (self);
4628  return;
4629  }
4630 
4631  child_terminal_inferior (self);
4632 
4633  /* Calls to target_terminal_*() are meant to be idempotent. */
4634  if (!async_terminal_is_ours)
4635  return;
4636 
4638  async_terminal_is_ours = 0;
4639  set_sigint_trap ();
4640 }
4641 
4642 /* target_terminal_ours implementation.
4643 
4644  This is a wrapper around child_terminal_ours to add async support (and
4645  implement the target_terminal_ours vs target_terminal_ours_for_output
4646  distinction). child_terminal_ours is currently no different than
4647  child_terminal_ours_for_output.
4648  We leave target_terminal_ours_for_output alone, leaving it to
4649  child_terminal_ours_for_output. */
4650 
4651 static void
4652 linux_nat_terminal_ours (struct target_ops *self)
4653 {
4654  /* GDB should never give the terminal to the inferior if the
4655  inferior is running in the background (run&, continue&, etc.),
4656  but claiming it sure should. */
4657  child_terminal_ours (self);
4658 
4659  if (async_terminal_is_ours)
4660  return;
4661 
4662  clear_sigint_trap ();
4664  async_terminal_is_ours = 1;
4665 }
4666 
4667 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4668  it notifies the event-loop when any child changes state; and it
4669  allows us to use sigsuspend in linux_nat_wait_1 above to wait for
4670  the arrival of a SIGCHLD. */
4671 
4672 static void
4673 sigchld_handler (int signo)
4674 {
4675  int old_errno = errno;
4676 
4677  if (debug_linux_nat)
4678  ui_file_write_async_safe (gdb_stdlog,
4679  "sigchld\n", sizeof ("sigchld\n") - 1);
4680 
4681  if (signo == SIGCHLD
4682  && linux_nat_event_pipe[0] != -1)
4683  async_file_mark (); /* Let the event loop know that there are
4684  events to handle. */
4685 
4686  errno = old_errno;
4687 }
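The handler relies on the classic self-pipe trick; stripped of GDB specifics it looks like this (sketch; event_pipe is assumed to be a non-blocking pipe created before the handler is installed):

#include <errno.h>
#include <unistd.h>

static int event_pipe[2];  /* assumed: non-blocking pipe, already created */

/* Async-signal-safe SIGCHLD handler: write one byte to wake the event
   loop, preserving errno for the interrupted code. */
static void
mark_sigchld (int signo)
{
  int saved_errno = errno;
  unsigned char byte = 0;

  (void) signo;
  while (write (event_pipe[1], &byte, 1) == -1 && errno == EINTR)
    ;
  errno = saved_errno;
}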
4688 
4689 /* Callback registered with the target events file descriptor. */
4690 
4691 static void
4692 handle_target_event (int error, gdb_client_data client_data)
4693 {
4695 }
4696 
4697 /* Create/destroy the target events pipe. Returns previous state. */
4698 
4699 static int
4700 linux_async_pipe (int enable)
4701 {
4702  int previous = linux_is_async_p ();
4703 
4704  if (previous != enable)
4705  {
4706  sigset_t prev_mask;
4707 
4708  /* Block child signals while we create/destroy the pipe, as
4709  their handler writes to it. */
4710  block_child_signals (&prev_mask);
4711 
4712  if (enable)
4713  {
4714  if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
4715  internal_error (__FILE__, __LINE__,
4716  "creating event pipe failed.");
4717 
4718  fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4719  fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4720  }
4721  else
4722  {
4723  close (linux_nat_event_pipe[0]);
4724  close (linux_nat_event_pipe[1]);
4725  linux_nat_event_pipe[0] = -1;
4726  linux_nat_event_pipe[1] = -1;
4727  }
4728 
4729  restore_child_signals_mask (&prev_mask);
4730  }
4731 
4732  return previous;
4733 }
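Creating such a pipe outside GDB takes only a few calls (sketch of what gdb_pipe_cloexec plus the fcntl calls above accomplish):

#include <fcntl.h>
#include <unistd.h>

/* Create a close-on-exec, non-blocking pipe in FDS.
   Returns 0 on success, -1 on failure. */
static int
make_event_pipe (int fds[2])
{
  if (pipe (fds) == -1)
    return -1;
  fcntl (fds[0], F_SETFD, FD_CLOEXEC);
  fcntl (fds[1], F_SETFD, FD_CLOEXEC);
  fcntl (fds[0], F_SETFL, O_NONBLOCK);
  fcntl (fds[1], F_SETFL, O_NONBLOCK);
  return 0;
}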
4734 
4735 /* target_async implementation. */
4736 
4737 static void
4738 linux_nat_async (struct target_ops *ops, int enable)
4739 {
4740  if (enable)
4741  {
4742  if (!linux_async_pipe (1))
4743  {
4744  add_file_handler (linux_nat_event_pipe[0],
4745  handle_target_event, NULL);
4746  /* There may be pending events to handle. Tell the event loop
4747  to poll them. */
4748  async_file_mark ();
4749  }
4750  }
4751  else
4752  {
4753  delete_file_handler (linux_nat_event_pipe[0]);
4754  linux_async_pipe (0);
4755  }
4756  return;
4757 }
4758 
4759 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4760  event came out. */
4761 
4762 static int
4763 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4764 {
4765  if (!lwp->stopped)
4766  {
4767  if (debug_linux_nat)
4768  fprintf_unfiltered (gdb_stdlog,
4769  "LNSL: running -> suspending %s\n",
4770  target_pid_to_str (lwp->ptid));
4771 
4772 
4773  if (lwp->last_resume_kind == resume_stop)
4774  {
4775  if (debug_linux_nat)
4776  fprintf_unfiltered (gdb_stdlog,
4777  "linux-nat: already stopping LWP %ld at "
4778  "GDB's request\n",
4779  ptid_get_lwp (lwp->ptid));
4780  return 0;
4781  }
4782 
4783  stop_callback (lwp, NULL);
4785  }
4786  else
4787  {
4788  /* Already known to be stopped; do nothing. */
4789 
4790  if (debug_linux_nat)
4791  {
4792  if (find_thread_ptid (lwp->ptid)->stop_requested)
4793  fprintf_unfiltered (gdb_stdlog,
4794  "LNSL: already stopped/stop_requested %s\n",
4795  target_pid_to_str (lwp->ptid));
4796  else
4797  fprintf_unfiltered (gdb_stdlog,
4798  "LNSL: already stopped/no "
4799  "stop_requested yet %s\n",
4800  target_pid_to_str (lwp->ptid));
4801  }
4802  }
4803  return 0;
4804 }
4805 
4806 static void
4807 linux_nat_stop (struct target_ops *self, ptid_t ptid)
4808 {
4809  if (non_stop)
4810  iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4811  else
4812  linux_ops->to_stop (linux_ops, ptid);
4813 }
4814 
4815 static void
4816 linux_nat_close (struct target_ops *self)
4817 {
4818  /* Unregister from the event loop. */
4819  if (linux_nat_is_async_p (self))
4820  linux_nat_async (self, 0);
4821 
4822  if (linux_ops->to_close)
4823  linux_ops->to_close (linux_ops);
4824 
4825  super_close (self);
4826 }
4827 
4828 /* When requests are passed down from the linux-nat layer to the
4829  single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4830  used. The address space pointer is stored in the inferior object,
4831  but the common code that is passed such ptid can't tell whether
4832  lwpid is a "main" process id or not (it assumes so). We reverse
4833  look up the "main" process id from the lwp here. */
4834 
4835 static struct address_space *
4836 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4837 {
4838  struct lwp_info *lwp;
4839  struct inferior *inf;
4840  int pid;
4841 
4842  if (ptid_get_lwp (ptid) == 0)
4843  {
4844  /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4845  tgid. */
4846  lwp = find_lwp_pid (ptid);
4847  pid = ptid_get_pid (lwp->ptid);
4848  }
4849  else
4850  {
4851  /* A (pid,lwpid,0) ptid. */
4852  pid = ptid_get_pid (ptid);
4853  }
4854 
4855  inf = find_inferior_pid (pid);
4856  gdb_assert (inf != NULL);
4857  return inf->aspace;
4858 }
4859 
4860 /* Return the cached value of the processor core for thread PTID. */
4861 
4862 static int
4863 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4864 {
4865  struct lwp_info *info = find_lwp_pid (ptid);
4866 
4867  if (info)
4868  return info->core;
4869  return -1;
4870 }
4871 
4872 /* Implementation of to_filesystem_is_local. */
4873 
4874 static int
4875 linux_nat_filesystem_is_local (struct target_ops *ops)
4876 {
4877  struct inferior *inf = current_inferior ();
4878 
4879  if (inf->fake_pid_p || inf->pid == 0)
4880  return 1;
4881 
4882  return linux_ns_same (inf->pid, LINUX_NS_MNT);
4883 }
4884 
4885 /* Convert the INF argument passed to a to_fileio_* method
4886  to a process ID suitable for passing to its corresponding
4887  linux_mntns_* function. If INF is non-NULL then the
4888  caller is requesting the filesystem seen by INF. If INF
4889  is NULL then the caller is requesting the filesystem seen
4890  by GDB itself. We fall back to GDB's filesystem in the case
4891  that INF is non-NULL but its PID is unknown. */
4892 
4893 static pid_t
4894 linux_nat_fileio_pid_of (struct inferior *inf)
4895 {
4896  if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4897  return getpid ();
4898  else
4899  return inf->pid;
4900 }
4901 
4902 /* Implementation of to_fileio_open. */
4903 
4904 static int
4905 linux_nat_fileio_open (struct target_ops *self,
4906  struct inferior *inf, const char *filename,
4907  int flags, int mode, int warn_if_slow,
4908  int *target_errno)
4909 {
4910  int nat_flags;
4911  mode_t nat_mode;
4912  int fd;
4913 
4914  if (fileio_to_host_openflags (flags, &nat_flags) == -1
4915  || fileio_to_host_mode (mode, &nat_mode) == -1)
4916  {
4917  *target_errno = FILEIO_EINVAL;
4918  return -1;
4919  }
4920 
4921  fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4922  filename, nat_flags, nat_mode);
4923  if (fd == -1)
4924  *target_errno = host_to_fileio_error (errno);
4925 
4926  return fd;
4927 }
4928 
4929 /* Implementation of to_fileio_readlink. */
4930 
4931 static char *
4932 linux_nat_fileio_readlink (struct target_ops *self,
4933  struct inferior *inf, const char *filename,
4934  int *target_errno)
4935 {
4936  char buf[PATH_MAX];
4937  int len;
4938  char *ret;
4939 
4940  len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4941  filename, buf, sizeof (buf));
4942  if (len < 0)
4943  {
4944  *target_errno = host_to_fileio_error (errno);
4945  return NULL;
4946  }
4947 
4948  ret = xmalloc (len + 1);
4949  memcpy (ret, buf, len);
4950  ret[len] = '\0';
4951  return ret;
4952 }
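The explicit NUL-termination above is needed because readlink(2) does not terminate its result; a standalone version using plain readlink instead of the namespace-aware linux_mntns_readlink looks like this (sketch):

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Return a malloc'd, NUL-terminated copy of the target of symlink PATH,
   or NULL on error. */
static char *
readlink_dup (const char *path)
{
  char buf[PATH_MAX];
  ssize_t len = readlink (path, buf, sizeof (buf));
  char *ret;

  if (len < 0)
    return NULL;
  ret = malloc (len + 1);
  if (ret == NULL)
    return NULL;
  memcpy (ret, buf, len);
  ret[len] = '\0';
  return ret;
}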
4953 
4954 /* Implementation of to_fileio_unlink. */
4955 
4956 static int
4957 linux_nat_fileio_unlink (struct target_ops *self,
4958  struct inferior *inf, const char *filename,
4959  int *target_errno)
4960 {
4961  int ret;
4962 
4963  ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4964  filename);
4965  if (ret == -1)
4966  *target_errno = host_to_fileio_error (errno);
4967 
4968  return ret;
4969 }
4970 
4971 void
4972 linux_nat_add_target (struct target_ops *t)
4973 {
4974  /* Save the provided single-threaded target. We save this in a separate
4975  variable because another target we've inherited from (e.g. inf-ptrace)
4976  may have saved a pointer to T; we want to use it for the final
4977  process stratum target. */
4978  linux_ops_saved = *t;
4979  linux_ops = &linux_ops_saved;
4980 
4981  /* Override some methods for multithreading. */
4986  t->to_wait = linux_nat_wait;
4989  t->to_kill = linux_nat_kill;
5003 
5010 
5011  super_close = t->to_close;
5013 
5014  /* Methods for non-stop support. */
5015  t->to_stop = linux_nat_stop;
5016 
5018 
5021 
5023 
5028 
5029  /* We don't change the stratum; this target will sit at
5030  process_stratum and thread_db will set at thread_stratum. This
5031  is a little strange, since this is a multi-threaded-capable
5032  target, but we want to be on the stack below thread_db, and we
5033  also want to be used for single-threaded processes. */
5034 
5035  add_target (t);
5036 }
5037 
5038 /* Register a method to call whenever a new thread is attached. */
5039 void
5040 linux_nat_set_new_thread (struct target_ops *t,
5041  void (*new_thread) (struct lwp_info *))
5042 {
5043  /* Save the pointer. We only support a single registered instance
5044  of the GNU/Linux native target, so we do not need to map this to
5045  T. */
5046  linux_nat_new_thread = new_thread;
5047 }
5048 
5049 /* See declaration in linux-nat.h. */
5050 
5051 void
5052 linux_nat_set_new_fork (struct target_ops *t,
5053  linux_nat_new_fork_ftype *new_fork)
5054 {
5055  /* Save the pointer. */
5056  linux_nat_new_fork = new_fork;
5057 }
5058 
5059 /* See declaration in linux-nat.h. */
5060 
5061 void
5062 linux_nat_set_forget_process (struct target_ops *t,
5063  linux_nat_forget_process_ftype *fn)
5064 {
5065  /* Save the pointer. */
5066  linux_nat_forget_process_hook = fn;
5067 }
5068 
5069 /* See declaration in linux-nat.h. */
5070 
5071 void
5072 linux_nat_forget_process (pid_t pid)
5073 {
5074  if (linux_nat_forget_process_hook != NULL)
5075  linux_nat_forget_process_hook (pid);
5076 }
5077 
5078 /* Register a method that converts a siginfo object between the layout
5079  that ptrace returns, and the layout in the architecture of the
5080  inferior. */
5081 void
5082 linux_nat_set_siginfo_fixup (struct target_ops *t,
5083  int (*siginfo_fixup) (siginfo_t *,
5084  gdb_byte *,
5085  int))
5086 {
5087  /* Save the pointer. */
5088  linux_nat_siginfo_fixup = siginfo_fixup;
5089 }
5090 
5091 /* Register a method to call prior to resuming a thread. */
5092 
5093 void
5094 linux_nat_set_prepare_to_resume (struct target_ops *t,
5095  void (*prepare_to_resume) (struct lwp_info *))
5096 {
5097  /* Save the pointer. */
5098  linux_nat_prepare_to_resume = prepare_to_resume;
5099 }
5100 
5101 /* See linux-nat.h. */
5102 
5103 int
5104 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
5105 {
5106  int pid;
5107 
5108  pid = ptid_get_lwp (ptid);
5109  if (pid == 0)
5110  pid = ptid_get_pid (ptid);
5111 
5112  errno = 0;
5113  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
5114  if (errno != 0)
5115  {
5116  memset (siginfo, 0, sizeof (*siginfo));
5117  return 0;
5118  }
5119  return 1;
5120 }
5121 
5122 /* See nat/linux-nat.h. */
5123 
5124 ptid_t
5125 current_lwp_ptid (void)
5126 {
5128  return inferior_ptid;
5129 }
5130 
5131 /* Provide a prototype to silence -Wmissing-prototypes. */
5132 extern initialize_file_ftype _initialize_linux_nat;
5133 
5134 void
5135 _initialize_linux_nat (void)
5136 {
5138  &debug_linux_nat, _("\
5139 Set debugging of GNU/Linux lwp module."), _("\
5140 Show debugging of GNU/Linux lwp module."), _("\
5141 Enables printf debugging output."),
5142  NULL,
5145 
5146  add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
5148 Set debugging of GNU/Linux namespaces module."), _("\
5149 Show debugging of GNU/Linux namespaces module."), _("\
5150 Enables printf debugging output."),
5151  NULL,
5152  NULL,
5154 
5155  /* Save this mask as the default. */
5156  sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5157 
5158  /* Install a SIGCHLD handler. */
5159  sigchld_action.sa_handler = sigchld_handler;
5160  sigemptyset (&sigchld_action.sa_mask);
5161  sigchld_action.sa_flags = SA_RESTART;
5162 
5163  /* Make it the default. */
5164  sigaction (SIGCHLD, &sigchld_action, NULL);
5165 
5166  /* Make sure we don't block SIGCHLD during a sigsuspend. */
5167  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5168  sigdelset (&suspend_mask, SIGCHLD);
5169 
5170  sigemptyset (&blocked_mask);
5171 }
5172 
5173 
5174 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5175  the GNU/Linux Threads library and therefore doesn't really belong
5176  here. */
5177 
5178 /* Read variable NAME in the target and return its value if found.
5179  Otherwise return zero. It is assumed that the type of the variable
5180  is `int'. */
5181 
5182 static int
5183 get_signo (const char *name)
5184 {
5185  struct bound_minimal_symbol ms;
5186  int signo;
5187 
5188  ms = lookup_minimal_symbol (name, NULL, NULL);
5189  if (ms.minsym == NULL)
5190  return 0;
5191 
5192  if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
5193  sizeof (signo)) != 0)
5194  return 0;
5195 
5196  return signo;
5197 }
5198 
5199 /* Return the set of signals used by the threads library in *SET. */
5200 
5201 void
5202 lin_thread_get_thread_signals (sigset_t *set)
5203 {
5204  struct sigaction action;
5205  int restart, cancel;
5206 
5207  sigemptyset (&blocked_mask);
5208  sigemptyset (set);
5209 
5210  restart = get_signo ("__pthread_sig_restart");
5211  cancel = get_signo ("__pthread_sig_cancel");
5212 
5213  /* LinuxThreads normally uses the first two RT signals, but in some legacy
5214  cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5215  not provide any way for the debugger to query the signal numbers -
5216  fortunately they don't change! */
5217 
5218  if (restart == 0)
5219  restart = __SIGRTMIN;
5220 
5221  if (cancel == 0)
5222  cancel = __SIGRTMIN + 1;
5223 
5224  sigaddset (set, restart);
5225  sigaddset (set, cancel);
5226 
5227  /* The GNU/Linux Threads library makes terminating threads send a
5228  special "cancel" signal instead of SIGCHLD. Make sure we catch
5229  those (to prevent them from terminating GDB itself, which is
5230  likely to be their default action) and treat them the same way as
5231  SIGCHLD. */
5232 
5233  action.sa_handler = sigchld_handler;
5234  sigemptyset (&action.sa_mask);
5235  action.sa_flags = SA_RESTART;
5236  sigaction (cancel, &action, NULL);
5237 
5238  /* We block the "cancel" signal throughout this code ... */
5239  sigaddset (&blocked_mask, cancel);
5240  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5241 
5242  /* ... except during a sigsuspend. */
5243  sigdelset (&suspend_mask, cancel);
5244  sigdelset (&suspend_mask, cancel);
5245 }
void get_last_target_status(ptid_t *ptidp, struct target_waitstatus *status)
Definition: infrun.c:3408
struct gdbarch * target_gdbarch(void)
Definition: gdbarch.c:5143
void linux_unstop_all_lwps(void)
Definition: linux-nat.c:2399
int step
Definition: linux-nat.h:74
#define target_can_async_p()
Definition: target.h:1748
int gdbarch_software_single_step_p(struct gdbarch *gdbarch)
Definition: gdbarch.c:3009
struct address_space *(* to_thread_address_space)(struct target_ops *, ptid_t) TARGET_DEFAULT_FUNC(default_thread_address_space)
Definition: target.h:854
#define IPA_CMD_BUF_SIZE
Definition: agent.h:35
struct arch_lwp_info * arch_private
Definition: linux-nat.h:107
void add_target(struct target_ops *t)
Definition: target.c:395
static int get_signo(const char *name)
Definition: linux-nat.c:5183
void target_terminal_ours(void)
Definition: target.c:491
ssize_t read(int fd, void *buf, size_t count)
Definition: expect-read1.c:26
#define PTRACE_GETEVENTMSG
Definition: linux-ptrace.h:59
void release_static_tracepoint_marker(struct static_tracepoint_marker *marker)
Definition: tracepoint.c:3941
static void save_sigtrap(struct lwp_info *lp)
Definition: linux-nat.c:2506
struct thread_info * add_thread(ptid_t ptid)
Definition: thread.c:305
struct thread_info * find_thread_ptid(ptid_t ptid)
Definition: thread.c:393
int(* to_supports_stopped_by_hw_breakpoint)(struct target_ops *) TARGET_DEFAULT_RETURN(0)
Definition: target.h:513
int catch_syscall_enabled(void)
int(* to_supports_disable_randomization)(struct target_ops *)
Definition: target.h:821
static sigset_t blocked_mask
Definition: linux-nat.c:711
static void delete_lwp_cleanup(void *lp_voidp)
Definition: linux-nat.c:439
int(* to_is_async_p)(struct target_ops *) TARGET_DEFAULT_RETURN(0)
Definition: target.h:662
static void sigchld_handler(int signo)
Definition: linux-nat.c:4673
void target_stop(ptid_t ptid)
Definition: target.c:3289
#define WNOHANG
Definition: gdb_wait.h:102
int ptid_is_pid(ptid_t ptid)
Definition: ptid.c:86
void add_setshow_zuinteger_cmd(const char *name, enum command_class theclass, unsigned int *var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_sfunc_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
Definition: cli-decode.c:763
static enum target_xfer_status linux_proc_xfer_spu(struct target_ops *ops, enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
Definition: linux-nat.c:4248
struct arch_lwp_info * lwp_arch_private_info(struct lwp_info *lwp)
Definition: linux-nat.c:312
static int linux_child_remove_fork_catchpoint(struct target_ops *self, int pid)
Definition: linux-nat.c:621
int ptid_equal(ptid_t ptid1, ptid_t ptid2)
Definition: ptid.c:76
bfd_vma CORE_ADDR
Definition: common-types.h:41
Definition: target.h:98
void lin_thread_get_thread_signals(sigset_t *set)
Definition: linux-nat.c:5202
struct lwp_info * next
Definition: linux-nat.h:110
static struct lwp_info * add_lwp(ptid_t ptid)
Definition: linux-nat.c:846
void linux_proc_pending_signals(int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
Definition: linux-nat.c:4353
struct lwp_info * lwp_list
Definition: linux-nat.c:700
static struct target_ops linux_ops_saved
Definition: linux-nat.c:174
struct regcache * get_thread_regcache(ptid_t ptid)
Definition: regcache.c:529
void linux_stop_lwp(struct lwp_info *lwp)
Definition: linux-nat.c:2378
void delete_thread(ptid_t)
Definition: thread.c:368
static void block_child_signals(sigset_t *prev_mask)
Definition: linux-nat.c:720
int linux_fork_checkpointing_p(int pid)
Definition: linux-fork.c:617
static int kill_wait_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:3801
void xfree(void *)
Definition: common-utils.c:97
static void linux_nat_create_inferior(struct target_ops *ops, char *exec_file, char *allargs, char **env, int from_tty)
Definition: linux-nat.c:1181
void set_running(ptid_t ptid, int running)
Definition: thread.c:772
#define SYSCALL_SIGTRAP
Definition: linux-nat.h:31
static int linux_nat_event_pipe[2]
Definition: linux-nat.c:225
static int linux_nat_resume_callback(struct lwp_info *lp, void *except)
Definition: linux-nat.c:1705
struct target_waitstatus pending_follow
Definition: gdbthread.h:258
#define PTRACE_EVENT_EXEC
Definition: linux-ptrace.h:74
static sigset_t normal_mask
Definition: linux-nat.c:704
#define TRAP_HWBKPT
struct gdbarch * get_regcache_arch(const struct regcache *regcache)
Definition: regcache.c:297
void linux_nat_set_new_thread(struct target_ops *t, void(*new_thread)(struct lwp_info *))
Definition: linux-nat.c:5040
#define BMSYMBOL_VALUE_ADDRESS(symbol)
Definition: symtab.h:393
int hardware_breakpoint_inserted_here_p(struct address_space *aspace, CORE_ADDR pc)
Definition: breakpoint.c:4302
void warning(const char *fmt,...)
Definition: errors.c:26
struct sigaction sigchld_action
Definition: linux-nat.c:714
static int resume_stopped_resumed_lwps(struct lwp_info *lp, void *data)
Definition: linux-nat.c:3652
int fileio_to_host_mode(int fileio_mode, mode_t *mode_p)
Definition: fileio.c:115
static void add_line_to_sigset(const char *line, sigset_t *sigs)
Definition: linux-nat.c:4312
char *(* to_pid_to_str)(struct target_ops *, ptid_t) TARGET_DEFAULT_FUNC(default_pid_to_str)
Definition: target.h:631
int input_fd
Definition: event-top.c:120
static void linux_nat_attach(struct target_ops *ops, const char *args, int from_tty)
Definition: linux-nat.c:1267
int linux_wstatus_maybe_breakpoint(int wstat)
Definition: linux-ptrace.c:603
int lin_lwp_attach_lwp(ptid_t ptid)
Definition: linux-nat.c:1054
int disable_randomization
Definition: infrun.c:151
tuple inf
Definition: arm-linux.py:13
static struct address_space * linux_nat_thread_address_space(struct target_ops *t, ptid_t ptid)
Definition: linux-nat.c:4836
ptid_t ptid
Definition: linux-nat.h:34
int gdb_pipe_cloexec(int filedes[2])
Definition: filestuff.c:381
static int linux_child_remove_vfork_catchpoint(struct target_ops *self, int pid)
Definition: linux-nat.c:633
int linux_is_extended_waitstatus(int wstat)
Definition: linux-ptrace.c:595
static int pull_pid_from_list(struct simple_pid_list **listp, int pid, int *statusp)
Definition: linux-nat.c:359
static void linux_nat_detach(struct target_ops *ops, const char *args, int from_tty)
Definition: linux-nat.c:1509
int thread_db_notice_clone(ptid_t parent, ptid_t child)
void linux_fork_killall(void)
Definition: linux-fork.c:323
static linux_nat_forget_process_ftype * linux_nat_forget_process_hook
Definition: linux-nat.c:184
static void add_to_pid_list(struct simple_pid_list **listp, int pid, int status)
Definition: linux-nat.c:337
int(* to_core_of_thread)(struct target_ops *, ptid_t ptid) TARGET_DEFAULT_RETURN(-1)
Definition: target.h:1042
static void linux_nat_mourn_inferior(struct target_ops *ops)
Definition: linux-nat.c:3903
int pid
Definition: inferior.h:299
int(* to_follow_fork)(struct target_ops *, int, int) TARGET_DEFAULT_FUNC(default_follow_fork)
Definition: target.h:596
int ptid_match(ptid_t ptid, ptid_t filter)
Definition: ptid.c:120
void internal_error(const char *file, int line, const char *fmt,...)
Definition: errors.c:50
struct thread_info * inferior_thread(void)
Definition: thread.c:85
static int linux_nat_has_pending_sigint(int pid)
Definition: linux-nat.c:2408
void(* to_close)(struct target_ops *)
Definition: target.h:448
void parse_static_tracepoint_marker_definition(char *line, char **pp, struct static_tracepoint_marker *marker)
Definition: tracepoint.c:3903
static int linux_nat_stopped_by_hw_breakpoint(struct target_ops *ops)
Definition: linux-nat.c:2903
enum target_stop_reason lwp_stop_reason(struct lwp_info *lwp)
Definition: linux-nat.c:328
#define linux_is_async_p()
Definition: linux-nat.c:228
#define __WCLONE
Definition: gdb_wait.h:110
static enum target_xfer_status linux_xfer_partial(struct target_ops *ops, enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
Definition: linux-nat.c:4408
#define GDB_ARCH_TRAP_BRKPT
Definition: linux-ptrace.h:143
static enum target_xfer_status linux_xfer_siginfo(struct target_ops *ops, enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
Definition: linux-nat.c:3945
void linux_fork_mourn_inferior(void)
Definition: linux-fork.c:355
static sigset_t pass_mask
Definition: linux-nat.c:739
void set_sigint_trap(void)
Definition: inflow.c:795
void child_terminal_inferior(struct target_ops *self)
Definition: inflow.c:287
#define VEC_safe_push(T, V, O)
Definition: vec.h:260
#define target_post_attach(pid)
Definition: target.h:1278
struct inferior * find_inferior_ptid(ptid_t ptid)
Definition: inferior.c:373
FILE * gdb_fopen_cloexec(const char *filename, const char *opentype)
Definition: filestuff.c:304
ptid_t(* to_wait)(struct target_ops *, ptid_t, struct target_waitstatus *, int TARGET_DEBUG_PRINTER(target_debug_print_options)) TARGET_DEFAULT_NORETURN(noprocess())
Definition: target.h:468
static void resume_lwp(struct lwp_info *lp, int step, enum gdb_signal signo)
Definition: linux-nat.c:1658
char * target_pid_to_str(ptid_t ptid)
Definition: target.c:2233
#define _(String)
Definition: gdb_locale.h:40
static int attach_proc_task_lwp_callback(ptid_t ptid)
Definition: linux-nat.c:1204
static int linux_nat_core_of_thread(struct target_ops *ops, ptid_t ptid)
Definition: linux-nat.c:4863
static int linux_nat_post_attach_wait(ptid_t ptid, int first, int *cloned, int *signalled)
Definition: linux-nat.c:978
enum gdb_signal stop_signal
Definition: gdbthread.h:159
struct target_ops * inf_ptrace_target(void)
Definition: inf-ptrace.c:668
static int set_ignore_sigint(struct lwp_info *lp, void *data)
Definition: linux-nat.c:2424
static struct target_ops * linux_ops
Definition: linux-nat.c:173
void linux_nat_set_siginfo_fixup(struct target_ops *t, int(*siginfo_fixup)(siginfo_t *, gdb_byte *, int))
Definition: linux-nat.c:5082
static int resume_clear_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:1729
int linux_nat_get_siginfo(ptid_t ptid, siginfo_t *siginfo)
Definition: linux-nat.c:5104
static void linux_nat_terminal_inferior(struct target_ops *self)
Definition: linux-nat.c:4618
static void restore_personality(void *arg)
void regcache_write_pc(struct regcache *regcache, CORE_ADDR pc)
Definition: regcache.c:1201
static int select_singlestep_lwp_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:2736
static void handle_target_event(int error, gdb_client_data client_data)
Definition: linux-nat.c:4692
void linux_nat_set_new_fork(struct target_ops *t, linux_nat_new_fork_ftype *new_fork)
Definition: linux-nat.c:5052
void child_terminal_ours(struct target_ops *self)
Definition: inflow.c:385
#define END_CATCH
struct target_ops * inf_ptrace_trad_target(CORE_ADDR(*register_u_offset)(struct gdbarch *, int, int))
Definition: inf-ptrace.c:827
static int running_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:2711
void( linux_nat_new_fork_ftype)(struct lwp_info *parent, pid_t child_pid)
Definition: linux-nat.h:176
static int resumed_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:2991
static int select_event_lwp_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:2759
static target_xfer_partial_ftype * super_xfer_partial
Definition: linux-nat.c:198
PTRACE_TYPE_RET ptrace()
static void linux_nat_stop(struct target_ops *self, ptid_t ptid)
Definition: linux-nat.c:4807
void store_unsigned_integer(gdb_byte *, int, enum bfd_endian, ULONGEST)
Definition: findvar.c:212
ptid_t ptid_of_lwp(struct lwp_info *lwp)
Definition: linux-nat.c:295
int breakpoint_inserted_here_p(struct address_space *aspace, CORE_ADDR pc)
Definition: breakpoint.c:4256
const char * paddress(struct gdbarch *gdbarch, CORE_ADDR addr)
Definition: utils.c:2743
Definition: ptid.h:35
#define PTRACE_EVENT_VFORK_DONE
Definition: linux-ptrace.h:75
static void linux_nat_close(struct target_ops *self)
Definition: linux-nat.c:4816
void add_file_handler(int fd, handler_func *proc, gdb_client_data client_data)
Definition: event-loop.c:388
int(* to_remove_exec_catchpoint)(struct target_ops *, int) TARGET_DEFAULT_RETURN(1)
Definition: target.h:600
int linux_multi_process
Definition: linux-nat.c:4593
void linux_ptrace_attach_fail_reason(pid_t pid, struct buffer *buffer)
Definition: linux-ptrace.c:36
ptid_t ptid_build(int pid, long lwp, long tid)
Definition: ptid.c:31
struct target_waitstatus waitstatus
Definition: linux-nat.h:94
int must_set_ptrace_flags
Definition: linux-nat.h:38
static int detach_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:1458
#define ALL_INFERIORS(I)
Definition: inferior.h:504
int debug_linux_namespaces
int fileio_to_host_openflags(int fileio_open_flags, int *open_flags_p)
Definition: fileio.c:81
mach_port_t kern_return_t mach_port_t msgports mach_port_t kern_return_t pid_t pid mach_port_t kern_return_t mach_port_t task mach_port_t kern_return_t int flags
Definition: gnu-nat.c:1885
#define TRY
static int linux_nat_filesystem_is_local(struct target_ops *ops)
Definition: linux-nat.c:4875
static void linux_nat_pass_signals(struct target_ops *self, int numsigs, unsigned char *pass_signals)
Definition: linux-nat.c:743
static int linux_handle_extended_wait(struct lwp_info *lp, int status)
Definition: linux-nat.c:2003
#define PTRACE_EVENT_FORK
Definition: linux-ptrace.h:71
const char *const name
Definition: aarch64-tdep.c:68
int(* to_remove_fork_catchpoint)(struct target_ops *, int) TARGET_DEFAULT_RETURN(1)
Definition: target.h:590
#define WSTOPSIG
Definition: gdb_wait.h:75
static struct lwp_info * linux_nat_filter_event(int lwpid, int status)
Definition: linux-nat.c:3053
struct cleanup * save_inferior_ptid(void)
Definition: infrun.c:7538
CORE_ADDR gdbarch_decr_pc_after_break(struct gdbarch *gdbarch)
Definition: gdbarch.c:2754
static void(* super_close)(struct target_ops *)
Definition: linux-nat.c:202
static char * linux_child_pid_to_exec_file(struct target_ops *self, int pid)
Definition: linux-nat.c:4134
static int linux_nat_fileio_unlink(struct target_ops *self, struct inferior *inf, const char *filename, int *target_errno)
Definition: linux-nat.c:4957
static void linux_resume_one_lwp_throw(struct lwp_info *lp, int step, enum gdb_signal signo)
Definition: linux-nat.c:1573
static int linux_child_remove_exec_catchpoint(struct target_ops *self, int pid)
Definition: linux-nat.c:645
void linux_ptrace_init_warnings(void)
Definition: linux-ptrace.c:573
static int linux_nat_fileio_open(struct target_ops *self, struct inferior *inf, const char *filename, int flags, int mode, int warn_if_slow, int *target_errno)
Definition: linux-nat.c:4905
#define CATCH(EXCEPTION, MASK)
char * target_options_to_string(int target_options)
Definition: target.c:3364
enum tribool have_ptrace_getregset
Definition: linux-nat.c:169
struct address_space * aspace
Definition: inferior.h:314
char *(* to_thread_name)(struct target_ops *, struct thread_info *) TARGET_DEFAULT_RETURN(NULL)
Definition: target.h:635
char * linux_ptrace_attach_fail_reason_string(ptid_t ptid, int err)
Definition: linux-ptrace.c:55
static void linux_nat_async(struct target_ops *ops, int enable)
Definition: linux-nat.c:4738
#define WTERMSIG(w)
Definition: gdb_wait.h:71
static void async_file_mark(void)
Definition: linux-nat.c:250
int linux_supports_traceclone(void)
Definition: linux-ptrace.c:546
static void cleanup_target_stop(void *arg)
Definition: linux-nat.c:4451
void inferior_event_handler(enum inferior_event_type event_type, gdb_client_data client_data)
Definition: inf-loop.c:40
struct target_ops current_target
int detach_breakpoints(ptid_t ptid)
Definition: breakpoint.c:3847
static void(* linux_nat_prepare_to_resume)(struct lwp_info *)
Definition: linux-nat.c:187
void initialize_file_ftype(void)
Definition: defs.h:281
struct fork_info * add_fork(pid_t pid)
Definition: linux-fork.c:69
int resumed
Definition: linux-nat.h:58
static enum target_xfer_status linux_nat_xfer_osdata(struct target_ops *ops, enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
Definition: linux-nat.c:4393
int cloned
Definition: linux-nat.h:43
int linux_ns_same(pid_t pid, enum linux_ns_type type)
static int get_pending_status(struct lwp_info *lp, int *status)
Definition: linux-nat.c:1375
void linux_fork_detach(const char *args, int from_tty)
Definition: linux-fork.c:389
static int status_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:2648
void fprintf_filtered(struct ui_file *stream, const char *format,...)
Definition: utils.c:2351
void free_current_marker(void *arg)
Definition: tracepoint.c:3884
mach_port_t mach_port_t name mach_port_t mach_port_t name error_t err
Definition: gnu-nat.c:1816
void ui_file_write_async_safe(struct ui_file *file, const char *buf, long length_buf)
Definition: ui-file.c:232
#define PTRACE_O_TRACEVFORKDONE
Definition: linux-ptrace.h:67
static void(* linux_nat_new_thread)(struct lwp_info *)
Definition: linux-nat.c:177
static void linux_init_ptrace(pid_t pid, int attached)
Definition: linux-nat.c:401
struct simple_pid_list * next
Definition: linux-nat.c:217
enum gdb_signal sig
Definition: waitstatus.h:108
#define WIFEXITED(w)
Definition: gdb_wait.h:44
static LONGEST spu_enumerate_spu_ids(int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
Definition: linux-nat.c:4196
#define PTRACE_O_TRACEVFORK
Definition: linux-ptrace.h:64
int(* to_remove_vfork_catchpoint)(struct target_ops *, int) TARGET_DEFAULT_RETURN(1)
Definition: target.h:594
void target_resume(ptid_t ptid, int step, enum gdb_signal signal)
Definition: target.c:2245
void fprintf_unfiltered(struct ui_file *stream, const char *format,...)
Definition: utils.c:2361
mach_port_t mach_port_t name mach_port_t mach_port_t name error_t int status
Definition: gnu-nat.c:1816
int(* to_supports_stopped_by_sw_breakpoint)(struct target_ops *) TARGET_DEFAULT_RETURN(0)
Definition: target.h:500
void(* to_resume)(struct target_ops *, ptid_t, int TARGET_DEBUG_PRINTER(target_debug_print_step), enum gdb_signal) TARGET_DEFAULT_NORETURN(noprocess())
Definition: target.h:464
int linux_ptrace_get_extended_event(int wstat)
Definition: linux-ptrace.c:587
static int linux_nat_can_async_p(struct target_ops *ops)
Definition: linux-nat.c:4576
static int linux_async_pipe(int enable)
Definition: linux-nat.c:4700
enum resume_kind last_resume_kind
Definition: linux-nat.h:61
static int num_lwps(int pid)
Definition: linux-nat.c:424
static void linux_resume_one_lwp(struct lwp_info *lp, int step, enum gdb_signal signo)
Definition: linux-nat.c:1641
static char * linux_nat_fileio_readlink(struct target_ops *self, struct inferior *inf, const char *filename, int *target_errno)
Definition: linux-nat.c:4932
#define PTRACE_EVENT_CLONE
Definition: linux-ptrace.h:73
int status
Definition: linux-nat.h:64
#define TARGET_WNOHANG
Definition: wait.h:28
ptid_t pid_to_ptid(int pid)
Definition: ptid.c:44
int host_to_fileio_error(int error)
Definition: fileio.c:28
enum bfd_endian gdbarch_byte_order(struct gdbarch *gdbarch)
Definition: gdbarch.c:1420
int(* to_insert_exec_catchpoint)(struct target_ops *, int) TARGET_DEFAULT_RETURN(1)
Definition: target.h:598
void linux_nat_set_prepare_to_resume(struct target_ops *t, void(*prepare_to_resume)(struct lwp_info *))
Definition: linux-nat.c:5094
struct cleanup * make_cleanup(make_cleanup_ftype *function, void *arg)
Definition: cleanups.c:117
void store_waitstatus(struct target_waitstatus *ourstatus, int hoststatus)
Definition: inf-child.c:51
int catching_syscall_number(int syscall_number)
union target_waitstatus::@161 value
ptid_t current_lwp_ptid(void)
Definition: linux-nat.c:5125
target_xfer_status
Definition: target.h:219
static pid_t linux_nat_fileio_pid_of(struct inferior *inf)
Definition: linux-nat.c:4894
static int(* linux_nat_status_is_event)(int status)
Definition: linux-nat.c:287
static void async_file_flush(void)
Definition: linux-nat.c:233
struct inferior * find_inferior_pid(int pid)
Definition: inferior.c:354
static void purge_lwp_list(int pid)
Definition: linux-nat.c:780
int gdb_signal_to_host(enum gdb_signal)
Definition: signals.c:631
enum gdb_signal gdb_signal_from_host(int)
Definition: signals.c:116
int linux_proc_pid_is_stopped(pid_t pid)
Definition: linux-procfs.c:149
char * linux_proc_pid_to_exec_file(int pid)
Definition: linux-procfs.c:261
static void maybe_clear_ignore_sigint(struct lwp_info *lp)
Definition: linux-nat.c:2444
static int linux_nat_supports_stopped_by_hw_breakpoint(struct target_ops *ops)
Definition: linux-nat.c:2915
#define enable()
Definition: ser-go32.c:239
Definition: gnu-nat.c:163
static sigset_t suspend_mask
Definition: linux-nat.c:708
enum target_xfer_status target_xfer_partial_ftype(struct target_ops *ops, enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
void target_mourn_inferior(void)
Definition: target.c:2300
#define gdb_assert(expr)
Definition: gdb_assert.h:33
static VEC(static_tracepoint_marker_p)
Definition: linux-nat.c:4461
void thread_change_ptid(ptid_t old_ptid, ptid_t new_ptid)
Definition: thread.c:754
static int linux_child_follow_fork(struct target_ops *ops, int follow_child, int detach_fork)
Definition: linux-nat.c:451
int ignore_sigint
Definition: linux-nat.h:89
int linux_common_core_of_thread(ptid_t ptid)
Definition: linux-osdata.c:61
int(* to_insert_fork_catchpoint)(struct target_ops *, int) TARGET_DEFAULT_RETURN(1)
Definition: target.h:588
#define target_is_async_p()
Definition: target.h:1751
static int startswith(const char *string, const char *pattern)
Definition: common-utils.h:75
void linux_nat_add_target(struct target_ops *t)
Definition: linux-nat.c:4972
struct cleanup * maybe_disable_address_space_randomization(int disable_randomization)
int stopped
Definition: linux-nat.h:50
int agent_run_command(int pid, const char *cmd, int len)
Definition: agent.c:189
static unsigned int debug_linux_nat
Definition: linux-nat.c:204
int gdbarch_addr_bit(struct gdbarch *gdbarch)
Definition: gdbarch.c:1707
const char * gdb_signal_to_name(enum gdb_signal)
Definition: signals.c:78
static int linux_nat_stopped_by_sw_breakpoint(struct target_ops *ops)
Definition: linux-nat.c:2882
static enum target_xfer_status linux_nat_xfer_partial(struct target_ops *ops, enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
Definition: linux-nat.c:4000
void linux_stop_and_wait_all_lwps(void)
Definition: linux-nat.c:2386
enum target_xfer_status(* to_xfer_partial)(struct target_ops *ops, enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) TARGET_DEFAULT_RETURN(TARGET_XFER_E_IO)
Definition: target.h:724
static int linux_nat_stopped_data_address(struct target_ops *ops, CORE_ADDR *addr_p)
Definition: linux-nat.c:2539
#define PTRACE_O_TRACEFORK
Definition: linux-ptrace.h:63
static int(* linux_nat_siginfo_fixup)(siginfo_t *, gdb_byte *, int)
Definition: linux-nat.c:192
static struct thread_info * new_thread(ptid_t ptid)
Definition: thread.c:221
#define WEXITSTATUS(w)
Definition: gdb_wait.h:67
void printf_unfiltered(const char *format,...)
Definition: utils.c:2399
struct cmd_list_element * setdebuglist
Definition: cli-cmds.c:173
int forks_exist_p(void)
Definition: linux-fork.c:61
static int stop_and_resume_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:3001
void(* to_detach)(struct target_ops *ops, const char *, int) TARGET_DEFAULT_IGNORE()
Definition: target.h:460
char * status_to_str(int status)
Definition: linux-waitpid.c:55
#define PTRACE_O_TRACEEXEC
Definition: linux-ptrace.h:66
void(* to_async)(struct target_ops *, int) TARGET_DEFAULT_NORETURN(tcomplain())
Definition: target.h:664
int software_breakpoint_inserted_here_p(struct address_space *aspace, CORE_ADDR pc)
Definition: breakpoint.c:4279
static ptid_t linux_nat_wait(struct target_ops *ops, ptid_t ptid, struct target_waitstatus *ourstatus, int target_options)
Definition: linux-nat.c:3720
int core
Definition: linux-nat.h:104
struct target_ops * linux_target(void)
Definition: linux-nat.c:4544
void * xmalloc(YYSIZE_T)
static int linux_child_insert_fork_catchpoint(struct target_ops *self, int pid)
Definition: linux-nat.c:615
struct ui_file * gdb_stdlog
Definition: main.c:73
static int sigtrap_is_event(int status)
Definition: linux-nat.c:2553
static void check_zombie_leaders(void)
Definition: linux-nat.c:3341
target_object
Definition: target.h:136
void * gdb_client_data
Definition: event-loop.h:70
struct inferior * vfork_child
Definition: inferior.h:350
char *(* to_pid_to_exec_file)(struct target_ops *, int pid) TARGET_DEFAULT_RETURN(NULL)
Definition: target.h:644
int linux_supports_tracefork(void)
Definition: linux-ptrace.c:535
int lwp_is_stopped(struct lwp_info *lwp)
Definition: linux-nat.c:320
char * normal_pid_to_str(ptid_t ptid)
Definition: target.c:3207
#define COMM_LEN
int attach_flag
Definition: inferior.h:340
Definition: value.c:172
tribool
Definition: linux-nat.h:119
int linux_mntns_unlink(pid_t pid, const char *filename)
CORE_ADDR stopped_data_address
Definition: linux-nat.h:86
static int linux_nat_ptrace_options(int attached)
Definition: linux-nat.c:379
LONGEST linux_common_xfer_osdata(const char *annex, gdb_byte *readbuf, ULONGEST offset, ULONGEST len)
static int linux_nat_supports_multi_process(struct target_ops *self)
Definition: linux-nat.c:4596
static int linux_nat_stop_lwp(struct lwp_info *lwp, void *data)
Definition: linux-nat.c:4763
int ptid_get_pid(ptid_t ptid)
Definition: ptid.c:52
#define PTRACE_GETSIGINFO
Definition: linux-ptrace.h:42
static ptid_t linux_nat_wait_1(struct target_ops *ops, ptid_t ptid, struct target_waitstatus *ourstatus, int target_options)
Definition: linux-nat.c:3403
int signal_pass_state(int signo)
Definition: infrun.c:6792
#define USE_SIGTRAP_SIGINFO
Definition: linux-ptrace.h:117
unsigned short selector
Definition: go32-nat.c:1064
#define WIFSTOPPED(w)
Definition: gdb_wait.h:62
int(* to_fileio_open)(struct target_ops *, struct inferior *inf, const char *filename, int flags, int mode, int warn_if_slow, int *target_errno)
Definition: target.h:872
static char * linux_nat_pid_to_str(struct target_ops *ops, ptid_t ptid)
Definition: linux-nat.c:4078
struct cleanup * make_cleanup_fclose(FILE *file)
Definition: utils.c:207
static void siginfo_fixup(siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
Definition: linux-nat.c:3926
static void linux_nat_kill(struct target_ops *ops)
Definition: linux-nat.c:3856
void throw_exception(struct gdb_exception exception)
#define buffer_grow_str0(BUFFER, STRING)
Definition: buffer.h:56
char * execd_pathname
Definition: waitstatus.h:112
const char const char int
Definition: command.h:229
bfd_byte gdb_byte
Definition: common-types.h:38
static enum target_xfer_status linux_proc_xfer_partial(struct target_ops *ops, enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
Definition: linux-nat.c:4145
void linux_nat_set_status_is_event(struct target_ops *t, int(*status_is_event)(int status))
Definition: linux-nat.c:2563
static int lwp_status_pending_p(struct lwp_info *lp)
Definition: linux-nat.c:2748
int linux_proc_pid_is_gone(pid_t pid)
Definition: linux-procfs.c:108
int non_stop
Definition: infrun.c:180
ptid_t null_ptid
Definition: ptid.c:25
void(* to_mourn_inferior)(struct target_ops *) TARGET_DEFAULT_FUNC(default_mourn_inferior)
Definition: target.h:607
void perror_with_name(const char *string) ATTRIBUTE_NORETURN
Definition: utils.c:979
void buffer_init(struct buffer *buffer)
Definition: buffer.c:60
struct simple_pid_list * stopped_pids
Definition: linux-nat.c:219
static void show_debug_linux_nat(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
Definition: linux-nat.c:206
static int linux_nat_supports_stopped_by_sw_breakpoint(struct target_ops *ops)
Definition: linux-nat.c:2894
#define SEEK_SET
Definition: defs.h:87
void prune_threads(void)
Definition: thread.c:626
static int linux_child_insert_vfork_catchpoint(struct target_ops *self, int pid)
Definition: linux-nat.c:627
void(* to_pass_signals)(struct target_ops *, int, unsigned char *TARGET_DEBUG_PRINTER(target_debug_print_signals)) TARGET_DEFAULT_IGNORE()
Definition: target.h:617
void(* to_stop)(struct target_ops *, ptid_t) TARGET_DEFAULT_IGNORE()
Definition: target.h:637
static int linux_nat_supports_non_stop(struct target_ops *self)
Definition: linux-nat.c:4585
ptid_t ptid
Definition: gdbthread.h:169
void registers_changed_ptid(ptid_t ptid)
Definition: regcache.c:586
int xsnprintf(char *str, size_t size, const char *format,...)
Definition: common-utils.c:134
static int linux_nat_is_async_p(struct target_ops *ops)
Definition: linux-nat.c:4568
int target_async_permitted
Definition: target.c:3778
static struct lwp_info * add_initial_lwp(ptid_t ptid)
Definition: linux-nat.c:819
enum target_waitkind kind
Definition: waitstatus.h:100
static int linux_nat_thread_alive(struct target_ops *ops, ptid_t ptid)
Definition: linux-nat.c:4055
void(* to_kill)(struct target_ops *) TARGET_DEFAULT_NORETURN(noprocess())
Definition: target.h:575
int target_read_memory(CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
Definition: target.c:1393
#define PTRACE_SETSIGINFO
Definition: linux-ptrace.h:43
static void linux_target_install_ops(struct target_ops *t)
Definition: linux-nat.c:4522
CORE_ADDR regcache_read_pc(struct regcache *regcache)
Definition: regcache.c:1174
void linux_enable_event_reporting(pid_t pid, int options)
Definition: linux-ptrace.c:489
#define PTRACE_EVENT_VFORK
Definition: linux-ptrace.h:72
ptid_t inferior_ptid
Definition: infcmd.c:124
struct minimal_symbol * minsym
Definition: minsyms.h:32
#define O_LARGEFILE
Definition: linux-nat.c:165
static int check_ptrace_stopped_lwp_gone(struct lwp_info *lp)
Definition: linux-nat.c:1612
char * safe_strerror(int)
int(* to_stopped_by_sw_breakpoint)(struct target_ops *) TARGET_DEFAULT_RETURN(0)
Definition: target.h:497
void linux_disable_event_reporting(pid_t pid)
Definition: linux-ptrace.c:510
int linux_supports_tracevforkdone(void)
Definition: linux-ptrace.c:555
int offset
Definition: agent.c:65
#define PT_KILL
Definition: gdb_ptrace.h:84
void registers_changed(void)
Definition: regcache.c:624
void(* to_post_attach)(struct target_ops *, int) TARGET_DEFAULT_IGNORE()
Definition: target.h:458
Definition: buffer.h:23
int(* to_can_async_p)(struct target_ops *) TARGET_DEFAULT_RETURN(0)
Definition: target.h:660
static void linux_nat_update_thread_list(struct target_ops *ops)
Definition: linux-nat.c:4064
static void linux_child_post_attach(struct target_ops *self, int pid)
Definition: linux-nat.c:410
void(* to_create_inferior)(struct target_ops *, char *, char *, char **, int)
Definition: target.h:584
int line
Definition: symtab.h:1570
static struct lwp_info * find_lwp_pid(ptid_t ptid)
Definition: linux-nat.c:890
struct thread_suspend_state suspend
Definition: gdbthread.h:202
int fake_pid_p
Definition: inferior.h:301
LONGEST gdbarch_get_syscall_number(struct gdbarch *gdbarch, ptid_t ptid)
Definition: gdbarch.c:4015
int parse_pid_to_attach(const char *args)
Definition: utils.c:3108
static int kill_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:3767
int(* to_stopped_by_watchpoint)(struct target_ops *) TARGET_DEFAULT_RETURN(0)
Definition: target.h:542
struct address_space * get_regcache_aspace(const struct regcache *regcache)
Definition: regcache.c:303
int linux_proc_pid_is_zombie(pid_t pid)
Definition: linux-procfs.c:183
void stdin_event_handler(int error, gdb_client_data client_data)
Definition: event-top.c:416
void ** data
Definition: gdbarch.c:139
static int linux_nat_stopped_by_watchpoint(struct target_ops *ops)
Definition: linux-nat.c:2529
static int linux_handle_syscall_trap(struct lwp_info *lp, int stopping)
Definition: linux-nat.c:1877
void(* to_terminal_ours)(struct target_ops *) TARGET_DEFAULT_IGNORE()
Definition: target.h:571
int gdb_open_cloexec(const char *filename, int flags, unsigned long mode)
Definition: filestuff.c:291
int(* to_stopped_by_hw_breakpoint)(struct target_ops *) TARGET_DEFAULT_RETURN(0)
Definition: target.h:510
enum target_stop_reason stop_reason
Definition: linux-nat.h:78
struct inferior * current_inferior(void)
Definition: inferior.c:57
void(* to_terminal_inferior)(struct target_ops *) TARGET_DEFAULT_IGNORE()
Definition: target.h:567
#define FORMAT
static void linux_nat_resume(struct target_ops *ops, ptid_t ptid, int step, enum gdb_signal signo)
Definition: linux-nat.c:1745
static void restore_child_signals_mask(sigset_t *prev_mask)
Definition: linux-nat.c:733
void delete_exited_threads(void)
Definition: thread.c:640
int syscall_state
Definition: linux-nat.h:101
unsigned long long ULONGEST
Definition: common-types.h:53
static void linux_nat_terminal_ours(struct target_ops *self)
Definition: linux-nat.c:4652
static int kill_lwp(int lwpid, int signo)
Definition: linux-nat.c:1845
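The kill_lwp entry above names the helper linux-nat.c uses to direct a signal at one specific LWP rather than at the whole thread group. A minimal sketch of that kind of helper, assuming the tkill syscall is available (the exact fallback and the caching of a failed tkill in the real source may differ):

#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Send SIGNO to the single LWP identified by LWPID.  Prefer tkill so
   the signal reaches that LWP only; fall back to kill if the kernel
   does not provide tkill.  */
static int
kill_lwp (int lwpid, int signo)
{
#ifdef __NR_tkill
  errno = 0;
  int ret = syscall (__NR_tkill, lwpid, signo);
  if (errno != ENOSYS)
    return ret;
#endif
  return kill (lwpid, signo);
}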
initialize_file_ftype _initialize_linux_nat
static void lwp_free(struct lwp_info *lp)
Definition: linux-nat.c:771
int my_waitpid(int pid, int *status, int flags)
Definition: linux-waitpid.c:81
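my_waitpid, indexed above from nat/linux-waitpid.c, is the waitpid wrapper the SIGCHLD-driven wait loop relies on. A sketch of such an EINTR-retrying wrapper (behaviour inferred from how the entry is used here, not copied verbatim from the source):

#include <errno.h>
#include <sys/wait.h>

/* Call waitpid, retrying whenever it is interrupted by a signal, so
   callers never see a spurious -1/EINTR result.  */
int
my_waitpid (int pid, int *status, int flags)
{
  int ret;

  do
    ret = waitpid (pid, status, flags);
  while (ret == -1 && errno == EINTR);

  return ret;
}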
void clear_sigint_trap(void)
Definition: inflow.c:810
#define PTRACE_O_TRACESYSGOOD
Definition: linux-ptrace.h:62
int linux_mntns_open_cloexec(pid_t pid, const char *filename, int flags, mode_t mode)
static void delete_lwp(ptid_t ptid)
Definition: linux-nat.c:865
#define WIFSIGNALED(w)
Definition: gdb_wait.h:48
int(* to_supports_non_stop)(struct target_ops *) TARGET_DEFAULT_RETURN(0)
Definition: target.h:668
long ptid_get_lwp(ptid_t ptid)
Definition: ptid.c:60
const char * gdb_signal_to_string(enum gdb_signal)
Definition: signals.c:68
void linux_nat_switch_fork(ptid_t new_ptid)
Definition: linux-nat.c:936
int signalled
Definition: linux-nat.h:47
static int check_stopped_by_watchpoint(struct lwp_info *lp)
Definition: linux-nat.c:2475
void lwp_set_arch_private_info(struct lwp_info *lwp, struct arch_lwp_info *info)
Definition: linux-nat.c:303
struct cmd_list_element * showdebuglist
Definition: cli-cmds.c:175
int(* to_fileio_unlink)(struct target_ops *, struct inferior *inf, const char *filename, int *target_errno)
Definition: target.h:905
#define PTRACE_TYPE_ARG3
Definition: config.h:658
void linux_proc_attach_tgid_threads(pid_t pid, linux_proc_attach_lwp_func attach_lwp)
Definition: linux-procfs.c:191
static void exit_lwp(struct lwp_info *lp)
Definition: linux-nat.c:959
static int wait_lwp(struct lwp_info *lp)
Definition: linux-nat.c:2186
ssize_t linux_mntns_readlink(pid_t pid, const char *filename, char *buf, size_t bufsiz)
resume_kind
Definition: resume.h:25
struct fork_info * find_fork_pid(pid_t pid)
Definition: linux-fork.c:180
static int check_stopped_by_breakpoint(struct lwp_info *lp)
Definition: linux-nat.c:2777
static int linux_thread_alive(ptid_t ptid)
Definition: linux-nat.c:4031
static int linux_child_set_syscall_catchpoint(struct target_ops *self, int pid, int needed, int any_count, int table_size, int *table)
Definition: linux-nat.c:651
int(* to_insert_vfork_catchpoint)(struct target_ops *, int) TARGET_DEFAULT_RETURN(1)
Definition: target.h:592
int to_has_thread_control
Definition: target.h:656
int(* to_thread_alive)(struct target_ops *, ptid_t ptid) TARGET_DEFAULT_RETURN(0)
Definition: target.h:627
#define HOST_CHAR_BIT
Definition: host-defs.h:40
static int stop_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:2346
static int linux_nat_supports_disable_randomization(struct target_ops *self)
Definition: linux-nat.c:4602
void * arg
Definition: cleanups.c:43
void linux_nat_set_forget_process(struct target_ops *t, linux_nat_forget_process_ftype *fn)
Definition: linux-nat.c:5062
void(* to_attach)(struct target_ops *ops, const char *, int)
Definition: target.h:457
int stop_requested
Definition: gdbthread.h:261
static int detach_fork
Definition: infrun.c:130
static linux_nat_new_fork_ftype * linux_nat_new_fork
Definition: linux-nat.c:180
int(* to_stopped_data_address)(struct target_ops *, CORE_ADDR *) TARGET_DEFAULT_RETURN(0)
Definition: target.h:546
static int in_pid_list_p(struct simple_pid_list *list, int pid)
Definition: linux-nat.c:348
static int linux_child_insert_exec_catchpoint(struct target_ops *self, int pid)
Definition: linux-nat.c:639
int(* to_set_syscall_catchpoint)(struct target_ops *, int, int, int, int, int *) TARGET_DEFAULT_RETURN(1)
Definition: target.h:602
static void select_event_lwp(ptid_t filter, struct lwp_info **orig_lp, int *status)
Definition: linux-nat.c:2923
void(* to_update_thread_list)(struct target_ops *) TARGET_DEFAULT_IGNORE()
Definition: target.h:629
#define target_async(ENABLE)
Definition: target.h:1754
CORE_ADDR stop_pc
Definition: linux-nat.h:71
struct lwp_info * iterate_over_lwps(ptid_t filter, iterate_over_lwps_ftype callback, void *data)
Definition: linux-nat.c:910
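iterate_over_lwps, indexed above, walks the LWP list and hands each entry to a callback of type iterate_over_lwps_ftype (also listed further down in this index); a nonzero return from the callback stops the walk and that LWP is returned. A hypothetical caller, built only from functions and globals that appear in this index (lwp_is_stopped, minus_one_ptid), might look like:

/* Illustrative callback: stop the walk at the first stopped LWP.  */
static int
first_stopped_lwp (struct lwp_info *lp, void *data)
{
  return lwp_is_stopped (lp);
}

/* minus_one_ptid acts as a wildcard filter, so every LWP of every
   inferior is visited.  NULL is returned if no callback returned
   nonzero.  */
struct lwp_info *lp
  = iterate_over_lwps (minus_one_ptid, first_stopped_lwp, NULL);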
int stopped_data_address_p
Definition: linux-nat.h:85
struct bound_minimal_symbol lookup_minimal_symbol(const char *name, const char *sfile, struct objfile *objf)
Definition: minsyms.c:163
#define target_thread_architecture(ptid)
Definition: target.h:1797
#define SPUFS_MAGIC
Definition: linux-nat.c:73
char * buffer_finish(struct buffer *buffer)
Definition: buffer.c:66
#define PTRACE_O_EXITKILL
Definition: linux-ptrace.h:82
void set_executing(ptid_t ptid, int executing)
Definition: thread.c:850
target_stop_reason
Definition: waitstatus.h:121
static int count_events_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:2720
int ptid_lwp_p(ptid_t ptid)
Definition: ptid.c:98
void error(const char *fmt,...)
Definition: errors.c:38
void delete_file_handler(int fd)
Definition: event-loop.c:539
int( iterate_over_lwps_ftype)(struct lwp_info *lwp, void *arg)
Definition: linux-nat.h:41
int linux_supports_tracesysgood(void)
Definition: linux-ptrace.c:564
char *(* to_fileio_readlink)(struct target_ops *, struct inferior *inf, const char *filename, int *target_errno)
Definition: target.h:915
ptid_t minus_one_ptid
Definition: ptid.c:26
static char * linux_nat_thread_name(struct target_ops *self, struct thread_info *thr)
Definition: linux-nat.c:4094
pid_t pid
Definition: gnu-nat.c:1818
static int stop_wait_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:2572
int print_thread_events
Definition: thread.c:1653
void throw_error(enum errors error, const char *fmt,...)
long long LONGEST
Definition: common-types.h:52
void( linux_nat_forget_process_ftype)(pid_t pid)
Definition: linux-nat.h:183
void do_cleanups(struct cleanup *old_chain)
Definition: cleanups.c:175
void add_setshow_boolean_cmd(const char *name, enum command_class theclass, int *var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_sfunc_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
Definition: cli-decode.c:541
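add_setshow_boolean_cmd, indexed above, registers a paired "set"/"show" command over an int flag; hung off the setdebuglist and showdebuglist chains also listed here, it produces a "set debug ..." style knob. A purely illustrative registration (the command name, flag, show function, and initializer are hypothetical; real calls live in a module's _initialize_* routine):

static int debug_example;	/* hypothetical flag toggled by the command */

static void
show_debug_example (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Example debugging is %s.\n"), value);
}

/* Hypothetical initializer registering "set debug example" and
   "show debug example".  */
static void
_initialize_debug_example (void)
{
  add_setshow_boolean_cmd ("example", class_maintenance, &debug_example,
			   _("Set example debugging."),
			   _("Show example debugging."),
			   _("When on, example debugging output is printed."),
			   NULL,			/* set_func */
			   show_debug_example,		/* show_func */
			   &setdebuglist, &showdebuglist);
}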
int is_executing(ptid_t ptid)
Definition: thread.c:840
void linux_nat_forget_process(pid_t pid)
Definition: linux-nat.c:5072
int(* to_filesystem_is_local)(struct target_ops *) TARGET_DEFAULT_RETURN(1)
Definition: target.h:862
target_xfer_partial_ftype memory_xfer_auxv
int(* to_supports_multi_process)(struct target_ops *) TARGET_DEFAULT_RETURN(0)
Definition: target.h:812
void(* to_post_startup_inferior)(struct target_ops *, ptid_t) TARGET_DEFAULT_IGNORE()
Definition: target.h:586
struct target_ops * linux_trad_target(CORE_ADDR(*register_u_offset)(struct gdbarch *, int, int))
Definition: linux-nat.c:4555
const LONGEST len
Definition: target.h:309
static int resume_set_callback(struct lwp_info *lp, void *data)
Definition: linux-nat.c:1737
int linux_proc_pid_is_trace_stopped_nowarn(pid_t pid)
Definition: linux-procfs.c:158
static void linux_child_post_startup_inferior(struct target_ops *self, ptid_t ptid)
Definition: linux-nat.c:416
void check_for_thread_db(void)