Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 101 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 77 insertions(+), 24 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 29eae73c951d..75e6e60bf583 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -100,7 +100,7 @@ struct robust_list_head;
 struct bio;
 struct fs_struct;
 struct bts_context;
-struct perf_counter_context;
+struct perf_event_context;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -140,6 +140,10 @@ extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
+extern unsigned long nr_iowait_cpu(void);
+extern unsigned long this_cpu_load(void);
+
+
 extern void calc_global_load(void);
 extern u64 cpu_nr_migrations(int cpu);
 
@@ -190,6 +194,7 @@ extern unsigned long long time_sync_thresh;
 /* in tsk->state again */
 #define TASK_DEAD		64
 #define TASK_WAKEKILL		128
+#define TASK_WAKING		256
 
 /* Convenience macros for the sake of set_task_state */
 #define TASK_KILLABLE	(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
@@ -256,7 +261,7 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
-extern int runqueue_is_locked(void);
+extern int runqueue_is_locked(int cpu);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_var_t nohz_cpu_mask;
@@ -304,7 +309,7 @@ extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
-				    struct file *filp, void __user *buffer,
+				    void __user *buffer,
 				    size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
 extern int softlockup_thresh;
@@ -326,7 +331,7 @@ extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
 extern unsigned long sysctl_hung_task_warnings;
 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
-					 struct file *filp, void __user *buffer,
+					 void __user *buffer,
 					 size_t *lenp, loff_t *ppos);
 #endif
 
@@ -421,6 +426,15 @@ static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
 	return max(mm->hiwater_rss, get_mm_rss(mm));
 }
 
+static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
+					 struct mm_struct *mm)
+{
+	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+	if (*maxrss < hiwater_rss)
+		*maxrss = hiwater_rss;
+}
+
 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
 {
 	return max(mm->hiwater_vm, mm->total_vm);
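A minimal usage sketch for the new helper; the caller, its name, and the locking are assumptions, not part of this hunk:

/* Sketch: fold one task's RSS high-water mark into a running maximum. */
static void account_hiwater_rss(struct task_struct *p, unsigned long *maxrss)
{
	struct mm_struct *mm = p->mm;	/* assumes mm is pinned by the caller */

	if (mm)
		setmax_mm_hiwater_rss(maxrss, mm);
}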
@@ -433,7 +447,9 @@ extern int get_dumpable(struct mm_struct *mm);
 /* dumpable bits */
 #define MMF_DUMPABLE      0  /* core dump is permitted */
 #define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
+
 #define MMF_DUMPABLE_BITS 2
+#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
 
 /* coredump filter bits */
 #define MMF_DUMP_ANON_PRIVATE	2
@@ -443,6 +459,7 @@ extern int get_dumpable(struct mm_struct *mm);
 #define MMF_DUMP_ELF_HEADERS	6
 #define MMF_DUMP_HUGETLB_PRIVATE 7
 #define MMF_DUMP_HUGETLB_SHARED  8
+
 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
 #define MMF_DUMP_FILTER_BITS	7
 #define MMF_DUMP_FILTER_MASK \
@@ -456,6 +473,10 @@ extern int get_dumpable(struct mm_struct *mm);
 #else
 # define MMF_DUMP_MASK_DEFAULT_ELF	0
 #endif
+					/* leave room for more dump flags */
+#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
+
+#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 
 struct sighand_struct {
 	atomic_t		count;
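A hedged sketch of how MMF_INIT_MASK might be consumed on fork; the helper name and the fallback value are illustrative only:

/* Sketch: seed a child mm's flags, keeping only bits covered by MMF_INIT_MASK. */
static void mm_init_flags(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mm->flags = oldmm ? (oldmm->flags & MMF_INIT_MASK) : 0;
}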
@@ -472,6 +493,13 @@ struct pacct_struct {
 	unsigned long		ac_minflt, ac_majflt;
 };
 
+struct cpu_itimer {
+	cputime_t expires;
+	cputime_t incr;
+	u32 error;
+	u32 incr_error;
+};
+
 /**
  * struct task_cputime - collected CPU time counts
  * @utime:		time spent in user mode, in &cputime_t units
@@ -566,9 +594,12 @@ struct signal_struct {
 	struct pid *leader_pid;
 	ktime_t it_real_incr;
 
-	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
-	cputime_t it_prof_expires, it_virt_expires;
-	cputime_t it_prof_incr, it_virt_incr;
+	/*
+	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
+	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
+	 * values are defined to 0 and 1 respectively
+	 */
+	struct cpu_itimer it[2];
 
 	/*
 	 * Thread group totals for process CPU timers.
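The new comment pins the array layout to the clock ids, so accessors can index by CPUCLOCK_PROF/CPUCLOCK_VIRT (0 and 1). A sketch; the accessor name is an assumption:

/* Sketch: the old it_prof_expires field is now sig->it[CPUCLOCK_PROF].expires. */
static cputime_t prof_expires(struct signal_struct *sig)
{
	return sig->it[CPUCLOCK_PROF].expires;
}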
@@ -600,6 +631,7 @@ struct signal_struct {
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
+	unsigned long maxrss, cmaxrss;
 	struct task_io_accounting ioac;
 
 	/*
@@ -631,6 +663,8 @@ struct signal_struct {
 	unsigned audit_tty;
 	struct tty_audit_buf *tty_audit_buf;
 #endif
+
+	int oom_adj;	/* OOM kill score adjustment (bit shift) */
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
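Together with the removal of task_struct.oomkilladj further down, this makes the OOM adjustment per-process rather than per-thread. A sketch of the new access path (helper name is illustrative):

/* Sketch: readers now reach the OOM adjustment through the shared signal_struct. */
static int task_oom_adj(struct task_struct *p)
{
	return p->signal->oom_adj;
}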
@@ -700,7 +734,7 @@ struct user_struct {
 #endif
 #endif
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
 #endif
 };
@@ -802,14 +836,14 @@ enum cpu_idle_type {
 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
-#define SD_WAKE_IDLE		0x0010	/* Wake to idle CPU on task wakeup */
+#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
-#define SD_WAKE_BALANCE		0x0040	/* Perform balancing at task wakeup */
+#define SD_PREFER_LOCAL		0x0040	/* Prefer to keep tasks local to this domain */
 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
 #define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
-#define SD_WAKE_IDLE_FAR	0x0800	/* Gain latency sacrificing cache hit */
+
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 
 enum powersavings_balance_level {
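Since these are bitmask flags, wakeup balancing is now keyed off SD_BALANCE_WAKE instead of the removed SD_WAKE_IDLE/SD_WAKE_BALANCE pair. A trivial sketch (helper name assumed):

/* Sketch: test whether a domain participates in wakeup balancing. */
static int domain_balances_on_wake(struct sched_domain *sd)
{
	return (sd->flags & SD_BALANCE_WAKE) != 0;
}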
@@ -991,6 +1025,9 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
 	return 0;
 }
 
+unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
+unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
@@ -1002,6 +1039,7 @@ partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 }
 #endif	/* !CONFIG_SMP */
 
+
 struct io_context;			/* See blkdev.h */
 
 
@@ -1019,6 +1057,12 @@ struct uts_namespace;
 struct rq;
 struct sched_domain;
 
+/*
+ * wake flags
+ */
+#define WF_SYNC		0x01		/* waker goes to sleep after wakup */
+#define WF_FORK		0x02		/* child wakeup after fork */
+
 struct sched_class {
 	const struct sched_class *next;
 
@@ -1026,13 +1070,13 @@ struct sched_class {
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int sync);
+	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 			struct rq *busiest, unsigned long max_load_move,
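With the widened hook, a wakeup path would pass both the balance reason (an SD_BALANCE_* bit) and the WF_* wake flags defined above. A sketch of the expected call shape; the wrapper itself is an assumption:

/* Sketch: how try_to_wake_up()-style code might drive the new signature. */
static int pick_wake_cpu(struct task_struct *p, int sync)
{
	int wake_flags = sync ? WF_SYNC : 0;

	return p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
}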
@@ -1064,6 +1108,8 @@ struct sched_class {
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
 			      int oldprio, int running);
 
+	unsigned int (*get_rr_interval) (struct task_struct *task);
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*moved_group) (struct task_struct *p);
 #endif
@@ -1102,6 +1148,8 @@ struct sched_entity {
 	u64			start_runtime;
 	u64			avg_wakeup;
 
+	u64			avg_running;
+
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
@@ -1199,7 +1247,6 @@ struct task_struct {
 	 * a short time
 	 */
 	unsigned char fpu_counter;
-	s8 oomkilladj;		/* OOM kill score adjustment (bit shift). */
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
 #endif
@@ -1224,7 +1271,6 @@ struct task_struct {
 	struct mm_struct *mm, *active_mm;
 
 /* task state */
-	struct linux_binfmt *binfmt;
 	int exit_state;
 	int exit_code, exit_signal;
 	int pdeath_signal;	/* The signal sent when the parent dies */
@@ -1436,10 +1482,10 @@ struct task_struct {
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
 #endif
-#ifdef CONFIG_PERF_COUNTERS
-	struct perf_counter_context *perf_counter_ctxp;
-	struct mutex perf_counter_mutex;
-	struct list_head perf_counter_list;
+#ifdef CONFIG_PERF_EVENTS
+	struct perf_event_context *perf_event_ctxp;
+	struct mutex perf_event_mutex;
+	struct list_head perf_event_list;
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
@@ -1492,6 +1538,7 @@ struct task_struct {
 	/* bitmask of trace recursion */
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
+	unsigned long stack_start;
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1699,7 +1746,7 @@ extern cputime_t task_gtime(struct task_struct *p);
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
-#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
+#define PF_OOM_ORIGIN	0x00080000	/* Allocating much memory to others */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
@@ -1742,7 +1789,6 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
-#define RCU_READ_UNLOCK_GOT_QS  (1 << 2) /* CPU has responded to RCU core. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
@@ -1772,10 +1818,13 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 	return 0;
 }
 #endif
+
+#ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
+#endif
 
 /*
  * Architectures can set this to 1 if they have specified
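With CONFIG_CPUMASK_OFFSTACK the by-value wrapper is compiled out, so new code should use the pointer variant directly, e.g. with cpumask_of(). A sketch (helper name assumed):

/* Sketch: pin a task to one CPU via the pointer-based API. */
static int bind_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}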
@@ -1858,7 +1907,7 @@ extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
 
 int sched_nr_latency_handler(struct ctl_table *table, int write,
-		struct file *file, void __user *buffer, size_t *length,
+		void __user *buffer, size_t *length,
 		loff_t *ppos);
 #endif
 #ifdef CONFIG_SCHED_DEBUG
@@ -1876,7 +1925,7 @@ extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
 int sched_rt_handler(struct ctl_table *table, int write,
-		struct file *filp, void __user *buffer, size_t *lenp,
+		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
 extern unsigned int sysctl_sched_compat_yield;
@@ -2011,6 +2060,7 @@ extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
 extern int do_notify_parent(struct task_struct *, int);
+extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
@@ -2288,7 +2338,10 @@ static inline int signal_pending(struct task_struct *p)
 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
 }
 
-extern int __fatal_signal_pending(struct task_struct *p);
+static inline int __fatal_signal_pending(struct task_struct *p)
+{
+	return unlikely(sigismember(&p->pending.signal, SIGKILL));
+}
 
 static inline int fatal_signal_pending(struct task_struct *p)
 {
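Making __fatal_signal_pending() an inline sigismember() test keeps the common TASK_KILLABLE polling pattern cheap. A sketch of that pattern; the waited-on condition is illustrative:

/* Sketch: a killable wait loop that bails out when SIGKILL is queued. */
static int wait_for_flag(int *flag)
{
	while (!*flag) {
		if (fatal_signal_pending(current))
			return -ERESTARTSYS;
		schedule_timeout_killable(1);
	}
	return 0;
}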