Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 209
 1 file changed, 189 insertions(+), 20 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1f5fa53b46b1..a88462a32617 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -102,6 +102,23 @@ struct fs_struct;
 struct bts_context;
 struct perf_event_context;
 
+#ifdef CONFIG_PREEMPT
+extern int kernel_preemption;
+#else
+# define kernel_preemption 0
+#endif
+#ifdef CONFIG_PREEMPT_VOLUNTARY
+extern int voluntary_preemption;
+#else
+# define voluntary_preemption 0
+#endif
+
+#ifdef CONFIG_PREEMPT_SOFTIRQS
+extern int softirq_preemption;
+#else
+# define softirq_preemption 0
+#endif
+
 /*
  * List of flags we want to share for kernel threads,
  * if only because they are not used by them anyway.
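
Because each #else branch defines the knob as the literal constant 0, generic code can test these flags at runtime without sprinkling #ifdefs; when a feature is configured out, the compiler sees a constant-false condition and discards the guarded code entirely. A minimal sketch of the pattern (the helper name is illustrative, not part of the patch):

    /* Collapses to "return 0" unless at least one knob is configured in. */
    static inline int preemption_mode_active(void)
    {
            return kernel_preemption || voluntary_preemption ||
                   softirq_preemption;
    }
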
@@ -170,9 +187,13 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 }
 #endif
 
+extern struct mutex kernel_sem;
+
 /*
  * Task state bitmask. NOTE! These bits are also
- * encoded in fs/proc/array.c: get_task_state().
+ * used in fs/proc/array.c: get_task_state() and
+ * in include/trace/events/sched.h in the
+ * sched_switch trace event.
  *
  * We have two separate sets of flags: task->state
  * is about runnability, while task->exit_state are
@@ -181,20 +202,59 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
  * mistake.
  */
 #define TASK_RUNNING            0
-#define TASK_INTERRUPTIBLE      1
-#define TASK_UNINTERRUPTIBLE    2
-#define __TASK_STOPPED          4
-#define __TASK_TRACED           8
+#define TASK_STATE_0            "R"
+#define DESCR_TASK_STATE_0      "running"
+
+#define TASK_RUNNING_MUTEX      1
+#define TASK_STATE_1            "M"
+#define DESCR_TASK_STATE_1      "running-mutex"
+
+#define TASK_INTERRUPTIBLE      2
+#define TASK_STATE_2            "S"
+#define DESCR_TASK_STATE_2      "sleeping"
+
+#define TASK_UNINTERRUPTIBLE    4
+#define TASK_STATE_4            "D"
+#define DESCR_TASK_STATE_4      "disk sleep"
+
+#define __TASK_STOPPED          8
+#define TASK_STATE_8            "T"
+#define DESCR_TASK_STATE_8      "stopped"
+
+#define __TASK_TRACED           16
+#define TASK_STATE_16           "t"
+#define DESCR_TASK_STATE_16     "tracing stop"
+
 /* in tsk->exit_state */
-#define EXIT_ZOMBIE             16
-#define EXIT_DEAD               32
+#define EXIT_ZOMBIE             32
+#define TASK_STATE_32           "Z"
+#define DESCR_TASK_STATE_32     "zombie"
+
+#define EXIT_DEAD               64
+#define TASK_STATE_64           "X"
+#define DESCR_TASK_STATE_64     "dead"
+
 /* in tsk->state again */
-#define TASK_DEAD               64
-#define TASK_WAKEKILL           128
-#define TASK_WAKING             256
-#define TASK_STATE_MAX          512
+#define TASK_DEAD               128
+#define TASK_STATE_128          "x"
+#define DESCR_TASK_STATE_128    "dead"
+
+#define TASK_WAKEKILL           256
+#define TASK_STATE_256          "K"
+#define DESCR_TASK_STATE_256    "wakekill"
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+#define TASK_WAKING             512
+#define TASK_STATE_512          "W"
+#define DESCR_TASK_STATE_512    "waking"
+
+#define TASK_STATE_MAX          1024
+
+#define TASK_STATE_TO_CHAR_STR \
+	TASK_STATE_0 TASK_STATE_1 TASK_STATE_2 TASK_STATE_4 TASK_STATE_8 \
+	TASK_STATE_16 TASK_STATE_32 TASK_STATE_64 TASK_STATE_128 TASK_STATE_256 \
+	TASK_STATE_512
+
+#define TASK_STATE_MAX          1024
 
 extern char ___assert_task_state[1 - 2*!!(
 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
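
String-literal pasting concatenates the per-state fragments into "RMSDTtZXxKW", and the build-time assert that follows checks the string length against ilog2(TASK_STATE_MAX)+1 = 11. A sketch of how such a table is indexed, modeled on get_task_state() in fs/proc/array.c (the helper name is illustrative):

    #include <linux/bitops.h>   /* fls() */

    /* Map a one-hot task state to its letter: fls(0) == 0 picks 'R',
     * fls(TASK_RUNNING_MUTEX) == 1 picks 'M', and fls(TASK_WAKING) == 10
     * picks 'W'. */
    static inline char task_state_char(unsigned long state)
    {
            return TASK_STATE_TO_CHAR_STR[fls(state)];
    }
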
@@ -209,7 +269,8 @@ extern char ___assert_task_state[1 - 2*!!(
 #define TASK_ALL        (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
 
 /* get_task_state() */
-#define TASK_REPORT     (TASK_RUNNING | TASK_INTERRUPTIBLE | \
+#define TASK_REPORT     (TASK_RUNNING | TASK_RUNNING_MUTEX | \
+                         TASK_INTERRUPTIBLE | \
                          TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
                          __TASK_TRACED)
 
@@ -226,6 +287,12 @@ extern char ___assert_task_state[1 - 2*!!(
 #define set_task_state(tsk, state_value)         \
 	set_mb((tsk)->state, (state_value))
 
+#ifdef CONFIG_X86_LOCAL_APIC
+extern void nmi_show_all_regs(void);
+#else
+# define nmi_show_all_regs() do { } while (0)
+#endif
+
 /*
  * set_current_state() includes a barrier so that the write of current->state
  * is correctly serialised wrt the caller's subsequent test of whether to
@@ -358,6 +425,11 @@ extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+/*
+ * This one can be called with interrupts disabled, only
+ * to be used by lowlevel arch code!
+ */
+asmlinkage void __sched __schedule(void);
 
 struct nsproxy;
 struct user_namespace;
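
schedule() expects to be entered with interrupts enabled; the bare __schedule() is for the few low-level architecture paths that must reschedule while interrupts stay hard-disabled. A sketch of the call pattern under that constraint (arch_idle_resched() is hypothetical, and it assumes __schedule() preserves the disabled-IRQ state, as its callers require):

    /* Hypothetical arch helper: pick up a pending reschedule without
     * ever re-enabling interrupts in between. */
    static void arch_idle_resched(void)
    {
            raw_local_irq_disable();
            while (need_resched())
                    __schedule();
            raw_local_irq_enable();
    }
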
@@ -561,7 +633,7 @@ struct task_cputime {
 struct thread_group_cputimer {
 	struct task_cputime cputime;
 	int running;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 /*
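
On PREEMPT_RT a plain spinlock_t is substituted with a sleeping rt-mutex, but this lock is taken from hard timer and interrupt context, so it must remain a true spinning lock: hence raw_spinlock_t. A sketch of the locking discipline at the accounting sites (modeled loosely on kernel/posix-cpu-timers.c; the function name is illustrative):

    static void account_group_runtime(struct thread_group_cputimer *ct,
                                      unsigned long long ns)
    {
            unsigned long flags;

            /* raw_spin_lock_* keeps spinning semantics even on RT */
            raw_spin_lock_irqsave(&ct->lock, flags);
            ct->cputime.sum_exec_runtime += ns;
            raw_spin_unlock_irqrestore(&ct->lock, flags);
    }
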
@@ -1087,7 +1159,8 @@ struct sched_domain;
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
+			      bool head);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
 
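
The extra head argument lets a caller requeue a task at the head of its priority list instead of the tail, which matters when a priority-boosted task should be picked next. The call shape through the class pointer, sketched (the wrapper is illustrative):

    /* Re-add p to rq at the head of its prio list (head == true),
     * not as a fresh wakeup (wakeup == 0). */
    static void requeue_task_head(struct rq *rq, struct task_struct *p)
    {
            p->sched_class->enqueue_task(rq, p, 0, true);
    }
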
@@ -1237,15 +1310,14 @@ struct task_struct {
 	void *stack;
 	atomic_t usage;
 	unsigned int flags;	/* per process flags, defined below */
+	unsigned int extra_flags;
 	unsigned int ptrace;
 
 	int lock_depth;		/* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
-#endif
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
@@ -1289,6 +1361,7 @@ struct task_struct {
 	struct plist_node pushable_tasks;
 
 	struct mm_struct *mm, *active_mm;
+	int pagefault_disabled;
 
 /* task state */
 	int exit_state;
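
Tracking pagefault_disable() in the task instead of in the preempt count means disabling page faults no longer disables preemption, which RT relies on for kmap_atomic and user-copy paths. A minimal sketch of how such a per-task counter is typically driven (the helper names are illustrative; the real helpers would sit in linux/uaccess.h):

    static inline void pagefault_disable_sketch(void)
    {
            current->pagefault_disabled++;
            barrier();      /* order against the faulting access */
    }

    static inline void pagefault_enable_sketch(void)
    {
            barrier();
            current->pagefault_disabled--;
    }
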
@@ -1363,6 +1436,8 @@ struct task_struct {
 	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 
+	struct task_struct* posix_timer_list;
+
 /* process credentials */
 	const struct cred *real_cred;	/* objective and real subjective task
 					 * credentials (COW) */
@@ -1398,6 +1473,7 @@ struct task_struct {
 /* signal handlers */
 	struct signal_struct *signal;
 	struct sighand_struct *sighand;
+	struct sigqueue *sigqueue_cache;
 
 	sigset_t blocked, real_blocked;
 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
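
The one-entry sigqueue_cache lets signal delivery reuse the most recently freed queue element instead of taking a slab round-trip on a latency-sensitive path. A sketch of the cache discipline, assuming only the owning task touches its own cache (the helper name is illustrative):

    static inline struct sigqueue *sigqueue_cache_pop(struct task_struct *t)
    {
            struct sigqueue *q = t->sigqueue_cache;

            if (q)
                    t->sigqueue_cache = NULL;
            return q;       /* NULL means: fall back to the slab allocator */
    }
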
@@ -1465,6 +1541,26 @@ struct task_struct {
 	gfp_t lockdep_reclaim_gfp;
 #endif
 
+/* realtime bits */
+
+#define MAX_PREEMPT_TRACE 25
+#define MAX_LOCK_STACK MAX_PREEMPT_TRACE
+#ifdef CONFIG_DEBUG_PREEMPT
+	atomic_t lock_count;
+# ifdef CONFIG_PREEMPT_RT
+	struct rt_mutex *owned_lock[MAX_LOCK_STACK];
+# endif
+#endif
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+	unsigned long softlockup_count; /* Count to keep track how long the
+					 * thread is in the kernel without
+					 * sleeping.
+					 */
+#endif
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+	void *last_kernel_lock;
+#endif
+
 /* journalling filesystem info */
 	void *journal_info;
 
@@ -1559,6 +1655,9 @@ struct task_struct {
 	unsigned long trace;
 	/* bitmask of trace recursion */
 	unsigned long trace_recursion;
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+	u64 preempt_timestamp_hist;
+#endif
 #endif /* CONFIG_TRACING */
 	unsigned long stack_start;
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
@@ -1569,11 +1668,24 @@ struct task_struct {
 		unsigned long memsw_bytes; /* uncharged mem+swap usage */
 	} memcg_batch;
 #endif
+#ifdef CONFIG_PREEMPT_RT
+	/*
+	 * Temporary hack, until we find a solution to
+	 * handle printk in atomic operations.
+	 */
+	int in_printk;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
+#ifdef CONFIG_PREEMPT_RT
+# define set_printk_might_sleep(x) do { current->in_printk = x; } while(0)
+#else
+# define set_printk_might_sleep(x) do { } while(0)
+#endif
+
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
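
set_printk_might_sleep() brackets a printk issued from a context that is atomic on RT, so the debugging machinery can tolerate it (an admitted stopgap, per the comment above); on non-RT kernels both calls compile away. The intended bracketing, sketched (the surrounding function is hypothetical):

    static void report_emergency(const char *msg)
    {
            set_printk_might_sleep(1);
            printk(KERN_EMERG "%s\n", msg);
            set_printk_might_sleep(0);
    }
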
@@ -1742,6 +1854,15 @@ extern struct pid *cad_pid;
 extern void free_task(struct task_struct *tsk);
 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
 
+#ifdef CONFIG_PREEMPT_RT
+extern void __put_task_struct_cb(struct rcu_head *rhp);
+
+static inline void put_task_struct(struct task_struct *t)
+{
+	if (atomic_dec_and_test(&t->usage))
+		call_rcu(&t->rcu, __put_task_struct_cb);
+}
+#else
 extern void __put_task_struct(struct task_struct *t);
 
 static inline void put_task_struct(struct task_struct *t)
@@ -1749,6 +1870,7 @@ static inline void put_task_struct(struct task_struct *t)
 	if (atomic_dec_and_test(&t->usage))
 		__put_task_struct(t);
 }
+#endif
 
 extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
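
On RT the final reference drop can happen from a context where tearing the task down synchronously is unsafe, so the free is deferred through RCU. A sketch of the callback's likely shape (the real definition would live in kernel/fork.c; it assumes task_struct's rcu head, as the call_rcu() above implies):

    #include <linux/rcupdate.h>

    void __put_task_struct_cb(struct rcu_head *rhp)
    {
            struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

            __put_task_struct(tsk); /* runs later, from RCU callback context */
    }
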
@@ -1762,6 +1884,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
+#define PF_KMAP		0x00000020	/* this context has a kmap */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
@@ -1789,6 +1912,10 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
 #define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
+/* Flags in the extra_flags field */
+#define PFE_SOFTIRQ	0x00000001	/* softirq context */
+#define PFE_HARDIRQ	0x00000002	/* hardirq thread */
+
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
  * tasks can access tsk->flags in readonly mode for example
@@ -1968,9 +2095,14 @@ int sched_rt_handler(struct ctl_table *table, int write,
 
 extern unsigned int sysctl_sched_compat_yield;
 
+extern void task_setprio(struct task_struct *p, int prio);
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
+static inline void rt_mutex_setprio(struct task_struct *p, int prio)
+{
+	task_setprio(p, prio);
+}
 extern void rt_mutex_adjust_pi(struct task_struct *p);
 #else
 static inline int rt_mutex_getprio(struct task_struct *p)
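
With rt_mutex_setprio() reduced to a wrapper, priority-inheritance boosting and every other priority change funnel through one implementation, task_setprio(), in sched.c. The PI usage pattern, sketched (the helpers are illustrative; recall that lower prio values mean higher priority inside the kernel):

    static void pi_boost(struct task_struct *owner, int waiter_prio)
    {
            if (waiter_prio < owner->prio)  /* boost only upward */
                    rt_mutex_setprio(owner, waiter_prio);
    }

    static void pi_deboost(struct task_struct *owner)
    {
            rt_mutex_setprio(owner, owner->normal_prio);
    }
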
@@ -1992,8 +2124,17 @@ extern int sched_setscheduler_nocheck(struct task_struct *, int,
 extern struct task_struct *idle_task(int cpu);
 extern struct task_struct *curr_task(int cpu);
 extern void set_curr_task(int cpu, struct task_struct *p);
+extern struct task_struct *rq_curr(struct rq *rq);
 
 void yield(void);
+void __yield(void);
+
+#ifdef CONFIG_SMP
+static inline int task_is_current(struct task_struct *task)
+{
+	return task->oncpu;
+}
+#endif
 
 /*
  * The default (Linux) execution domain.
@@ -2055,6 +2196,9 @@ extern void do_timer(unsigned long ticks);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_process_mutex(struct task_struct * tsk);
+extern int wake_up_process_sync(struct task_struct * tsk);
+extern int wake_up_process_mutex_sync(struct task_struct * tsk);
 extern void wake_up_new_task(struct task_struct *tsk,
 				unsigned long clone_flags);
 #ifdef CONFIG_SMP
@@ -2145,12 +2289,20 @@ extern struct mm_struct * mm_alloc(void);
 
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
+extern void __mmdrop_delayed(struct mm_struct *);
+
 static inline void mmdrop(struct mm_struct * mm)
 {
 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
 		__mmdrop(mm);
 }
 
+static inline void mmdrop_delayed(struct mm_struct * mm)
+{
+	if (atomic_dec_and_test(&mm->mm_count))
+		__mmdrop_delayed(mm);
+}
+
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
 /* Grab a reference to a task's mm, if it is not already going away */
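
mmdrop() can run from scheduler tails where, on RT, freeing page tables might need sleeping locks; mmdrop_delayed() drops the same reference but routes the teardown through __mmdrop_delayed(), which can defer it to a helper thread. Sketch of picking the variant at an atomic call site (the in_atomic() test is illustrative):

    #include <linux/hardirq.h>  /* in_atomic() */

    static inline void drop_mm_reference(struct mm_struct *mm)
    {
            if (in_atomic())
                    mmdrop_delayed(mm);     /* teardown deferred */
            else
                    mmdrop(mm);             /* may free page tables here */
    }
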
@@ -2418,7 +2570,7 @@ extern int _cond_resched(void);
 
 extern int __cond_resched_lock(spinlock_t *lock);
 
-#ifdef CONFIG_PREEMPT
+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT)
 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
 #else
 #define PREEMPT_LOCK_OFFSET	0
@@ -2431,10 +2583,20 @@ extern int __cond_resched_lock(spinlock_t *lock);
 
 extern int __cond_resched_softirq(void);
 
+
+#ifndef CONFIG_PREEMPT_RT
 #define cond_resched_softirq() ({				\
 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);	\
 	__cond_resched_softirq();				\
 })
+#else
+#define cond_resched_softirq() ({				\
+	__might_sleep(__FILE__, __LINE__, 0);			\
+	__cond_resched_softirq();				\
+})
+#endif
+
+extern int cond_resched_softirq_context(void);
 
 /*
  * Does a critical section need to be broken due to another
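
Under CONFIG_PREEMPT_RT softirqs run in thread context, so SOFTIRQ_OFFSET is never part of the preempt count; the RT variant therefore tells __might_sleep() to expect an offset of 0. Typical use in a long-running loop, sketched (process_one_item() is hypothetical):

    extern int process_one_item(void);  /* hypothetical work item */

    static void drain_work(void)
    {
            while (process_one_item())
                    cond_resched_softirq();     /* yield between items */
    }
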
@@ -2459,7 +2621,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
 	sig->cputimer.cputime = INIT_CPUTIME;
-	spin_lock_init(&sig->cputimer.lock);
+	raw_spin_lock_init(&sig->cputimer.lock);
 	sig->cputimer.running = 0;
 }
 
@@ -2467,6 +2629,13 @@ static inline void thread_group_cputime_free(struct signal_struct *sig)
 {
 }
 
+static inline int softirq_need_resched(void)
+{
+	if (softirq_preemption && (current->extra_flags & PFE_SOFTIRQ))
+		return need_resched();
+	return 0;
+}
+
 /*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
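
softirq_need_resched() only reports pressure when softirqs are threaded (softirq_preemption) and the caller is itself a softirq thread (PFE_SOFTIRQ); in every other configuration it is the constant 0 and the check compiles out. A sketch of using it as a loop-break condition in a softirq thread (the work helpers are hypothetical):

    extern int softirq_pending_work(void);  /* hypothetical */
    extern void run_one_softirq(void);      /* hypothetical */

    static void ksoftirqd_loop_sketch(void)
    {
            while (softirq_pending_work()) {
                    run_one_softirq();
                    if (softirq_need_resched())
                            break;  /* cede the CPU to a higher-prio task */
            }
    }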