| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-15 21:37:30 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-15 21:37:30 -0400 |
| commit | 9620639b7ea3843983f4ced8b4c81eb4d8974838 (patch) | |
| tree | 54266fac3bcf89e61ae06c7d36ca708df6e0ea33 /include/linux/interrupt.h | |
| parent | a926021cb1f8a99a275eaf6eb546102e9469dc59 (diff) | |
| parent | 6d1cafd8b56ea726c10a5a104de57cc3ed8fa953 (diff) | |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
sched: Resched proper CPU on yield_to()
sched: Allow users with sufficient RLIMIT_NICE to change from SCHED_IDLE policy
sched: Allow SCHED_BATCH to preempt SCHED_IDLE tasks
sched: Clean up the IRQ_TIME_ACCOUNTING code
sched: Add #ifdef around irq time accounting functions
sched, autogroup: Stop claiming ownership of the root task group
sched, autogroup: Stop going ahead if autogroup is disabled
sched, autogroup, sysctl: Use proc_dointvec_minmax() instead
sched: Fix the group_imb logic
sched: Clean up some f_b_g() comments
sched: Clean up remnants of sd_idle
sched: Wholesale removal of sd_idle logic
sched: Add yield_to(task, preempt) functionality
sched: Use a buddy to implement yield_task_fair()
sched: Limit the scope of clear_buddies
sched: Check the right ->nr_running in yield_task_fair()
sched: Avoid expensive initial update_cfs_load(), on UP too
sched: Fix switch_from_fair()
sched: Simplify the idle scheduling class
softirqs: Account ksoftirqd time as cpustat softirq
...
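Among the commits above, "sched: Add yield_to(task, preempt) functionality" introduces a scheduler primitive that lets one task hand its remaining run time to another task, preempting that task's CPU if requested. The sketch below shows how a caller might use it; the wrapper name donate_timeslice() and the fallback to yield() are illustrative assumptions, not code from this merge.

```c
#include <linux/sched.h>

/*
 * Hypothetical helper: ask the scheduler to run @target next on its CPU,
 * preempting whatever currently runs there. yield_to() is assumed to
 * return true only when the target was actually boosted; otherwise we
 * fall back to a plain yield of the current task.
 */
static void donate_timeslice(struct task_struct *target)
{
	if (!yield_to(target, true))
		yield();
}
```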
Diffstat (limited to 'include/linux/interrupt.h')
-rw-r--r-- | include/linux/interrupt.h | 7 |
1 file changed, 7 insertions, 0 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index d746da19c6a2..2eb16e03422f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -427,6 +427,13 @@ extern void raise_softirq(unsigned int nr);
  */
 DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 
+DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+static inline struct task_struct *this_cpu_ksoftirqd(void)
+{
+	return this_cpu_read(ksoftirqd);
+}
+
 /* Try to send a softirq to a remote cpu. If this cannot be done, the
  * work will be queued to the local cpu.
  */
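The new per-CPU ksoftirqd pointer and its this_cpu_ksoftirqd() accessor support the "softirqs: Account ksoftirqd time as cpustat softirq" change listed above: time consumed by the per-CPU ksoftirqd thread is softirq work even though the thread runs in ordinary process context, so it should be charged to the softirq bucket rather than to system time. A minimal sketch of that kind of check follows; the function name account_as_softirq() is a hypothetical illustration, not the exact accounting code from this merge.

```c
#include <linux/interrupt.h>
#include <linux/sched.h>

/*
 * Hypothetical check for a CPU-time accounting path: if the task being
 * charged is this CPU's ksoftirqd thread, treat its run time as softirq
 * time instead of ordinary system time.
 */
static bool account_as_softirq(struct task_struct *p)
{
	return p == this_cpu_ksoftirqd();
}
```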