path: root/include/linux/hardirq.h
author     Linus Torvalds <torvalds@linux-foundation.org>  2010-10-21 15:55:43 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-10-21 15:55:43 -0400
commit     bc4016f48161454a9a8e5eb209b0693c6cde9f62 (patch)
tree       f470f5d711e975b152eec90282f5dd30a1d5dba5  /include/linux/hardirq.h
parent     5d70f79b5ef6ea2de4f72a37b2d96e2601e40a22 (diff)
parent     b7dadc38797584f6203386da1947ed5edf516646 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (29 commits)
  sched: Export account_system_vtime()
  sched: Call tick_check_idle before __irq_enter
  sched: Remove irq time from available CPU power
  sched: Do not account irq time to current task
  x86: Add IRQ_TIME_ACCOUNTING
  sched: Add IRQ_TIME_ACCOUNTING, finer accounting of irq time
  sched: Add a PF flag for ksoftirqd identification
  sched: Consolidate account_system_vtime extern declaration
  sched: Fix softirq time accounting
  sched: Drop group_capacity to 1 only if local group has extra capacity
  sched: Force balancing on newidle balance if local group has capacity
  sched: Set group_imb only a task can be pulled from the busiest cpu
  sched: Do not consider SCHED_IDLE tasks to be cache hot
  sched: Drop all load weight manipulation for RT tasks
  sched: Create special class for stop/migrate work
  sched: Unindent labels
  sched: Comment updates: fix default latency and granularity numbers
  tracing/sched: Add sched_pi_setprio tracepoint
  sched: Give CPU bound RT tasks preference
  sched: Try not to migrate higher priority RT tasks
  ...
Diffstat (limited to 'include/linux/hardirq.h')
-rw-r--r--  include/linux/hardirq.h  9
1 file changed, 8 insertions, 1 deletion
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 1f4517d55b19..96c323ac44df 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -64,6 +64,8 @@
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET	(1UL << NMI_SHIFT)
 
+#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
+
 #ifndef PREEMPT_ACTIVE
 #define PREEMPT_ACTIVE_BITS	1
 #define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
@@ -82,10 +84,13 @@
 /*
  * Are we doing bottom half or hardware interrupt processing?
  * Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
  */
 #define in_irq()		(hardirq_count())
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
+#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
 
 /*
  * Are we in NMI context?
@@ -132,10 +137,12 @@ extern void synchronize_irq(unsigned int irq);
 
 struct task_struct;
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
 static inline void account_system_vtime(struct task_struct *tsk)
 {
 }
+#else
+extern void account_system_vtime(struct task_struct *tsk);
 #endif
 
 #if defined(CONFIG_NO_HZ)
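
A minimal usage sketch (not part of this commit; the helpers account_softirq_time() and account_task_time() are hypothetical) of why the series distinguishes in_serving_softirq() from in_softirq(): after this change, local_bh_disable() raises the preempt count by SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) while actual softirq processing adds SOFTIRQ_OFFSET, so in_serving_softirq() is non-zero only while softirq handlers are running, whereas in_softirq() is also non-zero when a task has merely disabled bottom halves.

#include <linux/hardirq.h>
#include <linux/sched.h>

/*
 * Hypothetical example, not from the kernel tree: charge time spent
 * actually serving softirqs separately from time spent in a task that
 * has only disabled bottom halves via local_bh_disable().
 */
static void example_charge_time(struct task_struct *tsk)
{
	if (in_serving_softirq())
		account_softirq_time(tsk);	/* hypothetical helper */
	else if (in_softirq())
		account_task_time(tsk);		/* hypothetical helper: bh only disabled */
}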