author     Andrea Bastoni <bastoni@cs.unc.edu>    2011-08-27 09:43:54 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>    2011-08-27 10:06:11 -0400
commit     7b1bb388bc879ffcc6c69b567816d5c354afe42b
tree       5a217fdfb0b5e5a327bdcd624506337c1ae1fe32 /include/linux/percpu_counter.h
parent     7d754596756240fa918b94cd0c3011c77a638987
parent     02f8c6aee8df3cdc935e9bdd4f2d020306035dbe
Merge 'Linux v3.0' into Litmus
Some notes:
* Litmus^RT scheduling class is the topmost scheduling class
(above stop_sched_class).
* The scheduler_ipi() call (e.g., from smp_reschedule_interrupt())
may increase IPI latencies.
* Added a path into schedule() to quickly re-evaluate the scheduling
decision without becoming preemptible again (see the sketch after
these notes). This used to be a standard path before the removal of the BKL.
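
The following is only a rough sketch of that re-check path, not the actual
LITMUS^RT code: the helper litmus_decision_is_stale() is invented for
illustration, while the need_resched/need_resched_nonpreemptible structure
mirrors the pre-BKL-removal schedule() loop. The point is to redo the
scheduling decision while preemption is still disabled instead of returning
and paying another preemption/IPI round trip.

asmlinkage void __sched schedule(void)
{
need_resched:
	preempt_disable();
need_resched_nonpreemptible:
	/* ... pick the next task and context_switch() to it ... */

	/*
	 * Hypothetical re-check: if the plugin already knows the decision
	 * just made is stale (e.g. a remote CPU requested a different one
	 * while we were switching), redo it now, still non-preemptible.
	 */
	if (litmus_decision_is_stale())
		goto need_resched_nonpreemptible;

	preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}
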
Conflicts:
Makefile
arch/arm/kernel/calls.S
arch/arm/kernel/smp.c
arch/x86/include/asm/unistd_32.h
arch/x86/kernel/smp.c
arch/x86/kernel/syscall_table_32.S
include/linux/hrtimer.h
kernel/printk.c
kernel/sched.c
kernel/sched_fair.c
Diffstat (limited to 'include/linux/percpu_counter.h')
-rw-r--r--  include/linux/percpu_counter.h | 16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 8a7d510ffa9c..5edc9014263a 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -75,7 +75,12 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
 	barrier();	/* Prevent reloads of fbc->count */
 	if (ret >= 0)
 		return ret;
-	return 1;
+	return 0;
+}
+
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+	return (fbc->counters != NULL);
 }
 
 #else
@@ -128,6 +133,10 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
 	return fbc->count;
 }
 
+/*
+ * percpu_counter is intended to track positive numbers. In the UP case the
+ * number should never be negative.
+ */
 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
 {
 	return fbc->count;
@@ -143,6 +152,11 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 	return percpu_counter_read(fbc);
 }
 
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+	return 1;
+}
+
 #endif /* CONFIG_SMP */
 
 static inline void percpu_counter_inc(struct percpu_counter *fbc)
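
For context on the helpers touched above, here is a minimal, hypothetical
usage sketch; it is not taken from the patch. The my_stats struct and
my_stats_* functions are invented, while the percpu_counter calls follow the
v3.0-era API, in which percpu_counter_init() takes only the counter and an
initial value.

#include <linux/percpu_counter.h>

/* Hypothetical subsystem state, used only for illustration. */
struct my_stats {
	struct percpu_counter nr_items;
};

static int my_stats_setup(struct my_stats *s)
{
	/* v3.0-era signature: (counter, initial value); may fail on SMP. */
	return percpu_counter_init(&s->nr_items, 0);
}

static void my_stats_teardown(struct my_stats *s)
{
	/*
	 * The kind of guard the new helper enables: only tear down a
	 * counter that was really set up (fbc->counters != NULL on SMP;
	 * always "initialized" on UP).
	 */
	if (percpu_counter_initialized(&s->nr_items))
		percpu_counter_destroy(&s->nr_items);
}

static s64 my_stats_bump_and_read(struct my_stats *s)
{
	percpu_counter_inc(&s->nr_items);
	/*
	 * Approximate, never-negative snapshot; after this patch a
	 * transiently negative SMP value reads as 0 instead of 1.
	 */
	return percpu_counter_read_positive(&s->nr_items);
}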