diff options
| author | Thomas Gleixner <tglx@linutronix.de> | 2009-07-25 10:21:48 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2011-09-13 05:11:47 -0400 |
| commit | f032a450812f6c7edd532772cc7c48091bca9f27 (patch) | |
| tree | 697632b583437656337cd883b1bb52bca2f6d77f /lib | |
| parent | ec484608c5885931c432e99ecfd2772288cd993c (diff) | |
locking, percpu_counter: Annotate ::lock as raw
The percpu_counter::lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.
In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/percpu_counter.c | 18 |
1 file changed, 9 insertions, 9 deletions
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 28f2c33c6b53..f087105ed914 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
| @@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount) | |||
| 59 | { | 59 | { |
| 60 | int cpu; | 60 | int cpu; |
| 61 | 61 | ||
| 62 | spin_lock(&fbc->lock); | 62 | raw_spin_lock(&fbc->lock); |
| 63 | for_each_possible_cpu(cpu) { | 63 | for_each_possible_cpu(cpu) { |
| 64 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 64 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
| 65 | *pcount = 0; | 65 | *pcount = 0; |
| 66 | } | 66 | } |
| 67 | fbc->count = amount; | 67 | fbc->count = amount; |
| 68 | spin_unlock(&fbc->lock); | 68 | raw_spin_unlock(&fbc->lock); |
| 69 | } | 69 | } |
| 70 | EXPORT_SYMBOL(percpu_counter_set); | 70 | EXPORT_SYMBOL(percpu_counter_set); |
| 71 | 71 | ||
| @@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) | |||
| 76 | preempt_disable(); | 76 | preempt_disable(); |
| 77 | count = __this_cpu_read(*fbc->counters) + amount; | 77 | count = __this_cpu_read(*fbc->counters) + amount; |
| 78 | if (count >= batch || count <= -batch) { | 78 | if (count >= batch || count <= -batch) { |
| 79 | spin_lock(&fbc->lock); | 79 | raw_spin_lock(&fbc->lock); |
| 80 | fbc->count += count; | 80 | fbc->count += count; |
| 81 | __this_cpu_write(*fbc->counters, 0); | 81 | __this_cpu_write(*fbc->counters, 0); |
| 82 | spin_unlock(&fbc->lock); | 82 | raw_spin_unlock(&fbc->lock); |
| 83 | } else { | 83 | } else { |
| 84 | __this_cpu_write(*fbc->counters, count); | 84 | __this_cpu_write(*fbc->counters, count); |
| 85 | } | 85 | } |
| @@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc) | |||
| 96 | s64 ret; | 96 | s64 ret; |
| 97 | int cpu; | 97 | int cpu; |
| 98 | 98 | ||
| 99 | spin_lock(&fbc->lock); | 99 | raw_spin_lock(&fbc->lock); |
| 100 | ret = fbc->count; | 100 | ret = fbc->count; |
| 101 | for_each_online_cpu(cpu) { | 101 | for_each_online_cpu(cpu) { |
| 102 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 102 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
| 103 | ret += *pcount; | 103 | ret += *pcount; |
| 104 | } | 104 | } |
| 105 | spin_unlock(&fbc->lock); | 105 | raw_spin_unlock(&fbc->lock); |
| 106 | return ret; | 106 | return ret; |
| 107 | } | 107 | } |
| 108 | EXPORT_SYMBOL(__percpu_counter_sum); | 108 | EXPORT_SYMBOL(__percpu_counter_sum); |
| @@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum); | |||
| 110 | int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, | 110 | int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, |
| 111 | struct lock_class_key *key) | 111 | struct lock_class_key *key) |
| 112 | { | 112 | { |
| 113 | spin_lock_init(&fbc->lock); | 113 | raw_spin_lock_init(&fbc->lock); |
| 114 | lockdep_set_class(&fbc->lock, key); | 114 | lockdep_set_class(&fbc->lock, key); |
| 115 | fbc->count = amount; | 115 | fbc->count = amount; |
| 116 | fbc->counters = alloc_percpu(s32); | 116 | fbc->counters = alloc_percpu(s32); |
| @@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, | |||
| 173 | s32 *pcount; | 173 | s32 *pcount; |
| 174 | unsigned long flags; | 174 | unsigned long flags; |
| 175 | 175 | ||
| 176 | spin_lock_irqsave(&fbc->lock, flags); | 176 | raw_spin_lock_irqsave(&fbc->lock, flags); |
| 177 | pcount = per_cpu_ptr(fbc->counters, cpu); | 177 | pcount = per_cpu_ptr(fbc->counters, cpu); |
| 178 | fbc->count += *pcount; | 178 | fbc->count += *pcount; |
| 179 | *pcount = 0; | 179 | *pcount = 0; |
| 180 | spin_unlock_irqrestore(&fbc->lock, flags); | 180 | raw_spin_unlock_irqrestore(&fbc->lock, flags); |
| 181 | } | 181 | } |
| 182 | mutex_unlock(&percpu_counters_lock); | 182 | mutex_unlock(&percpu_counters_lock); |
| 183 | #endif | 183 | #endif |
