| author | Thomas Gleixner <tglx@linutronix.de> | 2009-07-25 10:43:30 -0400 |
| committer | Ingo Molnar <mingo@elte.hu> | 2011-09-13 05:11:50 -0400 |
| commit | 740969f91e950b64a18fdd0a25164cdee042abf0 (patch) | |
| tree | b152f88baf3868a7f3df4fa712ecd6a8c811d0a4 | |
| parent | cdcc136ffd264849a943acb42c36ffe9b458f811 (diff) | |
locking, lib/proportions: Annotate prop_local_percpu::lock as raw
The prop_local_percpu::lock (and its prop_local_single::lock counterpart) can be
taken in atomic context, so on -rt it must not be converted to a preemptible
sleeping lock - annotate it as raw.
In mainline this change merely documents the low-level nature of
the lock - otherwise there is no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
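
For readers unfamiliar with the annotation: every spinlock_t operation used here has a raw_spinlock_t counterpart with an identical calling convention, and a raw lock stays a true spinning lock on -rt instead of being converted to a sleeping mutex, which makes the conversion mechanical. A minimal sketch of the before/after pattern the patch applies (the struct and helper names below are hypothetical, for illustration only):

```c
#include <linux/spinlock.h>

/* Hypothetical state guarded by a raw lock (illustrative, not from the patch). */
struct snapshot_state {
	unsigned long period;
	raw_spinlock_t lock;	/* raw: safe to take in atomic context on -rt */
};

static void snapshot_init(struct snapshot_state *s)
{
	raw_spin_lock_init(&s->lock);	/* was: spin_lock_init(&s->lock) */
	s->period = 0;
}

static void snapshot_update(struct snapshot_state *s, unsigned long period)
{
	unsigned long flags;

	/* was: spin_lock_irqsave()/spin_unlock_irqrestore() */
	raw_spin_lock_irqsave(&s->lock, flags);
	s->period = period;
	raw_spin_unlock_irqrestore(&s->lock, flags);
}
```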
| -rw-r--r-- | include/linux/proportions.h | 6 |
| -rw-r--r-- | lib/proportions.c | 12 |

2 files changed, 9 insertions(+), 9 deletions(-)
```diff
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index cf793bbbd05e..ef35bb73f69b 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -58,7 +58,7 @@ struct prop_local_percpu {
 	 */
 	int shift;
 	unsigned long period;
-	spinlock_t lock;	/* protect the snapshot state */
+	raw_spinlock_t lock;	/* protect the snapshot state */
 };
 
 int prop_local_init_percpu(struct prop_local_percpu *pl);
@@ -106,11 +106,11 @@ struct prop_local_single {
 	 */
 	unsigned long period;
 	int shift;
-	spinlock_t lock;	/* protect the snapshot state */
+	raw_spinlock_t lock;	/* protect the snapshot state */
 };
 
 #define INIT_PROP_LOCAL_SINGLE(name)	\
-	{ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+	{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
 	}
 
 int prop_local_init_single(struct prop_local_single *pl);
```
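
The static initializer macro is converted in lock-step with the type, so existing static declarations keep working unchanged. A hypothetical usage sketch (the variable name is illustrative):

```c
/* Hypothetical static instance; the macro now expands to a raw-lock initializer. */
static struct prop_local_single my_prop_local = INIT_PROP_LOCAL_SINGLE(my_prop_local);
```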
```diff
diff --git a/lib/proportions.c b/lib/proportions.c
index d50746a79de2..05df84801b56 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
 
 int prop_local_init_percpu(struct prop_local_percpu *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 
 	/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	percpu_counter_set(&pl->events, 0);
 
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
 
 int prop_local_init_single(struct prop_local_single *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 	/*
 	 * For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	else
 		pl->events = 0;
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
```
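
Note that on -rt, raw_spin_lock_irqsave() preserves the mainline semantics of spin_lock_irqsave() - interrupts and preemption stay off for the critical section - whereas a sleeping spinlock_t would guarantee neither, which is why every lock/unlock call site above is converted along with the declaration rather than the declaration alone.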
