author	Thomas Gleixner <tglx@linutronix.de>	2017-02-27 09:37:36 -0500
committer	Ingo Molnar <mingo@kernel.org>	2017-03-16 04:57:08 -0400
commit	383776fa7527745224446337f2dcfb0f0d1b8b56 (patch)
tree	d9c5427418b5f6e1f6fdde58016c8347e45f7ec1 /mm/percpu.c
parent	6419c4af777a773a45a1b1af735de0fcd9a7dcc7 (diff)
locking/lockdep: Handle statically initialized PER_CPU locks properly
If a PER_CPU struct which contains a spin_lock is statically initialized via:

    DEFINE_PER_CPU(struct foo, bla) = { .lock = __SPIN_LOCK_UNLOCKED(bla.lock) };

then lockdep assigns a separate key to each lock because the logic for assigning a key to statically initialized locks is to use the address as the key. With per CPU locks the address is obviously different on each CPU.

That's wrong, because all locks should have the same key.

To solve this the following modifications are required:

 1) Extend the is_kernel/module_percpu_addr() functions to hand back the canonical address of the per CPU address, i.e. the per CPU address minus the per CPU offset.

 2) Check the lock address with these functions and if the per CPU check matches use the returned canonical address as the lock key, so all per CPU locks have the same key.

 3) Move the static_obj(key) check into look_up_lock_class() so this check can be avoided for statically initialized per CPU locks. That's required because the canonical address fails the static_obj(key) check for obvious reasons.

Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[ Merged Dan's fixups for !MODULES and !SMP into this patch. ]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dan Murphy <dmurphy@ti.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20170227143736.pectaimkjkan5kow@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
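To make the failure mode concrete, here is a minimal sketch of the pattern the changelog describes. The struct and variable names (struct foo, bla) follow the changelog's own example; the data member and the comments are illustrative additions, not code from this patch:

    /* Statically initialized per-CPU structure containing a spinlock. */
    struct foo {
    	spinlock_t lock;
    	long data;
    };

    DEFINE_PER_CPU(struct foo, bla) = {
    	.lock = __SPIN_LOCK_UNLOCKED(bla.lock),
    };

    /*
     * Lockdep keys statically initialized locks by their address, but each
     * CPU has its own copy of 'bla', so
     *
     *     &per_cpu(bla, 0).lock != &per_cpu(bla, 1).lock
     *
     * and each CPU's instance got a distinct lock class. Subtracting the
     * per-CPU offset yields the same canonical address, and hence the same
     * key, on every CPU.
     */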
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c	37
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 5696039b5c07..7d3b728c0254 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1281,18 +1281,7 @@ void free_percpu(void __percpu *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
-/**
- * is_kernel_percpu_address - test whether address is from static percpu area
- * @addr: address to test
- *
- * Test whether @addr belongs to in-kernel static percpu area.  Module
- * static percpu areas are not considered.  For those, use
- * is_module_percpu_address().
- *
- * RETURNS:
- * %true if @addr is from in-kernel static percpu area, %false otherwise.
- */
-bool is_kernel_percpu_address(unsigned long addr)
+bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
 {
 #ifdef CONFIG_SMP
 	const size_t static_size = __per_cpu_end - __per_cpu_start;
@@ -1301,16 +1290,36 @@ bool is_kernel_percpu_address(unsigned long addr)
 
 	for_each_possible_cpu(cpu) {
 		void *start = per_cpu_ptr(base, cpu);
+		void *va = (void *)addr;
 
-		if ((void *)addr >= start && (void *)addr < start + static_size)
+		if (va >= start && va < start + static_size) {
+			if (can_addr)
+				*can_addr = (unsigned long) (va - start);
 			return true;
+		}
 	}
 #endif
 	/* on UP, can't distinguish from other static vars, always false */
 	return false;
 }
 
 /**
+ * is_kernel_percpu_address - test whether address is from static percpu area
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to in-kernel static percpu area.  Module
+ * static percpu areas are not considered.  For those, use
+ * is_module_percpu_address().
+ *
+ * RETURNS:
+ * %true if @addr is from in-kernel static percpu area, %false otherwise.
+ */
+bool is_kernel_percpu_address(unsigned long addr)
+{
+	return __is_kernel_percpu_address(addr, NULL);
+}
+
+/**
  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
  * @addr: the address to be converted to physical address
  *
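For illustration, a hedged sketch of how a caller can consume the new can_addr out-parameter, in the spirit of the lockdep change described in the changelog. The helper name percpu_lock_key() is hypothetical and not part of this patch; the real consumer is lockdep's key-assignment path:

    /*
     * Derive a CPU-invariant lock key: if the lock sits in the static
     * per-CPU area, use its canonical (offset-free) address so all CPUs
     * share one lock class; otherwise fall back to the plain address.
     */
    static unsigned long percpu_lock_key(const void *lock)
    {
    	unsigned long can_addr;

    	if (__is_kernel_percpu_address((unsigned long)lock, &can_addr))
    		return can_addr;		/* identical on every CPU */

    	return (unsigned long)lock;	/* ordinary static lock: address key */
    }

Module per-CPU areas get the same treatment via the extended is_module_percpu_address() mentioned in the changelog, so statically initialized locks in modules also collapse to one key per lock.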