diff options
author | David S. Miller <davem@davemloft.net> | 2009-04-01 04:47:10 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-06-16 07:56:11 -0400 |
commit | 5a5488d3bb9a23d9884572e5d85dfeefe8749d3d (patch) | |
tree | afa8db75cdf771257cd5541ed80a606df60f9cf8 /arch/sparc/kernel/smp_64.c | |
parent | 19f0fa3fb3499d8c5fb861933959f546d05fc202 (diff) |
sparc64: Store per-cpu offset in trap_block[]
Surprisingly this actually makes LOAD_PER_CPU_BASE() a little
more efficient.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/smp_64.c')
-rw-r--r-- | arch/sparc/kernel/smp_64.c | 18 |
1 file changed, 7 insertions, 11 deletions
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 4226d0ebaea5..b20f253857b7 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -1371,23 +1371,17 @@ void smp_send_stop(void) | |||
1371 | { | 1371 | { |
1372 | } | 1372 | } |
1373 | 1373 | ||
1374 | unsigned long __per_cpu_base __read_mostly; | ||
1375 | unsigned long __per_cpu_shift __read_mostly; | ||
1376 | |||
1377 | EXPORT_SYMBOL(__per_cpu_base); | ||
1378 | EXPORT_SYMBOL(__per_cpu_shift); | ||
1379 | |||
1380 | void __init real_setup_per_cpu_areas(void) | 1374 | void __init real_setup_per_cpu_areas(void) |
1381 | { | 1375 | { |
1382 | unsigned long paddr, goal, size, i; | 1376 | unsigned long base, shift, paddr, goal, size, i; |
1383 | char *ptr; | 1377 | char *ptr; |
1384 | 1378 | ||
1385 | /* Copy section for each CPU (we discard the original) */ | 1379 | /* Copy section for each CPU (we discard the original) */ |
1386 | goal = PERCPU_ENOUGH_ROOM; | 1380 | goal = PERCPU_ENOUGH_ROOM; |
1387 | 1381 | ||
1388 | __per_cpu_shift = PAGE_SHIFT; | 1382 | shift = PAGE_SHIFT; |
1389 | for (size = PAGE_SIZE; size < goal; size <<= 1UL) | 1383 | for (size = PAGE_SIZE; size < goal; size <<= 1UL) |
1390 | __per_cpu_shift++; | 1384 | shift++; |
1391 | 1385 | ||
1392 | paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE); | 1386 | paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE); |
1393 | if (!paddr) { | 1387 | if (!paddr) { |
@@ -1396,10 +1390,12 @@ void __init real_setup_per_cpu_areas(void) | |||
1396 | } | 1390 | } |
1397 | 1391 | ||
1398 | ptr = __va(paddr); | 1392 | ptr = __va(paddr); |
1399 | __per_cpu_base = ptr - __per_cpu_start; | 1393 | base = ptr - __per_cpu_start; |
1400 | 1394 | ||
1401 | for (i = 0; i < NR_CPUS; i++, ptr += size) | 1395 | for (i = 0; i < NR_CPUS; i++, ptr += size) { |
1396 | __per_cpu_offset(i) = base + (i * size); | ||
1402 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | 1397 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); |
1398 | } | ||
1403 | 1399 | ||
1404 | /* Setup %g5 for the boot cpu. */ | 1400 | /* Setup %g5 for the boot cpu. */ |
1405 | __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); | 1401 | __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); |