path: root/arch/sparc/kernel/smp_64.c
author		Tejun Heo <tj@kernel.org>	2009-08-14 02:00:51 -0400
committer	Tejun Heo <tj@kernel.org>	2009-08-14 02:00:51 -0400
commit		fb435d5233f8b6f9b93c11d6304d8e98fed03234 (patch)
tree		76a210c3895b9db5dc7e1f185ee0a60744fef99a /arch/sparc/kernel/smp_64.c
parent		fd1e8a1fe2b54df6c185b4fa65f181f50b9c4d4e (diff)
percpu: add pcpu_unit_offsets[]
Currently units are mapped sequentially into address space. This patch adds pcpu_unit_offsets[], which allows units to be mapped at arbitrary offsets from the chunk base address. This is necessary to allow sparse embedding, which may need to allocate address ranges and memory areas aligned not to the unit size but to the allocation atom size (page or large page size). It also simplifies things a bit by removing the need to calculate an offset from the unit number.

With this change, there is no need for arch code to know pcpu_unit_size. pcpu_setup_first_chunk() and the first chunk allocators are updated to return a regular 0 or -errno return code instead of unit size or -errno.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
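For illustration, a minimal user-space C sketch (not kernel code; NR_UNITS, the helper name, and the table values below are made up) contrasting the old sequential layout, where a unit's offset is derived from its number and the unit size, with the kind of per-unit offset table that pcpu_unit_offsets[] introduces:

#include <stdio.h>

#define NR_UNITS 4

/* Old scheme: units are packed back to back, so a unit's offset within
 * the chunk is simply its number times the unit size. */
static unsigned long sequential_offset(unsigned long unit_size, int unit)
{
	return (unsigned long)unit * unit_size;
}

/* New scheme: each unit carries its own offset from the chunk base, so
 * units can be placed at positions aligned to the allocation atom
 * (e.g. a large page) rather than to the unit size.  Values are made up. */
static const unsigned long unit_offsets[NR_UNITS] = {
	0x000000, 0x200000, 0x400000, 0x600000
};

int main(void)
{
	unsigned long unit_size = 0x8000;	/* 32 KiB, arbitrary */
	int unit;

	for (unit = 0; unit < NR_UNITS; unit++)
		printf("unit %d: sequential=%#07lx  per-unit table=%#07lx\n",
		       unit, sequential_offset(unit_size, unit),
		       unit_offsets[unit]);
	return 0;
}

In the diff below this shows up as sparc64 dropping delta + cpu * pcpu_unit_size in favour of delta + pcpu_unit_offsets[cpu], and checking the 0/-errno return of pcpu_setup_first_chunk() instead of treating it as a unit size.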
Diffstat (limited to 'arch/sparc/kernel/smp_64.c')
-rw-r--r--	arch/sparc/kernel/smp_64.c	12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index a42a4a744d14..b03fd362c629 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1478,9 +1478,10 @@ void __init setup_per_cpu_areas(void)
 	static struct vm_struct vm;
 	struct pcpu_alloc_info *ai;
 	unsigned long delta, cpu;
-	size_t size_sum, pcpu_unit_size;
+	size_t size_sum;
 	size_t ptrs_size;
 	void **ptrs;
+	int rc;
 
 	ai = pcpu_alloc_alloc_info(1, nr_cpu_ids);
 
@@ -1526,14 +1527,15 @@ void __init setup_per_cpu_areas(void)
 		pcpu_map_range(start, end, virt_to_page(ptrs[cpu]));
 	}
 
-	pcpu_unit_size = pcpu_setup_first_chunk(ai, vm.addr);
+	rc = pcpu_setup_first_chunk(ai, vm.addr);
+	if (rc)
+		panic("failed to setup percpu first chunk (%d)", rc);
 
 	free_bootmem(__pa(ptrs), ptrs_size);
 
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
-	for_each_possible_cpu(cpu) {
-		__per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
-	}
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 
 	/* Setup %g5 for the boot cpu. */
 	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());