Diffstat (limited to 'arch/sparc/kernel/smp_64.c')
-rw-r--r--	arch/sparc/kernel/smp_64.c	165
1 file changed, 156 insertions(+), 9 deletions(-)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 567a6a47ba23..1de47d2169c8 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -21,6 +21,7 @@
 #include <linux/jiffies.h>
 #include <linux/profile.h>
 #include <linux/bootmem.h>
+#include <linux/vmalloc.h>
 #include <linux/cpu.h>
 
 #include <asm/head.h>
@@ -1371,19 +1372,165 @@ void smp_send_stop(void)
 {
 }
 
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+					unsigned long align)
+{
+	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int node = cpu_to_node(cpu);
+	void *ptr;
+
+	if (!node_online(node) || !NODE_DATA(node)) {
+		ptr = __alloc_bootmem(size, align, goal);
+		pr_info("cpu %d has no node %d or node-local memory\n",
+			cpu, node);
+		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+			 cpu, size, __pa(ptr));
+	} else {
+		ptr = __alloc_bootmem_node(NODE_DATA(node),
+					   size, align, goal);
+		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+			 "%016lx\n", cpu, size, node, __pa(ptr));
+	}
+	return ptr;
+#else
+	return __alloc_bootmem(size, align, goal);
+#endif
+}
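
The wrapper above keeps the NUMA policy out of its callers: with
CONFIG_NEED_MULTIPLE_NODES it tries cpu-local memory first and quietly
falls back to any-node bootmem when the cpu's node is offline or has no
node-local memory. A minimal caller sketch (hypothetical helper, not
part of this patch), assuming the usual sparc64 early-boot context:

	/* Hypothetical example: grab a naturally aligned 4MB chunk for
	 * @cpu; pcpu_alloc_bootmem() handles the NUMA fallback itself,
	 * so the caller only needs a NULL check. */
	static void * __init example_chunk_for_cpu(unsigned int cpu)
	{
		void *p = pcpu_alloc_bootmem(cpu, 4UL << 20, 4UL << 20);

		BUG_ON(!p);	/* boot-time allocation failure is fatal */
		return p;
	}
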
+
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
+
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+{
+	size_t off = (size_t)pageno << PAGE_SHIFT;
+
+	if (off >= pcpur_size)
+		return NULL;
+
+	return virt_to_page(pcpur_ptrs[cpu] + off);
+}
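
pcpur_get_page() is the callback later handed to
pcpu_setup_first_chunk(): the percpu core asks for page 0, 1, 2, ... of
each cpu's unit, and a NULL return marks everything past the populated
pcpur_size bytes as absent. A worked sketch of that contract
(illustrative numbers and helper, not from the patch; assumes sparc64's
8KB base pages, PAGE_SHIFT == 13):

	/* With pcpur_size == 72KB: pageno 8 sits at offset 64KB and is
	 * still populated, pageno 9 sits at offset 72KB and is already
	 * out of range. */
	static void __init pcpur_contract_example(unsigned int cpu)
	{
		BUG_ON(pcpur_get_page(cpu, 8) == NULL);
		BUG_ON(pcpur_get_page(cpu, 9) != NULL);
	}
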
+
+#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
+
+static void __init pcpu_map_range(unsigned long start, unsigned long end,
+				  struct page *page)
+{
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long pte_base;
+
+	BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL));
+
+	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+		    _PAGE_CP_4U | _PAGE_CV_4U |
+		    _PAGE_P_4U | _PAGE_W_4U);
+	if (tlb_type == hypervisor)
+		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+			    _PAGE_CP_4V | _PAGE_CV_4V |
+			    _PAGE_P_4V | _PAGE_W_4V);
+
+	while (start < end) {
+		pgd_t *pgd = pgd_offset_k(start);
+		unsigned long this_end;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pud = pud_offset(pgd, start);
+		if (pud_none(*pud)) {
+			pmd_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			pud_populate(&init_mm, pud, new);
+		}
+
+		pmd = pmd_offset(pud, start);
+		if (!pmd_present(*pmd)) {
+			pte_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			pmd_populate_kernel(&init_mm, pmd, new);
+		}
+
+		pte = pte_offset_kernel(pmd, start);
+		this_end = (start + PMD_SIZE) & PMD_MASK;
+		if (this_end > end)
+			this_end = end;
+
+		while (start < this_end) {
+			unsigned long paddr = pfn << PAGE_SHIFT;
+
+			pte_val(*pte) = (paddr | pte_base);
+
+			start += PAGE_SIZE;
+			pte++;
+			pfn++;
+		}
+	}
+}
+
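
pcpu_map_range() walks pgd -> pud -> pmd, allocating intermediate
tables from bootmem as needed, and then fills in one PTE per base page.
The BUG_ON insists the backing physical address is 4MB aligned because
every PTE carries the 4MB size bit (_PAGE_SZ4MB_4U, or the _4V variant
under the hypervisor), so a whole chunk can be covered by a single 4MB
TLB entry. A small sketch of the resulting arithmetic (assumed helper,
not from the patch; sparc64 uses 8KB base pages):

	/* PTEs written by the inner loop for one cpu's chunk:
	 * 4MB / 8KB = 512. */
	static unsigned long __init ptes_per_chunk(void)
	{
		return PCPU_CHUNK_SIZE >> PAGE_SHIFT;
	}
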
 void __init setup_per_cpu_areas(void)
 {
-	unsigned long size, i, nr_possible_cpus = num_possible_cpus();
-	char *ptr;
+	size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
+	static struct vm_struct vm;
+	unsigned long delta, cpu;
+	size_t pcpu_unit_size;
+	size_t ptrs_size;
+
+	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+			       PERCPU_DYNAMIC_RESERVE);
+	dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
+
+
+	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+	pcpur_ptrs = alloc_bootmem(ptrs_size);
+
+	for_each_possible_cpu(cpu) {
+		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
+						     PCPU_CHUNK_SIZE);
+
+		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+			     PCPU_CHUNK_SIZE - pcpur_size);
+
+		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+	}
+
+	/* allocate address and map */
+	vm.flags = VM_ALLOC;
+	vm.size = num_possible_cpus() * PCPU_CHUNK_SIZE;
+	vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
+
+	for_each_possible_cpu(cpu) {
+		unsigned long start = (unsigned long) vm.addr;
+		unsigned long end;
+
+		start += cpu * PCPU_CHUNK_SIZE;
+		end = start + PCPU_CHUNK_SIZE;
+		pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
+	}
+
+	pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+						PERCPU_MODULE_RESERVE, dyn_size,
+						PCPU_CHUNK_SIZE, vm.addr, NULL);
 
-	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
+	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
 
-	for_each_possible_cpu(i) {
-		__per_cpu_offset(i) = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		ptr += size;
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu) {
+		__per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
 	}
 
 	/* Setup %g5 for the boot cpu. */
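
After pcpu_setup_first_chunk() registers the first chunk, each cpu's
offset is just the distance from the linked .data.percpu image to that
cpu's unit inside the early vmalloc area: delta translates the static
symbols to cpu 0's unit, and consecutive cpus sit exactly
pcpu_unit_size apart. A hedged sketch of the address math (illustrative
helper, not from the patch):

	/* Where cpu N's copy of a static per-cpu variable lives:
	 *   &per_cpu(var, N) == &var + __per_cpu_offset(N)
	 *                    == &var + delta + N * pcpu_unit_size */
	static unsigned long example_percpu_addr(unsigned long var_addr,
						 unsigned int cpu)
	{
		return var_addr + __per_cpu_offset(cpu);
	}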