path: root/arch/tile/kernel/setup.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-16 16:54:16 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-16 16:54:16 -0500
commit	0db2812a5240f2663b92d8d4b761122dd2e0c6c3 (patch)
tree	4966ada2a4a75dc89b7038ac1694a8acc4d3aa3f /arch/tile/kernel/setup.c
parent	eb64c3c6cdb8fa8a4d324eb71a9033b62e150918 (diff)
parent	c47b15c4928c46b889a1ea2e9b54f4110514e1c3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull arch/tile updates from Chris Metcalf:
 "Note that one of the changes converts my old cmetcalf@tilera.com email
  in MAINTAINERS to the cmetcalf@ezchip.com email that you see on this
  email"

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  arch/tile: update MAINTAINERS email to EZchip
  tile: avoid undefined behavior with regs[TREG_TP] etc
  arch: tile: kernel: kgdb.c: Use memcpy() instead of pointer copy one by one
  tile: Use the more common pr_warn instead of pr_warning
  arch: tile: gxio: Export symbols for module using in 'mpipe.c'
  arch: tile: kernel: signal.c: Use __copy_from/to_user() instead of __get/put_user()
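One recurring pattern in the setup.c diff below is coalescing printk()-family format strings that had been split across source lines (so messages stay greppable) and dropping trailing periods. A minimal illustrative sketch of that pattern follows; the function name, message, and arguments are hypothetical, not taken from setup.c.

    #include <linux/printk.h>

    /*
     * Illustrative sketch only: shows the message cleanup applied in this
     * pull -- coalescing a split format string and preferring pr_warn()
     * over the older pr_warning() alias. Everything here is hypothetical.
     */
    static void example_report(unsigned long start, unsigned long size)
    {
            /* Before: the format string is split and cannot be grepped whole. */
            pr_warning("example range %#lx..%#lx is"
                       " not aligned\n", start, start + size);

            /* After: one coalesced format string, arguments realigned. */
            pr_warn("example range %#lx..%#lx is not aligned\n",
                    start, start + size);
    }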
Diffstat (limited to 'arch/tile/kernel/setup.c')
-rw-r--r--	arch/tile/kernel/setup.c	36
1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 7f079bbfdf4c..864eea69556d 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -130,7 +130,7 @@ static int __init setup_maxmem(char *str)
 
 	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used to no more than %dMB\n",
-	        maxmem_pfn >> (20 - PAGE_SHIFT));
+		maxmem_pfn >> (20 - PAGE_SHIFT));
 	return 0;
 }
 early_param("maxmem", setup_maxmem);
@@ -149,7 +149,7 @@ static int __init setup_maxnodemem(char *str)
 	maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
 		(HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
-	        node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
+		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
 	return 0;
 }
 early_param("maxnodemem", setup_maxnodemem);
@@ -417,8 +417,7 @@ static void __init setup_memory(void)
 			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
 			range.size -= (range.start - start_pa);
 			range.size &= HPAGE_MASK;
-			pr_err("Range not hugepage-aligned: %#llx..%#llx:"
-			       " now %#llx-%#llx\n",
+			pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
 			       start_pa, start_pa + orig_size,
 			       range.start, range.start + range.size);
 		}
@@ -437,8 +436,8 @@ static void __init setup_memory(void)
 		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
 			int max_size = maxnodemem_pfn[i];
 			if (max_size > 0) {
-				pr_err("Maxnodemem reduced node %d to"
-				       " %d pages\n", i, max_size);
+				pr_err("Maxnodemem reduced node %d to %d pages\n",
+				       i, max_size);
 				range.size = PFN_PHYS(max_size);
 			} else {
 				pr_err("Maxnodemem disabled node %d\n", i);
@@ -490,8 +489,8 @@ static void __init setup_memory(void)
 				NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
 			if (end < pci_reserve_end_pfn + percpu_pages) {
 				end = pci_reserve_start_pfn;
-				pr_err("PCI mapping region reduced node %d to"
-				       " %ld pages\n", i, end - start);
+				pr_err("PCI mapping region reduced node %d to %ld pages\n",
+				       i, end - start);
 			}
 		}
 #endif
@@ -555,10 +554,9 @@ static void __init setup_memory(void)
 			MAXMEM_PFN : mappable_physpages;
 	highmem_pages = (long) (physpages - lowmem_pages);
 
-	pr_notice("%ldMB HIGHMEM available.\n",
+	pr_notice("%ldMB HIGHMEM available\n",
 		  pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
-	pr_notice("%ldMB LOWMEM available.\n",
-		  pages_to_mb(lowmem_pages));
+	pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages));
 #else
 	/* Set max_low_pfn based on what node 0 can directly address. */
 	max_low_pfn = node_end_pfn[0];
@@ -571,8 +569,8 @@ static void __init setup_memory(void)
 		max_pfn = MAXMEM_PFN;
 		node_end_pfn[0] = MAXMEM_PFN;
 	} else {
-		pr_notice("%ldMB memory available.\n",
+		pr_notice("%ldMB memory available\n",
 			  pages_to_mb(node_end_pfn[0]));
 	}
 	for (i = 1; i < MAX_NUMNODES; ++i) {
 		node_start_pfn[i] = 0;
@@ -587,8 +585,7 @@ static void __init setup_memory(void)
 		if (pages)
 			high_memory = pfn_to_kaddr(node_end_pfn[i]);
 	}
-	pr_notice("%ldMB memory available.\n",
-		  pages_to_mb(lowmem_pages));
+	pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages));
 #endif
 #endif
 }
@@ -1535,8 +1532,7 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
 
 	BUG_ON(pgd_addr_invalid(addr));
 	if (addr < VMALLOC_START || addr >= VMALLOC_END)
-		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
-		      " try increasing CONFIG_VMALLOC_RESERVE\n",
+		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
 		      addr, VMALLOC_START, VMALLOC_END);
 
 	pgd = swapper_pg_dir + pgd_index(addr);
@@ -1591,8 +1587,8 @@ void __init setup_per_cpu_areas(void)
 		lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
 		ptep = virt_to_kpte(lowmem_va);
 		if (pte_huge(*ptep)) {
-			printk(KERN_DEBUG "early shatter of huge page"
-			       " at %#lx\n", lowmem_va);
+			printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
+			       lowmem_va);
 			shatter_pmd((pmd_t *)ptep);
 			ptep = virt_to_kpte(lowmem_va);
 			BUG_ON(pte_huge(*ptep));