Diffstat (limited to 'arch/sparc/mm')
 -rw-r--r--  arch/sparc/mm/fault_64.c |  9 +++-
 -rw-r--r--  arch/sparc/mm/gup.c      | 59 ++++++++++++++++++++++++-
 -rw-r--r--  arch/sparc/mm/init_32.c  |  2 +-
 -rw-r--r--  arch/sparc/mm/init_64.c  | 91 ++++++++++++++++-----------
 -rw-r--r--  arch/sparc/mm/tlb.c      | 11 ++++-
 -rw-r--r--  arch/sparc/mm/tsb.c      |  2 +-
 6 files changed, 132 insertions(+), 42 deletions(-)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 097aee763af3..5062ff389e83 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -472,8 +472,13 @@ good_area:
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mm_rss = mm->context.huge_pte_count;
 	if (unlikely(mm_rss >
-		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
-		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
+		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
+			tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+		else
+			hugetlb_setup(regs);
+
+	}
 #endif
 	return;
 
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 42c55df3aec3..01ee23dd724d 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 	return 1;
 }
 
+static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+			unsigned long end, int write, struct page **pages,
+			int *nr)
+{
+	struct page *head, *page, *tail;
+	u32 mask;
+	int refs;
+
+	mask = PMD_HUGE_PRESENT;
+	if (write)
+		mask |= PMD_HUGE_WRITE;
+	if ((pmd_val(pmd) & mask) != mask)
+		return 0;
+
+	refs = 0;
+	head = pmd_page(pmd);
+	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	tail = page;
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	if (!page_cache_add_speculative(head, refs)) {
+		*nr -= refs;
+		return 0;
+	}
+
+	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+		*nr -= refs;
+		while (refs--)
+			put_page(head);
+		return 0;
+	}
+
+	/* Any tail pages need their mapcount reference taken before we
+	 * return.
+	 */
+	while (refs--) {
+		if (PageTail(tail))
+			get_huge_page_tail(tail);
+		tail++;
+	}
+
+	return 1;
+}
+
 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 			 int write, struct page **pages, int *nr)
 {
@@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		pmd_t pmd = *pmdp;
 
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(pmd))
+		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
-		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+		if (unlikely(pmd_large(pmd))) {
+			if (!gup_huge_pmd(pmdp, pmd, addr, next,
+					  write, pages, nr))
+				return 0;
+		} else if (!gup_pte_range(pmd, addr, next, write,
+					  pages, nr))
 			return 0;
 	} while (pmdp++, addr = next, addr != end);
 
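
The gup_huge_pmd() added above follows the classic lockless get_user_pages() protocol: count the pages covered by the huge PMD, take all of their references on the compound head at once with page_cache_add_speculative(), then re-read the PMD and back every reference out if it changed underneath us. A minimal userspace analogue of that grab-then-revalidate pattern, with hypothetical names and C11 atomics rather than kernel primitives, looks roughly like this:

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int refcount;
};

/* Hypothetical stand-in for page_cache_add_speculative(): take @refs
 * references in one shot, but only if the object is still live. */
static bool get_refs_speculative(struct obj *o, int refs)
{
	int old = atomic_load(&o->refcount);

	do {
		if (old == 0)	/* concurrently freed: back off */
			return false;
	} while (!atomic_compare_exchange_weak(&o->refcount, &old, old + refs));
	return true;
}

/* Grab-then-revalidate: pin the object, then re-read the lockless
 * lookup source (the PMD in gup_huge_pmd()) and undo the pin if it
 * changed while we were taking references. */
static bool lookup_pinned(struct obj *_Atomic *slot, int refs, struct obj **out)
{
	struct obj *o = atomic_load(slot);

	if (!o || !get_refs_speculative(o, refs))
		return false;
	if (atomic_load(slot) != o) {	/* lost a race: roll back */
		atomic_fetch_sub(&o->refcount, refs);
		return false;
	}
	*out = o;
	return true;
}

The revalidation step is what makes the lock-free fast path safe: a racing munmap() or THP split changes the PMD, so the stale references are dropped before anyone can observe them.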
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index dde85ef1c56d..48e0c030e8f5 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -57,7 +57,7 @@ void show_mem(unsigned int filter)
 	printk("Mem-info:\n");
 	show_free_areas(filter);
 	printk("Free swap:       %6ldkB\n",
-	       nr_swap_pages << (PAGE_SHIFT-10));
+	       get_nr_swap_pages() << (PAGE_SHIFT-10));
 	printk("%ld pages of RAM\n", totalram_pages);
 	printk("%ld free pages\n", nr_free_pages());
 }
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index c3b72423c846..1588d33d5492 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
 	unsigned long tag;
 
+	if (unlikely(!tsb))
+		return;
+
 	tsb += ((address >> tsb_hash_shift) &
 		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, tte);
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	if ((tlb_type == hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+	    (tlb_type != hypervisor &&
+	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+		return true;
+	return false;
+}
+#endif
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	unsigned long tsb_index, tsb_hash_shift, flags;
 	struct mm_struct *mm;
+	unsigned long flags;
 	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
@@ -335,25 +350,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	mm = vma->vm_mm;
 
-	tsb_index = MM_TSB_BASE;
-	tsb_hash_shift = PAGE_SHIFT;
-
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
-		if ((tlb_type == hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-		    (tlb_type != hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
-			tsb_index = MM_TSB_HUGE;
-			tsb_hash_shift = HPAGE_SHIFT;
-		}
-	}
+	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
+					address, pte_val(pte));
+	else
 #endif
-
-	__update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift,
-				address, pte_val(pte));
+		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+					address, pte_val(pte));
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
@@ -2021,6 +2027,16 @@ static void __init patch_tlb_miss_handler_bitmap(void)
 	flushi(&valid_addr_bitmap_insn[0]);
 }
 
+static void __init register_page_bootmem_info(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int i;
+
+	for_each_online_node(i)
+		if (NODE_DATA(i)->node_spanned_pages)
+			register_page_bootmem_info_node(NODE_DATA(i));
+#endif
+}
 void __init mem_init(void)
 {
 	unsigned long codepages, datapages, initpages;
@@ -2038,20 +2054,8 @@ void __init mem_init(void)
 
 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-	{
-		int i;
-		for_each_online_node(i) {
-			if (NODE_DATA(i)->node_spanned_pages != 0) {
-				totalram_pages +=
-					free_all_bootmem_node(NODE_DATA(i));
-			}
-		}
-		totalram_pages += free_low_memory_core_early(MAX_NUMNODES);
-	}
-#else
+	register_page_bootmem_info();
 	totalram_pages = free_all_bootmem();
-#endif
 
 	/* We subtract one to account for the mem_map_zero page
 	 * allocated below.
@@ -2231,6 +2235,11 @@ void __meminit vmemmap_populate_print_last(void)
 		node_start = 0;
 	}
 }
+
+void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+{
+}
+
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 static void prot_init_common(unsigned long page_none,
@@ -2712,14 +2721,28 @@ static void context_reload(void *__data)
 	load_secondary_context(mm);
 }
 
-void hugetlb_setup(struct mm_struct *mm)
+void hugetlb_setup(struct pt_regs *regs)
 {
-	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+	struct mm_struct *mm = current->mm;
+	struct tsb_config *tp;
 
-	if (likely(tp->tsb != NULL))
-		return;
+	if (in_atomic() || !mm) {
+		const struct exception_table_entry *entry;
+
+		entry = search_exception_tables(regs->tpc);
+		if (entry) {
+			regs->tpc = entry->fixup;
+			regs->tnpc = regs->tpc + 4;
+			return;
+		}
+		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
+		die_if_kernel("HugeTSB in atomic", regs);
+	}
 
-	tsb_grow(mm, MM_TSB_HUGE, 0);
+	tp = &mm->context.tsb_block[MM_TSB_HUGE];
+	if (likely(tp->tsb == NULL))
+		tsb_grow(mm, MM_TSB_HUGE, 0);
+
 	tsb_context_switch(mm);
 	smp_tsb_sync(mm);
 
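
The new in_atomic() branch in hugetlb_setup() leans on the kernel's exception-table mechanism: if the trapping PC has a registered fixup, execution resumes at the fixup address instead of attempting the TSB allocation (tnpc is set to tpc + 4 because SPARC exposes both the trap PC and the delayed next-PC). Schematically, and only as a sketch whose field widths differ from the real sparc64 definition, an entry pairs a faulting instruction address with its recovery address:

/* Sketch only -- not the actual sparc64 layout. */
struct exception_table_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* address to resume at when it does */
};

/* The fixup applied by hugetlb_setup() above, schematically: */
static void apply_fixup(struct pt_regs *regs,
			const struct exception_table_entry *entry)
{
	regs->tpc  = entry->fixup;	/* redirect the trapping PC */
	regs->tnpc = regs->tpc + 4;	/* and the delayed next-PC  */
}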
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3e8fec391fe0..ba6ae7ffdc2c 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -135,8 +135,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			mm->context.huge_pte_count++;
 		else
 			mm->context.huge_pte_count--;
-		if (mm->context.huge_pte_count == 1)
-			hugetlb_setup(mm);
+
+		/* Do not try to allocate the TSB hash table if we
+		 * don't have one already.  We have various locks held
+		 * and thus we'll end up doing a GFP_KERNEL allocation
+		 * in an atomic context.
+		 *
+		 * Instead, we let the first TLB miss on a hugepage
+		 * take care of this.
+		 */
 	}
 
 	if (!pmd_none(orig)) {
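
The comment block above is the heart of the change: set_pmd_at() runs with page-table locks held, and tsb_grow() allocates with GFP_KERNEL, which may sleep. A contrived sketch of the bug being avoided (illustrative only, not code from this patch):

#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustration only: why the huge TSB cannot be allocated here. */
static void broken_alloc_under_lock(spinlock_t *lock, size_t size)
{
	void *buf;

	spin_lock(lock);			/* atomic context begins */
	buf = kmalloc(size, GFP_KERNEL);	/* BUG: may sleep to reclaim */
	spin_unlock(lock);
	kfree(buf);
}

Deferring the work to the first huge-page TLB miss means the allocation instead happens via hugetlb_setup() in the fault path, where sleeping is allowed.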
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 7f6474347491..428982b9becf 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -314,7 +314,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 retry_tsb_alloc:
 	gfp_flags = GFP_KERNEL;
 	if (new_size > (PAGE_SIZE * 2))
-		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;
+		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;
 
 	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
 					gfp_flags, numa_node_id());
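
The one-character tsb.c fix matters because __GFP_NOWARN and __GFP_NORETRY are modifier bits, not a base allocation mask: plain `=` discarded GFP_KERNEL (and with it the ability to sleep and reclaim), while `|=` keeps the base flags and merely softens failure handling for multi-page TSBs. A tiny standalone demonstration, using made-up flag values rather than the real kernel bit assignments:

#include <stdio.h>

#define GFP_WAIT    0x01u	/* may sleep and reclaim */
#define GFP_IO      0x02u
#define GFP_FS      0x04u
#define GFP_KERNEL  (GFP_WAIT | GFP_IO | GFP_FS)
#define GFP_NOWARN  0x08u
#define GFP_NORETRY 0x10u

int main(void)
{
	unsigned int flags = GFP_KERNEL;

	flags = GFP_NOWARN | GFP_NORETRY;	/* bug: base flags lost  */
	printf("assign: %#x\n", flags);		/* 0x18: cannot reclaim  */

	flags = GFP_KERNEL;
	flags |= GFP_NOWARN | GFP_NORETRY;	/* fix: modifiers added  */
	printf("or-in:  %#x\n", flags);		/* 0x1f                  */
	return 0;
}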