author    Linus Torvalds <torvalds@linux-foundation.org>  2012-10-09 03:23:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-09 03:23:15 -0400
commit    9e2d8656f5e8aa214e66b462680cf86b210b74a8 (patch)
tree      f67d62e896cedf75599ea45f9ecf9999c6ad24cd /arch/sparc/mm/tlb.c
parent    1ea4f4f8405cc1ceec23f2d261bc3775785e6712 (diff)
parent    9e695d2ecc8451cc2c1603d60b5c8e7f5581923a (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge patches from Andrew Morton:
 "A few misc things and very nearly all of the MM tree. A tremendous
  amount of stuff (again), including a significant rbtree library
  rework."

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (160 commits)
  sparc64: Support transparent huge pages.
  mm: thp: Use more portable PMD clearing sequenece in zap_huge_pmd().
  mm: Add and use update_mmu_cache_pmd() in transparent huge page code.
  sparc64: Document PGD and PMD layout.
  sparc64: Eliminate PTE table memory wastage.
  sparc64: Halve the size of PTE tables
  sparc64: Only support 4MB huge pages and 8KB base pages.
  memory-hotplug: suppress "Trying to free nonexistent resource <XXXXXXXXXXXXXXXX-YYYYYYYYYYYYYYYY>" warning
  mm: memcg: clean up mm_match_cgroup() signature
  mm: document PageHuge somewhat
  mm: use %pK for /proc/vmallocinfo
  mm, thp: fix mlock statistics
  mm, thp: fix mapped pages avoiding unevictable list on mlock
  memory-hotplug: update memory block's state and notify userspace
  memory-hotplug: preparation to notify memory block's state at memory hot remove
  mm: avoid section mismatch warning for memblock_type_name
  make GFP_NOTRACK definition unconditional
  cma: decrease cc.nr_migratepages after reclaiming pagelist
  CMA: migrate mlocked pages
  kpageflags: fix wrong KPF_THP on non-huge compound pages
  ...
Diffstat (limited to 'arch/sparc/mm/tlb.c')
-rw-r--r--  arch/sparc/mm/tlb.c | 118
 1 file changed, 102 insertions(+), 16 deletions(-)
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b1f279cd00bf..3e8fec391fe0 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -43,16 +43,37 @@ void flush_tlb_pending(void)
 	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-		   pte_t *ptep, pte_t orig, int fullmm)
+static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
+			      bool exec)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
-	if (pte_exec(orig))
+	if (exec)
 		vaddr |= 0x1UL;
 
+	nr = tb->tlb_nr;
+
+	if (unlikely(nr != 0 && mm != tb->mm)) {
+		flush_tlb_pending();
+		nr = 0;
+	}
+
+	if (nr == 0)
+		tb->mm = mm;
+
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
+	if (nr >= TLB_BATCH_NR)
+		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
+}
+
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
+{
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
@@ -77,26 +98,91 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 no_cache_flush:
+	if (!fullmm)
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+			       pmd_t pmd, bool exec)
+{
+	unsigned long end;
+	pte_t *pte;
+
+	pte = pte_offset_map(&pmd, vaddr);
+	end = vaddr + HPAGE_SIZE;
+	while (vaddr < end) {
+		if (pte_val(*pte) & _PAGE_VALID)
+			tlb_batch_add_one(mm, vaddr, exec);
+		pte++;
+		vaddr += PAGE_SIZE;
+	}
+	pte_unmap(pte);
+}
 
-	if (fullmm) {
-		put_cpu_var(tlb_batch);
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t orig = *pmdp;
+
+	*pmdp = pmd;
+
+	if (mm == &init_mm)
 		return;
+
+	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
+		if (pmd_val(pmd) & PMD_ISHUGE)
+			mm->context.huge_pte_count++;
+		else
+			mm->context.huge_pte_count--;
+		if (mm->context.huge_pte_count == 1)
+			hugetlb_setup(mm);
 	}
 
-	nr = tb->tlb_nr;
+	if (!pmd_none(orig)) {
+		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
 
-	if (unlikely(nr != 0 && mm != tb->mm)) {
-		flush_tlb_pending();
-		nr = 0;
+		addr &= HPAGE_MASK;
+		if (pmd_val(orig) & PMD_ISHUGE)
+			tlb_batch_add_one(mm, addr, exec);
+		else
+			tlb_batch_pmd_scan(mm, addr, orig, exec);
 	}
+}
 
-	if (nr == 0)
-		tb->mm = mm;
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
+{
+	struct list_head *lh = (struct list_head *) pgtable;
 
-	tb->vaddrs[nr] = vaddr;
-	tb->tlb_nr = ++nr;
-	if (nr >= TLB_BATCH_NR)
-		flush_tlb_pending();
+	assert_spin_locked(&mm->page_table_lock);
 
-	put_cpu_var(tlb_batch);
+	/* FIFO */
+	if (!mm->pmd_huge_pte)
+		INIT_LIST_HEAD(lh);
+	else
+		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
+	mm->pmd_huge_pte = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
+{
+	struct list_head *lh;
+	pgtable_t pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	pgtable = mm->pmd_huge_pte;
+	lh = (struct list_head *) pgtable;
+	if (list_empty(lh))
+		mm->pmd_huge_pte = NULL;
+	else {
+		mm->pmd_huge_pte = (pgtable_t) lh->next;
+		list_del(lh);
+	}
+	pte_val(pgtable[0]) = 0;
+	pte_val(pgtable[1]) = 0;
+
+	return pgtable;
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
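
The deposit/withdraw pair added above reuses the deposited page-table page itself as the struct list_head that chains spare page tables, so no separate allocation is needed for the bookkeeping. Below is a minimal user-space sketch of just that linking pattern, under stated assumptions: the names mm_sim, deposit and withdraw are hypothetical stand-ins, pgtable_t is reduced to a plain buffer, and the kernel's page_table_lock assertion and pte_val() clearing are omitted.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

typedef void *pgtable_t;	/* stand-in: any buffer big enough to hold a list_head */

struct mm_sim {
	pgtable_t pmd_huge_pte;	/* head of the chain of deposited page tables */
};

static void deposit(struct mm_sim *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);	/* first deposit starts the ring */
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;	/* head always points at the newest deposit */
}

static pgtable_t withdraw(struct mm_sim *mm)
{
	pgtable_t pgtable = mm->pmd_huge_pte;
	struct list_head *lh = (struct list_head *) pgtable;

	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;	/* that was the last deposited page */
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	return pgtable;
}

int main(void)
{
	struct mm_sim mm = { NULL };
	pgtable_t a = malloc(sizeof(struct list_head));
	pgtable_t b = malloc(sizeof(struct list_head));

	deposit(&mm, a);
	deposit(&mm, b);
	assert(withdraw(&mm) == b);	/* newest deposit comes back first */
	assert(withdraw(&mm) == a);
	assert(mm.pmd_huge_pte == NULL);
	free(a);
	free(b);
	puts("ok");
	return 0;
}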