path: root/arch/sparc/mm/tlb.c
author    David Miller <davem@davemloft.net>    2012-10-08 19:34:29 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-10-09 03:23:06 -0400
commit    9e695d2ecc8451cc2c1603d60b5c8e7f5581923a (patch)
tree      77528ae73fe70d1bae3ced18a50e59fd81d2372c /arch/sparc/mm/tlb.c
parent    f5c8ad47284ca01dafc37da5a72bb9644174d387 (diff)
sparc64: Support transparent huge pages.
This is relatively easy since PMD's now cover exactly 4MB of memory.

Our PMD entries are 32-bits each, so we use a special encoding. The lowest bit, PMD_ISHUGE, determines the interpretation. This is possible because sparc64's page tables are purely software entities so we can use whatever encoding scheme we want. We just have to make the TLB miss assembler page table walkers aware of the layout.

set_pmd_at() works much like set_pte_at() but it has to operate in two regimes. In the first regime we are transitioning to a huge page from a table of non-huge PTEs, so we have to queue up TLB flushes based upon what mappings are valid in the PTE table. In the second regime we are going from huge-page to non-huge-page, and in that case we need only queue up a single TLB flush to push out the huge page mapping.

We still have 5 bits remaining in the huge PMD encoding so we can very likely support any new pieces of THP state tracking that might get added in the future.

With lots of help from Johannes Weiner.

Signed-off-by: David S. Miller <davem@davemloft.net>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
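As a reading aid for the two flush regimes described above, the following is a minimal standalone C sketch of the decision that the patch implements in set_pmd_at() and tlb_batch_pmd_scan(). All names in it (toy_pmd_t, TOY_PMD_ISHUGE, queue_flush, and so on) are illustrative stand-ins, not the kernel's definitions; the authoritative code is in the diff below.

/* Standalone sketch of the two flush regimes.  All identifiers here are
 * illustrative stand-ins, not the kernel's definitions. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_PMD_ISHUGE   0x1UL   /* stand-in for PMD_ISHUGE  */
#define TOY_PAGE_VALID   0x1UL   /* stand-in for _PAGE_VALID */
#define TOY_PTES_PER_PMD 4       /* tiny table for the demo  */
#define TOY_PAGE_SIZE    0x2000UL

typedef unsigned long toy_pmd_t;
typedef unsigned long toy_pte_t;

static void queue_flush(unsigned long vaddr)
{
	printf("queue TLB flush for %#lx\n", vaddr);
}

/* Regime 1: the old PMD pointed at a table of non-huge PTEs, so every
 * valid entry gets its own queued flush.  Regime 2: the old PMD was a
 * huge mapping, so a single flush of the huge page suffices. */
static void flush_old_mapping(toy_pmd_t orig, const toy_pte_t *old_ptes,
			      unsigned long addr)
{
	if (orig & TOY_PMD_ISHUGE) {
		queue_flush(addr);
	} else {
		for (int i = 0; i < TOY_PTES_PER_PMD; i++)
			if (old_ptes[i] & TOY_PAGE_VALID)
				queue_flush(addr + i * TOY_PAGE_SIZE);
	}
}

int main(void)
{
	toy_pte_t table[TOY_PTES_PER_PMD] = { TOY_PAGE_VALID, 0, TOY_PAGE_VALID, 0 };

	flush_old_mapping(0, table, 0x400000UL);              /* regime 1 */
	flush_old_mapping(TOY_PMD_ISHUGE, NULL, 0x800000UL);  /* regime 2 */
	return 0;
}

The real set_pmd_at() in the hunks below additionally keeps mm->context.huge_pte_count in sync and skips init_mm entirely.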
Diffstat (limited to 'arch/sparc/mm/tlb.c')
-rw-r--r--    arch/sparc/mm/tlb.c    118
1 file changed, 102 insertions(+), 16 deletions(-)
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b1f279cd00bf..3e8fec391fe0 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -43,16 +43,37 @@ void flush_tlb_pending(void)
 	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-		   pte_t *ptep, pte_t orig, int fullmm)
+static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
+			      bool exec)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
-	if (pte_exec(orig))
+	if (exec)
 		vaddr |= 0x1UL;
 
+	nr = tb->tlb_nr;
+
+	if (unlikely(nr != 0 && mm != tb->mm)) {
+		flush_tlb_pending();
+		nr = 0;
+	}
+
+	if (nr == 0)
+		tb->mm = mm;
+
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
+	if (nr >= TLB_BATCH_NR)
+		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
+}
+
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
+{
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
@@ -77,26 +98,91 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 no_cache_flush:
+	if (!fullmm)
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+			       pmd_t pmd, bool exec)
+{
+	unsigned long end;
+	pte_t *pte;
+
+	pte = pte_offset_map(&pmd, vaddr);
+	end = vaddr + HPAGE_SIZE;
+	while (vaddr < end) {
+		if (pte_val(*pte) & _PAGE_VALID)
+			tlb_batch_add_one(mm, vaddr, exec);
+		pte++;
+		vaddr += PAGE_SIZE;
+	}
+	pte_unmap(pte);
+}
 
-	if (fullmm) {
-		put_cpu_var(tlb_batch);
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t orig = *pmdp;
+
+	*pmdp = pmd;
+
+	if (mm == &init_mm)
 		return;
+
+	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
+		if (pmd_val(pmd) & PMD_ISHUGE)
+			mm->context.huge_pte_count++;
+		else
+			mm->context.huge_pte_count--;
+		if (mm->context.huge_pte_count == 1)
+			hugetlb_setup(mm);
 	}
 
-	nr = tb->tlb_nr;
+	if (!pmd_none(orig)) {
+		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
 
-	if (unlikely(nr != 0 && mm != tb->mm)) {
-		flush_tlb_pending();
-		nr = 0;
+		addr &= HPAGE_MASK;
+		if (pmd_val(orig) & PMD_ISHUGE)
+			tlb_batch_add_one(mm, addr, exec);
+		else
+			tlb_batch_pmd_scan(mm, addr, orig, exec);
 	}
+}
 
-	if (nr == 0)
-		tb->mm = mm;
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
+{
+	struct list_head *lh = (struct list_head *) pgtable;
 
-	tb->vaddrs[nr] = vaddr;
-	tb->tlb_nr = ++nr;
-	if (nr >= TLB_BATCH_NR)
-		flush_tlb_pending();
+	assert_spin_locked(&mm->page_table_lock);
 
-	put_cpu_var(tlb_batch);
+	/* FIFO */
+	if (!mm->pmd_huge_pte)
+		INIT_LIST_HEAD(lh);
+	else
+		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
+	mm->pmd_huge_pte = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
+{
+	struct list_head *lh;
+	pgtable_t pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	pgtable = mm->pmd_huge_pte;
+	lh = (struct list_head *) pgtable;
+	if (list_empty(lh))
+		mm->pmd_huge_pte = NULL;
+	else {
+		mm->pmd_huge_pte = (pgtable_t) lh->next;
+		list_del(lh);
+	}
+	pte_val(pgtable[0]) = 0;
+	pte_val(pgtable[1]) = 0;
+
+	return pgtable;
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */