aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-07-29 16:23:18 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-29 16:23:18 -0400
commit86505fc06b6f1ee8a13473053a41ed01948e2d4f (patch)
tree0bd9b7f5a32b7cc8d1aaf19c0085b64afa56a262
parent9d3bc3d4a42242ff73ae28afb080508cf5070161 (diff)
parent7bc3777ca19cf9ecc5533980210f29c51df7fe5e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc updates from David Miller: 1) Double spin lock bug in sunhv serial driver, from Dan Carpenter. 2) Use correct RSS estimate when determining whether to grow the huge TSB or not, from Mike Kravetz. 3) Don't use full three level page tables for hugepages, PMD level is sufficient. From Nitin Gupta. 4) Mask out extraneous bits from TSB_TAG_ACCESS register, we only want the address bits. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc: sparc64: Trim page tables for 8M hugepages sparc64 mm: Fix base TSB sizing when hugetlb pages are used sparc: serial: sunhv: fix a double lock bug sparc32: off by ones in BUG_ON() sparc: Don't leak context bits into thread->fault_address
-rw-r--r--arch/sparc/include/asm/hugetlb.h12
-rw-r--r--arch/sparc/include/asm/mmu_64.h3
-rw-r--r--arch/sparc/include/asm/pgtable_64.h7
-rw-r--r--arch/sparc/include/asm/tsb.h2
-rw-r--r--arch/sparc/kernel/dtlb_prot.S4
-rw-r--r--arch/sparc/kernel/irq_32.c4
-rw-r--r--arch/sparc/kernel/ktlb.S12
-rw-r--r--arch/sparc/kernel/tsb.S12
-rw-r--r--arch/sparc/mm/fault_64.c10
-rw-r--r--arch/sparc/mm/hugetlbpage.c170
-rw-r--r--arch/sparc/mm/init_64.c7
-rw-r--r--arch/sparc/mm/tlb.c4
-rw-r--r--arch/sparc/mm/tsb.c14
-rw-r--r--drivers/tty/serial/sunhv.c6
14 files changed, 173 insertions(+), 94 deletions(-)
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 139e711ff80c..dcbf985ab243 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -31,14 +31,6 @@ static inline int prepare_hugepage_range(struct file *file,
31 return 0; 31 return 0;
32} 32}
33 33
34static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
35 unsigned long addr, unsigned long end,
36 unsigned long floor,
37 unsigned long ceiling)
38{
39 free_pgd_range(tlb, addr, end, floor, ceiling);
40}
41
42static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, 34static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
43 unsigned long addr, pte_t *ptep) 35 unsigned long addr, pte_t *ptep)
44{ 36{
@@ -82,4 +74,8 @@ static inline void arch_clear_hugepage_flags(struct page *page)
82{ 74{
83} 75}
84 76
77void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
78 unsigned long end, unsigned long floor,
79 unsigned long ceiling);
80
85#endif /* _ASM_SPARC64_HUGETLB_H */ 81#endif /* _ASM_SPARC64_HUGETLB_H */
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 70067ce184b1..f7de0dbc38af 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -92,7 +92,8 @@ struct tsb_config {
92typedef struct { 92typedef struct {
93 spinlock_t lock; 93 spinlock_t lock;
94 unsigned long sparc64_ctx_val; 94 unsigned long sparc64_ctx_val;
95 unsigned long huge_pte_count; 95 unsigned long hugetlb_pte_count;
96 unsigned long thp_pte_count;
96 struct tsb_config tsb_block[MM_NUM_TSBS]; 97 struct tsb_config tsb_block[MM_NUM_TSBS];
97 struct hv_tsb_descr tsb_descr[MM_NUM_TSBS]; 98 struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
98} mm_context_t; 99} mm_context_t;
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index e7d82803a48f..1fb317fbc0b3 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -395,7 +395,7 @@ static inline unsigned long __pte_huge_mask(void)
395 395
396static inline pte_t pte_mkhuge(pte_t pte) 396static inline pte_t pte_mkhuge(pte_t pte)
397{ 397{
398 return __pte(pte_val(pte) | __pte_huge_mask()); 398 return __pte(pte_val(pte) | _PAGE_PMD_HUGE | __pte_huge_mask());
399} 399}
400 400
401static inline bool is_hugetlb_pte(pte_t pte) 401static inline bool is_hugetlb_pte(pte_t pte)
@@ -403,6 +403,11 @@ static inline bool is_hugetlb_pte(pte_t pte)
403 return !!(pte_val(pte) & __pte_huge_mask()); 403 return !!(pte_val(pte) & __pte_huge_mask());
404} 404}
405 405
406static inline bool is_hugetlb_pmd(pmd_t pmd)
407{
408 return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
409}
410
406#ifdef CONFIG_TRANSPARENT_HUGEPAGE 411#ifdef CONFIG_TRANSPARENT_HUGEPAGE
407static inline pmd_t pmd_mkhuge(pmd_t pmd) 412static inline pmd_t pmd_mkhuge(pmd_t pmd)
408{ 413{
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index c6a155c3904e..32258e08da03 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -203,7 +203,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
203 * We have to propagate the 4MB bit of the virtual address 203 * We have to propagate the 4MB bit of the virtual address
204 * because we are fabricating 8MB pages using 4MB hw pages. 204 * because we are fabricating 8MB pages using 4MB hw pages.
205 */ 205 */
206#ifdef CONFIG_TRANSPARENT_HUGEPAGE 206#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
207#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ 207#define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
208 brz,pn REG1, FAIL_LABEL; \ 208 brz,pn REG1, FAIL_LABEL; \
209 sethi %uhi(_PAGE_PMD_HUGE), REG2; \ 209 sethi %uhi(_PAGE_PMD_HUGE), REG2; \
diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S
index d668ca149e64..4087a62f96b0 100644
--- a/arch/sparc/kernel/dtlb_prot.S
+++ b/arch/sparc/kernel/dtlb_prot.S
@@ -25,13 +25,13 @@
25 25
26/* PROT ** ICACHE line 2: More real fault processing */ 26/* PROT ** ICACHE line 2: More real fault processing */
27 ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5 27 ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
28 srlx %g5, PAGE_SHIFT, %g5
29 sllx %g5, PAGE_SHIFT, %g5 ! Clear context ID bits
28 bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup 30 bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
29 mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 31 mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
30 ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault 32 ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
31 nop 33 nop
32 nop 34 nop
33 nop
34 nop
35 35
36/* PROT ** ICACHE line 3: Unused... */ 36/* PROT ** ICACHE line 3: Unused... */
37 nop 37 nop
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index a979e99f8751..cac4a5554c0e 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -165,7 +165,7 @@ void irq_link(unsigned int irq)
165 165
166 p = &irq_table[irq]; 166 p = &irq_table[irq];
167 pil = p->pil; 167 pil = p->pil;
168 BUG_ON(pil > SUN4D_MAX_IRQ); 168 BUG_ON(pil >= SUN4D_MAX_IRQ);
169 p->next = irq_map[pil]; 169 p->next = irq_map[pil];
170 irq_map[pil] = p; 170 irq_map[pil] = p;
171 171
@@ -182,7 +182,7 @@ void irq_unlink(unsigned int irq)
182 spin_lock_irqsave(&irq_map_lock, flags); 182 spin_lock_irqsave(&irq_map_lock, flags);
183 183
184 p = &irq_table[irq]; 184 p = &irq_table[irq];
185 BUG_ON(p->pil > SUN4D_MAX_IRQ); 185 BUG_ON(p->pil >= SUN4D_MAX_IRQ);
186 pnext = &irq_map[p->pil]; 186 pnext = &irq_map[p->pil];
187 while (*pnext != p) 187 while (*pnext != p)
188 pnext = &(*pnext)->next; 188 pnext = &(*pnext)->next;
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index ef0d8e9e1210..f22bec0db645 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -20,6 +20,10 @@ kvmap_itlb:
20 mov TLB_TAG_ACCESS, %g4 20 mov TLB_TAG_ACCESS, %g4
21 ldxa [%g4] ASI_IMMU, %g4 21 ldxa [%g4] ASI_IMMU, %g4
22 22
23 /* The kernel executes in context zero, therefore we do not
24 * need to clear the context ID bits out of %g4 here.
25 */
26
23 /* sun4v_itlb_miss branches here with the missing virtual 27 /* sun4v_itlb_miss branches here with the missing virtual
24 * address already loaded into %g4 28 * address already loaded into %g4
25 */ 29 */
@@ -128,6 +132,10 @@ kvmap_dtlb:
128 mov TLB_TAG_ACCESS, %g4 132 mov TLB_TAG_ACCESS, %g4
129 ldxa [%g4] ASI_DMMU, %g4 133 ldxa [%g4] ASI_DMMU, %g4
130 134
135 /* The kernel executes in context zero, therefore we do not
136 * need to clear the context ID bits out of %g4 here.
137 */
138
131 /* sun4v_dtlb_miss branches here with the missing virtual 139 /* sun4v_dtlb_miss branches here with the missing virtual
132 * address already loaded into %g4 140 * address already loaded into %g4
133 */ 141 */
@@ -251,6 +259,10 @@ kvmap_dtlb_longpath:
251 nop 259 nop
252 .previous 260 .previous
253 261
262 /* The kernel executes in context zero, therefore we do not
263 * need to clear the context ID bits out of %g5 here.
264 */
265
254 be,pt %xcc, sparc64_realfault_common 266 be,pt %xcc, sparc64_realfault_common
255 mov FAULT_CODE_DTLB, %g4 267 mov FAULT_CODE_DTLB, %g4
256 ba,pt %xcc, winfix_trampoline 268 ba,pt %xcc, winfix_trampoline
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index be98685c14c6..d568c8207af7 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -29,13 +29,17 @@
29 */ 29 */
30tsb_miss_dtlb: 30tsb_miss_dtlb:
31 mov TLB_TAG_ACCESS, %g4 31 mov TLB_TAG_ACCESS, %g4
32 ldxa [%g4] ASI_DMMU, %g4
33 srlx %g4, PAGE_SHIFT, %g4
32 ba,pt %xcc, tsb_miss_page_table_walk 34 ba,pt %xcc, tsb_miss_page_table_walk
33 ldxa [%g4] ASI_DMMU, %g4 35 sllx %g4, PAGE_SHIFT, %g4
34 36
35tsb_miss_itlb: 37tsb_miss_itlb:
36 mov TLB_TAG_ACCESS, %g4 38 mov TLB_TAG_ACCESS, %g4
39 ldxa [%g4] ASI_IMMU, %g4
40 srlx %g4, PAGE_SHIFT, %g4
37 ba,pt %xcc, tsb_miss_page_table_walk 41 ba,pt %xcc, tsb_miss_page_table_walk
38 ldxa [%g4] ASI_IMMU, %g4 42 sllx %g4, PAGE_SHIFT, %g4
39 43
40 /* At this point we have: 44 /* At this point we have:
41 * %g1 -- PAGE_SIZE TSB entry address 45 * %g1 -- PAGE_SIZE TSB entry address
@@ -284,6 +288,10 @@ tsb_do_dtlb_fault:
284 nop 288 nop
285 .previous 289 .previous
286 290
291 /* Clear context ID bits. */
292 srlx %g5, PAGE_SHIFT, %g5
293 sllx %g5, PAGE_SHIFT, %g5
294
287 be,pt %xcc, sparc64_realfault_common 295 be,pt %xcc, sparc64_realfault_common
288 mov FAULT_CODE_DTLB, %g4 296 mov FAULT_CODE_DTLB, %g4
289 ba,pt %xcc, winfix_trampoline 297 ba,pt %xcc, winfix_trampoline
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 6c43b924a7a2..e16fdd28a931 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -111,8 +111,8 @@ static unsigned int get_user_insn(unsigned long tpc)
111 if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp))) 111 if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
112 goto out_irq_enable; 112 goto out_irq_enable;
113 113
114#ifdef CONFIG_TRANSPARENT_HUGEPAGE 114#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
115 if (pmd_trans_huge(*pmdp)) { 115 if (is_hugetlb_pmd(*pmdp)) {
116 pa = pmd_pfn(*pmdp) << PAGE_SHIFT; 116 pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
117 pa += tpc & ~HPAGE_MASK; 117 pa += tpc & ~HPAGE_MASK;
118 118
@@ -476,14 +476,14 @@ good_area:
476 up_read(&mm->mmap_sem); 476 up_read(&mm->mmap_sem);
477 477
478 mm_rss = get_mm_rss(mm); 478 mm_rss = get_mm_rss(mm);
479#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 479#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
480 mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE)); 480 mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
481#endif 481#endif
482 if (unlikely(mm_rss > 482 if (unlikely(mm_rss >
483 mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit)) 483 mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
484 tsb_grow(mm, MM_TSB_BASE, mm_rss); 484 tsb_grow(mm, MM_TSB_BASE, mm_rss);
485#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 485#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
486 mm_rss = mm->context.huge_pte_count; 486 mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
487 if (unlikely(mm_rss > 487 if (unlikely(mm_rss >
488 mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) { 488 mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
489 if (mm->context.tsb_block[MM_TSB_HUGE].tsb) 489 if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ba52e6466a82..988acc8b1b80 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -12,6 +12,7 @@
12 12
13#include <asm/mman.h> 13#include <asm/mman.h>
14#include <asm/pgalloc.h> 14#include <asm/pgalloc.h>
15#include <asm/pgtable.h>
15#include <asm/tlb.h> 16#include <asm/tlb.h>
16#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
17#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
@@ -131,23 +132,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
131{ 132{
132 pgd_t *pgd; 133 pgd_t *pgd;
133 pud_t *pud; 134 pud_t *pud;
134 pmd_t *pmd;
135 pte_t *pte = NULL; 135 pte_t *pte = NULL;
136 136
137 /* We must align the address, because our caller will run
138 * set_huge_pte_at() on whatever we return, which writes out
139 * all of the sub-ptes for the hugepage range. So we have
140 * to give it the first such sub-pte.
141 */
142 addr &= HPAGE_MASK;
143
144 pgd = pgd_offset(mm, addr); 137 pgd = pgd_offset(mm, addr);
145 pud = pud_alloc(mm, pgd, addr); 138 pud = pud_alloc(mm, pgd, addr);
146 if (pud) { 139 if (pud)
147 pmd = pmd_alloc(mm, pud, addr); 140 pte = (pte_t *)pmd_alloc(mm, pud, addr);
148 if (pmd) 141
149 pte = pte_alloc_map(mm, pmd, addr);
150 }
151 return pte; 142 return pte;
152} 143}
153 144
@@ -155,19 +146,13 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
155{ 146{
156 pgd_t *pgd; 147 pgd_t *pgd;
157 pud_t *pud; 148 pud_t *pud;
158 pmd_t *pmd;
159 pte_t *pte = NULL; 149 pte_t *pte = NULL;
160 150
161 addr &= HPAGE_MASK;
162
163 pgd = pgd_offset(mm, addr); 151 pgd = pgd_offset(mm, addr);
164 if (!pgd_none(*pgd)) { 152 if (!pgd_none(*pgd)) {
165 pud = pud_offset(pgd, addr); 153 pud = pud_offset(pgd, addr);
166 if (!pud_none(*pud)) { 154 if (!pud_none(*pud))
167 pmd = pmd_offset(pud, addr); 155 pte = (pte_t *)pmd_offset(pud, addr);
168 if (!pmd_none(*pmd))
169 pte = pte_offset_map(pmd, addr);
170 }
171 } 156 }
172 return pte; 157 return pte;
173} 158}
@@ -175,70 +160,143 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
175void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 160void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
176 pte_t *ptep, pte_t entry) 161 pte_t *ptep, pte_t entry)
177{ 162{
178 int i; 163 pte_t orig;
179 pte_t orig[2];
180 unsigned long nptes;
181 164
182 if (!pte_present(*ptep) && pte_present(entry)) 165 if (!pte_present(*ptep) && pte_present(entry))
183 mm->context.huge_pte_count++; 166 mm->context.hugetlb_pte_count++;
184 167
185 addr &= HPAGE_MASK; 168 addr &= HPAGE_MASK;
186 169 orig = *ptep;
187 nptes = 1 << HUGETLB_PAGE_ORDER; 170 *ptep = entry;
188 orig[0] = *ptep;
189 orig[1] = *(ptep + nptes / 2);
190 for (i = 0; i < nptes; i++) {
191 *ptep = entry;
192 ptep++;
193 addr += PAGE_SIZE;
194 pte_val(entry) += PAGE_SIZE;
195 }
196 171
197 /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */ 172 /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
198 addr -= REAL_HPAGE_SIZE; 173 maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
199 ptep -= nptes / 2; 174 maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
200 maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
201 addr -= REAL_HPAGE_SIZE;
202 ptep -= nptes / 2;
203 maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
204} 175}
205 176
206pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 177pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
207 pte_t *ptep) 178 pte_t *ptep)
208{ 179{
209 pte_t entry; 180 pte_t entry;
210 int i;
211 unsigned long nptes;
212 181
213 entry = *ptep; 182 entry = *ptep;
214 if (pte_present(entry)) 183 if (pte_present(entry))
215 mm->context.huge_pte_count--; 184 mm->context.hugetlb_pte_count--;
216 185
217 addr &= HPAGE_MASK; 186 addr &= HPAGE_MASK;
218 nptes = 1 << HUGETLB_PAGE_ORDER; 187 *ptep = __pte(0UL);
219 for (i = 0; i < nptes; i++) {
220 *ptep = __pte(0UL);
221 addr += PAGE_SIZE;
222 ptep++;
223 }
224 188
225 /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */ 189 /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
226 addr -= REAL_HPAGE_SIZE;
227 ptep -= nptes / 2;
228 maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
229 addr -= REAL_HPAGE_SIZE;
230 ptep -= nptes / 2;
231 maybe_tlb_batch_add(mm, addr, ptep, entry, 0); 190 maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
191 maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);
232 192
233 return entry; 193 return entry;
234} 194}
235 195
236int pmd_huge(pmd_t pmd) 196int pmd_huge(pmd_t pmd)
237{ 197{
238 return 0; 198 return !pmd_none(pmd) &&
199 (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
239} 200}
240 201
241int pud_huge(pud_t pud) 202int pud_huge(pud_t pud)
242{ 203{
243 return 0; 204 return 0;
244} 205}
206
207static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
208 unsigned long addr)
209{
210 pgtable_t token = pmd_pgtable(*pmd);
211
212 pmd_clear(pmd);
213 pte_free_tlb(tlb, token, addr);
214 atomic_long_dec(&tlb->mm->nr_ptes);
215}
216
217static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
218 unsigned long addr, unsigned long end,
219 unsigned long floor, unsigned long ceiling)
220{
221 pmd_t *pmd;
222 unsigned long next;
223 unsigned long start;
224
225 start = addr;
226 pmd = pmd_offset(pud, addr);
227 do {
228 next = pmd_addr_end(addr, end);
229 if (pmd_none(*pmd))
230 continue;
231 if (is_hugetlb_pmd(*pmd))
232 pmd_clear(pmd);
233 else
234 hugetlb_free_pte_range(tlb, pmd, addr);
235 } while (pmd++, addr = next, addr != end);
236
237 start &= PUD_MASK;
238 if (start < floor)
239 return;
240 if (ceiling) {
241 ceiling &= PUD_MASK;
242 if (!ceiling)
243 return;
244 }
245 if (end - 1 > ceiling - 1)
246 return;
247
248 pmd = pmd_offset(pud, start);
249 pud_clear(pud);
250 pmd_free_tlb(tlb, pmd, start);
251 mm_dec_nr_pmds(tlb->mm);
252}
253
254static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
255 unsigned long addr, unsigned long end,
256 unsigned long floor, unsigned long ceiling)
257{
258 pud_t *pud;
259 unsigned long next;
260 unsigned long start;
261
262 start = addr;
263 pud = pud_offset(pgd, addr);
264 do {
265 next = pud_addr_end(addr, end);
266 if (pud_none_or_clear_bad(pud))
267 continue;
268 hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
269 ceiling);
270 } while (pud++, addr = next, addr != end);
271
272 start &= PGDIR_MASK;
273 if (start < floor)
274 return;
275 if (ceiling) {
276 ceiling &= PGDIR_MASK;
277 if (!ceiling)
278 return;
279 }
280 if (end - 1 > ceiling - 1)
281 return;
282
283 pud = pud_offset(pgd, start);
284 pgd_clear(pgd);
285 pud_free_tlb(tlb, pud, start);
286}
287
288void hugetlb_free_pgd_range(struct mmu_gather *tlb,
289 unsigned long addr, unsigned long end,
290 unsigned long floor, unsigned long ceiling)
291{
292 pgd_t *pgd;
293 unsigned long next;
294
295 pgd = pgd_offset(tlb->mm, addr);
296 do {
297 next = pgd_addr_end(addr, end);
298 if (pgd_none_or_clear_bad(pgd))
299 continue;
300 hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
301 } while (pgd++, addr = next, addr != end);
302}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index aec508e37490..65457c9f1365 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -346,10 +346,13 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
346 spin_lock_irqsave(&mm->context.lock, flags); 346 spin_lock_irqsave(&mm->context.lock, flags);
347 347
348#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 348#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
349 if (mm->context.huge_pte_count && is_hugetlb_pte(pte)) 349 if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
350 is_hugetlb_pte(pte)) {
351 /* We are fabricating 8MB pages using 4MB real hw pages. */
352 pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
350 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, 353 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
351 address, pte_val(pte)); 354 address, pte_val(pte));
352 else 355 } else
353#endif 356#endif
354 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, 357 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
355 address, pte_val(pte)); 358 address, pte_val(pte));
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index f81cd9736700..3659d37b4d81 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -175,9 +175,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
175 175
176 if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) { 176 if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
177 if (pmd_val(pmd) & _PAGE_PMD_HUGE) 177 if (pmd_val(pmd) & _PAGE_PMD_HUGE)
178 mm->context.huge_pte_count++; 178 mm->context.thp_pte_count++;
179 else 179 else
180 mm->context.huge_pte_count--; 180 mm->context.thp_pte_count--;
181 181
182 /* Do not try to allocate the TSB hash table if we 182 /* Do not try to allocate the TSB hash table if we
183 * don't have one already. We have various locks held 183 * don't have one already. We have various locks held
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index a0604a493a36..6725ed45580e 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -470,7 +470,7 @@ retry_tsb_alloc:
470int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 470int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
471{ 471{
472#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 472#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
473 unsigned long huge_pte_count; 473 unsigned long total_huge_pte_count;
474#endif 474#endif
475 unsigned int i; 475 unsigned int i;
476 476
@@ -479,12 +479,14 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
479 mm->context.sparc64_ctx_val = 0UL; 479 mm->context.sparc64_ctx_val = 0UL;
480 480
481#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 481#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
482 /* We reset it to zero because the fork() page copying 482 /* We reset them to zero because the fork() page copying
483 * will re-increment the counters as the parent PTEs are 483 * will re-increment the counters as the parent PTEs are
484 * copied into the child address space. 484 * copied into the child address space.
485 */ 485 */
486 huge_pte_count = mm->context.huge_pte_count; 486 total_huge_pte_count = mm->context.hugetlb_pte_count +
487 mm->context.huge_pte_count = 0; 487 mm->context.thp_pte_count;
488 mm->context.hugetlb_pte_count = 0;
489 mm->context.thp_pte_count = 0;
488#endif 490#endif
489 491
490 /* copy_mm() copies over the parent's mm_struct before calling 492 /* copy_mm() copies over the parent's mm_struct before calling
@@ -500,8 +502,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
500 tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm)); 502 tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
501 503
502#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 504#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
503 if (unlikely(huge_pte_count)) 505 if (unlikely(total_huge_pte_count))
504 tsb_grow(mm, MM_TSB_HUGE, huge_pte_count); 506 tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
505#endif 507#endif
506 508
507 if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) 509 if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index ca0d3802f2af..4e603d060e80 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -490,12 +490,6 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
490 locked = spin_trylock_irqsave(&port->lock, flags); 490 locked = spin_trylock_irqsave(&port->lock, flags);
491 else 491 else
492 spin_lock_irqsave(&port->lock, flags); 492 spin_lock_irqsave(&port->lock, flags);
493 if (port->sysrq) {
494 locked = 0;
495 } else if (oops_in_progress) {
496 locked = spin_trylock(&port->lock);
497 } else
498 spin_lock(&port->lock);
499 493
500 for (i = 0; i < n; i++) { 494 for (i = 0; i < n; i++) {
501 if (*s == '\n') 495 if (*s == '\n')