Diffstat (limited to 'arch/sparc64/mm/tsb.c')
-rw-r--r-- | arch/sparc64/mm/tsb.c | 25 |
1 file changed, 12 insertions, 13 deletions
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 3c1ff05038b1..353cb060561b 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -20,9 +20,9 @@ static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries
 	return vaddr & (nentries - 1);
 }
 
-static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context)
+static inline int tag_compare(unsigned long tag, unsigned long vaddr)
 {
-	return (tag == ((vaddr >> 22) | (context << 48)));
+	return (tag == (vaddr >> 22));
 }
 
 /* TSB flushes need only occur on the processor initiating the address
@@ -38,8 +38,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
-		if (tag_compare(ent->tag, v, 0)) {
-			ent->tag = 0UL;
+		if (tag_compare(ent->tag, v)) {
+			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
 			membar_storeload_storestore();
 		}
 	}
@@ -50,14 +50,9 @@ void flush_tsb_user(struct mmu_gather *mp)
 	struct mm_struct *mm = mp->mm;
 	struct tsb *tsb = mm->context.tsb;
 	unsigned long nentries = mm->context.tsb_nentries;
-	unsigned long ctx, base;
+	unsigned long base;
 	int i;
 
-	if (unlikely(!CTX_VALID(mm->context)))
-		return;
-
-	ctx = CTX_HWBITS(mm->context);
-
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 		base = __pa(tsb);
 	else
@@ -71,7 +66,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 
 		hash = tsb_hash(v, nentries);
 		ent = base + (hash * sizeof(struct tsb));
-		tag = (v >> 22UL) | (ctx << 48UL);
+		tag = (v >> 22UL);
 
 		tsb_flush(ent, tag);
 	}
@@ -243,7 +238,8 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
 					     "i" (ASI_NUCLEUS_QUAD_LDD));
 		}
 
-		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
+		if (tag & ((1UL << TSB_TAG_LOCK_BIT) |
+			   (1UL << TSB_TAG_INVALID_BIT)))
 			continue;
 
 		/* We only put base page size PTEs into the TSB,
@@ -315,10 +311,13 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
 			break;
 	}
 
-	page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
+	page = alloc_pages(gfp_flags, get_order(size));
 	if (unlikely(!page))
 		return;
 
+	/* Mark all tags as invalid. */
+	memset(page_address(page), 0x40, size);
+
 	if (size == max_tsb_size)
 		mm->context.tsb_rss_limit = ~0UL;
 	else
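
For readers following the change: the diff drops the context bits from TSB tags and instead marks empty slots with an explicit invalid bit, which is why a freshly allocated TSB can be initialized with memset(..., 0x40, ...) instead of __GFP_ZERO. The small user-space sketch below illustrates that convention; it is not kernel code, and the bit positions (TSB_TAG_INVALID_BIT = 46, TSB_TAG_LOCK_BIT = 47) and entry layout are assumptions based on the sparc64 headers of this era, not something shown in this diff.

/* Illustrative sketch only, not kernel code.  Assumes the tag layout
 * described above: tag = vaddr >> 22 (bits 0-41), with bit 46 used as
 * the "invalid" flag and bit 47 as the lock bit.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TSB_TAG_LOCK_BIT	47	/* assumed value */
#define TSB_TAG_INVALID_BIT	46	/* assumed value */

struct tsb_entry {
	uint64_t tag;
	uint64_t pte;
};

/* Mirrors the new tag_compare(): the context is no longer folded into the tag. */
static int tag_compare(uint64_t tag, uint64_t vaddr)
{
	return tag == (vaddr >> 22);
}

int main(void)
{
	struct tsb_entry tsb[8];

	/* Same trick as the new tsb_grow(): filling the table with 0x40
	 * bytes makes every tag 0x4040404040404040, which has bit 46
	 * (the invalid bit) set.  A real tag is vaddr >> 22 and so only
	 * occupies bits 0-41, so no lookup can ever match an
	 * invalidated entry.
	 */
	memset(tsb, 0x40, sizeof(tsb));

	assert(tsb[0].tag & (1ULL << TSB_TAG_INVALID_BIT));
	assert(!tag_compare(tsb[0].tag, 0xfffff80000000000ULL));

	printf("all entries are invalid after memset(0x40)\n");
	return 0;
}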