Diffstat (limited to 'arch/sparc/mm/tsb.c')
-rw-r--r--	arch/sparc/mm/tsb.c	40
1 file changed, 16 insertions(+), 24 deletions(-)
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index c52add79b83d..7f6474347491 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -78,7 +78,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 	base = __pa(base);
 	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
@@ -90,29 +90,12 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
-#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
-#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
-#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
-#else
-#error Broken base page size setting...
-#endif
 
-#ifdef CONFIG_HUGETLB_PAGE
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
-#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
-#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
 #define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
-#else
-#error Broken huge page size setting...
-#endif
 #endif
 
 static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
@@ -207,7 +190,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	case MM_TSB_BASE:
 		hp->pgsz_idx = HV_PGSZ_IDX_BASE;
 		break;
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	case MM_TSB_HUGE:
 		hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
 		break;
@@ -222,7 +205,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	case MM_TSB_BASE:
 		hp->pgsz_mask = HV_PGSZ_MASK_BASE;
 		break;
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	case MM_TSB_HUGE:
 		hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
 		break;
@@ -444,7 +427,7 @@ retry_tsb_alloc:
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	unsigned long huge_pte_count;
 #endif
 	unsigned int i;
@@ -453,7 +436,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 	mm->context.sparc64_ctx_val = 0UL;
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	/* We reset it to zero because the fork() page copying
 	 * will re-increment the counters as the parent PTEs are
 	 * copied into the child address space.
@@ -462,6 +445,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	mm->context.huge_pte_count = 0;
 #endif
 
+	mm->context.pgtable_page = NULL;
+
 	/* copy_mm() copies over the parent's mm_struct before calling
 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
 	 * will be confused and think there is an older TSB to free up.
@@ -474,7 +459,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	 */
 	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
-#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	if (unlikely(huge_pte_count))
 		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
 #endif
@@ -500,10 +485,17 @@ static void tsb_destroy_one(struct tsb_config *tp)
 void destroy_context(struct mm_struct *mm)
 {
 	unsigned long flags, i;
+	struct page *page;
 
 	for (i = 0; i < MM_NUM_TSBS; i++)
 		tsb_destroy_one(&mm->context.tsb_block[i]);
 
+	page = mm->context.pgtable_page;
+	if (page && put_page_testzero(page)) {
+		pgtable_page_dtor(page);
+		free_hot_cold_page(page, 0);
+	}
+
 	spin_lock_irqsave(&ctx_alloc_lock, flags);
 
 	if (CTX_VALID(mm->context)) {
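Note on the destroy_context() hunk: it tears down a page cached in the new mm->context.pgtable_page field, but only when put_page_testzero() reports that the last reference is gone, so the mm context is expected to hold its own reference on that page. The allocation-side counterpart is not part of this file's diff; the fragment below is a minimal sketch, under that assumption, of how a pte-table allocator could stash the page with an extra reference for the mm. The helper name pgtable_page_sketch_alloc() and its structure are illustrative only and are not taken from the commit; the matching free path (which must drop its own reference without destroying the shared page) is omitted.

#include <linux/mm.h>
#include <linux/gfp.h>

static pte_t *pgtable_page_sketch_alloc(struct mm_struct *mm)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return NULL;
	pgtable_page_ctor(page);

	/* Cache the page in the mm context with an extra reference;
	 * this is the reference destroy_context() releases via
	 * put_page_testzero() when the address space goes away.
	 */
	get_page(page);
	mm->context.pgtable_page = page;

	return (pte_t *) page_address(page);
}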