 arch/powerpc/include/asm/pgalloc.h     |  4 ++--
 arch/powerpc/include/asm/thread_info.h |  2 ++
 arch/powerpc/include/asm/tlb.h         | 10 ++++++++++
 arch/powerpc/kernel/process.c          | 23 ++++++++++++++++++++++-
 arch/powerpc/mm/pgtable.c              | 14 ++++----------
 arch/powerpc/mm/tlb_hash32.c           |  2 +-
 arch/powerpc/mm/tlb_hash64.c           |  6 ++++--
 arch/powerpc/mm/tlb_nohash.c           |  2 +-
 8 files changed, 46 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index abe8532bd14e..df1b4cbb2e70 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -32,13 +32,13 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 
 #ifdef CONFIG_SMP
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
-extern void pte_free_finish(void);
+extern void pte_free_finish(struct mmu_gather *tlb);
 #else /* CONFIG_SMP */
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 {
 	pgtable_free(table, shift);
 }
-static inline void pte_free_finish(void) { }
+static inline void pte_free_finish(struct mmu_gather *tlb) { }
 #endif /* !CONFIG_SMP */
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
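
The signature change threads the mmu_gather through to pte_free_finish() so the pending freelist can hang off the gather itself instead of per-CPU data; the UP stub stays empty because nothing can walk the page tables concurrently there. A minimal userspace model of that SMP/UP split (illustrative names only, not the kernel code):

/*
 * Userspace model of the SMP/UP split above; all names are illustrative.
 * On UP nothing else can be walking the page tables, so the table is
 * freed immediately; on SMP it is queued on the gather and only freed
 * once pte_free_finish() runs.
 */
#include <stdlib.h>

struct gather { void *deferred[16]; int n; };

#ifdef MODEL_SMP
static void model_pgtable_free_tlb(struct gather *tlb, void *table)
{
	tlb->deferred[tlb->n++] = table;	/* defer until finish time */
}
static void model_pte_free_finish(struct gather *tlb)
{
	while (tlb->n)
		free(tlb->deferred[--tlb->n]);	/* drain the queue */
}
#else
static void model_pgtable_free_tlb(struct gather *tlb, void *table)
{
	(void)tlb;
	free(table);				/* no concurrent walkers */
}
static void model_pte_free_finish(struct gather *tlb) { (void)tlb; }
#endif

int main(void)
{
	struct gather tlb = { .n = 0 };

	model_pgtable_free_tlb(&tlb, malloc(64));
	model_pte_free_finish(&tlb);
	return 0;
}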
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index d8529ef13b23..37c353e8af7c 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -139,10 +139,12 @@ static inline struct thread_info *current_thread_info(void)
 #define TLF_NAPPING		0	/* idle thread enabled NAP mode */
 #define TLF_SLEEPING		1	/* suspend code enabled SLEEP mode */
 #define TLF_RESTORE_SIGMASK	2	/* Restore signal mask in do_signal */
+#define TLF_LAZY_MMU		3	/* tlb_batch is active */
 
 #define _TLF_NAPPING		(1 << TLF_NAPPING)
 #define _TLF_SLEEPING		(1 << TLF_SLEEPING)
 #define _TLF_RESTORE_SIGMASK	(1 << TLF_RESTORE_SIGMASK)
+#define _TLF_LAZY_MMU		(1 << TLF_LAZY_MMU)
 
 #ifndef __ASSEMBLY__
 #define HAVE_SET_RESTORE_SIGMASK	1
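
The TLF_/_TLF_ pair follows the usual bit-number/bit-mask convention, so the new flag can be set, tested, and cleared with single AND/OR operations on thread_info->local_flags. A self-contained illustration (values copied from the hunk above):

/* Bit-number vs. bit-mask pairing; compiles as plain C. */
#include <assert.h>

#define TLF_LAZY_MMU	3			/* bit number */
#define _TLF_LAZY_MMU	(1 << TLF_LAZY_MMU)	/* mask for flag words */

int main(void)
{
	unsigned long local_flags = 0;

	local_flags |= _TLF_LAZY_MMU;		/* set on switch-out */
	assert(local_flags & _TLF_LAZY_MMU);	/* test on switch-in */
	local_flags &= ~_TLF_LAZY_MMU;		/* clear once handled */
	assert(local_flags == 0);
	return 0;
}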
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e2b428b0f7ba..8f0ed7adcd12 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -28,6 +28,16 @@
 #define tlb_start_vma(tlb, vma)	do { } while (0)
 #define tlb_end_vma(tlb, vma)	do { } while (0)
 
+#define HAVE_ARCH_MMU_GATHER 1
+
+struct pte_freelist_batch;
+
+struct arch_mmu_gather {
+	struct pte_freelist_batch *batch;
+};
+
+#define ARCH_MMU_GATHER_INIT (struct arch_mmu_gather){ .batch = NULL, }
+
 extern void tlb_flush(struct mmu_gather *tlb);
 
 /* Get the generic bits... */
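
HAVE_ARCH_MMU_GATHER and ARCH_MMU_GATHER_INIT are consumed by the generic mmu_gather code, which this series reworks in a companion change not shown here. A compilable sketch of how the generic side is expected to embed this state, with the field name inferred from the tlb->arch.batch uses in pgtable.c below; everything else is assumption:

/* Sketch of the generic half (companion patch, not shown here). */
struct mm_struct;		/* opaque in this sketch */
struct pte_freelist_batch;

#define HAVE_ARCH_MMU_GATHER 1

struct arch_mmu_gather {
	struct pte_freelist_batch *batch;
};

#define ARCH_MMU_GATHER_INIT (struct arch_mmu_gather){ .batch = NULL, }

struct mmu_gather {
	struct mm_struct *mm;
#ifdef HAVE_ARCH_MMU_GATHER
	struct arch_mmu_gather arch;	/* lives on the gather: preempt-safe */
#endif
	/* ... generic flush/batching state ... */
};

static inline void sketch_tlb_gather_mmu(struct mmu_gather *tlb,
					 struct mm_struct *mm)
{
	tlb->mm = mm;
	tlb->arch = ARCH_MMU_GATHER_INIT;	/* start with no freelist batch */
}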
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 095043d79946..91e52df3d81d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -395,6 +395,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	struct thread_struct *new_thread, *old_thread;
 	unsigned long flags;
 	struct task_struct *last;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct ppc64_tlb_batch *batch;
+#endif
 
 #ifdef CONFIG_SMP
 	/* avoid complexity of lazy save/restore of fpu
@@ -513,7 +516,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
 		old_thread->accum_tb += (current_tb - start_tb);
 		new_thread->start_tb = current_tb;
 	}
-#endif
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+	batch = &__get_cpu_var(ppc64_tlb_batch);
+	if (batch->active) {
+		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
+		if (batch->index)
+			__flush_tlb_pending(batch);
+		batch->active = 0;
+	}
+#endif /* CONFIG_PPC_BOOK3S_64 */
 
 	local_irq_save(flags);
 
@@ -528,6 +541,14 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	hard_irq_disable();
 	last = _switch(old_thread, new_thread);
 
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
+		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
+		batch = &__get_cpu_var(ppc64_tlb_batch);
+		batch->active = 1;
+	}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 	local_irq_restore(flags);
 
 	return last;
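
Distilled, the two hunks implement a save/restore of the lazy hash-PTE batch across _switch(): the outgoing thread's pending batch is flushed and deactivated, a thread-local flag remembers that it was batching, and the flag re-arms the batch when the thread is scheduled back in. The same logic in isolation (pseudo-kernel C using only identifiers from this patch):

/* Before _switch(): park the outgoing thread's batch state. */
batch = &__get_cpu_var(ppc64_tlb_batch);
if (batch->active) {
	/* remember this thread was inside lazy MMU mode */
	current_thread_info()->local_flags |= _TLF_LAZY_MMU;
	if (batch->index)		/* flush whatever it queued so far */
		__flush_tlb_pending(batch);
	batch->active = 0;		/* stop batching for the next thread */
}

/* After _switch() returns in this thread: resume batching if it was on. */
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
	current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
	batch = &__get_cpu_var(ppc64_tlb_batch);
	batch->active = 1;
}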
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 6a3997f98dfb..6e72788598f8 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -33,8 +33,6 @@
 
 #include "mmu_decl.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 #ifdef CONFIG_SMP
 
 /*
@@ -43,7 +41,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
  * freeing a page table page that is being walked without locks
  */
 
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 static unsigned long pte_freelist_forced_free;
 
 struct pte_freelist_batch
@@ -97,12 +94,10 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 {
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch **batchp = &tlb->arch.batch;
 	unsigned long pgf;
 
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
+	if (atomic_read(&tlb->mm->mm_users) < 2) {
 		pgtable_free(table, shift);
 		return;
 	}
@@ -124,10 +119,9 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 	}
 }
 
-void pte_free_finish(void)
+void pte_free_finish(struct mmu_gather *tlb)
 {
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch **batchp = &tlb->arch.batch;
 
 	if (*batchp == NULL)
 		return;
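
Two things change here: the freelist batch moves from per-CPU data to tlb->arch.batch, so a preempted gather carries its batch with it, and the cpumask_equal() fast path is dropped, since a preemptible gather can migrate between CPUs and "is this CPU the mm's only user" is no longer a stable question; only the mm_users < 2 check survives. A userspace model of the resulting defer-then-drain flow (illustrative names; the kernel hands full batches to RCU, modeled here as a direct loop):

#include <stdlib.h>

#define MODEL_BATCH_MAX 8

struct model_batch {
	unsigned int index;
	void *tables[MODEL_BATCH_MAX];
};

struct model_gather { struct model_batch *batch; };

static void model_free_tlb(struct model_gather *tlb, void *table)
{
	struct model_batch **batchp = &tlb->batch;

	if (*batchp == NULL) {
		*batchp = malloc(sizeof(**batchp));
		if (*batchp == NULL) {	/* no memory: free synchronously */
			free(table);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = table;
	/* the kernel submits the batch via RCU when it fills; elided here */
}

static void model_free_finish(struct model_gather *tlb)
{
	struct model_batch *batch = tlb->batch;

	if (batch == NULL)
		return;
	while (batch->index)
		free(batch->tables[--batch->index]);
	free(batch);
	tlb->batch = NULL;
}

int main(void)
{
	struct model_gather tlb = { .batch = NULL };

	model_free_tlb(&tlb, malloc(64));
	model_free_tlb(&tlb, malloc(64));
	model_free_finish(&tlb);
	return 0;
}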
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 690566b66e8e..d555cdb06bc8 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -73,7 +73,7 @@ void tlb_flush(struct mmu_gather *tlb)
 	}
 
 	/* Push out batch of freed page tables */
-	pte_free_finish();
+	pte_free_finish(tlb);
 }
 
 /*
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index c14d09f614f3..5c94ca34cd79 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -155,7 +155,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 
 void tlb_flush(struct mmu_gather *tlb)
 {
-	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);
 
 	/* If there's a TLB batch pending, then we must flush it because the
 	 * pages are going to be freed and we really don't want to have a CPU
@@ -164,8 +164,10 @@ void tlb_flush(struct mmu_gather *tlb)
 	if (tlbbatch->index)
 		__flush_tlb_pending(tlbbatch);
 
+	put_cpu_var(ppc64_tlb_batch);
+
 	/* Push out batch of freed page tables */
-	pte_free_finish();
+	pte_free_finish(tlb);
 }
 
 /**
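
With the gather now preemptible, tlb_flush() can no longer assume preemption is already off, so the bare __get_cpu_var() becomes a get_cpu_var()/put_cpu_var() pair that pins the task to its CPU for the duration of the access. The pattern in isolation, with comments:

/* get_cpu_var() disables preemption, so the task cannot migrate away
 * from the per-CPU batch while it is being flushed; put_cpu_var()
 * re-enables preemption afterwards. */
struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

if (tlbbatch->index)			/* hash PTEs still queued? */
	__flush_tlb_pending(tlbbatch);	/* safe: pinned to this CPU */

put_cpu_var(ppc64_tlb_batch);		/* preemption back on */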
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 2a030d89bbc6..8eaf67d32043 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -301,7 +301,7 @@ void tlb_flush(struct mmu_gather *tlb)
 	flush_tlb_mm(tlb->mm);
 
 	/* Push out batch of freed page tables */
-	pte_free_finish();
+	pte_free_finish(tlb);
 }
 
 /*
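
Taken together, a caller's-eye sketch of the lifecycle these hooks serve. The generic entry points and their exact signatures belong to the companion on-stack mmu_gather rework (note the removal of the per-CPU mmu_gathers above) and are assumed here, not shown in this patch:

void unmap_region_sketch(struct mm_struct *mm,
			 unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);	/* tlb.arch.batch starts out NULL */

	/* ... zap PTEs; each page-table page that becomes free goes
	 * through pgtable_free_tlb(&tlb, table, shift) and is either
	 * freed at once (mm_users < 2) or queued on tlb.arch.batch ... */

	tlb_finish_mmu(&tlb, start, end);	/* ends in tlb_flush(&tlb), which
						 * drains via pte_free_finish(&tlb) */
}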