author		Rusty Russell <rusty@rustcorp.com.au>	2009-03-15 14:16:43 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-03-23 22:47:29 -0400
commit		56aa4129e87be43676c6e3eac41a6aa553877802 (patch)
tree		51fff04b2393a6e2ae6f6ab2b27126428eec651d /arch/powerpc/mm
parent		f5ac590e79d693d4239997265405535a2e0c36bd (diff)
cpumask: Use mm_cpumask() wrapper instead of cpu_vm_mask
Makes code futureproof against the impending change to mm->cpu_vm_mask.

It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
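For reference, a minimal before/after sketch of the conversion this patch applies throughout; mm, tmp and local stand in for the surrounding code, and this is an illustration rather than a hunk from the patch:

	/* Before: cpumask_t is copied by value onto the stack. */
	cpumask_t tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* After: const struct cpumask pointers, no on-stack copy, and the
	 * mm_cpumask() wrapper instead of touching mm->cpu_vm_mask directly.
	 */
	const struct cpumask *cur = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(mm), cur))
		local = 1;

Avoiding the by-value copy matters as NR_CPUS grows, since cpumask_t becomes too large to pass around on the stack cheaply.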
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c		10
-rw-r--r--	arch/powerpc/mm/mmu_context_nohash.c	2
-rw-r--r--	arch/powerpc/mm/pgtable.c		3
-rw-r--r--	arch/powerpc/mm/tlb_hash64.c		6
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c		18
5 files changed, 18 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f5bc1b213f24..86c00c885e68 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -859,7 +859,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	unsigned long vsid;
 	struct mm_struct *mm;
 	pte_t *ptep;
-	cpumask_t tmp;
+	const struct cpumask *tmp;
 	int rc, user_region = 0, local = 0;
 	int psize, ssize;
 
@@ -907,8 +907,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		return 1;
 
 	/* Check CPU locality */
-	tmp = cpumask_of_cpu(smp_processor_id());
-	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
+	tmp = cpumask_of(smp_processor_id());
+	if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
 		local = 1;
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -1024,7 +1024,6 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	unsigned long vsid;
 	void *pgdir;
 	pte_t *ptep;
-	cpumask_t mask;
 	unsigned long flags;
 	int local = 0;
 	int ssize;
@@ -1067,8 +1066,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	local_irq_save(flags);
 
 	/* Is that local to this CPU ? */
-	mask = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(mm->cpu_vm_mask, mask))
+	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 		local = 1;
 
 	/* Hash it in */
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 52a0cfc38b64..ac4cb04ceb69 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -97,7 +97,7 @@ static unsigned int steal_context_smp(unsigned int id)
 		mm->context.id = MMU_NO_CONTEXT;
 
 		/* Mark it stale on all CPUs that used this mm */
-		for_each_cpu_mask_nr(cpu, mm->cpu_vm_mask)
+		for_each_cpu(cpu, mm_cpumask(mm))
 			__set_bit(id, stale_map[cpu]);
 		return id;
 	}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index a27ded3adac5..f5c6fd42265c 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -82,11 +82,10 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
 void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
 	/* This is safe since tlb_gather_mmu has disabled preemption */
-	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
 	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))) {
 		pgtable_free(pgf);
 		return;
 	}
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index c931bc7d1079..1be1b5e59796 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -139,12 +139,12 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
  */
 void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 {
-	cpumask_t tmp;
+	const struct cpumask *tmp;
 	int i, local = 0;
 
 	i = batch->index;
-	tmp = cpumask_of_cpu(smp_processor_id());
-	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
+	tmp = cpumask_of(smp_processor_id());
+	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
 		local = 1;
 	if (i == 1)
 		flush_hash_page(batch->vaddr[0], batch->pte[0],
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 39ac22b13c73..7af72970faed 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -132,11 +132,11 @@ void flush_tlb_mm(struct mm_struct *mm)
 	pid = mm->context.id;
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		goto no_context;
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
-	if (!cpus_empty(cpu_mask)) {
+	if (!cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
 		struct tlb_flush_param p = { .pid = pid };
-		smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
+		/* Ignores smp_processor_id() even if set. */
+		smp_call_function_many(mm_cpumask(mm),
+				       do_flush_tlb_mm_ipi, &p, 1);
 	}
 	_tlbil_pid(pid);
  no_context:
@@ -146,16 +146,15 @@ EXPORT_SYMBOL(flush_tlb_mm);
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
-	cpumask_t cpu_mask;
+	struct cpumask *cpu_mask;
 	unsigned int pid;
 
 	preempt_disable();
 	pid = vma ? vma->vm_mm->context.id : 0;
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		goto bail;
-	cpu_mask = vma->vm_mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
-	if (!cpus_empty(cpu_mask)) {
+	cpu_mask = mm_cpumask(vma->vm_mm);
+	if (!cpumask_equal(cpu_mask, cpumask_of(smp_processor_id()))) {
 		/* If broadcast tlbivax is supported, use it */
 		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
 			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
@@ -167,7 +166,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 			goto bail;
 		} else {
 			struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
-			smp_call_function_mask(cpu_mask,
+			/* Ignores smp_processor_id() even if set in cpu_mask */
+			smp_call_function_many(cpu_mask,
 					       do_flush_tlb_page_ipi, &p, 1);
 		}
 	}
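A note on the smp_call_function_many() conversions above: unlike smp_call_function_mask(), which obliged the caller to copy the mask and clear its own CPU before sending IPIs, smp_call_function_many() never calls the function on the calling CPU, which is what the "Ignores smp_processor_id() even if set" comments record. A minimal sketch of the two styles, reusing do_flush_tlb_mm_ipi and p from the patch:

	/* Old: copy the mask by value, drop ourselves, IPI whoever is left. */
	cpumask_t cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask))
		smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);

	/* New: pass the mm's mask directly; the calling CPU is skipped
	 * internally, so no on-stack copy or cpu_clear() is needed.  The
	 * local flush still happens via _tlbil_pid() afterwards. */
	smp_call_function_many(mm_cpumask(mm), do_flush_tlb_mm_ipi, &p, 1);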