author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2011-04-18 08:18:11 -0400
committer Chris Metcalf <cmetcalf@tilera.com>  2011-05-04 14:41:44 -0400
commit    dc0b124d8edc6c2f95fc3a689cd40ec05ad85108 (patch)
tree      1a64b5a0e4761c0fc3fb454d0385be52e69f0970 /arch/tile
parent    ef0aaf873ebadd7576f4fb2085ec4557a9df8bf5 (diff)
tile: replace mm->cpu_vm_mask with mm_cpumask()
We plan to change the definition of mm->cpu_vm_mask later. Thus, this patch converts the direct field accesses into the proper mm_cpumask() accessor macro.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
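For context, mm_cpumask() is the core kernel's accessor for this mask. A minimal sketch of the pattern, assuming the pre-change definition in include/linux/mm_types.h (the exact form can differ between kernel versions):

/* Sketch only: today the accessor just takes the address of the embedded
 * field, so callers that go through mm_cpumask(mm) keep working if the
 * field's representation is changed later, without touching arch code. */
#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)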
Diffstat (limited to 'arch/tile')
-rw-r--r--	arch/tile/include/asm/mmu_context.h |  4 ++--
-rw-r--r--	arch/tile/kernel/tlb.c              | 12 ++++++------
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 9bc0d0725c28..15fb24641120 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -100,8 +100,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	__get_cpu_var(current_asid) = asid;
 
 	/* Clear cpu from the old mm, and set it in the new one. */
-	cpumask_clear_cpu(cpu, &prev->cpu_vm_mask);
-	cpumask_set_cpu(cpu, &next->cpu_vm_mask);
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
 
 	/* Re-load page tables */
 	install_page_table(next->pgd, asid);
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
index 2dffc1044d83..a5f241c24cac 100644
--- a/arch/tile/kernel/tlb.c
+++ b/arch/tile/kernel/tlb.c
@@ -34,13 +34,13 @@ void flush_tlb_mm(struct mm_struct *mm)
 {
 	HV_Remote_ASID asids[NR_CPUS];
 	int i = 0, cpu;
-	for_each_cpu(cpu, &mm->cpu_vm_mask) {
+	for_each_cpu(cpu, mm_cpumask(mm)) {
 		HV_Remote_ASID *asid = &asids[i++];
 		asid->y = cpu / smp_topology.width;
 		asid->x = cpu % smp_topology.width;
 		asid->asid = per_cpu(current_asid, cpu);
 	}
-	flush_remote(0, HV_FLUSH_EVICT_L1I, &mm->cpu_vm_mask,
+	flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
 		     0, 0, 0, NULL, asids, i);
 }
 
@@ -54,8 +54,8 @@ void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
 {
 	unsigned long size = hv_page_size(vma);
 	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
-	flush_remote(0, cache, &mm->cpu_vm_mask,
-		     va, size, size, &mm->cpu_vm_mask, NULL, 0);
+	flush_remote(0, cache, mm_cpumask(mm),
+		     va, size, size, mm_cpumask(mm), NULL, 0);
 }
 
 void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
@@ -70,8 +70,8 @@ void flush_tlb_range(const struct vm_area_struct *vma,
 	unsigned long size = hv_page_size(vma);
 	struct mm_struct *mm = vma->vm_mm;
 	int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
-	flush_remote(0, cache, &mm->cpu_vm_mask, start, end - start, size,
-		     &mm->cpu_vm_mask, NULL, 0);
+	flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
+		     mm_cpumask(mm), NULL, 0);
 }
 
 void flush_tlb_all(void)