author		Paul Mundt <lethal@linux-sh.org>	2006-12-24 19:51:47 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2007-02-12 20:54:45 -0500
commit		aec5e0e1c179fac4bbca4007a3f0d3107275a73c
tree		3b251e52a89445a5546f398fb16a002435b6c2b6 /arch/sh/mm
parent		506b85f4114b912d2e91fab8da9849289e43857f
sh: Use a per-cpu ASID cache.
Previously this was implemented using a global cache; cache this
per-CPU instead and bump up the number of context IDs to match
NR_CPUS.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
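
The hunks below replace every direct access to the global mm->context.id
with cpu_context()/cpu_asid() helpers. Their definitions live outside
arch/sh/mm and are therefore not part of this diff; judging from the call
sites, they plausibly reduce to something like the following sketch (the
per-CPU id array and both macro names are assumptions, not taken from
this patch):

/*
 * Sketch (assumption): per-CPU context accessors implied by the call
 * sites in this diff. The real definitions live in the sh headers,
 * outside the 'arch/sh/mm' limit of this view.
 */
typedef struct {
        unsigned long id[NR_CPUS];      /* context generation + ASID, per CPU */
} mm_context_t;

#define cpu_context(cpu, mm)    ((mm)->context.id[cpu])
#define cpu_asid(cpu, mm)       \
        (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)

This matches the commit message: one context word per CPU, so the number
of context IDs scales with NR_CPUS.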
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/init.c	 5
-rw-r--r--	arch/sh/mm/tlb-flush.c	26
2 files changed, 16 insertions, 15 deletions
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index bf0c263cb6fd..d172065182fb 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -39,11 +39,6 @@
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-/*
- * Cache of MMU context last used.
- */
-unsigned long mmu_context_cache = NO_CONTEXT;
-
 #ifdef CONFIG_MMU
 /* It'd be good if these lines were in the standard header file. */
 #define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
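
Note that this hunk only removes the global cache; its per-CPU
replacement is declared outside arch/sh/mm, so it does not show up in
this diffstat-limited view. A plausible shape for it (an assumption:
asid_cache() and the sh_cpuinfo field are hypothetical names) is a
per-CPU slot in the CPU data:

/*
 * Sketch (assumption): the per-CPU home of the deleted global
 * mmu_context_cache. Field and macro names are hypothetical.
 */
struct sh_cpuinfo {
        /* ... existing fields ... */
        unsigned long asid_cache;       /* last context handed out on this CPU */
};

#define asid_cache(cpu) (cpu_data[cpu].asid_cache)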
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index ef3e4d477864..b829c17c1d17 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -16,12 +16,14 @@
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
+	unsigned int cpu = smp_processor_id();
+
+	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
 		unsigned long flags;
 		unsigned long asid;
 		unsigned long saved_asid = MMU_NO_ASID;
 
-		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
+		asid = cpu_asid(cpu, vma->vm_mm);
 		page &= PAGE_MASK;
 
 		local_irq_save(flags);
@@ -40,22 +42,23 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	unsigned int cpu = smp_processor_id();
 
-	if (mm->context.id != NO_CONTEXT) {
+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
 		unsigned long flags;
 		int size;
 
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context.id = NO_CONTEXT;
+			cpu_context(cpu, mm) = NO_CONTEXT;
 			if (mm == current->mm)
-				activate_context(mm);
+				activate_context(mm, cpu);
 		} else {
 			unsigned long asid;
 			unsigned long saved_asid = MMU_NO_ASID;
 
-			asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
+			asid = cpu_asid(cpu, mm);
 			start &= PAGE_MASK;
 			end += (PAGE_SIZE - 1);
 			end &= PAGE_MASK;
@@ -76,6 +79,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
+	unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 	int size;
 
@@ -87,7 +91,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		unsigned long asid;
 		unsigned long saved_asid = get_asid();
 
-		asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
+		asid = cpu_asid(cpu, &init_mm);
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
@@ -103,15 +107,17 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/* Invalidate all TLB of this process. */
 	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context.id != NO_CONTEXT) {
+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
 		unsigned long flags;
 
 		local_irq_save(flags);
-		mm->context.id = NO_CONTEXT;
+		cpu_context(cpu, mm) = NO_CONTEXT;
 		if (mm == current->mm)
-			activate_context(mm);
+			activate_context(mm, cpu);
 		local_irq_restore(flags);
 	}
 }
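
For reference, the "get new MMU context" path that flush_tlb_mm() and
activate_context() lean on would, with the per-CPU cache, look roughly
like the sketch below. It assumes the usual sh scheme of an ASID in the
low bits plus a generation (version) counter in the upper bits;
asid_cache(), MMU_CONTEXT_VERSION_MASK and MMU_CONTEXT_FIRST_VERSION are
taken on assumption from the sh headers, not from this diff.

/*
 * Sketch (assumption -- not part of this patch): per-CPU ASID
 * allocation. Each CPU hands out ASIDs from its own counter; the
 * upper bits form a generation number so stale contexts from an
 * earlier allocation cycle are detected and replaced.
 */
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        unsigned long asid = asid_cache(cpu);

        /* Context was allocated in the current generation on this CPU. */
        if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
                return;

        /* Stale or NO_CONTEXT: hand out the next ASID on this CPU. */
        if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
                /* ASID space exhausted: flush all TLBs, bump generation. */
                flush_tlb_all();
                if (!asid)
                        asid = MMU_CONTEXT_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

Setting cpu_context(cpu, mm) = NO_CONTEXT, as the hunks above do,
therefore forces a fresh ASID on the next activation instead of walking
the TLB entry by entry.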