 arch/cris/arch-v10/mm/tlb.c | 58 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)

diff --git a/arch/cris/arch-v10/mm/tlb.c b/arch/cris/arch-v10/mm/tlb.c
index 7d9fec88dee5..6baf5bd209e7 100644
--- a/arch/cris/arch-v10/mm/tlb.c
+++ b/arch/cris/arch-v10/mm/tlb.c
@@ -4,8 +4,8 @@
  * Low level TLB handling
  *
  *
- * Copyright (C) 2000-2002 Axis Communications AB
+ * Copyright (C) 2000-2007 Axis Communications AB
  *
  * Authors: Bjorn Wesen (bjornw@axis.com)
  *
  */
@@ -39,7 +39,7 @@ flush_tlb_all(void)
 	unsigned long flags;
 
 	/* the vpn of i & 0xf is so we dont write similar TLB entries
-	 * in the same 4-way entry group. details..
+	 * in the same 4-way entry group. details...
 	 */
 
 	local_irq_save(flags);
@@ -47,7 +47,7 @@ flush_tlb_all(void)
 		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
 		*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
 			      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
-
+
 		*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
 			      IO_STATE(R_TLB_LO, valid, no ) |
 			      IO_STATE(R_TLB_LO, kernel,no ) |
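The `vpn, i & 0xf` pattern in this loop is worth spelling out. The TLB is set-associative (the "4-way entry group" of the comment), so if every invalidated slot were written with the same vpn, several ways of one group could carry identical tags. A minimal host-side sketch of the loop follows; NUM_TLB_ENTRIES and INVALID_PAGEID are assumed stand-ins for the values in the v10 MMU headers, and struct tlb_entry stands in for the R_TLB_SELECT/R_TLB_HI/R_TLB_LO register interface.

#include <stdio.h>

#define NUM_TLB_ENTRIES	64	/* assumed: size of the v10 TLB */
#define INVALID_PAGEID	63	/* assumed: the "never a real mm" page_id */

struct tlb_entry {
	int page_id;		/* models the page_id field of R_TLB_HI */
	unsigned int vpn;	/* models the vpn field of R_TLB_HI */
};

static struct tlb_entry tlb[NUM_TLB_ENTRIES];

/* host-side model of the flush_tlb_all() loop above */
static void model_flush_tlb_all(void)
{
	int i;

	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		tlb[i].page_id = INVALID_PAGEID;
		/* i & 0xf: consecutive slots get distinct vpns, so the
		 * ways of one 4-way group never hold identical tags */
		tlb[i].vpn = i & 0xf;
	}
}

int main(void)
{
	model_flush_tlb_all();
	printf("entry 0: vpn=%u, entry 1: vpn=%u\n", tlb[0].vpn, tlb[1].vpn);
	return 0;
}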
@@ -71,10 +71,10 @@ flush_tlb_mm(struct mm_struct *mm)
 
 	if(page_id == NO_CONTEXT)
 		return;
-
-	/* mark the TLB entries that match the page_id as invalid.
+
+	/* mark the TLB entries that match the page_id as invalid.
 	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
-	 * global pages. is it worth the extra I/O ?
+	 * global pages. is it worth the extra I/O ?
 	 */
 
 	local_irq_save(flags);
@@ -83,7 +83,7 @@ flush_tlb_mm(struct mm_struct *mm)
 		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
 			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
 				      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
-
+
 			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
 				      IO_STATE(R_TLB_LO, valid, no ) |
 				      IO_STATE(R_TLB_LO, kernel,no ) |
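flush_tlb_mm() walks the same entries but only invalidates those whose page_id tag matches the mm being torn down; the comment's open question about sparing _PAGE_GLOBAL entries is left unanswered by the patch. Extending the host model above (NO_CONTEXT is an assumed stand-in for the "no hardware context yet" sentinel in the mm context headers):

#define NO_CONTEXT	-1	/* assumed sentinel: mm has no hw context */

/* extends the host model: invalidate only entries tagged with this
 * mm's page_id, mirroring the comparison flush_tlb_mm() does via
 * IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) */
static void model_flush_tlb_mm(int page_id)
{
	int i;

	if (page_id == NO_CONTEXT)	/* nothing of it can be in the TLB */
		return;

	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		if (tlb[i].page_id == page_id) {
			tlb[i].page_id = INVALID_PAGEID;
			tlb[i].vpn = i & 0xf;
		}
	}
}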
@@ -96,9 +96,7 @@ flush_tlb_mm(struct mm_struct *mm)
 
 /* invalidate a single page */
 
-void
-flush_tlb_page(struct vm_area_struct *vma,
-	       unsigned long addr)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int page_id = mm->context.page_id;
@@ -113,7 +111,7 @@ flush_tlb_page(struct vm_area_struct *vma,
 	addr &= PAGE_MASK; /* perhaps not necessary */
 
 	/* invalidate those TLB entries that match both the mm context
-	 * and the virtual address requested
+	 * and the virtual address requested
 	 */
 
 	local_irq_save(flags);
@@ -125,7 +123,7 @@ flush_tlb_page(struct vm_area_struct *vma,
 		    (tlb_hi & PAGE_MASK) == addr) {
 			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
 				    addr; /* same addr as before works. */
-
+
 			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
 				      IO_STATE(R_TLB_LO, valid, no ) |
 				      IO_STATE(R_TLB_LO, kernel,no ) |
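For a single page the match must be exact on both tags: the mm's page_id and the virtual page of addr, which the real code checks as (tlb_hi & PAGE_MASK) == addr. Note that the invalidated entry keeps its old address ("same addr as before works."): that vpn was already unique within its group, so nothing needs re-staggering. In the host model, which stores the vpn directly, the address comparison becomes a vpn comparison; the 13-bit page shift (8 KiB pages) is an assumption standing in for PAGE_MASK/PAGE_SHIFT on v10.

#define MODEL_PAGE_SHIFT 13	/* assumed: 8 KiB pages, i.e. PAGE_SHIFT on v10 */

/* extends the host model: invalidate entries matching both the mm
 * context and the virtual address, as flush_tlb_page() does */
static void model_flush_tlb_page(int page_id, unsigned long addr)
{
	int i;
	unsigned long vpn = addr >> MODEL_PAGE_SHIFT;

	if (page_id == NO_CONTEXT)
		return;

	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		if (tlb[i].page_id == page_id && tlb[i].vpn == vpn) {
			/* keep the vpn: "same addr as before works",
			 * it is already unique within its group */
			tlb[i].page_id = INVALID_PAGEID;
		}
	}
}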
@@ -144,7 +142,7 @@ dump_tlb_all(void)
 {
 	int i;
 	unsigned long flags;
-
+
 	printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");
 
 	local_save_flags(flags);
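dump_tlb_all() is the debug counterpart: it selects each entry in turn and prints the fields named in the printk header above. The host-model equivalent is a plain table dump (the real function additionally decodes the pfn and flag fields out of R_TLB_LO):

/* host-model counterpart of dump_tlb_all(): print every entry's tags */
static void model_dump_tlb(void)
{
	int i;

	for (i = 0; i < NUM_TLB_ENTRIES; i++)
		printf("entry %2d: page_id=%2d vpn=%#x\n",
		       i, tlb[i].page_id, tlb[i].vpn);
}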
@@ -172,27 +170,29 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 /* called in schedule() just before actually doing the switch_to */
 
-void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	       struct task_struct *tsk)
 {
-	/* make sure we have a context */
+	if (prev != next) {
+		/* make sure we have a context */
+		get_mmu_context(next);
 
-	get_mmu_context(next);
+		/* remember the pgd for the fault handlers
+		 * this is similar to the pgd register in some other CPU's.
+		 * we need our own copy of it because current and active_mm
+		 * might be invalid at points where we still need to derefer
+		 * the pgd.
+		 */
 
-	/* remember the pgd for the fault handlers
-	 * this is similar to the pgd register in some other CPU's.
-	 * we need our own copy of it because current and active_mm
-	 * might be invalid at points where we still need to derefer
-	 * the pgd.
-	 */
+		per_cpu(current_pgd, smp_processor_id()) = next->pgd;
 
-	per_cpu(current_pgd, smp_processor_id()) = next->pgd;
+		/* switch context in the MMU */
 
-	/* switch context in the MMU */
-
-	D(printk("switching mmu_context to %d (%p)\n", next->context, next));
+		D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
+			next->context, next));
 
-	*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context.page_id);
+		*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
+					  page_id, next->context.page_id);
+	}
 }
 
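The functional change in this last hunk is the new prev != next guard (the rest is reformatting plus a KERN_DEBUG level on the printk). When the scheduler switches between two threads of the same process, both share one mm_struct, so the page_id already programmed into R_MMU_CONTEXT and the cached per-cpu current_pgd are still correct and the whole body can be skipped. A sketch of that logic on the host model, with struct model_mm and the two statics as illustrative stand-ins for mm_struct, the per-cpu current_pgd, and the R_MMU_CONTEXT register:

struct model_mm {
	int page_id;
	void *pgd;
};

static void *model_current_pgd;
static int model_mmu_context = INVALID_PAGEID;

static void model_switch_mm(struct model_mm *prev, struct model_mm *next)
{
	if (prev != next) {
		/* a different address space: cache its pgd and retag
		 * the MMU; threads sharing an mm skip all of this */
		model_current_pgd = next->pgd;
		model_mmu_context = next->page_id;
	}
}

Skipping the reload also leaves the TLB entries tagged with that page_id untouched and valid, so thread-to-thread switches stay cheap.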