authorHaavard Skinnemoen <haavard.skinnemoen@atmel.com>2008-01-14 16:15:05 -0500
committerHaavard Skinnemoen <haavard.skinnemoen@atmel.com>2008-07-02 05:01:28 -0400
commitcfd23e93a0289cf6711fd3877c5226658d87240a (patch)
tree1030f0ee5ccd59d1d80b2b5fdc892987f90fac8e /arch
parentebe74597a55fef00edc80a414ef5c6477d035e0a (diff)
avr32: Store virtual addresses in the PGD
Instead of storing physical addresses along with page flags in the PGD,
store virtual addresses and use NULL to indicate a not-present
second-level page table. A non-page-aligned page table indicates a bad
PMD.

This simplifies the TLB miss handler since it no longer has to check
the Present bit or convert the PGD entry from a physical to a virtual
address. Instead, it only has to check for a NULL entry, which is
slightly cheaper than either.

Signed-off-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
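As a rough illustration of the convention the commit message describes (this sketch is not code from the patch; the helper names, the uintptr_t representation of a PGD entry and the page-size constant are assumptions for illustration only), the not-present and bad-PMD tests on a PGD entry that holds the virtual address of a second-level page table reduce to:

    #include <stdint.h>

    #define SKETCH_PAGE_SIZE 4096UL	/* assumed page size */

    /*
     * Illustrative only: a PGD entry is assumed to hold the virtual
     * address of a second-level page table.  NULL means the table is
     * not present; a non-page-aligned value marks a bad PMD.
     */
    static inline int sketch_pgd_none(uintptr_t pgd_entry)
    {
    	return pgd_entry == 0;
    }

    static inline int sketch_pgd_bad(uintptr_t pgd_entry)
    {
    	return (pgd_entry & (SKETCH_PAGE_SIZE - 1)) != 0;
    }

Testing for zero is a single compare, which is why the assembly below can replace the Present-bit test and the physical-to-virtual conversion with a cp.w/breq pair.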
Diffstat (limited to 'arch')
-rw-r--r--  arch/avr32/kernel/entry-avr32b.S  45
-rw-r--r--  arch/avr32/kernel/vmlinux.lds.S    4
-rw-r--r--  arch/avr32/mm/init.c               4
3 files changed, 27 insertions(+), 26 deletions(-)
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 050c006a7f0c..3cdd7071f35e 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -74,12 +74,6 @@ exception_vectors:
 	.align	2
 	bral	do_dtlb_modified
 
-	/*
-	 * r0 : PGD/PT/PTE
-	 * r1 : Offending address
-	 * r2 : Scratch register
-	 * r3 : Cause (5, 12 or 13)
-	 */
 #define	tlbmiss_save	pushm	r0-r3
 #define tlbmiss_restore	popm	r0-r3
 
@@ -108,17 +102,17 @@ tlb_miss_common:
 	bld	r0, 31
 	brcs	handle_vmalloc_miss
 
-	/* First level lookup */
+	/*
+	 * First level lookup: The PGD contains virtual pointers to
+	 * the second-level page tables, but they may be NULL if not
+	 * present.
+	 */
 pgtbl_lookup:
 	lsr	r2, r0, PGDIR_SHIFT
 	ld.w	r3, r1[r2 << 2]
 	bfextu	r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
-	bld	r3, _PAGE_BIT_PRESENT
-	brcc	page_table_not_present
-
-	/* Translate to virtual address in P1. */
-	andl	r3, 0xf000
-	sbr	r3, 31
+	cp.w	r3, 0
+	breq	page_table_not_present
 
 	/* Second level lookup */
 	ld.w	r2, r3[r1 << 2]
@@ -155,6 +149,19 @@ handle_vmalloc_miss:
 	orh	r1, hi(swapper_pg_dir)
 	rjmp	pgtbl_lookup
 
+	/* The slow path of the TLB miss handler */
+	.align	2
+page_table_not_present:
+page_not_present:
+	tlbmiss_restore
+	sub	sp, 4
+	stmts	--sp, r0-lr
+	rcall	save_full_context_ex
+	mfsr	r12, SYSREG_ECR
+	mov	r11, sp
+	rcall	do_page_fault
+	rjmp	ret_from_exception
+
 
 	/* --- System Call --- */
 
@@ -267,18 +274,6 @@ syscall_exit_work:
 	brcc	syscall_exit_cont
 	rjmp	enter_monitor_mode
 
-	/* The slow path of the TLB miss handler */
-page_table_not_present:
-page_not_present:
-	tlbmiss_restore
-	sub	sp, 4
-	stmts	--sp, r0-lr
-	rcall	save_full_context_ex
-	mfsr	r12, SYSREG_ECR
-	mov	r11, sp
-	rcall	do_page_fault
-	rjmp	ret_from_exception
-
 	/* This function expects to find offending PC in SYSREG_RAR_EX */
 	.type	save_full_context_ex, @function
 	.align	2
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index 033dd46bfa62..5d25d8eeb750 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -99,6 +99,10 @@ SECTIONS
 		 */
 		*(.data.init_task)
 
+		/* Then, the page-aligned data */
+		. = ALIGN(PAGE_SIZE);
+		*(.data.page_aligned)
+
 		/* Then, the cacheline aligned data */
 		. = ALIGN(L1_CACHE_BYTES);
 		*(.data.cacheline_aligned)
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index 0e77578c358d..3f90a87527bb 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -24,9 +24,11 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 
+#define __page_aligned	__attribute__((section(".data.page_aligned")))
+
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-pgd_t swapper_pg_dir[PTRS_PER_PGD];
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned;
 
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);