Diffstat (limited to 'arch/cris'):

 -rw-r--r--  arch/cris/mm/fault.c   | 95 +-
 -rw-r--r--  arch/cris/mm/ioremap.c | 58 +-
 -rw-r--r--  arch/cris/mm/tlb.c     | 25 +-

 3 files changed, 131 insertions(+), 47 deletions(-)
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 03254b9eded1..fe1cc36b5aca 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -6,6 +6,38 @@
  * Authors:  Bjorn Wesen
  *
  * $Log: fault.c,v $
+ * Revision 1.20  2005/03/04 08:16:18  starvik
+ * Merge of Linux 2.6.11.
+ *
+ * Revision 1.19  2005/01/14 10:07:59  starvik
+ * Fixed warning.
+ *
+ * Revision 1.18  2005/01/12 08:10:14  starvik
+ * Readded the change of frametype when handling kernel page fault fixup
+ * for v10. This is necessary to avoid that the CPU remakes the faulting
+ * access.
+ *
+ * Revision 1.17  2005/01/11 13:53:05  starvik
+ * Use raw_printk.
+ *
+ * Revision 1.16  2004/12/17 11:39:41  starvik
+ * SMP support.
+ *
+ * Revision 1.15  2004/11/23 18:36:18  starvik
+ * Stack is now non-executable.
+ * Signal handler trampolines are placed in a reserved page mapped into all
+ * processes.
+ *
+ * Revision 1.14  2004/11/23 07:10:21  starvik
+ * Moved find_fixup_code to generic code.
+ *
+ * Revision 1.13  2004/11/23 07:00:54  starvik
+ * Actually use the execute permission bit in the MMU. This makes it possible
+ * to prevent e.g. attacks where executable code is put on the stack.
+ *
+ * Revision 1.12  2004/09/29 06:16:04  starvik
+ * Use instruction_pointer
+ *
  * Revision 1.11  2004/05/14 07:58:05  starvik
  * Merge of changes from 2.4
  *
@@ -103,6 +135,7 @@
 
 extern int find_fixup_code(struct pt_regs *);
 extern void die_if_kernel(const char *, struct pt_regs *, long);
+extern int raw_printk(const char *fmt, ...);
 
 /* debug of low-level TLB reload */
 #undef DEBUG
@@ -118,7 +151,8 @@ extern void die_if_kernel(const char *, struct pt_regs *, long);
 
 /* current active page directory */
 
-volatile pgd_t *current_pgd;
+volatile DEFINE_PER_CPU(pgd_t *,current_pgd);
+unsigned long cris_signal_return_page;
 
 /*
  * This routine handles page faults. It determines the address,
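
Converting current_pgd from a single global into a per-CPU variable lets each
processor on an SMP system track its own active page directory, matching the
SMP support noted in revision 1.16 above. A minimal sketch of the
DEFINE_PER_CPU/per_cpu access pattern the patch relies on; example_switch_pgd
is a made-up name for illustration:

#include <linux/percpu.h>
#include <linux/smp.h>

DEFINE_PER_CPU(pgd_t *, current_pgd);	/* one instance per possible CPU */

static void example_switch_pgd(pgd_t *next)
{
	/* Select this CPU's copy. The caller must keep the task from
	 * migrating to another CPU while the value matters, e.g. by
	 * running with interrupts disabled, as the fault handler does
	 * on its early vmalloc path. */
	per_cpu(current_pgd, smp_processor_id()) = next;
}
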
@@ -146,8 +180,9 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	struct vm_area_struct * vma;
 	siginfo_t info;
 
-	D(printk("Page fault for %X at %X, prot %d write %d\n",
-		 address, regs->erp, protection, writeaccess));
+	D(printk("Page fault for %lX on %X at %lX, prot %d write %d\n",
+		 address, smp_processor_id(), instruction_pointer(regs),
+		 protection, writeaccess));
 
 	tsk = current;
 
@@ -175,8 +210,19 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	    !user_mode(regs))
 		goto vmalloc_fault;
 
+	/* When stack execution is not allowed we store the signal
+	 * trampolines in the reserved cris_signal_return_page.
+	 * Handle this in the exact same way as vmalloc (we know
+	 * that the mapping is there and is valid so no need to
+	 * call handle_mm_fault).
+	 */
+	if (cris_signal_return_page &&
+	    address == cris_signal_return_page &&
+	    !protection && user_mode(regs))
+		goto vmalloc_fault;
+
 	/* we can and should enable interrupts at this point */
-	sti();
+	local_irq_enable();
 
 	mm = tsk->mm;
 	info.si_code = SEGV_MAPERR;
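
The arch-private sti() gives way to the generic IRQ interface here. For
reference, a sketch of the local_irq_* family that supersedes direct flag
twiddling (a standard kernel API of this era; the example function name is
made up):

#include <asm/system.h>	/* home of these macros in 2.6.11-era trees */

static void example_irq_usage(void)
{
	unsigned long flags;

	local_irq_disable();		/* unconditionally mask interrupts */
	local_irq_enable();		/* unconditionally unmask them */

	local_irq_save(flags);		/* mask and remember prior state */
	/* ... critical section ... */
	local_irq_restore(flags);	/* reinstate the saved state */
}
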
@@ -220,7 +266,10 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 
 	/* first do some preliminary protection checks */
 
-	if (writeaccess) {
+	if (writeaccess == 2){
+		if (!(vma->vm_flags & VM_EXEC))
+			goto bad_area;
+	} else if (writeaccess == 1) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
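
The writeaccess argument now carries three cases rather than a boolean, so the
execute-permission bit in the MMU (revision 1.13 above) can be enforced. A
compact restatement of the check; the flags tested in the read case are an
assumption, since that branch lies outside this hunk:

static int example_access_ok(struct vm_area_struct *vma, int writeaccess)
{
	switch (writeaccess) {
	case 2:		/* instruction fetch: needs execute permission */
		return vma->vm_flags & VM_EXEC;
	case 1:		/* data write: needs write permission */
		return vma->vm_flags & VM_WRITE;
	default:	/* data read; assumed to need read or exec */
		return vma->vm_flags & (VM_READ | VM_EXEC);
	}
}
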
@@ -234,7 +283,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */
 
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
+	switch (handle_mm_fault(mm, vma, address, writeaccess & 1)) {
 	case 1:
 		tsk->min_flt++;
 		break;
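
Masking with "writeaccess & 1" forwards only the write bit, since
handle_mm_fault() of this era takes a boolean write flag. The surrounding
cases fall outside the hunk; a sketch of how the full dispatch would read in
context, with the return-value convention per include/linux/mm.h of that
period:

	switch (handle_mm_fault(mm, vma, address, writeaccess & 1)) {
	case VM_FAULT_MINOR:	/* 1: satisfied without blocking I/O */
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:	/* 2: required I/O to resolve */
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:	/* 0: unrecoverable */
		goto do_sigbus;
	default:		/* VM_FAULT_OOM (-1) */
		goto out_of_memory;
	}
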
@@ -292,10 +341,10 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 */
 
 	if ((unsigned long) (address) < PAGE_SIZE)
-		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+		raw_printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
 	else
-		printk(KERN_ALERT "Unable to handle kernel access");
-	printk(" at virtual address %08lx\n",address);
+		raw_printk(KERN_ALERT "Unable to handle kernel access");
+	raw_printk(" at virtual address %08lx\n",address);
 
 	die_if_kernel("Oops", regs, (writeaccess << 1) | protection);
 
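
raw_printk (declared near the top of this patch, per revision 1.17) exists so
that oops output survives even when the normal console path cannot be trusted
during a fault. A sketch of the general shape such a helper takes; the real
CRIS implementation lives elsewhere in the tree, so this is an assumption
about it, not its actual body:

#include <stdarg.h>
#include <linux/kernel.h>

int example_raw_printk(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	int n;

	va_start(args, fmt);
	n = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	/* ... emit buf by polling the serial debug port directly,
	 * taking no locks and touching no console machinery ... */
	return n;
}
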
@@ -346,10 +395,11 @@ vmalloc_fault:
 
 		int offset = pgd_index(address);
 		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
 		pmd_t *pmd, *pmd_k;
 		pte_t *pte_k;
 
-		pgd = (pgd_t *)current_pgd + offset;
+		pgd = (pgd_t *)per_cpu(current_pgd, smp_processor_id()) + offset;
 		pgd_k = init_mm.pgd + offset;
 
 		/* Since we're two-level, we don't need to do both
@@ -364,8 +414,13 @@ vmalloc_fault:
 		 * it exists.
 		 */
 
-		pmd = pmd_offset(pgd, address);
-		pmd_k = pmd_offset(pgd_k, address);
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (!pud_present(*pud_k))
+			goto no_context;
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
 
 		if (!pmd_present(*pmd_k))
 			goto bad_area_nosemaphore;
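
Linux 2.6.11 introduced the pud level between pgd and pmd; on two-level CRIS
the pud and pmd fold back into the pgd entry, so the added calls compile down
to casts. A sketch of the resulting generic walk; example_walk is a made-up
helper, and a kernel mapping is assumed so no locking is shown:

static pte_t *example_walk(pgd_t *pgd_base, unsigned long addr)
{
	pgd_t *pgd = pgd_base + pgd_index(addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);	/* no-op cast on folded configs */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);	/* likewise folded on CRIS */
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
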
@@ -385,3 +440,19 @@ vmalloc_fault:
 		return;
 	}
 }
+
+/* Find fixup code. */
+int
+find_fixup_code(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fixup;
+
+	if ((fixup = search_exception_tables(instruction_pointer(regs))) != 0) {
+		/* Adjust the instruction pointer in the stackframe. */
+		instruction_pointer(regs) = fixup->fixup;
+		arch_fixup(regs);
+		return 1;
+	}
+
+	return 0;
+}
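
find_fixup_code() drives the kernel's exception-table mechanism: every
instruction that may legitimately fault on a user address (the __get_user
family and friends) records itself alongside a recovery address at build
time. The entry layout that search_exception_tables() scans in this era:

struct exception_table_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* address to resume at after the fault */
};

When the faulting program counter matches an insn field, the handler rewrites
the saved instruction pointer to the fixup address; arch_fixup() then adjusts
whatever else the architecture needs, which per the log above includes the
frame type on v10 so the CPU does not replay the faulting access.
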
diff --git a/arch/cris/mm/ioremap.c b/arch/cris/mm/ioremap.c
index 6b9130bfb6c1..ebba11e270fa 100644
--- a/arch/cris/mm/ioremap.c
+++ b/arch/cris/mm/ioremap.c
@@ -14,9 +14,10 @@
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/arch/memmap.h>
 
 extern inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
+	unsigned long phys_addr, pgprot_t prot)
 {
 	unsigned long end;
 
@@ -31,9 +32,7 @@ extern inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
 		printk("remap_area_pte: page already exists\n");
 		BUG();
 	}
-	set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | __READABLE |
-						     __WRITEABLE | _PAGE_GLOBAL |
-						     _PAGE_KERNEL | flags)));
+	set_pte(pte, mk_pte_phys(phys_addr, prot));
 	address += PAGE_SIZE;
 	phys_addr += PAGE_SIZE;
 	pte++;
@@ -41,7 +40,7 @@ extern inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
 }
 
 static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
+	unsigned long phys_addr, pgprot_t prot)
 {
 	unsigned long end;
 
@@ -56,7 +55,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
 		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
 		if (!pte)
 			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
+		remap_area_pte(pte, address, end - address, address + phys_addr, prot);
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address && (address < end));
@@ -64,7 +63,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
 }
 
 static int remap_area_pages(unsigned long address, unsigned long phys_addr,
-	unsigned long size, unsigned long flags)
+	unsigned long size, pgprot_t prot)
 {
 	int error;
 	pgd_t * dir;
@@ -77,13 +76,19 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 		BUG();
 	spin_lock(&init_mm.page_table_lock);
 	do {
+		pud_t *pud;
 		pmd_t *pmd;
-		pmd = pmd_alloc(&init_mm, dir, address);
+
 		error = -ENOMEM;
+		pud = pud_alloc(&init_mm, dir, address);
+		if (!pud)
+			break;
+		pmd = pmd_alloc(&init_mm, pud, address);
+
 		if (!pmd)
 			break;
 		if (remap_area_pmd(pmd, address, end - address,
-				   phys_addr + address, flags))
+				   phys_addr + address, prot))
 			break;
 		error = 0;
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
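
On a two-level architecture like CRIS the new pud level is folded away, so
pud_alloc() never actually allocates here: in effect it reinterprets the pgd
entry and cannot fail on this path. A sketch of what the folded helper
reduces to, per the generic folded-pud scheme of this era (shown for context,
with a made-up name, not the kernel's literal definition):

static inline pud_t *example_folded_pud_alloc(struct mm_struct *mm,
					      pgd_t *pgd,
					      unsigned long address)
{
	return (pud_t *)pgd;	/* the pgd entry doubles as the pud */
}
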
@@ -107,9 +112,9 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
-	void * addr;
+	void __iomem * addr;
 	struct vm_struct * area;
 	unsigned long offset, last_addr;
 
@@ -131,15 +136,36 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
-	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
-		vfree(addr);
+	addr = (void __iomem *)area->addr;
+	if (remap_area_pages((unsigned long) addr, phys_addr, size, prot)) {
+		vfree((void __force *)addr);
 		return NULL;
 	}
-	return (void *) (offset + (char *)addr);
+	return (void __iomem *) (offset + (char __iomem *)addr);
+}
+
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+	return __ioremap_prot(phys_addr, size,
+			      __pgprot(_PAGE_PRESENT | __READABLE |
+				       __WRITEABLE | _PAGE_GLOBAL |
+				       _PAGE_KERNEL | flags));
+}
+
+/**
+ * ioremap_nocache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * Must be freed with iounmap.
+ */
+
+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+	return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0);
 }
 
-void iounmap(void *addr)
+void iounmap(volatile void __iomem *addr)
 {
 	if (addr > high_memory)
 		return vfree((void *) (PAGE_MASK & (unsigned long) addr));
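
Note the split: __ioremap keeps its old signature but becomes a wrapper that
folds the legacy flag bits into a pgprot_t for __ioremap_prot, while callers
wanting uncached access go through ioremap_nocache, which sets
MEM_NON_CACHEABLE in the physical address rather than in the protection bits.
A usage sketch from a driver's point of view; the device address, size, and
register offset below are made up for illustration:

#include <asm/io.h>

#define EXAMPLE_DEV_PHYS	0x30000000UL	/* hypothetical bus address */
#define EXAMPLE_DEV_SIZE	0x1000UL

static int example_map_device(void)
{
	void __iomem *regs = ioremap_nocache(EXAMPLE_DEV_PHYS,
					     EXAMPLE_DEV_SIZE);

	if (!regs)
		return -ENOMEM;
	writel(1, regs + 0x10);		/* hypothetical enable register */
	iounmap(regs);
	return 0;
}
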
diff --git a/arch/cris/mm/tlb.c b/arch/cris/mm/tlb.c
index 23eca5ad7389..0df390a656cd 100644
--- a/arch/cris/mm/tlb.c
+++ b/arch/cris/mm/tlb.c
@@ -29,18 +29,6 @@
 struct mm_struct *page_id_map[NUM_PAGEID];
 static int map_replace_ptr = 1;  /* which page_id_map entry to replace next */
 
-/*
- * Initialize the context related info for a new mm_struct
- * instance.
- */
-
-int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-	mm->context = NO_CONTEXT;
-	return 0;
-}
-
 /* the following functions are similar to those used in the PPC port */
 
 static inline void
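
The accessor change from mm->context to mm->context.page_id throughout this
file implies that the CRIS mm_context_t became a structure, with
init_new_context() re-homed elsewhere in the tree along with it. A sketch of
what the header side presumably provides; this is an assumption, since the
header itself is not part of this diff:

typedef struct {
	unsigned int page_id;	/* hardware page ID, or NO_CONTEXT */
} mm_context_t;

static inline int example_init_new_context(struct task_struct *tsk,
					   struct mm_struct *mm)
{
	mm->context.page_id = NO_CONTEXT;	/* mirrors the deleted body */
	return 0;
}
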
@@ -60,12 +48,12 @@ alloc_context(struct mm_struct *mm)
 		 */
 		flush_tlb_mm(old_mm);
 
-		old_mm->context = NO_CONTEXT;
+		old_mm->context.page_id = NO_CONTEXT;
 	}
 
 	/* insert it into the page_id_map */
 
-	mm->context = map_replace_ptr;
+	mm->context.page_id = map_replace_ptr;
 	page_id_map[map_replace_ptr] = mm;
 
 	map_replace_ptr++;
@@ -81,7 +69,7 @@ alloc_context(struct mm_struct *mm)
 void
 get_mmu_context(struct mm_struct *mm)
 {
-	if(mm->context == NO_CONTEXT)
+	if(mm->context.page_id == NO_CONTEXT)
 		alloc_context(mm);
 }
 
@@ -96,11 +84,10 @@ get_mmu_context(struct mm_struct *mm)
 void
 destroy_context(struct mm_struct *mm)
 {
-	if(mm->context != NO_CONTEXT) {
-		D(printk("destroy_context %d (%p)\n", mm->context, mm));
+	if(mm->context.page_id != NO_CONTEXT) {
+		D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm));
 		flush_tlb_mm(mm);  /* TODO this might be redundant ? */
-		page_id_map[mm->context] = NULL;
-		/* mm->context = NO_CONTEXT; redundant.. mm will be freed */
+		page_id_map[mm->context.page_id] = NULL;
 	}
 }
 
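
The page_id in the new context structure is the hardware page ID that the
CRIS MMU tags TLB entries with. A compact restatement of the allocation
policy visible in these hunks; the wrap-around and the reserved slot are
assumptions, since that part of alloc_context() lies outside the diff:

static void example_alloc_context(struct mm_struct *mm)
{
	struct mm_struct *old_mm = page_id_map[map_replace_ptr];

	if (old_mm) {
		/* evict the previous owner's TLB entries and its ID */
		flush_tlb_mm(old_mm);
		old_mm->context.page_id = NO_CONTEXT;
	}
	mm->context.page_id = map_replace_ptr;	/* claim the slot */
	page_id_map[map_replace_ptr] = mm;
	if (++map_replace_ptr == NUM_PAGEID)
		map_replace_ptr = 1;	/* assumed wrap, skipping the
					 * ID presumed reserved for the
					 * kernel */
}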