Diffstat (limited to 'arch/frv/mm')
-rw-r--r--   arch/frv/mm/Makefile        9
-rw-r--r--   arch/frv/mm/cache-page.c   66
-rw-r--r--   arch/frv/mm/dma-alloc.c   188
-rw-r--r--   arch/frv/mm/elf-fdpic.c   123
-rw-r--r--   arch/frv/mm/extable.c      91
-rw-r--r--   arch/frv/mm/fault.c       325
-rw-r--r--   arch/frv/mm/highmem.c      33
-rw-r--r--   arch/frv/mm/init.c        241
-rw-r--r--   arch/frv/mm/kmap.c         62
-rw-r--r--   arch/frv/mm/mmu-context.c 208
-rw-r--r--   arch/frv/mm/pgalloc.c     159
-rw-r--r--   arch/frv/mm/tlb-flush.S   185
-rw-r--r--   arch/frv/mm/tlb-miss.S    631
-rw-r--r--   arch/frv/mm/unaligned.c   218
14 files changed, 2539 insertions(+), 0 deletions(-)
diff --git a/arch/frv/mm/Makefile b/arch/frv/mm/Makefile
new file mode 100644
index 000000000000..fb8b1d860f46
--- /dev/null
+++ b/arch/frv/mm/Makefile
@@ -0,0 +1,9 @@
1 | # | ||
2 | # Makefile for the arch-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init.o kmap.o | ||
6 | |||
7 | obj-$(CONFIG_MMU) += \ | ||
8 | pgalloc.o highmem.o fault.o extable.o cache-page.o tlb-flush.o tlb-miss.o \ | ||
9 | mmu-context.o dma-alloc.o unaligned.o elf-fdpic.o | ||
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
new file mode 100644
index 000000000000..683b5e344318
--- /dev/null
+++ b/arch/frv/mm/cache-page.c
@@ -0,0 +1,66 @@
1 | /* cache-page.c: whole-page cache wrangling functions for MMU linux | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/highmem.h> | ||
14 | #include <asm/pgalloc.h> | ||
15 | |||
16 | /*****************************************************************************/ | ||
17 | /* | ||
18 | * DCF takes a virtual address and the page may not currently have one | ||
19 | * - temporarily hijack a kmap_atomic() slot and attach the page to it | ||
20 | */ | ||
21 | void flush_dcache_page(struct page *page) | ||
22 | { | ||
23 | unsigned long dampr2; | ||
24 | void *vaddr; | ||
25 | |||
26 | dampr2 = __get_DAMPR(2); | ||
27 | |||
28 | vaddr = kmap_atomic(page, __KM_CACHE); | ||
29 | |||
30 | frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE); | ||
31 | |||
32 | kunmap_atomic(vaddr, __KM_CACHE); | ||
33 | |||
34 | if (dampr2) { | ||
35 | __set_DAMPR(2, dampr2); | ||
36 | __set_IAMPR(2, dampr2); | ||
37 | } | ||
38 | |||
39 | } /* end flush_dcache_page() */ | ||
40 | |||
41 | /*****************************************************************************/ | ||
42 | /* | ||
43 | * ICI takes a virtual address and the page may not currently have one | ||
44 | * - so we temporarily attach the page to a bit of virtual space so that it can be flushed
45 | */ | ||
46 | void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | ||
47 | unsigned long start, unsigned long len) | ||
48 | { | ||
49 | unsigned long dampr2; | ||
50 | void *vaddr; | ||
51 | |||
52 | dampr2 = __get_DAMPR(2); | ||
53 | |||
54 | vaddr = kmap_atomic(page, __KM_CACHE); | ||
55 | |||
56 | start = (start & ~PAGE_MASK) | (unsigned long) vaddr; | ||
57 | frv_cache_wback_inv(start, start + len); | ||
58 | |||
59 | kunmap_atomic(vaddr, __KM_CACHE); | ||
60 | |||
61 | if (dampr2) { | ||
62 | __set_DAMPR(2, dampr2); | ||
63 | __set_IAMPR(2, dampr2); | ||
64 | } | ||
65 | |||
66 | } /* end flush_icache_user_range() */ | ||
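
The two helpers above are what generic code calls after writing page contents through a kernel-side mapping. As a rough usage sketch only (the helper name is made up, and the two-argument kmap_atomic() slot follows this kernel generation's convention; it is not part of the patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* illustrative only: fill a page through a temporary kernel mapping and
 * make the new data visible before the page is mapped anywhere else */
static void demo_fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, len);
	kunmap_atomic(dst, KM_USER0);

	flush_dcache_page(page);	/* write back the kernel-side dcache copy */
}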
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
new file mode 100644
index 000000000000..4b38d45435f6
--- /dev/null
+++ b/arch/frv/mm/dma-alloc.c
@@ -0,0 +1,188 @@
1 | /* dma-alloc.c: consistent DMA memory allocation | ||
2 | * | ||
3 | * Derived from arch/ppc/mm/cachemap.c | ||
4 | * | ||
5 | * PowerPC version derived from arch/arm/mm/consistent.c | ||
6 | * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) | ||
7 | * | ||
8 | * linux/arch/arm/mm/consistent.c | ||
9 | * | ||
10 | * Copyright (C) 2000 Russell King | ||
11 | * | ||
12 | * Consistent memory allocators. Used for DMA devices that want to | ||
13 | * share uncached memory with the processor core. The function returns
14 | * the virtual address and 'dma_handle' is the physical address.
15 | * Mostly stolen from the ARM port, with some changes for PowerPC. | ||
16 | * -- Dan | ||
17 | * Modified for 36-bit support. -Matt | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or modify | ||
20 | * it under the terms of the GNU General Public License version 2 as | ||
21 | * published by the Free Software Foundation. | ||
22 | */ | ||
23 | |||
24 | #include <linux/config.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/signal.h> | ||
27 | #include <linux/sched.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/types.h> | ||
32 | #include <linux/ptrace.h> | ||
33 | #include <linux/mman.h> | ||
34 | #include <linux/mm.h> | ||
35 | #include <linux/swap.h> | ||
36 | #include <linux/stddef.h> | ||
37 | #include <linux/vmalloc.h> | ||
38 | #include <linux/init.h> | ||
39 | #include <linux/pci.h> | ||
40 | |||
41 | #include <asm/pgalloc.h> | ||
42 | #include <asm/io.h> | ||
43 | #include <asm/hardirq.h> | ||
44 | #include <asm/mmu_context.h> | ||
45 | #include <asm/pgtable.h> | ||
46 | #include <asm/mmu.h> | ||
47 | #include <asm/uaccess.h> | ||
48 | #include <asm/smp.h> | ||
49 | |||
50 | static int map_page(unsigned long va, unsigned long pa, pgprot_t prot) | ||
51 | { | ||
52 | pgd_t *pge; | ||
53 | pud_t *pue; | ||
54 | pmd_t *pme; | ||
55 | pte_t *pte; | ||
56 | int err = -ENOMEM; | ||
57 | |||
58 | spin_lock(&init_mm.page_table_lock); | ||
59 | |||
60 | /* Use upper 10 bits of VA to index the first level map */ | ||
61 | pge = pgd_offset_k(va); | ||
62 | pue = pud_offset(pge, va); | ||
63 | pme = pmd_offset(pue, va); | ||
64 | |||
65 | /* Use middle 10 bits of VA to index the second-level map */ | ||
66 | pte = pte_alloc_kernel(&init_mm, pme, va); | ||
67 | if (pte != 0) { | ||
68 | err = 0; | ||
69 | set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot)); | ||
70 | } | ||
71 | |||
72 | spin_unlock(&init_mm.page_table_lock); | ||
73 | return err; | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * This function will allocate the requested contiguous pages and | ||
78 | * map them into the kernel's vmalloc() space. This is done so we | ||
79 | * get unique mapping for these pages, outside of the kernel's 1:1 | ||
80 | * virtual:physical mapping. This is necessary so we can cover large | ||
81 | * portions of the kernel with single large page TLB entries, and | ||
82 | * still get unique uncached pages for consistent DMA. | ||
83 | */ | ||
84 | void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) | ||
85 | { | ||
86 | struct vm_struct *area; | ||
87 | unsigned long page, va, pa; | ||
88 | void *ret; | ||
89 | int order, err, i; | ||
90 | |||
91 | if (in_interrupt()) | ||
92 | BUG(); | ||
93 | |||
94 | /* only allocate page size areas */ | ||
95 | size = PAGE_ALIGN(size); | ||
96 | order = get_order(size); | ||
97 | |||
98 | page = __get_free_pages(gfp, order); | ||
99 | if (!page) { | ||
100 | BUG(); | ||
101 | return NULL; | ||
102 | } | ||
103 | |||
104 | /* allocate some common virtual space to map the new pages */ | ||
105 | area = get_vm_area(size, VM_ALLOC); | ||
106 | if (area == 0) { | ||
107 | free_pages(page, order); | ||
108 | return NULL; | ||
109 | } | ||
110 | va = VMALLOC_VMADDR(area->addr); | ||
111 | ret = (void *) va; | ||
112 | |||
113 | /* this gives us the real physical address of the first page */ | ||
114 | *dma_handle = pa = virt_to_bus((void *) page); | ||
115 | |||
116 | /* set refcount=1 on all pages in an order>0 allocation so that vfree() will actually free | ||
117 | * all pages that were allocated. | ||
118 | */ | ||
119 | if (order > 0) { | ||
120 | struct page *rpage = virt_to_page(page); | ||
121 | |||
122 | for (i = 1; i < (1 << order); i++) | ||
123 | set_page_count(rpage + i, 1); | ||
124 | } | ||
125 | |||
126 | err = 0; | ||
127 | for (i = 0; i < size && err == 0; i += PAGE_SIZE) | ||
128 | err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE); | ||
129 | |||
130 | if (err) { | ||
131 | vfree((void *) va); | ||
132 | return NULL; | ||
133 | } | ||
134 | |||
135 | /* we need to ensure that there are no cachelines in use, or worse dirty in this area | ||
136 | * - can't do until after virtual address mappings are created | ||
137 | */ | ||
138 | frv_cache_invalidate(va, va + size); | ||
139 | |||
140 | return ret; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * free page(s) as defined by the above mapping. | ||
145 | */ | ||
146 | void consistent_free(void *vaddr) | ||
147 | { | ||
148 | if (in_interrupt()) | ||
149 | BUG(); | ||
150 | vfree(vaddr); | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * make an area consistent. | ||
155 | */ | ||
156 | void consistent_sync(void *vaddr, size_t size, int direction) | ||
157 | { | ||
158 | unsigned long start = (unsigned long) vaddr; | ||
159 | unsigned long end = start + size; | ||
160 | |||
161 | switch (direction) { | ||
162 | case PCI_DMA_NONE: | ||
163 | BUG(); | ||
164 | case PCI_DMA_FROMDEVICE: /* invalidate only */ | ||
165 | frv_cache_invalidate(start, end); | ||
166 | break; | ||
167 | case PCI_DMA_TODEVICE: /* writeback only */ | ||
168 | frv_dcache_writeback(start, end); | ||
169 | break; | ||
170 | case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ | ||
171 | frv_dcache_writeback(start, end); | ||
172 | break; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * consistent_sync_page() makes a page consistent. It is identical
178 | * to consistent_sync(), but takes a struct page instead of a virtual address
179 | */ | ||
180 | |||
181 | void consistent_sync_page(struct page *page, unsigned long offset, | ||
182 | size_t size, int direction) | ||
183 | { | ||
184 | void *start; | ||
185 | |||
186 | start = page_address(page) + offset; | ||
187 | consistent_sync(start, size, direction); | ||
188 | } | ||
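
A rough sketch of how a driver would consume this allocator. Only consistent_alloc()/consistent_free() come from the file above; the ring size, variable names and the assumption that the prototypes are visible through the arch headers are illustrative:

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>

#define DEMO_RING_SIZE	4096		/* hypothetical descriptor ring size */

static void *demo_ring;			/* uncached kernel mapping */
static dma_addr_t demo_ring_dma;	/* bus address programmed into the device */

static int demo_ring_init(void)
{
	demo_ring = consistent_alloc(GFP_KERNEL, DEMO_RING_SIZE, &demo_ring_dma);
	if (!demo_ring)
		return -ENOMEM;

	memset(demo_ring, 0, DEMO_RING_SIZE);
	return 0;
}

static void demo_ring_exit(void)
{
	consistent_free(demo_ring);
}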
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
new file mode 100644
index 000000000000..f5a653033fe0
--- /dev/null
+++ b/arch/frv/mm/elf-fdpic.c
@@ -0,0 +1,123 @@
1 | /* elf-fdpic.c: ELF FDPIC memory layout management | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/fs.h> | ||
15 | #include <linux/elf-fdpic.h> | ||
16 | |||
17 | /*****************************************************************************/ | ||
18 | /* | ||
19 | * lay out the userspace VM according to our grand design | ||
20 | */ | ||
21 | #ifdef CONFIG_MMU | ||
22 | void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params, | ||
23 | struct elf_fdpic_params *interp_params, | ||
24 | unsigned long *start_stack, | ||
25 | unsigned long *start_brk) | ||
26 | { | ||
27 | *start_stack = 0x02200000UL; | ||
28 | |||
29 | /* if the only executable is a shared object, assume that it is an interpreter rather than | ||
30 | * a true executable, and map it such that "ld.so --list" comes out right | ||
31 | */ | ||
32 | if (!(interp_params->flags & ELF_FDPIC_FLAG_PRESENT) && | ||
33 | exec_params->hdr.e_type != ET_EXEC | ||
34 | ) { | ||
35 | exec_params->load_addr = PAGE_SIZE; | ||
36 | |||
37 | *start_brk = 0x80000000UL; | ||
38 | } | ||
39 | else { | ||
40 | exec_params->load_addr = 0x02200000UL; | ||
41 | |||
42 | if ((exec_params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) == | ||
43 | ELF_FDPIC_FLAG_INDEPENDENT | ||
44 | ) { | ||
45 | exec_params->flags &= ~ELF_FDPIC_FLAG_ARRANGEMENT; | ||
46 | exec_params->flags |= ELF_FDPIC_FLAG_CONSTDISP; | ||
47 | } | ||
48 | } | ||
49 | |||
50 | } /* end elf_fdpic_arch_lay_out_mm() */ | ||
51 | #endif | ||
52 | |||
53 | /*****************************************************************************/ | ||
54 | /* | ||
55 | * place non-fixed mmaps firstly in the bottom part of memory, working up, and then in the top part | ||
56 | * of memory, working down | ||
57 | */ | ||
58 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, | ||
59 | unsigned long pgoff, unsigned long flags) | ||
60 | { | ||
61 | struct vm_area_struct *vma; | ||
62 | unsigned long limit; | ||
63 | |||
64 | if (len > TASK_SIZE) | ||
65 | return -ENOMEM; | ||
66 | |||
67 | /* only honour a hint if we're not going to clobber something doing so */ | ||
68 | if (addr) { | ||
69 | addr = PAGE_ALIGN(addr); | ||
70 | vma = find_vma(current->mm, addr); | ||
71 | if (TASK_SIZE - len >= addr && | ||
72 | (!vma || addr + len <= vma->vm_start)) | ||
73 | goto success; | ||
74 | } | ||
75 | |||
76 | /* search between the bottom of user VM and the stack grow area */ | ||
77 | addr = PAGE_SIZE; | ||
78 | limit = (current->mm->start_stack - 0x00200000); | ||
79 | if (addr + len <= limit) { | ||
80 | limit -= len; | ||
81 | |||
82 | if (addr <= limit) { | ||
83 | vma = find_vma(current->mm, PAGE_SIZE); | ||
84 | for (; vma; vma = vma->vm_next) { | ||
85 | if (addr > limit) | ||
86 | break; | ||
87 | if (addr + len <= vma->vm_start) | ||
88 | goto success; | ||
89 | addr = vma->vm_end; | ||
90 | } | ||
91 | } | ||
92 | } | ||
93 | |||
94 | /* search from just above the WorkRAM area to the top of memory */ | ||
95 | addr = PAGE_ALIGN(0x80000000); | ||
96 | limit = TASK_SIZE - len; | ||
97 | if (addr <= limit) { | ||
98 | vma = find_vma(current->mm, addr); | ||
99 | for (; vma; vma = vma->vm_next) { | ||
100 | if (addr > limit) | ||
101 | break; | ||
102 | if (addr + len <= vma->vm_start) | ||
103 | goto success; | ||
104 | addr = vma->vm_end; | ||
105 | } | ||
106 | |||
107 | if (!vma && addr <= limit) | ||
108 | goto success; | ||
109 | } | ||
110 | |||
111 | #if 0 | ||
112 | printk("[area] l=%lx (ENOMEM) f='%s'\n", | ||
113 | len, filp ? filp->f_dentry->d_name.name : ""); | ||
114 | #endif | ||
115 | return -ENOMEM; | ||
116 | |||
117 | success: | ||
118 | #if 0 | ||
119 | printk("[area] l=%lx ad=%lx f='%s'\n", | ||
120 | len, addr, filp ? filp->f_dentry->d_name.name : ""); | ||
121 | #endif | ||
122 | return addr; | ||
123 | } /* end arch_get_unmapped_area() */ | ||
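
The function above runs the same first-fit walk over the mm's sorted VMA list twice, once for the low window and once for the window above 0x80000000. A condensed sketch of that walk with the window bounds as parameters (the helper name and bounds convention are illustrative, not part of the patch):

#include <linux/mm.h>
#include <linux/errno.h>

/* illustrative only: first-fit search for a len-byte hole in [addr, limit) */
static unsigned long demo_scan_window(struct mm_struct *mm, unsigned long addr,
				      unsigned long limit, unsigned long len)
{
	struct vm_area_struct *vma;

	if (len > limit || addr > limit - len)
		return -ENOMEM;

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		if (addr > limit - len)
			return -ENOMEM;
		if (addr + len <= vma->vm_start)
			return addr;		/* the hole before this VMA fits */
		addr = vma->vm_end;		/* skip the VMA and carry on upwards */
	}

	/* nothing mapped above addr: the rest of the window is free */
	return addr <= limit - len ? addr : -ENOMEM;
}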
diff --git a/arch/frv/mm/extable.c b/arch/frv/mm/extable.c
new file mode 100644
index 000000000000..41be1128dc64
--- /dev/null
+++ b/arch/frv/mm/extable.c
@@ -0,0 +1,91 @@
1 | /* | ||
2 | * linux/arch/frv/mm/extable.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/config.h> | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/spinlock.h> | ||
8 | #include <asm/uaccess.h> | ||
9 | |||
10 | extern const struct exception_table_entry __attribute__((aligned(8))) __start___ex_table[]; | ||
11 | extern const struct exception_table_entry __attribute__((aligned(8))) __stop___ex_table[]; | ||
12 | extern const void __memset_end, __memset_user_error_lr, __memset_user_error_handler; | ||
13 | extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler; | ||
14 | extern spinlock_t modlist_lock; | ||
15 | |||
16 | /*****************************************************************************/ | ||
17 | /* | ||
18 | * | ||
19 | */ | ||
20 | static inline unsigned long search_one_table(const struct exception_table_entry *first, | ||
21 | const struct exception_table_entry *last, | ||
22 | unsigned long value) | ||
23 | { | ||
24 | while (first <= last) { | ||
25 | const struct exception_table_entry __attribute__((aligned(8))) *mid; | ||
26 | long diff; | ||
27 | |||
28 | mid = (last - first) / 2 + first; | ||
29 | diff = mid->insn - value; | ||
30 | if (diff == 0) | ||
31 | return mid->fixup; | ||
32 | else if (diff < 0) | ||
33 | first = mid + 1; | ||
34 | else | ||
35 | last = mid - 1; | ||
36 | } | ||
37 | return 0; | ||
38 | } /* end search_one_table() */ | ||
39 | |||
40 | /*****************************************************************************/ | ||
41 | /* | ||
42 | * see if there's a fixup handler available to deal with a kernel fault | ||
43 | */ | ||
44 | unsigned long search_exception_table(unsigned long pc) | ||
45 | { | ||
46 | unsigned long ret = 0; | ||
47 | |||
48 | /* determine whether the fault occurred during a memcpy_user or a memset_user */
49 | if (__frame->lr == (unsigned long) &__memset_user_error_lr && | ||
50 | (unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end | ||
51 | ) { | ||
52 | /* the fault occurred in a protected memset | ||
53 | * - we search for the return address (in LR) instead of the program counter | ||
54 | * - it was probably during a clear_user() | ||
55 | */ | ||
56 | return (unsigned long) &__memset_user_error_handler; | ||
57 | } | ||
58 | else if (__frame->lr == (unsigned long) &__memcpy_user_error_lr && | ||
59 | (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end | ||
60 | ) { | ||
61 | /* the fault occurred in a protected memcpy
62 | * - we search for the return address (in LR) instead of the program counter | ||
63 | * - it was probably during a copy_to/from_user() | ||
64 | */ | ||
65 | return (unsigned long) &__memcpy_user_error_handler; | ||
66 | } | ||
67 | |||
68 | #ifndef CONFIG_MODULES | ||
69 | /* there is only the kernel to search. */ | ||
70 | ret = search_one_table(__start___ex_table, __stop___ex_table - 1, pc); | ||
71 | return ret; | ||
72 | |||
73 | #else | ||
74 | /* the kernel is the last "module" -- no need to treat it special */ | ||
75 | unsigned long flags; | ||
76 | struct module *mp; | ||
77 | |||
78 | spin_lock_irqsave(&modlist_lock, flags); | ||
79 | |||
80 | for (mp = module_list; mp != NULL; mp = mp->next) { | ||
81 | if (mp->ex_table_start == NULL || !(mp->flags & (MOD_RUNNING | MOD_INITIALIZING))) | ||
82 | continue; | ||
83 | ret = search_one_table(mp->ex_table_start, mp->ex_table_end - 1, pc); | ||
84 | if (ret) | ||
85 | break; | ||
86 | } | ||
87 | |||
88 | spin_unlock_irqrestore(&modlist_lock, flags); | ||
89 | return ret; | ||
90 | #endif | ||
91 | } /* end search_exception_table() */ | ||
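
For reference, each exception table entry pairs the address of an instruction that may fault on a user access with the address of its fixup stub, and the table is kept sorted so the binary search above works. A small illustrative table (the addresses are made up):

#include <asm/uaccess.h>

/* illustrative only: three sorted (faulting insn, fixup) pairs */
static const struct exception_table_entry demo_table[] = {
	{ .insn = 0x1000, .fixup = 0x2000 },
	{ .insn = 0x1008, .fixup = 0x2010 },
	{ .insn = 0x1010, .fixup = 0x2020 },
};

/* search_one_table(demo_table, demo_table + 2, 0x1008) would return 0x2010;
 * a PC that is not listed yields 0, and the caller then oopses instead of
 * patching the saved PC. */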
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
new file mode 100644
index 000000000000..41d02ac48233
--- /dev/null
+++ b/arch/frv/mm/fault.c
@@ -0,0 +1,325 @@
1 | /* | ||
2 | * linux/arch/frv/mm/fault.c | ||
3 | * | ||
4 | * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. | ||
5 | * - Written by David Howells (dhowells@redhat.com) | ||
6 | * - Derived from arch/m68knommu/mm/fault.c | ||
7 | * - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, | ||
8 | * - Copyright (C) 2000 Lineo, Inc. (www.lineo.com) | ||
9 | * | ||
10 | * Based on: | ||
11 | * | ||
12 | * linux/arch/m68k/mm/fault.c | ||
13 | * | ||
14 | * Copyright (C) 1995 Hamish Macdonald | ||
15 | */ | ||
16 | |||
17 | #include <linux/mman.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/ptrace.h> | ||
21 | #include <linux/hardirq.h> | ||
22 | |||
23 | #include <asm/system.h> | ||
24 | #include <asm/pgtable.h> | ||
25 | #include <asm/uaccess.h> | ||
26 | #include <asm/gdb-stub.h> | ||
27 | |||
28 | /*****************************************************************************/ | ||
29 | /* | ||
30 | * This routine handles page faults. It determines the problem, and | ||
31 | * then passes it off to one of the appropriate routines. | ||
32 | */ | ||
33 | asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear0) | ||
34 | { | ||
35 | struct vm_area_struct *vma; | ||
36 | struct mm_struct *mm; | ||
37 | unsigned long _pme, lrai, lrad, fixup; | ||
38 | siginfo_t info; | ||
39 | pgd_t *pge; | ||
40 | pud_t *pue; | ||
41 | pte_t *pte; | ||
42 | int write; | ||
43 | |||
44 | #if 0 | ||
45 | const char *atxc[16] = { | ||
46 | [0x0] = "mmu-miss", [0x8] = "multi-dat", [0x9] = "multi-sat", | ||
47 | [0xa] = "tlb-miss", [0xc] = "privilege", [0xd] = "write-prot", | ||
48 | }; | ||
49 | |||
50 | printk("do_page_fault(%d,%lx [%s],%lx)\n", | ||
51 | datammu, esr0, atxc[esr0 >> 20 & 0xf], ear0); | ||
52 | #endif | ||
53 | |||
54 | mm = current->mm; | ||
55 | |||
56 | /* | ||
57 | * We fault-in kernel-space virtual memory on-demand. The | ||
58 | * 'reference' page table is init_mm.pgd. | ||
59 | * | ||
60 | * NOTE! We MUST NOT take any locks for this case. We may | ||
61 | * be in an interrupt or a critical region, and should | ||
62 | * only copy the information from the master page table, | ||
63 | * nothing more. | ||
64 | * | ||
65 | * This verifies that the fault happens in kernel space | ||
66 | * and that the fault was a page not present (invalid) error | ||
67 | */ | ||
68 | if (!user_mode(__frame) && (esr0 & ESR0_ATXC) == ESR0_ATXC_AMRTLB_MISS) { | ||
69 | if (ear0 >= VMALLOC_START && ear0 < VMALLOC_END) | ||
70 | goto kernel_pte_fault; | ||
71 | if (ear0 >= PKMAP_BASE && ear0 < PKMAP_END) | ||
72 | goto kernel_pte_fault; | ||
73 | } | ||
74 | |||
75 | info.si_code = SEGV_MAPERR; | ||
76 | |||
77 | /* | ||
78 | * If we're in an interrupt or have no user | ||
79 | * context, we must not take the fault.. | ||
80 | */ | ||
81 | if (in_interrupt() || !mm) | ||
82 | goto no_context; | ||
83 | |||
84 | down_read(&mm->mmap_sem); | ||
85 | |||
86 | vma = find_vma(mm, ear0); | ||
87 | if (!vma) | ||
88 | goto bad_area; | ||
89 | if (vma->vm_start <= ear0) | ||
90 | goto good_area; | ||
91 | if (!(vma->vm_flags & VM_GROWSDOWN)) | ||
92 | goto bad_area; | ||
93 | |||
94 | if (user_mode(__frame)) { | ||
95 | /* | ||
96 | * accessing the stack below the stack pointer is always a bug.
97 | * The two-page leeway is there because some instructions
98 | * store below the stack pointer and the adjustment doesn't
99 | * show up until later..
100 | */ | ||
101 | if ((ear0 & PAGE_MASK) + 2 * PAGE_SIZE < __frame->sp) { | ||
102 | #if 0 | ||
103 | printk("[%d] ### Access below stack @%lx (sp=%lx)\n", | ||
104 | current->pid, ear0, __frame->sp); | ||
105 | show_registers(__frame); | ||
106 | printk("[%d] ### Code: [%08lx] %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
107 | current->pid, | ||
108 | __frame->pc, | ||
109 | ((u8*)__frame->pc)[0], | ||
110 | ((u8*)__frame->pc)[1], | ||
111 | ((u8*)__frame->pc)[2], | ||
112 | ((u8*)__frame->pc)[3], | ||
113 | ((u8*)__frame->pc)[4], | ||
114 | ((u8*)__frame->pc)[5], | ||
115 | ((u8*)__frame->pc)[6], | ||
116 | ((u8*)__frame->pc)[7] | ||
117 | ); | ||
118 | #endif | ||
119 | goto bad_area; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | if (expand_stack(vma, ear0)) | ||
124 | goto bad_area; | ||
125 | |||
126 | /* | ||
127 | * Ok, we have a good vm_area for this memory access, so | ||
128 | * we can handle it.. | ||
129 | */ | ||
130 | good_area: | ||
131 | info.si_code = SEGV_ACCERR; | ||
132 | write = 0; | ||
133 | switch (esr0 & ESR0_ATXC) { | ||
134 | default: | ||
135 | /* handle write to write protected page */ | ||
136 | case ESR0_ATXC_WP_EXCEP: | ||
137 | #ifdef TEST_VERIFY_AREA | ||
138 | if (!(user_mode(__frame))) | ||
139 | printk("WP fault at %08lx\n", __frame->pc); | ||
140 | #endif | ||
141 | if (!(vma->vm_flags & VM_WRITE)) | ||
142 | goto bad_area; | ||
143 | write = 1; | ||
144 | break; | ||
145 | |||
146 | /* handle read from protected page */ | ||
147 | case ESR0_ATXC_PRIV_EXCEP: | ||
148 | goto bad_area; | ||
149 | |||
150 | /* handle read, write or exec on absent page | ||
151 | * - can't support write without permitting read | ||
152 | * - don't support execute without permitting read and vice-versa | ||
153 | */ | ||
154 | case ESR0_ATXC_AMRTLB_MISS: | ||
155 | if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) | ||
156 | goto bad_area; | ||
157 | break; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * If for any reason at all we couldn't handle the fault, | ||
162 | * make sure we exit gracefully rather than endlessly redo | ||
163 | * the fault. | ||
164 | */ | ||
165 | switch (handle_mm_fault(mm, vma, ear0, write)) { | ||
166 | case 1: | ||
167 | current->min_flt++; | ||
168 | break; | ||
169 | case 2: | ||
170 | current->maj_flt++; | ||
171 | break; | ||
172 | case 0: | ||
173 | goto do_sigbus; | ||
174 | default: | ||
175 | goto out_of_memory; | ||
176 | } | ||
177 | |||
178 | up_read(&mm->mmap_sem); | ||
179 | return; | ||
180 | |||
181 | /* | ||
182 | * Something tried to access memory that isn't in our memory map.. | ||
183 | * Fix it, but check if it's kernel or user first.. | ||
184 | */ | ||
185 | bad_area: | ||
186 | up_read(&mm->mmap_sem); | ||
187 | |||
188 | /* User mode accesses just cause a SIGSEGV */ | ||
189 | if (user_mode(__frame)) { | ||
190 | info.si_signo = SIGSEGV; | ||
191 | info.si_errno = 0; | ||
192 | /* info.si_code has been set above */ | ||
193 | info.si_addr = (void *) ear0; | ||
194 | force_sig_info(SIGSEGV, &info, current); | ||
195 | return; | ||
196 | } | ||
197 | |||
198 | no_context: | ||
199 | /* are we prepared to handle this kernel fault? */ | ||
200 | if ((fixup = search_exception_table(__frame->pc)) != 0) { | ||
201 | __frame->pc = fixup; | ||
202 | return; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * Oops. The kernel tried to access some bad page. We'll have to | ||
207 | * terminate things with extreme prejudice. | ||
208 | */ | ||
209 | |||
210 | bust_spinlocks(1); | ||
211 | |||
212 | if (ear0 < PAGE_SIZE) | ||
213 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); | ||
214 | else | ||
215 | printk(KERN_ALERT "Unable to handle kernel paging request"); | ||
216 | printk(" at virtual addr %08lx\n", ear0); | ||
217 | printk(" PC : %08lx\n", __frame->pc); | ||
218 | printk(" EXC : esr0=%08lx ear0=%08lx\n", esr0, ear0); | ||
219 | |||
220 | asm("lrai %1,%0,#1,#0,#0" : "=&r"(lrai) : "r"(ear0)); | ||
221 | asm("lrad %1,%0,#1,#0,#0" : "=&r"(lrad) : "r"(ear0)); | ||
222 | |||
223 | printk(KERN_ALERT " LRAI: %08lx\n", lrai); | ||
224 | printk(KERN_ALERT " LRAD: %08lx\n", lrad); | ||
225 | |||
226 | __break_hijack_kernel_event(); | ||
227 | |||
228 | pge = pgd_offset(current->mm, ear0); | ||
229 | pue = pud_offset(pge, ear0); | ||
230 | _pme = pue->pue[0].ste[0]; | ||
231 | |||
232 | printk(KERN_ALERT " PGE : %8p { PME %08lx }\n", pge, _pme); | ||
233 | |||
234 | if (_pme & xAMPRx_V) { | ||
235 | unsigned long dampr, damlr, val; | ||
236 | |||
237 | asm volatile("movsg dampr2,%0 ! movgs %2,dampr2 ! movsg damlr2,%1" | ||
238 | : "=&r"(dampr), "=r"(damlr) | ||
239 | : "r" (_pme | xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V) | ||
240 | ); | ||
241 | |||
242 | pte = (pte_t *) damlr + __pte_index(ear0); | ||
243 | val = pte_val(*pte); | ||
244 | |||
245 | asm volatile("movgs %0,dampr2" :: "r" (dampr)); | ||
246 | |||
247 | printk(KERN_ALERT " PTE : %8p { %08lx }\n", pte, val); | ||
248 | } | ||
249 | |||
250 | die_if_kernel("Oops\n"); | ||
251 | do_exit(SIGKILL); | ||
252 | |||
253 | /* | ||
254 | * We ran out of memory, or some other thing happened to us that made | ||
255 | * us unable to handle the page fault gracefully. | ||
256 | */ | ||
257 | out_of_memory: | ||
258 | up_read(&mm->mmap_sem); | ||
259 | printk("VM: killing process %s\n", current->comm); | ||
260 | if (user_mode(__frame)) | ||
261 | do_exit(SIGKILL); | ||
262 | goto no_context; | ||
263 | |||
264 | do_sigbus: | ||
265 | up_read(&mm->mmap_sem); | ||
266 | |||
267 | /* | ||
268 | * Send a sigbus, regardless of whether we were in kernel | ||
269 | * or user mode. | ||
270 | */ | ||
271 | info.si_signo = SIGBUS; | ||
272 | info.si_errno = 0; | ||
273 | info.si_code = BUS_ADRERR; | ||
274 | info.si_addr = (void *) ear0; | ||
275 | force_sig_info(SIGBUS, &info, current); | ||
276 | |||
277 | /* Kernel mode? Handle exceptions or die */ | ||
278 | if (!user_mode(__frame)) | ||
279 | goto no_context; | ||
280 | return; | ||
281 | |||
282 | /* | ||
283 | * The fault was caused by a kernel PTE (such as installed by vmalloc or kmap) | ||
284 | */ | ||
285 | kernel_pte_fault: | ||
286 | { | ||
287 | /* | ||
288 | * Synchronize this task's top level page-table | ||
289 | * with the 'reference' page table. | ||
290 | * | ||
291 | * Do _not_ use "tsk" here. We might be inside | ||
292 | * an interrupt in the middle of a task switch.. | ||
293 | */ | ||
294 | int index = pgd_index(ear0); | ||
295 | pgd_t *pgd, *pgd_k; | ||
296 | pud_t *pud, *pud_k; | ||
297 | pmd_t *pmd, *pmd_k; | ||
298 | pte_t *pte_k; | ||
299 | |||
300 | pgd = (pgd_t *) __get_TTBR(); | ||
301 | pgd = (pgd_t *)__va(pgd) + index; | ||
302 | pgd_k = ((pgd_t *)(init_mm.pgd)) + index; | ||
303 | |||
304 | if (!pgd_present(*pgd_k)) | ||
305 | goto no_context; | ||
306 | //set_pgd(pgd, *pgd_k); /////// gcc ICE's on this line | ||
307 | |||
308 | pud_k = pud_offset(pgd_k, ear0); | ||
309 | if (!pud_present(*pud_k)) | ||
310 | goto no_context; | ||
311 | |||
312 | pmd_k = pmd_offset(pud_k, ear0); | ||
313 | if (!pmd_present(*pmd_k)) | ||
314 | goto no_context; | ||
315 | |||
316 | pud = pud_offset(pgd, ear0); | ||
317 | pmd = pmd_offset(pud, ear0); | ||
318 | set_pmd(pmd, *pmd_k); | ||
319 | |||
320 | pte_k = pte_offset_kernel(pmd_k, ear0); | ||
321 | if (!pte_present(*pte_k)) | ||
322 | goto no_context; | ||
323 | return; | ||
324 | } | ||
325 | } /* end do_page_fault() */ | ||
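
Seen from userspace, the SIGSEGV path above fills si_code and si_addr from esr0/ear0. A small self-contained test program (to be compiled and run on the target; not part of the kernel) that provokes and reports it:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void on_segv(int sig, siginfo_t *si, void *uctx)
{
	/* SEGV_MAPERR for an unmapped address, SEGV_ACCERR for a protection fault */
	printf("SIGSEGV: si_code=%d si_addr=%p\n", si->si_code, si->si_addr);
	exit(0);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *) 0x10 = 1;	/* unmapped address: expect SEGV_MAPERR */
	return 1;
}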
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
new file mode 100644
index 000000000000..7dc8fbf3af97
--- /dev/null
+++ b/arch/frv/mm/highmem.c
@@ -0,0 +1,33 @@
1 | /* highmem.c: arch-specific highmem stuff | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/highmem.h> | ||
12 | |||
13 | void *kmap(struct page *page) | ||
14 | { | ||
15 | might_sleep(); | ||
16 | if (!PageHighMem(page)) | ||
17 | return page_address(page); | ||
18 | return kmap_high(page); | ||
19 | } | ||
20 | |||
21 | void kunmap(struct page *page) | ||
22 | { | ||
23 | if (in_interrupt()) | ||
24 | BUG(); | ||
25 | if (!PageHighMem(page)) | ||
26 | return; | ||
27 | kunmap_high(page); | ||
28 | } | ||
29 | |||
30 | struct page *kmap_atomic_to_page(void *ptr) | ||
31 | { | ||
32 | return virt_to_page(ptr); | ||
33 | } | ||
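
The usual calling pattern for these wrappers, as a sketch; the helper name is illustrative:

#include <linux/highmem.h>
#include <linux/string.h>

/* illustrative only: zero a page that may live in highmem */
static void demo_zero_page(struct page *page)
{
	void *va = kmap(page);		/* may sleep; lowmem pages short-circuit */

	memset(va, 0, PAGE_SIZE);
	kunmap(page);			/* a no-op for lowmem pages */
}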
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
new file mode 100644
index 000000000000..41958f57c838
--- /dev/null
+++ b/arch/frv/mm/init.c
@@ -0,0 +1,241 @@
1 | /* init.c: memory initialisation for FRV | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * Derived from: | ||
12 | * - linux/arch/m68knommu/mm/init.c | ||
13 | * - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, Kenneth Albanowski <kjahds@kjahds.com>, | ||
14 | * - Copyright (C) 2000 Lineo, Inc. (www.lineo.com) | ||
15 | * - linux/arch/m68k/mm/init.c | ||
16 | * - Copyright (C) 1995 Hamish Macdonald | ||
17 | */ | ||
18 | |||
19 | #include <linux/config.h> | ||
20 | #include <linux/signal.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/pagemap.h> | ||
23 | #include <linux/swap.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/string.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/bootmem.h> | ||
29 | #include <linux/highmem.h> | ||
30 | |||
31 | #include <asm/setup.h> | ||
32 | #include <asm/segment.h> | ||
33 | #include <asm/page.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/system.h> | ||
36 | #include <asm/mmu_context.h> | ||
37 | #include <asm/virtconvert.h> | ||
38 | #include <asm/sections.h> | ||
39 | #include <asm/tlb.h> | ||
40 | |||
41 | #undef DEBUG | ||
42 | |||
43 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
44 | |||
45 | /* | ||
46 | * BAD_PAGE is the page that is used for page faults when linux | ||
47 | * is out-of-memory. Older versions of linux just did a | ||
48 | * do_exit(), but using this instead means there is less risk | ||
49 | * for a process dying in kernel mode, possibly leaving an inode
50 | * unused etc.. | ||
51 | * | ||
52 | * BAD_PAGETABLE is the accompanying page-table: it is initialized | ||
53 | * to point to BAD_PAGE entries. | ||
54 | * | ||
55 | * ZERO_PAGE is a special page that is used for zero-initialized | ||
56 | * data and COW. | ||
57 | */ | ||
58 | static unsigned long empty_bad_page_table; | ||
59 | static unsigned long empty_bad_page; | ||
60 | unsigned long empty_zero_page; | ||
61 | |||
62 | /*****************************************************************************/ | ||
63 | /* | ||
64 | * | ||
65 | */ | ||
66 | void show_mem(void) | ||
67 | { | ||
68 | unsigned long i; | ||
69 | int free = 0, total = 0, reserved = 0, shared = 0; | ||
70 | |||
71 | printk("\nMem-info:\n"); | ||
72 | show_free_areas(); | ||
73 | i = max_mapnr; | ||
74 | while (i-- > 0) { | ||
75 | struct page *page = &mem_map[i]; | ||
76 | |||
77 | total++; | ||
78 | if (PageReserved(page)) | ||
79 | reserved++; | ||
80 | else if (!page_count(page)) | ||
81 | free++; | ||
82 | else | ||
83 | shared += page_count(page) - 1; | ||
84 | } | ||
85 | |||
86 | printk("%d pages of RAM\n",total); | ||
87 | printk("%d free pages\n",free); | ||
88 | printk("%d reserved pages\n",reserved); | ||
89 | printk("%d pages shared\n",shared); | ||
90 | |||
91 | } /* end show_mem() */ | ||
92 | |||
93 | /*****************************************************************************/ | ||
94 | /* | ||
95 | * paging_init() continues the virtual memory environment setup which | ||
96 | * was begun by the code in arch/head.S. | ||
97 | * It allocates the kernel housekeeping pages, sets up the highmem
98 | * pkmap page table and hands the available pages to the zone allocator.
99 | */ | ||
100 | void __init paging_init(void) | ||
101 | { | ||
102 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; | ||
103 | |||
104 | /* allocate some pages for kernel housekeeping tasks */ | ||
105 | empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); | ||
106 | empty_bad_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); | ||
107 | empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); | ||
108 | |||
109 | memset((void *) empty_zero_page, 0, PAGE_SIZE); | ||
110 | |||
111 | #ifdef CONFIG_HIGHMEM
112 | if (num_physpages - num_mappedpages) { | ||
113 | pgd_t *pge; | ||
114 | pud_t *pue; | ||
115 | pmd_t *pme; | ||
116 | |||
117 | pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE); | ||
118 | |||
119 | memset(pkmap_page_table, 0, PAGE_SIZE); | ||
120 | |||
121 | pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE); | ||
122 | pue = pud_offset(pge, PKMAP_BASE); | ||
123 | pme = pmd_offset(pue, PKMAP_BASE); | ||
124 | __set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE); | ||
125 | } | ||
126 | #endif | ||
127 | |||
128 | /* distribute the allocatable pages across the various zones and pass them to the allocator | ||
129 | */ | ||
130 | zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn; | ||
131 | zones_size[ZONE_NORMAL] = 0; | ||
132 | #ifdef CONFIG_HIGHMEM | ||
133 | zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages; | ||
134 | #endif | ||
135 | |||
136 | free_area_init(zones_size); | ||
137 | |||
138 | #ifdef CONFIG_MMU | ||
139 | /* initialise init's MMU context */ | ||
140 | init_new_context(&init_task, &init_mm); | ||
141 | #endif | ||
142 | |||
143 | } /* end paging_init() */ | ||
144 | |||
145 | /*****************************************************************************/ | ||
146 | /* | ||
147 | * | ||
148 | */ | ||
149 | void __init mem_init(void) | ||
150 | { | ||
151 | unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT; | ||
152 | unsigned long tmp; | ||
153 | #ifdef CONFIG_MMU | ||
154 | unsigned long loop, pfn; | ||
155 | int datapages = 0; | ||
156 | #endif | ||
157 | int codek = 0, datak = 0; | ||
158 | |||
159 | /* this will put all memory onto the freelists */ | ||
160 | totalram_pages = free_all_bootmem(); | ||
161 | |||
162 | #ifdef CONFIG_MMU | ||
163 | for (loop = 0 ; loop < npages ; loop++) | ||
164 | if (PageReserved(&mem_map[loop])) | ||
165 | datapages++; | ||
166 | |||
167 | #ifdef CONFIG_HIGHMEM | ||
168 | for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) { | ||
169 | struct page *page = &mem_map[pfn]; | ||
170 | |||
171 | ClearPageReserved(page); | ||
172 | set_bit(PG_highmem, &page->flags); | ||
173 | set_page_count(page, 1); | ||
174 | __free_page(page); | ||
175 | totalram_pages++; | ||
176 | } | ||
177 | #endif | ||
178 | |||
179 | codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10; | ||
180 | datak = datapages << (PAGE_SHIFT - 10); | ||
181 | |||
182 | #else | ||
183 | codek = (_etext - _stext) >> 10; | ||
184 | datak = 0; //(_ebss - _sdata) >> 10; | ||
185 | #endif | ||
186 | |||
187 | tmp = nr_free_pages() << PAGE_SHIFT; | ||
188 | printk("Memory available: %luKiB/%luKiB RAM, %luKiB/%luKiB ROM (%dKiB kernel code, %dKiB data)\n", | ||
189 | tmp >> 10, | ||
190 | npages << (PAGE_SHIFT - 10), | ||
191 | (rom_length > 0) ? ((rom_length >> 10) - codek) : 0, | ||
192 | rom_length >> 10, | ||
193 | codek, | ||
194 | datak | ||
195 | ); | ||
196 | |||
197 | } /* end mem_init() */ | ||
198 | |||
199 | /*****************************************************************************/ | ||
200 | /* | ||
201 | * free the memory that was only required for initialisation | ||
202 | */ | ||
203 | void __init free_initmem(void) | ||
204 | { | ||
205 | #if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL) | ||
206 | unsigned long start, end, addr; | ||
207 | |||
208 | start = PAGE_ALIGN((unsigned long) &__init_begin); /* round up */ | ||
209 | end = ((unsigned long) &__init_end) & PAGE_MASK; /* round down */ | ||
210 | |||
211 | /* the rounding above ensures that we never free a partial page */
212 | for (addr = start; addr < end; addr += PAGE_SIZE) { | ||
213 | ClearPageReserved(virt_to_page(addr)); | ||
214 | set_page_count(virt_to_page(addr), 1); | ||
215 | free_page(addr); | ||
216 | totalram_pages++; | ||
217 | } | ||
218 | |||
219 | printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n", | ||
220 | (end - start) >> 10, start, end); | ||
221 | #endif | ||
222 | } /* end free_initmem() */ | ||
223 | |||
224 | /*****************************************************************************/ | ||
225 | /* | ||
226 | * free the initial ramdisk memory | ||
227 | */ | ||
228 | #ifdef CONFIG_BLK_DEV_INITRD | ||
229 | void __init free_initrd_mem(unsigned long start, unsigned long end) | ||
230 | { | ||
231 | int pages = 0; | ||
232 | for (; start < end; start += PAGE_SIZE) { | ||
233 | ClearPageReserved(virt_to_page(start)); | ||
234 | set_page_count(virt_to_page(start), 1); | ||
235 | free_page(start); | ||
236 | totalram_pages++; | ||
237 | pages++; | ||
238 | } | ||
239 | printk("Freeing initrd memory: %dKiB freed\n", (pages * PAGE_SIZE) >> 10); | ||
240 | } /* end free_initrd_mem() */ | ||
241 | #endif | ||
diff --git a/arch/frv/mm/kmap.c b/arch/frv/mm/kmap.c
new file mode 100644
index 000000000000..539f45e6d15e
--- /dev/null
+++ b/arch/frv/mm/kmap.c
@@ -0,0 +1,62 @@
1 | /* kmap.c: ioremapping handlers | ||
2 | * | ||
3 | * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * - Derived from arch/m68k/mm/kmap.c | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/vmalloc.h> | ||
19 | |||
20 | #include <asm/setup.h> | ||
21 | #include <asm/segment.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <asm/pgalloc.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/system.h> | ||
26 | |||
27 | #undef DEBUG | ||
28 | |||
29 | /*****************************************************************************/ | ||
30 | /* | ||
31 | * Map some physical address range into the kernel address space. | ||
32 | */ | ||
33 | |||
34 | void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag) | ||
35 | { | ||
36 | return (void *)physaddr; | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Unmap a ioremap()ed region again | ||
41 | */ | ||
42 | void iounmap(void *addr) | ||
43 | { | ||
44 | } | ||
45 | |||
46 | /* | ||
47 | * __iounmap unmaps nearly everything, so be careful.
48 | * It no longer frees the current pointer/page tables, but that
49 | * wasn't used anyway and might be added back later.
50 | */ | ||
51 | void __iounmap(void *addr, unsigned long size) | ||
52 | { | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * Set new cache mode for some kernel address space. | ||
57 | * The caller must push data for that range itself, if such data may already | ||
58 | * be in the cache. | ||
59 | */ | ||
60 | void kernel_set_cachemode(void *addr, unsigned long size, int cmode) | ||
61 | { | ||
62 | } | ||
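
Because __ioremap() above simply hands back the physical address (FRV runs with I/O space already mapped), the normal driver idiom keeps working unchanged. A sketch with a made-up register block; the base address and register layout are illustrative assumptions:

#include <asm/io.h>

#define DEMO_MMIO_BASE	0xfe000000UL	/* hypothetical device register block */

/* illustrative only: standard ioremap()/readl()/iounmap() usage */
static unsigned int demo_read_id(void)
{
	void *regs = ioremap(DEMO_MMIO_BASE, 0x100);
	unsigned int id;

	if (!regs)
		return 0;

	id = readl(regs);		/* hypothetical ID register at offset 0 */
	iounmap(regs);
	return id;
}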
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
new file mode 100644
index 000000000000..f2c6866fc88b
--- /dev/null
+++ b/arch/frv/mm/mmu-context.c
@@ -0,0 +1,208 @@
1 | /* mmu-context.c: MMU context allocation and management | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <asm/tlbflush.h> | ||
15 | |||
16 | #define NR_CXN 4096 | ||
17 | |||
18 | static unsigned long cxn_bitmap[NR_CXN / (sizeof(unsigned long) * 8)]; | ||
19 | static LIST_HEAD(cxn_owners_lru); | ||
20 | static DEFINE_SPINLOCK(cxn_owners_lock); | ||
21 | |||
22 | int __nongpreldata cxn_pinned = -1; | ||
23 | |||
24 | |||
25 | /*****************************************************************************/ | ||
26 | /* | ||
27 | * initialise a new context | ||
28 | */ | ||
29 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
30 | { | ||
31 | memset(&mm->context, 0, sizeof(mm->context)); | ||
32 | INIT_LIST_HEAD(&mm->context.id_link); | ||
33 | mm->context.itlb_cached_pge = 0xffffffffUL; | ||
34 | mm->context.dtlb_cached_pge = 0xffffffffUL; | ||
35 | |||
36 | return 0; | ||
37 | } /* end init_new_context() */ | ||
38 | |||
39 | /*****************************************************************************/ | ||
40 | /* | ||
41 | * make sure a kernel MMU context has a CPU context number | ||
42 | * - call with cxn_owners_lock held | ||
43 | */ | ||
44 | static unsigned get_cxn(mm_context_t *ctx) | ||
45 | { | ||
46 | struct list_head *_p; | ||
47 | mm_context_t *p; | ||
48 | unsigned cxn; | ||
49 | |||
50 | if (!list_empty(&ctx->id_link)) { | ||
51 | list_move_tail(&ctx->id_link, &cxn_owners_lru); | ||
52 | } | ||
53 | else { | ||
54 | /* find the first unallocated context number | ||
55 | * - 0 is reserved for the kernel | ||
56 | */ | ||
57 | cxn = find_next_zero_bit(&cxn_bitmap, NR_CXN, 1); | ||
58 | if (cxn < NR_CXN) { | ||
59 | set_bit(cxn, &cxn_bitmap); | ||
60 | } | ||
61 | else { | ||
62 | /* none remaining - need to steal someone else's cxn */ | ||
63 | p = NULL; | ||
64 | list_for_each(_p, &cxn_owners_lru) { | ||
65 | p = list_entry(_p, mm_context_t, id_link); | ||
66 | if (!p->id_busy && p->id != cxn_pinned) | ||
67 | break; | ||
68 | } | ||
69 | |||
70 | BUG_ON(_p == &cxn_owners_lru); | ||
71 | |||
72 | cxn = p->id; | ||
73 | p->id = 0; | ||
74 | list_del_init(&p->id_link); | ||
75 | __flush_tlb_mm(cxn); | ||
76 | } | ||
77 | |||
78 | ctx->id = cxn; | ||
79 | list_add_tail(&ctx->id_link, &cxn_owners_lru); | ||
80 | } | ||
81 | |||
82 | return ctx->id; | ||
83 | } /* end get_cxn() */ | ||
84 | |||
85 | /*****************************************************************************/ | ||
86 | /* | ||
87 | * restore the current TLB miss handler mapped page tables into the MMU context and set up a | ||
88 | * mapping for the page directory | ||
89 | */ | ||
90 | void change_mm_context(mm_context_t *old, mm_context_t *ctx, pgd_t *pgd) | ||
91 | { | ||
92 | unsigned long _pgd; | ||
93 | |||
94 | _pgd = virt_to_phys(pgd); | ||
95 | |||
96 | /* save the state of the outgoing MMU context */ | ||
97 | old->id_busy = 0; | ||
98 | |||
99 | asm volatile("movsg scr0,%0" : "=r"(old->itlb_cached_pge)); | ||
100 | asm volatile("movsg dampr4,%0" : "=r"(old->itlb_ptd_mapping)); | ||
101 | asm volatile("movsg scr1,%0" : "=r"(old->dtlb_cached_pge)); | ||
102 | asm volatile("movsg dampr5,%0" : "=r"(old->dtlb_ptd_mapping)); | ||
103 | |||
104 | /* select an MMU context number */ | ||
105 | spin_lock(&cxn_owners_lock); | ||
106 | get_cxn(ctx); | ||
107 | ctx->id_busy = 1; | ||
108 | spin_unlock(&cxn_owners_lock); | ||
109 | |||
110 | asm volatile("movgs %0,cxnr" : : "r"(ctx->id)); | ||
111 | |||
112 | /* restore the state of the incoming MMU context */ | ||
113 | asm volatile("movgs %0,scr0" : : "r"(ctx->itlb_cached_pge)); | ||
114 | asm volatile("movgs %0,dampr4" : : "r"(ctx->itlb_ptd_mapping)); | ||
115 | asm volatile("movgs %0,scr1" : : "r"(ctx->dtlb_cached_pge)); | ||
116 | asm volatile("movgs %0,dampr5" : : "r"(ctx->dtlb_ptd_mapping)); | ||
117 | |||
118 | /* map the PGD into uncached virtual memory */ | ||
119 | asm volatile("movgs %0,ttbr" : : "r"(_pgd)); | ||
120 | asm volatile("movgs %0,dampr3" | ||
121 | :: "r"(_pgd | xAMPRx_L | xAMPRx_M | xAMPRx_SS_16Kb | | ||
122 | xAMPRx_S | xAMPRx_C | xAMPRx_V)); | ||
123 | |||
124 | } /* end change_mm_context() */ | ||
125 | |||
126 | /*****************************************************************************/ | ||
127 | /* | ||
128 | * finished with an MMU context number | ||
129 | */ | ||
130 | void destroy_context(struct mm_struct *mm) | ||
131 | { | ||
132 | mm_context_t *ctx = &mm->context; | ||
133 | |||
134 | spin_lock(&cxn_owners_lock); | ||
135 | |||
136 | if (!list_empty(&ctx->id_link)) { | ||
137 | if (ctx->id == cxn_pinned) | ||
138 | cxn_pinned = -1; | ||
139 | |||
140 | list_del_init(&ctx->id_link); | ||
141 | clear_bit(ctx->id, &cxn_bitmap); | ||
142 | __flush_tlb_mm(ctx->id); | ||
143 | ctx->id = 0; | ||
144 | } | ||
145 | |||
146 | spin_unlock(&cxn_owners_lock); | ||
147 | } /* end destroy_context() */ | ||
148 | |||
149 | /*****************************************************************************/ | ||
150 | /* | ||
151 | * display the MMU context a process is currently using
152 | */ | ||
153 | #ifdef CONFIG_PROC_FS | ||
154 | char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer) | ||
155 | { | ||
156 | spin_lock(&cxn_owners_lock); | ||
157 | buffer += sprintf(buffer, "CXNR: %u\n", mm->context.id); | ||
158 | spin_unlock(&cxn_owners_lock); | ||
159 | |||
160 | return buffer; | ||
161 | } /* end proc_pid_status_frv_cxnr() */ | ||
162 | #endif | ||
163 | |||
164 | /*****************************************************************************/ | ||
165 | /* | ||
166 | * (un)pin a process's mm_struct's MMU context ID | ||
167 | */ | ||
168 | int cxn_pin_by_pid(pid_t pid) | ||
169 | { | ||
170 | struct task_struct *tsk; | ||
171 | struct mm_struct *mm = NULL; | ||
172 | int ret; | ||
173 | |||
174 | /* unpin if pid is zero */ | ||
175 | if (pid == 0) { | ||
176 | cxn_pinned = -1; | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | ret = -ESRCH; | ||
181 | |||
182 | /* get a handle on the mm_struct */ | ||
183 | read_lock(&tasklist_lock); | ||
184 | tsk = find_task_by_pid(pid); | ||
185 | if (tsk) { | ||
186 | ret = -EINVAL; | ||
187 | |||
188 | task_lock(tsk); | ||
189 | if (tsk->mm) { | ||
190 | mm = tsk->mm; | ||
191 | atomic_inc(&mm->mm_users); | ||
192 | ret = 0; | ||
193 | } | ||
194 | task_unlock(tsk); | ||
195 | } | ||
196 | read_unlock(&tasklist_lock); | ||
197 | |||
198 | if (ret < 0) | ||
199 | return ret; | ||
200 | |||
201 | /* make sure it has a CXN and pin it */ | ||
202 | spin_lock(&cxn_owners_lock); | ||
203 | cxn_pinned = get_cxn(&mm->context); | ||
204 | spin_unlock(&cxn_owners_lock); | ||
205 | |||
206 | mmput(mm); | ||
207 | return 0; | ||
208 | } /* end cxn_pin_by_pid() */ | ||
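
change_mm_context() is driven from the arch's context-switch hook; the real hook lives in include/asm-frv/mmu_context.h and may differ in detail, so the following is only a sketch of its likely shape, showing where the old/new contexts and the new page directory come from:

#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>

/* illustrative only: how a switch_mm()-style hook would call the code above */
static inline void demo_switch_mm(struct mm_struct *prev, struct mm_struct *next,
				  struct task_struct *tsk)
{
	if (prev != next)
		change_mm_context(&prev->context, &next->context, next->pgd);
}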
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
new file mode 100644
index 000000000000..4eaec0f3525b
--- /dev/null
+++ b/arch/frv/mm/pgalloc.c
@@ -0,0 +1,159 @@
1 | /* pgalloc.c: page directory & page table allocation | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/highmem.h> | ||
16 | #include <asm/pgalloc.h> | ||
17 | #include <asm/page.h> | ||
18 | #include <asm/cacheflush.h> | ||
19 | |||
20 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE))); | ||
21 | kmem_cache_t *pgd_cache; | ||
22 | |||
23 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | ||
24 | { | ||
25 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); | ||
26 | if (pte) | ||
27 | clear_page(pte); | ||
28 | return pte; | ||
29 | } | ||
30 | |||
31 | struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) | ||
32 | { | ||
33 | struct page *page; | ||
34 | |||
35 | #ifdef CONFIG_HIGHPTE | ||
36 | page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0); | ||
37 | #else | ||
38 | page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); | ||
39 | #endif | ||
40 | if (page) | ||
41 | clear_highpage(page); | ||
42 | flush_dcache_page(page); | ||
43 | return page; | ||
44 | } | ||
45 | |||
46 | void __set_pmd(pmd_t *pmdptr, unsigned long pmd) | ||
47 | { | ||
48 | unsigned long *__ste_p = pmdptr->ste; | ||
49 | int loop; | ||
50 | |||
51 | if (!pmd) { | ||
52 | memset(__ste_p, 0, PME_SIZE); | ||
53 | } | ||
54 | else { | ||
55 | BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe)); | ||
56 | |||
57 | for (loop = PME_SIZE; loop > 0; loop -= 4) { | ||
58 | *__ste_p++ = pmd; | ||
59 | pmd += __frv_PT_SIZE; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | frv_dcache_writeback((unsigned long) pmdptr, (unsigned long) (pmdptr + 1)); | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * List of all pgd's needed for non-PAE so it can invalidate entries | ||
68 | * in both cached and uncached pgd's; not needed for PAE since the | ||
69 | * kernel pmd is shared. If PAE were not to share the pmd a similar | ||
70 | * tactic would be needed. This is essentially codepath-based locking | ||
71 | * against pageattr.c; it is the unique case in which a valid change | ||
72 | * of kernel pagetables can't be lazily synchronized by vmalloc faults. | ||
73 | * vmalloc faults work because attached pagetables are never freed. | ||
74 | * If the locking proves to be non-performant, a ticketing scheme with | ||
75 | * checks at dup_mmap(), exec(), and other mmlist addition points | ||
76 | * could be used. The locking scheme was chosen on the basis of | ||
77 | * manfred's recommendations and having no core impact whatsoever. | ||
78 | * -- wli | ||
79 | */ | ||
80 | DEFINE_SPINLOCK(pgd_lock); | ||
81 | struct page *pgd_list; | ||
82 | |||
83 | static inline void pgd_list_add(pgd_t *pgd) | ||
84 | { | ||
85 | struct page *page = virt_to_page(pgd); | ||
86 | page->index = (unsigned long) pgd_list; | ||
87 | if (pgd_list) | ||
88 | pgd_list->private = (unsigned long) &page->index; | ||
89 | pgd_list = page; | ||
90 | page->private = (unsigned long) &pgd_list; | ||
91 | } | ||
92 | |||
93 | static inline void pgd_list_del(pgd_t *pgd) | ||
94 | { | ||
95 | struct page *next, **pprev, *page = virt_to_page(pgd); | ||
96 | next = (struct page *) page->index; | ||
97 | pprev = (struct page **) page->private; | ||
98 | *pprev = next; | ||
99 | if (next) | ||
100 | next->private = (unsigned long) pprev; | ||
101 | } | ||
102 | |||
103 | void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused) | ||
104 | { | ||
105 | unsigned long flags; | ||
106 | |||
107 | if (PTRS_PER_PMD == 1) | ||
108 | spin_lock_irqsave(&pgd_lock, flags); | ||
109 | |||
110 | memcpy((pgd_t *) pgd + USER_PGDS_IN_LAST_PML4, | ||
111 | swapper_pg_dir + USER_PGDS_IN_LAST_PML4, | ||
112 | (PTRS_PER_PGD - USER_PGDS_IN_LAST_PML4) * sizeof(pgd_t)); | ||
113 | |||
114 | if (PTRS_PER_PMD > 1) | ||
115 | return; | ||
116 | |||
117 | pgd_list_add(pgd); | ||
118 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
119 | memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t)); | ||
120 | } | ||
121 | |||
122 | /* never called when PTRS_PER_PMD > 1 */ | ||
123 | void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused) | ||
124 | { | ||
125 | unsigned long flags; /* can be called from interrupt context */ | ||
126 | |||
127 | spin_lock_irqsave(&pgd_lock, flags); | ||
128 | pgd_list_del(pgd); | ||
129 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
130 | } | ||
131 | |||
132 | pgd_t *pgd_alloc(struct mm_struct *mm) | ||
133 | { | ||
134 | pgd_t *pgd; | ||
135 | |||
136 | pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL); | ||
137 | if (!pgd) | ||
138 | return pgd; | ||
139 | |||
140 | return pgd; | ||
141 | } | ||
142 | |||
143 | void pgd_free(pgd_t *pgd) | ||
144 | { | ||
145 | /* in the non-PAE case, clear_page_tables() clears user pgd entries */ | ||
146 | kmem_cache_free(pgd_cache, pgd); | ||
147 | } | ||
148 | |||
149 | void __init pgtable_cache_init(void) | ||
150 | { | ||
151 | pgd_cache = kmem_cache_create("pgd", | ||
152 | PTRS_PER_PGD * sizeof(pgd_t), | ||
153 | PTRS_PER_PGD * sizeof(pgd_t), | ||
154 | 0, | ||
155 | pgd_ctor, | ||
156 | pgd_dtor); | ||
157 | if (!pgd_cache) | ||
158 | panic("pgtable_cache_init(): Cannot create pgd cache"); | ||
159 | } | ||
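
pgd_list_add()/pgd_list_del() thread the pgd pages into a singly linked list through page->index, with page->private pointing back at the previous element's next pointer so removal needs no walk. A sketch of how a pageattr-style consumer would traverse it under pgd_lock; the callback is illustrative:

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <asm/pgalloc.h>

/* illustrative only: visit every pgd currently registered on pgd_list */
static void demo_for_each_pgd(void (*fn)(pgd_t *pgd))
{
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *) page->index)
		fn((pgd_t *) page_address(page));
	spin_unlock_irqrestore(&pgd_lock, flags);
}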
diff --git a/arch/frv/mm/tlb-flush.S b/arch/frv/mm/tlb-flush.S
new file mode 100644
index 000000000000..6f43c74c5d95
--- /dev/null
+++ b/arch/frv/mm/tlb-flush.S
@@ -0,0 +1,185 @@
1 | /* tlb-flush.S: TLB flushing routines | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sys.h> | ||
13 | #include <linux/config.h> | ||
14 | #include <linux/linkage.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/ptrace.h> | ||
17 | #include <asm/spr-regs.h> | ||
18 | |||
19 | .macro DEBUG ch | ||
20 | # sethi.p %hi(0xfeff9c00),gr4 | ||
21 | # setlo %lo(0xfeff9c00),gr4 | ||
22 | # setlos #\ch,gr5 | ||
23 | # stbi gr5,@(gr4,#0) | ||
24 | # membar | ||
25 | .endm | ||
26 | |||
27 | .section .rodata | ||
28 | |||
29 | # sizes corresponding to TPXR.LMAX | ||
30 | .balign 1 | ||
31 | __tlb_lmax_sizes: | ||
32 | .byte 0, 64, 0, 0 | ||
33 | .byte 0, 0, 0, 0 | ||
34 | .byte 0, 0, 0, 0 | ||
35 | .byte 0, 0, 0, 0 | ||
36 | |||
37 | .section .text | ||
38 | .balign 4 | ||
39 | |||
40 | ############################################################################### | ||
41 | # | ||
42 | # flush everything | ||
43 | # - void __flush_tlb_all(void) | ||
44 | # | ||
45 | ############################################################################### | ||
46 | .globl __flush_tlb_all | ||
47 | .type __flush_tlb_all,@function | ||
48 | __flush_tlb_all: | ||
49 | DEBUG 'A' | ||
50 | |||
51 | # kill cached PGE value | ||
52 | setlos #0xffffffff,gr4 | ||
53 | movgs gr4,scr0 | ||
54 | movgs gr4,scr1 | ||
55 | |||
56 | # kill AMPR-cached TLB values | ||
57 | movgs gr0,iamlr1 | ||
58 | movgs gr0,iampr1 | ||
59 | movgs gr0,damlr1 | ||
60 | movgs gr0,dampr1 | ||
61 | |||
62 | # find out how many lines there are | ||
63 | movsg tpxr,gr5 | ||
64 | sethi.p %hi(__tlb_lmax_sizes),gr4 | ||
65 | srli gr5,#TPXR_LMAX_SHIFT,gr5 | ||
66 | setlo.p %lo(__tlb_lmax_sizes),gr4 | ||
67 | andi gr5,#TPXR_LMAX_SMASK,gr5 | ||
68 | ldub @(gr4,gr5),gr4 | ||
69 | |||
70 | # now, we assume that the TLB line step is one page in size | ||
71 | setlos.p #PAGE_SIZE,gr5 | ||
72 | setlos #0,gr6 | ||
73 | 1: | ||
74 | tlbpr gr6,gr0,#6,#0 | ||
75 | subicc.p gr4,#1,gr4,icc0 | ||
76 | add gr6,gr5,gr6 | ||
77 | bne icc0,#2,1b | ||
78 | |||
79 | DEBUG 'B' | ||
80 | bralr | ||
81 | |||
82 | .size __flush_tlb_all, .-__flush_tlb_all | ||
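
Before the loop, the routine poisons the cached PGE keys in SCR0/SCR1 and clears the IAMR1/DAMR1 pair, then invalidates every TLB line by stepping a probe address one page at a time for as many lines as __tlb_lmax_sizes[TPXR.LMAX] reports. Roughly, in C (an illustrative fragment only; tpxr_lmax() and tlbpr_invalidate() are hypothetical stand-ins for the TPXR read and the "tlbpr ...,#6,#0" operation):

    unsigned int nlines = __tlb_lmax_sizes[tpxr_lmax()];   /* e.g. 64 */
    unsigned long addr = 0;

    do {
            tlbpr_invalidate(addr);          /* invalidate one TLB line */
            addr += PAGE_SIZE;               /* line step assumed to equal one page */
    } while (--nlines);
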
83 | |||
84 | ############################################################################### | ||
85 | # | ||
86 | # flush everything to do with one context | ||
87 | # - void __flush_tlb_mm(unsigned long contextid [GR8]) | ||
88 | # | ||
89 | ############################################################################### | ||
90 | .globl __flush_tlb_mm | ||
91 | .type __flush_tlb_mm,@function | ||
92 | __flush_tlb_mm: | ||
93 | DEBUG 'M' | ||
94 | |||
95 | # kill cached PGE value | ||
96 | setlos #0xffffffff,gr4 | ||
97 | movgs gr4,scr0 | ||
98 | movgs gr4,scr1 | ||
99 | |||
100 | # specify the context we want to flush | ||
101 | movgs gr8,tplr | ||
102 | |||
103 | # find out how many lines there are | ||
104 | movsg tpxr,gr5 | ||
105 | sethi.p %hi(__tlb_lmax_sizes),gr4 | ||
106 | srli gr5,#TPXR_LMAX_SHIFT,gr5 | ||
107 | setlo.p %lo(__tlb_lmax_sizes),gr4 | ||
108 | andi gr5,#TPXR_LMAX_SMASK,gr5 | ||
109 | ldub @(gr4,gr5),gr4 | ||
110 | |||
111 | # now, we assume that the TLB line step is one page in size | ||
112 | setlos.p #PAGE_SIZE,gr5 | ||
113 | setlos #0,gr6 | ||
114 | 0: | ||
115 | tlbpr gr6,gr0,#5,#0 | ||
116 | subicc.p gr4,#1,gr4,icc0 | ||
117 | add gr6,gr5,gr6 | ||
118 | bne icc0,#2,0b | ||
119 | |||
120 | DEBUG 'N' | ||
121 | bralr | ||
122 | |||
123 | .size __flush_tlb_mm, .-__flush_tlb_mm | ||
124 | |||
125 | ############################################################################### | ||
126 | # | ||
127 | # flush a single page from the TLB | ||
128 | # - void __flush_tlb_page(unsigned long contextid [GR8], | ||
129 | # unsigned long start [GR9]) | ||
130 | # | ||
131 | ############################################################################### | ||
132 | .globl __flush_tlb_page | ||
133 | .type __flush_tlb_page,@function | ||
134 | __flush_tlb_page: | ||
135 | # kill cached PGE value | ||
136 | setlos #0xffffffff,gr4 | ||
137 | movgs gr4,scr0 | ||
138 | movgs gr4,scr1 | ||
139 | |||
140 | # specify the context we want to flush | ||
141 | movgs gr8,tplr | ||
142 | |||
143 | # zap the matching TLB line and AMR values | ||
144 | setlos #~(PAGE_SIZE-1),gr5 | ||
145 | and gr9,gr5,gr9 | ||
146 | tlbpr gr9,gr0,#5,#0 | ||
147 | |||
148 | bralr | ||
149 | |||
150 | .size __flush_tlb_page, .-__flush_tlb_page | ||
151 | |||
152 | ############################################################################### | ||
153 | # | ||
154 | # flush a range of addresses from the TLB | ||
155 | # - void __flush_tlb_range(unsigned long contextid [GR8], | ||
156 | # unsigned long start [GR9], | ||
157 | # unsigned long end [GR10]) | ||
158 | # | ||
159 | ############################################################################### | ||
160 | .globl __flush_tlb_range | ||
161 | .type __flush_tlb_range,@function | ||
162 | __flush_tlb_range: | ||
163 | # kill cached PGE value | ||
164 | setlos #0xffffffff,gr4 | ||
165 | movgs gr4,scr0 | ||
166 | movgs gr4,scr1 | ||
167 | |||
168 | # specify the context we want to flush | ||
169 | movgs gr8,tplr | ||
170 | |||
171 | # round the start down to the beginning of a TLB line and the end up to the beginning of the next TLB line | ||
172 | setlos.p #~(PAGE_SIZE-1),gr5 | ||
173 | setlos #PAGE_SIZE,gr6 | ||
174 | subi.p gr10,#1,gr10 | ||
175 | and gr9,gr5,gr9 | ||
176 | and gr10,gr5,gr10 | ||
177 | 2: | ||
178 | tlbpr gr9,gr0,#5,#0 | ||
179 | subcc.p gr9,gr10,gr0,icc0 | ||
180 | add gr9,gr6,gr9 | ||
181 | bne icc0,#0,2b ; most likely a 1-page flush | ||
182 | |||
183 | bralr | ||
184 | |||
185 | .size __flush_tlb_range, .-__flush_tlb_range | ||
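
The rounding above flushes every page touched by the [start, end) range: start is truncated to a page boundary, and end (exclusive) is turned into the last page to flush by rounding end-1 down; the loop probes one page per iteration and exits once it has probed that last page. A standalone check of that arithmetic, assuming the 16KB FRV page size (PAGE_SIZE/PAGE_MASK redefined locally for illustration):

    #include <stdio.h>

    #define PAGE_SIZE 0x4000UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long start = 0x01234567, end = 0x01238001;  /* end exclusive */
            unsigned long cur   = start & PAGE_MASK;
            unsigned long last  = (end - 1) & PAGE_MASK;
            unsigned int  n     = 0;

            do {                      /* mirrors the probe/compare/advance order */
                    n++;              /* one tlbpr #5 probe per iteration        */
                    if (cur == last)
                            break;
                    cur += PAGE_SIZE;
            } while (1);

            printf("flushes %u pages\n", n);   /* 0x01234000 and 0x01238000 -> 2 */
            return 0;
    }
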
diff --git a/arch/frv/mm/tlb-miss.S b/arch/frv/mm/tlb-miss.S new file mode 100644 index 000000000000..8729f7d7c6e0 --- /dev/null +++ b/arch/frv/mm/tlb-miss.S | |||
@@ -0,0 +1,631 @@ | |||
1 | /* tlb-miss.S: TLB miss handlers | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sys.h> | ||
13 | #include <linux/config.h> | ||
14 | #include <linux/linkage.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/pgtable.h> | ||
17 | #include <asm/highmem.h> | ||
18 | #include <asm/spr-regs.h> | ||
19 | |||
20 | .section .text | ||
21 | .balign 4 | ||
22 | |||
23 | .globl __entry_insn_mmu_miss | ||
24 | __entry_insn_mmu_miss: | ||
25 | break | ||
26 | nop | ||
27 | |||
28 | .globl __entry_insn_mmu_exception | ||
29 | __entry_insn_mmu_exception: | ||
30 | break | ||
31 | nop | ||
32 | |||
33 | .globl __entry_data_mmu_miss | ||
34 | __entry_data_mmu_miss: | ||
35 | break | ||
36 | nop | ||
37 | |||
38 | .globl __entry_data_mmu_exception | ||
39 | __entry_data_mmu_exception: | ||
40 | break | ||
41 | nop | ||
42 | |||
43 | ############################################################################### | ||
44 | # | ||
45 | # handle a lookup failure of one sort or another in a kernel TLB handler | ||
46 | # On entry: | ||
47 | # GR29 - faulting address | ||
48 | # SCR2 - saved CCR | ||
49 | # | ||
50 | ############################################################################### | ||
51 | .type __tlb_kernel_fault,@function | ||
52 | __tlb_kernel_fault: | ||
53 | # see if we're supposed to re-enable single-step mode upon return | ||
54 | sethi.p %hi(__break_tlb_miss_return_break),gr30 | ||
55 | setlo %lo(__break_tlb_miss_return_break),gr30 | ||
56 | movsg pcsr,gr31 | ||
57 | |||
58 | subcc gr31,gr30,gr0,icc0 | ||
59 | beq icc0,#0,__tlb_kernel_fault_sstep | ||
60 | |||
61 | movsg scr2,gr30 | ||
62 | movgs gr30,ccr | ||
63 | movgs gr29,scr2 /* save EAR0 value */ | ||
64 | sethi.p %hi(__kernel_current_task),gr29 | ||
65 | setlo %lo(__kernel_current_task),gr29 | ||
66 | ldi.p @(gr29,#0),gr29 /* restore GR29 */ | ||
67 | |||
68 | bra __entry_kernel_handle_mmu_fault | ||
69 | |||
70 | # we've got to re-enable single-stepping | ||
71 | __tlb_kernel_fault_sstep: | ||
72 | sethi.p %hi(__break_tlb_miss_real_return_info),gr30 | ||
73 | setlo %lo(__break_tlb_miss_real_return_info),gr30 | ||
74 | lddi @(gr30,0),gr30 | ||
75 | movgs gr30,pcsr | ||
76 | movgs gr31,psr | ||
77 | |||
78 | movsg scr2,gr30 | ||
79 | movgs gr30,ccr | ||
80 | movgs gr29,scr2 /* save EAR0 value */ | ||
81 | sethi.p %hi(__kernel_current_task),gr29 | ||
82 | setlo %lo(__kernel_current_task),gr29 | ||
83 | ldi.p @(gr29,#0),gr29 /* restore GR29 */ | ||
84 | bra __entry_kernel_handle_mmu_fault_sstep | ||
85 | |||
86 | .size __tlb_kernel_fault, .-__tlb_kernel_fault | ||
87 | |||
88 | ############################################################################### | ||
89 | # | ||
90 | # handle a lookup failure of one sort or another in a user TLB handler | ||
91 | # On entry: | ||
92 | # GR28 - faulting address | ||
93 | # SCR2 - saved CCR | ||
94 | # | ||
95 | ############################################################################### | ||
96 | .type __tlb_user_fault,@function | ||
97 | __tlb_user_fault: | ||
98 | # see if we're supposed to re-enable single-step mode upon return | ||
99 | sethi.p %hi(__break_tlb_miss_return_break),gr30 | ||
100 | setlo %lo(__break_tlb_miss_return_break),gr30 | ||
101 | movsg pcsr,gr31 | ||
102 | subcc gr31,gr30,gr0,icc0 | ||
103 | beq icc0,#0,__tlb_user_fault_sstep | ||
104 | |||
105 | movsg scr2,gr30 | ||
106 | movgs gr30,ccr | ||
107 | bra __entry_uspace_handle_mmu_fault | ||
108 | |||
109 | # we've got to re-enable single-stepping | ||
110 | __tlb_user_fault_sstep: | ||
111 | sethi.p %hi(__break_tlb_miss_real_return_info),gr30 | ||
112 | setlo %lo(__break_tlb_miss_real_return_info),gr30 | ||
113 | lddi @(gr30,0),gr30 | ||
114 | movgs gr30,pcsr | ||
115 | movgs gr31,psr | ||
116 | movsg scr2,gr30 | ||
117 | movgs gr30,ccr | ||
118 | bra __entry_uspace_handle_mmu_fault_sstep | ||
119 | |||
120 | .size __tlb_user_fault, .-__tlb_user_fault | ||
121 | |||
122 | ############################################################################### | ||
123 | # | ||
124 | # Kernel instruction TLB miss handler | ||
125 | # On entry: | ||
126 | # GR1 - kernel stack pointer | ||
127 | # GR28 - saved exception frame pointer | ||
128 | # GR29 - faulting address | ||
129 | # GR31 - EAR0 ^ SCR0 | ||
130 | # SCR0 - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff) | ||
131 | # DAMR3 - mapped page directory | ||
132 | # DAMR4 - mapped page table as matched by SCR0 | ||
133 | # | ||
134 | ############################################################################### | ||
135 | .globl __entry_kernel_insn_tlb_miss | ||
136 | .type __entry_kernel_insn_tlb_miss,@function | ||
137 | __entry_kernel_insn_tlb_miss: | ||
138 | #if 0 | ||
139 | sethi.p %hi(0xe1200004),gr30 | ||
140 | setlo %lo(0xe1200004),gr30 | ||
141 | st gr0,@(gr30,gr0) | ||
142 | sethi.p %hi(0xffc00100),gr30 | ||
143 | setlo %lo(0xffc00100),gr30 | ||
144 | sth gr30,@(gr30,gr0) | ||
145 | membar | ||
146 | #endif | ||
147 | |||
148 | movsg ccr,gr30 /* save CCR */ | ||
149 | movgs gr30,scr2 | ||
150 | |||
151 | # see if the cached page table mapping is appropriate | ||
152 | srlicc.p gr31,#26,gr0,icc0 | ||
153 | setlos 0x3ffc,gr30 | ||
154 | srli.p gr29,#12,gr31 /* use EAR0[25:14] as PTE index */ | ||
155 | bne icc0,#0,__itlb_k_PTD_miss | ||
156 | |||
157 | __itlb_k_PTD_mapped: | ||
158 | # access the PTD with EAR0[25:14] | ||
159 | # - DAMLR4 points to the virtual address of the appropriate page table | ||
160 | # - the PTD holds 4096 PTEs | ||
161 | # - the PTD must be accessed uncached | ||
162 | # - the PTE must be marked accessed if it was valid | ||
163 | # | ||
164 | and gr31,gr30,gr31 | ||
165 | movsg damlr4,gr30 | ||
166 | add gr30,gr31,gr31 | ||
167 | ldi @(gr31,#0),gr30 /* fetch the PTE */ | ||
168 | andicc gr30,#_PAGE_PRESENT,gr0,icc0 | ||
169 | ori.p gr30,#_PAGE_ACCESSED,gr30 | ||
170 | beq icc0,#0,__tlb_kernel_fault /* jump if PTE invalid */ | ||
171 | sti.p gr30,@(gr31,#0) /* update the PTE */ | ||
172 | andi gr30,#~_PAGE_ACCESSED,gr30 | ||
173 | |||
174 | # we're using IAMR1 as an extra TLB entry | ||
175 | # - punt the entry here (if valid) to the real TLB and then replace with the new PTE | ||
176 | # - need to check DAMR1 lest we cause a multiple-DAT-hit exception | ||
177 | # - IAMPR1 has no WP bit, and we mustn't lose WP information | ||
178 | movsg iampr1,gr31 | ||
179 | andicc gr31,#xAMPRx_V,gr0,icc0 | ||
180 | setlos.p 0xfffff000,gr31 | ||
181 | beq icc0,#0,__itlb_k_nopunt /* punt not required */ | ||
182 | |||
183 | movsg iamlr1,gr31 | ||
184 | movgs gr31,tplr /* set TPLR.CXN */ | ||
185 | tlbpr gr31,gr0,#4,#0 /* delete matches from TLB, IAMR1, DAMR1 */ | ||
186 | |||
187 | movsg dampr1,gr31 | ||
188 | ori gr31,#xAMPRx_V,gr31 /* entry was invalidated by tlbpr #4 */ | ||
189 | movgs gr31,tppr | ||
190 | movsg iamlr1,gr31 /* set TPLR.CXN */ | ||
191 | movgs gr31,tplr | ||
192 | tlbpr gr31,gr0,#2,#0 /* save to the TLB */ | ||
193 | movsg tpxr,gr31 /* check the TLB write error flag */ | ||
194 | andicc.p gr31,#TPXR_E,gr0,icc0 | ||
195 | setlos #0xfffff000,gr31 | ||
196 | bne icc0,#0,__tlb_kernel_fault | ||
197 | |||
198 | __itlb_k_nopunt: | ||
199 | |||
200 | # assemble the new TLB entry | ||
201 | and gr29,gr31,gr29 | ||
202 | movsg cxnr,gr31 | ||
203 | or gr29,gr31,gr29 | ||
204 | movgs gr29,iamlr1 /* xAMLR = address | context number */ | ||
205 | movgs gr30,iampr1 | ||
206 | movgs gr29,damlr1 | ||
207 | movgs gr30,dampr1 | ||
208 | |||
209 | # return, restoring registers | ||
210 | movsg scr2,gr30 | ||
211 | movgs gr30,ccr | ||
212 | sethi.p %hi(__kernel_current_task),gr29 | ||
213 | setlo %lo(__kernel_current_task),gr29 | ||
214 | ldi @(gr29,#0),gr29 | ||
215 | rett #0 | ||
216 | beq icc0,#3,0 /* prevent icache prefetch */ | ||
217 | |||
218 | # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more | ||
219 | # appropriate page table and map that instead | ||
220 | # - access the PGD with EAR0[31:26] | ||
221 | # - DAMLR3 points to the virtual address of the page directory | ||
222 | # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables | ||
223 | __itlb_k_PTD_miss: | ||
224 | srli gr29,#26,gr31 /* calculate PGE offset */ | ||
225 | slli gr31,#8,gr31 /* and clear bottom bits */ | ||
226 | |||
227 | movsg damlr3,gr30 | ||
228 | ld @(gr31,gr30),gr30 /* access the PGE */ | ||
229 | |||
230 | andicc.p gr30,#_PAGE_PRESENT,gr0,icc0 | ||
231 | andicc gr30,#xAMPRx_SS,gr0,icc1 | ||
232 | |||
233 | # map this PTD instead and record coverage address | ||
234 | ori.p gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30 | ||
235 | beq icc0,#0,__tlb_kernel_fault /* jump if PGE not present */ | ||
236 | slli.p gr31,#18,gr31 | ||
237 | bne icc1,#0,__itlb_k_bigpage | ||
238 | movgs gr30,dampr4 | ||
239 | movgs gr31,scr0 | ||
240 | |||
241 | # we can now resume normal service | ||
242 | setlos 0x3ffc,gr30 | ||
243 | srli.p gr29,#12,gr31 /* use EAR0[25:14] as PTE index */ | ||
244 | bra __itlb_k_PTD_mapped | ||
245 | |||
246 | __itlb_k_bigpage: | ||
247 | break | ||
248 | nop | ||
249 | |||
250 | .size __entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss | ||
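
The index arithmetic in the fast path is compact: "srli gr29,#12" followed by the 0x3ffc mask yields a byte offset equal to four times the EAR0[25:14] PTE index, while the PGE lookup scales EAR0[31:26] by 256 (64 PGEs in one directory page). A small self-contained check of that arithmetic, assuming the 16KB page size implied by the EAR0[25:14] indexing (the address is an arbitrary example):

    #include <stdio.h>

    int main(void)
    {
            unsigned long ear0    = 0xc1234567UL;            /* arbitrary address       */
            unsigned long pge_off = (ear0 >> 26) << 8;       /* EAR0[31:26] * 256 bytes */
            unsigned long pte_idx = (ear0 >> 14) & 0xfff;    /* EAR0[25:14]             */
            unsigned long pte_off = (ear0 >> 12) & 0x3ffc;   /* == pte_idx * 4          */

            printf("PGE byte offset %#lx, PTE index %#lx, PTE byte offset %#lx\n",
                   pge_off, pte_idx, pte_off);
            return 0;
    }
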
251 | |||
252 | ############################################################################### | ||
253 | # | ||
254 | # Kernel data TLB miss handler | ||
255 | # On entry: | ||
256 | # GR1 - kernel stack pointer | ||
257 | # GR28 - saved exception frame pointer | ||
258 | # GR29 - faulting address | ||
259 | # GR31 - EAR0 ^ SCR1 | ||
260 | # SCR1 - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff) | ||
261 | # DAMR3 - mapped page directory | ||
262 | # DAMR5 - mapped page table as matched by SCR1 | ||
263 | # | ||
264 | ############################################################################### | ||
265 | .globl __entry_kernel_data_tlb_miss | ||
266 | .type __entry_kernel_data_tlb_miss,@function | ||
267 | __entry_kernel_data_tlb_miss: | ||
268 | #if 0 | ||
269 | sethi.p %hi(0xe1200004),gr30 | ||
270 | setlo %lo(0xe1200004),gr30 | ||
271 | st gr0,@(gr30,gr0) | ||
272 | sethi.p %hi(0xffc00100),gr30 | ||
273 | setlo %lo(0xffc00100),gr30 | ||
274 | sth gr30,@(gr30,gr0) | ||
275 | membar | ||
276 | #endif | ||
277 | |||
278 | movsg ccr,gr30 /* save CCR */ | ||
279 | movgs gr30,scr2 | ||
280 | |||
281 | # see if the cached page table mapping is appropriate | ||
282 | srlicc.p gr31,#26,gr0,icc0 | ||
283 | setlos 0x3ffc,gr30 | ||
284 | srli.p gr29,#12,gr31 /* use EAR0[25:14] as PTE index */ | ||
285 | bne icc0,#0,__dtlb_k_PTD_miss | ||
286 | |||
287 | __dtlb_k_PTD_mapped: | ||
288 | # access the PTD with EAR0[25:14] | ||
289 | # - DAMLR5 points to the virtual address of the appropriate page table | ||
290 | # - the PTD holds 4096 PTEs | ||
291 | # - the PTD must be accessed uncached | ||
292 | # - the PTE must be marked accessed if it was valid | ||
293 | # | ||
294 | and gr31,gr30,gr31 | ||
295 | movsg damlr5,gr30 | ||
296 | add gr30,gr31,gr31 | ||
297 | ldi @(gr31,#0),gr30 /* fetch the PTE */ | ||
298 | andicc gr30,#_PAGE_PRESENT,gr0,icc0 | ||
299 | ori.p gr30,#_PAGE_ACCESSED,gr30 | ||
300 | beq icc0,#0,__tlb_kernel_fault /* jump if PTE invalid */ | ||
301 | sti.p gr30,@(gr31,#0) /* update the PTE */ | ||
302 | andi gr30,#~_PAGE_ACCESSED,gr30 | ||
303 | |||
304 | # we're using DAMR1 as an extra TLB entry | ||
305 | # - punt the entry here (if valid) to the real TLB and then replace with the new PTE | ||
306 | # - need to check IAMR1 lest we cause a multiple-DAT-hit exception | ||
307 | movsg dampr1,gr31 | ||
308 | andicc gr31,#xAMPRx_V,gr0,icc0 | ||
309 | setlos.p 0xfffff000,gr31 | ||
310 | beq icc0,#0,__dtlb_k_nopunt /* punt not required */ | ||
311 | |||
312 | movsg damlr1,gr31 | ||
313 | movgs gr31,tplr /* set TPLR.CXN */ | ||
314 | tlbpr gr31,gr0,#4,#0 /* delete matches from TLB, IAMR1, DAMR1 */ | ||
315 | |||
316 | movsg dampr1,gr31 | ||
317 | ori gr31,#xAMPRx_V,gr31 /* entry was invalidated by tlbpr #4 */ | ||
318 | movgs gr31,tppr | ||
319 | movsg damlr1,gr31 /* set TPLR.CXN */ | ||
320 | movgs gr31,tplr | ||
321 | tlbpr gr31,gr0,#2,#0 /* save to the TLB */ | ||
322 | movsg tpxr,gr31 /* check the TLB write error flag */ | ||
323 | andicc.p gr31,#TPXR_E,gr0,icc0 | ||
324 | setlos #0xfffff000,gr31 | ||
325 | bne icc0,#0,__tlb_kernel_fault | ||
326 | |||
327 | __dtlb_k_nopunt: | ||
328 | |||
329 | # assemble the new TLB entry | ||
330 | and gr29,gr31,gr29 | ||
331 | movsg cxnr,gr31 | ||
332 | or gr29,gr31,gr29 | ||
333 | movgs gr29,iamlr1 /* xAMLR = address | context number */ | ||
334 | movgs gr30,iampr1 | ||
335 | movgs gr29,damlr1 | ||
336 | movgs gr30,dampr1 | ||
337 | |||
338 | # return, restoring registers | ||
339 | movsg scr2,gr30 | ||
340 | movgs gr30,ccr | ||
341 | sethi.p %hi(__kernel_current_task),gr29 | ||
342 | setlo %lo(__kernel_current_task),gr29 | ||
343 | ldi @(gr29,#0),gr29 | ||
344 | rett #0 | ||
345 | beq icc0,#3,0 /* prevent icache prefetch */ | ||
346 | |||
347 | # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more | ||
348 | # appropriate page table and map that instead | ||
349 | # - access the PGD with EAR0[31:26] | ||
350 | # - DAMLR3 points to the virtual address of the page directory | ||
351 | # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables | ||
352 | __dtlb_k_PTD_miss: | ||
353 | srli gr29,#26,gr31 /* calculate PGE offset */ | ||
354 | slli gr31,#8,gr31 /* and clear bottom bits */ | ||
355 | |||
356 | movsg damlr3,gr30 | ||
357 | ld @(gr31,gr30),gr30 /* access the PGE */ | ||
358 | |||
359 | andicc.p gr30,#_PAGE_PRESENT,gr0,icc0 | ||
360 | andicc gr30,#xAMPRx_SS,gr0,icc1 | ||
361 | |||
362 | # map this PTD instead and record coverage address | ||
363 | ori.p gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30 | ||
364 | beq icc0,#0,__tlb_kernel_fault /* jump if PGE not present */ | ||
365 | slli.p gr31,#18,gr31 | ||
366 | bne icc1,#0,__dtlb_k_bigpage | ||
367 | movgs gr30,dampr5 | ||
368 | movgs gr31,scr1 | ||
369 | |||
370 | # we can now resume normal service | ||
371 | setlos 0x3ffc,gr30 | ||
372 | srli.p gr29,#12,gr31 /* use EAR0[25:14] as PTE index */ | ||
373 | bra __dtlb_k_PTD_mapped | ||
374 | |||
375 | __dtlb_k_bigpage: | ||
376 | break | ||
377 | nop | ||
378 | |||
379 | .size __entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss | ||
380 | |||
381 | ############################################################################### | ||
382 | # | ||
383 | # Userspace instruction TLB miss handler (with PGE prediction) | ||
384 | # On entry: | ||
385 | # GR28 - faulting address | ||
386 | # GR31 - EAR0 ^ SCR0 | ||
387 | # SCR0 - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff) | ||
388 | # DAMR3 - mapped page directory | ||
389 | # DAMR4 - mapped page table as matched by SCR0 | ||
390 | # | ||
391 | ############################################################################### | ||
392 | .globl __entry_user_insn_tlb_miss | ||
393 | .type __entry_user_insn_tlb_miss,@function | ||
394 | __entry_user_insn_tlb_miss: | ||
395 | #if 0 | ||
396 | sethi.p %hi(0xe1200004),gr30 | ||
397 | setlo %lo(0xe1200004),gr30 | ||
398 | st gr0,@(gr30,gr0) | ||
399 | sethi.p %hi(0xffc00100),gr30 | ||
400 | setlo %lo(0xffc00100),gr30 | ||
401 | sth gr30,@(gr30,gr0) | ||
402 | membar | ||
403 | #endif | ||
404 | |||
405 | movsg ccr,gr30 /* save CCR */ | ||
406 | movgs gr30,scr2 | ||
407 | |||
408 | # see if the cached page table mapping is appropriate | ||
409 | srlicc.p gr31,#26,gr0,icc0 | ||
410 | setlos 0x3ffc,gr30 | ||
411 | srli.p gr28,#12,gr31 /* use EAR0[25:14] as PTE index */ | ||
412 | bne icc0,#0,__itlb_u_PTD_miss | ||
413 | |||
414 | __itlb_u_PTD_mapped: | ||
415 | # access the PTD with EAR0[25:14] | ||
416 | # - DAMLR4 points to the virtual address of the appropriate page table | ||
417 | # - the PTD holds 4096 PTEs | ||
418 | # - the PTD must be accessed uncached | ||
419 | # - the PTE must be marked accessed if it was valid | ||
420 | # | ||
421 | and gr31,gr30,gr31 | ||
422 | movsg damlr4,gr30 | ||
423 | add gr30,gr31,gr31 | ||
424 | ldi @(gr31,#0),gr30 /* fetch the PTE */ | ||
425 | andicc gr30,#_PAGE_PRESENT,gr0,icc0 | ||
426 | ori.p gr30,#_PAGE_ACCESSED,gr30 | ||
427 | beq icc0,#0,__tlb_user_fault /* jump if PTE invalid */ | ||
428 | sti.p gr30,@(gr31,#0) /* update the PTE */ | ||
429 | andi gr30,#~_PAGE_ACCESSED,gr30 | ||
430 | |||
431 | # we're using IAMR1/DAMR1 as an extra TLB entry | ||
432 | # - punt the entry here (if valid) to the real TLB and then replace with the new PTE | ||
433 | movsg dampr1,gr31 | ||
434 | andicc gr31,#xAMPRx_V,gr0,icc0 | ||
435 | setlos.p 0xfffff000,gr31 | ||
436 | beq icc0,#0,__itlb_u_nopunt /* punt not required */ | ||
437 | |||
438 | movsg dampr1,gr31 | ||
439 | movgs gr31,tppr | ||
440 | movsg damlr1,gr31 /* set TPLR.CXN */ | ||
441 | movgs gr31,tplr | ||
442 | tlbpr gr31,gr0,#2,#0 /* save to the TLB */ | ||
443 | movsg tpxr,gr31 /* check the TLB write error flag */ | ||
444 | andicc.p gr31,#TPXR_E,gr0,icc0 | ||
445 | setlos #0xfffff000,gr31 | ||
446 | bne icc0,#0,__tlb_user_fault | ||
447 | |||
448 | __itlb_u_nopunt: | ||
449 | |||
450 | # assemble the new TLB entry | ||
451 | and gr28,gr31,gr28 | ||
452 | movsg cxnr,gr31 | ||
453 | or gr28,gr31,gr28 | ||
454 | movgs gr28,iamlr1 /* xAMLR = address | context number */ | ||
455 | movgs gr30,iampr1 | ||
456 | movgs gr28,damlr1 | ||
457 | movgs gr30,dampr1 | ||
458 | |||
459 | # return, restoring registers | ||
460 | movsg scr2,gr30 | ||
461 | movgs gr30,ccr | ||
462 | rett #0 | ||
463 | beq icc0,#3,0 /* prevent icache prefetch */ | ||
464 | |||
465 | # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more | ||
466 | # appropriate page table and map that instead | ||
467 | # - access the PGD with EAR0[31:26] | ||
468 | # - DAMLR3 points to the virtual address of the page directory | ||
469 | # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables | ||
470 | __itlb_u_PTD_miss: | ||
471 | srli gr28,#26,gr31 /* calculate PGE offset */ | ||
472 | slli gr31,#8,gr31 /* and clear bottom bits */ | ||
473 | |||
474 | movsg damlr3,gr30 | ||
475 | ld @(gr31,gr30),gr30 /* access the PGE */ | ||
476 | |||
477 | andicc.p gr30,#_PAGE_PRESENT,gr0,icc0 | ||
478 | andicc gr30,#xAMPRx_SS,gr0,icc1 | ||
479 | |||
480 | # map this PTD instead and record coverage address | ||
481 | ori.p gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30 | ||
482 | beq icc0,#0,__tlb_user_fault /* jump if PGE not present */ | ||
483 | slli.p gr31,#18,gr31 | ||
484 | bne icc1,#0,__itlb_u_bigpage | ||
485 | movgs gr30,dampr4 | ||
486 | movgs gr31,scr0 | ||
487 | |||
488 | # we can now resume normal service | ||
489 | setlos 0x3ffc,gr30 | ||
490 | srli.p gr28,#12,gr31 /* use EAR0[25:14] as PTE index */ | ||
491 | bra __itlb_u_PTD_mapped | ||
492 | |||
493 | __itlb_u_bigpage: | ||
494 | break | ||
495 | nop | ||
496 | |||
497 | .size __entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss | ||
498 | |||
499 | ############################################################################### | ||
500 | # | ||
501 | # Userspace data TLB miss handler | ||
502 | # On entry: | ||
503 | # GR28 - faulting address | ||
504 | # GR31 - EAR0 ^ SCR1 | ||
505 | # SCR1 - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff) | ||
506 | # DAMR3 - mapped page directory | ||
507 | # DAMR5 - mapped page table as matched by SCR1 | ||
508 | # | ||
509 | ############################################################################### | ||
510 | .globl __entry_user_data_tlb_miss | ||
511 | .type __entry_user_data_tlb_miss,@function | ||
512 | __entry_user_data_tlb_miss: | ||
513 | #if 0 | ||
514 | sethi.p %hi(0xe1200004),gr30 | ||
515 | setlo %lo(0xe1200004),gr30 | ||
516 | st gr0,@(gr30,gr0) | ||
517 | sethi.p %hi(0xffc00100),gr30 | ||
518 | setlo %lo(0xffc00100),gr30 | ||
519 | sth gr30,@(gr30,gr0) | ||
520 | membar | ||
521 | #endif | ||
522 | |||
523 | movsg ccr,gr30 /* save CCR */ | ||
524 | movgs gr30,scr2 | ||
525 | |||
526 | # see if the cached page table mapping is appropriate | ||
527 | srlicc.p gr31,#26,gr0,icc0 | ||
528 | setlos 0x3ffc,gr30 | ||
529 | srli.p gr28,#12,gr31 /* use EAR0[25:14] as PTE index */ | ||
530 | bne icc0,#0,__dtlb_u_PTD_miss | ||
531 | |||
532 | __dtlb_u_PTD_mapped: | ||
533 | # access the PTD with EAR0[25:14] | ||
534 | # - DAMLR5 points to the virtual address of the appropriate page table | ||
535 | # - the PTD holds 4096 PTEs | ||
536 | # - the PTD must be accessed uncached | ||
537 | # - the PTE must be marked accessed if it was valid | ||
538 | # | ||
539 | and gr31,gr30,gr31 | ||
540 | movsg damlr5,gr30 | ||
541 | |||
542 | __dtlb_u_using_iPTD: | ||
543 | add gr30,gr31,gr31 | ||
544 | ldi @(gr31,#0),gr30 /* fetch the PTE */ | ||
545 | andicc gr30,#_PAGE_PRESENT,gr0,icc0 | ||
546 | ori.p gr30,#_PAGE_ACCESSED,gr30 | ||
547 | beq icc0,#0,__tlb_user_fault /* jump if PTE invalid */ | ||
548 | sti.p gr30,@(gr31,#0) /* update the PTE */ | ||
549 | andi gr30,#~_PAGE_ACCESSED,gr30 | ||
550 | |||
551 | # we're using DAMR1 as an extra TLB entry | ||
552 | # - punt the entry here (if valid) to the real TLB and then replace with the new PTE | ||
553 | movsg dampr1,gr31 | ||
554 | andicc gr31,#xAMPRx_V,gr0,icc0 | ||
555 | setlos.p 0xfffff000,gr31 | ||
556 | beq icc0,#0,__dtlb_u_nopunt /* punt not required */ | ||
557 | |||
558 | movsg dampr1,gr31 | ||
559 | movgs gr31,tppr | ||
560 | movsg damlr1,gr31 /* set TPLR.CXN */ | ||
561 | movgs gr31,tplr | ||
562 | tlbpr gr31,gr0,#2,#0 /* save to the TLB */ | ||
563 | movsg tpxr,gr31 /* check the TLB write error flag */ | ||
564 | andicc.p gr31,#TPXR_E,gr0,icc0 | ||
565 | setlos #0xfffff000,gr31 | ||
566 | bne icc0,#0,__tlb_user_fault | ||
567 | |||
568 | __dtlb_u_nopunt: | ||
569 | |||
570 | # assemble the new TLB entry | ||
571 | and gr28,gr31,gr28 | ||
572 | movsg cxnr,gr31 | ||
573 | or gr28,gr31,gr28 | ||
574 | movgs gr28,iamlr1 /* xAMLR = address | context number */ | ||
575 | movgs gr30,iampr1 | ||
576 | movgs gr28,damlr1 | ||
577 | movgs gr30,dampr1 | ||
578 | |||
579 | # return, restoring registers | ||
580 | movsg scr2,gr30 | ||
581 | movgs gr30,ccr | ||
582 | rett #0 | ||
583 | beq icc0,#3,0 /* prevent icache prefetch */ | ||
584 | |||
585 | # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more | ||
586 | # appropriate page table and map that instead | ||
587 | # - first of all, check the insn PGE cache - we may well get a hit there | ||
588 | # - access the PGD with EAR0[31:26] | ||
589 | # - DAMLR3 points to the virtual address of the page directory | ||
590 | # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables | ||
591 | __dtlb_u_PTD_miss: | ||
592 | movsg scr0,gr31 /* consult the insn-PGE-cache key */ | ||
593 | xor gr28,gr31,gr31 | ||
594 | srlicc gr31,#26,gr0,icc0 | ||
595 | srli gr28,#12,gr31 /* use EAR0[25:14] as PTE index */ | ||
596 | bne icc0,#0,__dtlb_u_iPGE_miss | ||
597 | |||
598 | # what we're looking for is covered by the insn-PGE-cache | ||
599 | setlos 0x3ffc,gr30 | ||
600 | and gr31,gr30,gr31 | ||
601 | movsg damlr4,gr30 | ||
602 | bra __dtlb_u_using_iPTD | ||
603 | |||
604 | __dtlb_u_iPGE_miss: | ||
605 | srli gr28,#26,gr31 /* calculate PGE offset */ | ||
606 | slli gr31,#8,gr31 /* and clear bottom bits */ | ||
607 | |||
608 | movsg damlr3,gr30 | ||
609 | ld @(gr31,gr30),gr30 /* access the PGE */ | ||
610 | |||
611 | andicc.p gr30,#_PAGE_PRESENT,gr0,icc0 | ||
612 | andicc gr30,#xAMPRx_SS,gr0,icc1 | ||
613 | |||
614 | # map this PTD instead and record coverage address | ||
615 | ori.p gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30 | ||
616 | beq icc0,#0,__tlb_user_fault /* jump if PGE not present */ | ||
617 | slli.p gr31,#18,gr31 | ||
618 | bne icc1,#0,__dtlb_u_bigpage | ||
619 | movgs gr30,dampr5 | ||
620 | movgs gr31,scr1 | ||
621 | |||
622 | # we can now resume normal service | ||
623 | setlos 0x3ffc,gr30 | ||
624 | srli.p gr28,#12,gr31 /* use EAR0[25:14] as PTE index */ | ||
625 | bra __dtlb_u_PTD_mapped | ||
626 | |||
627 | __dtlb_u_bigpage: | ||
628 | break | ||
629 | nop | ||
630 | |||
631 | .size __entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss | ||
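
One detail unique to this handler: before walking the PGD, __dtlb_u_PTD_miss first checks whether the faulting address is covered by the instruction-side PGE cache (SCR0), since data accesses often land in a region the ITLB handler has already mapped through DAMR4. The coverage test itself, shared by all four handlers, is just a comparison of bits 31:26 of the cached key and the faulting address; a standalone illustration:

    #include <stdio.h>

    static int pge_covers(unsigned long cached_key, unsigned long addr)
    {
            return ((cached_key ^ addr) >> 26) == 0;   /* same 64MB PGE region? */
    }

    int main(void)
    {
            printf("%d\n", pge_covers(0xc0000000UL, 0xc1234567UL)); /* 1: covered  */
            printf("%d\n", pge_covers(0xc0000000UL, 0xc4000000UL)); /* 0: next PGE */
            return 0;
    }
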
diff --git a/arch/frv/mm/unaligned.c b/arch/frv/mm/unaligned.c new file mode 100644 index 000000000000..09b361443fc2 --- /dev/null +++ b/arch/frv/mm/unaligned.c | |||
@@ -0,0 +1,218 @@ | |||
1 | /* unaligned.c: misalignment fixup handler for CPUs on which it is supported (FR451 only) | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/signal.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/user.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/linkage.h> | ||
21 | #include <linux/init.h> | ||
22 | |||
23 | #include <asm/setup.h> | ||
24 | #include <asm/system.h> | ||
25 | #include <asm/uaccess.h> | ||
26 | |||
27 | #if 0 | ||
28 | #define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ ) | ||
29 | #else | ||
30 | #define kdebug(fmt, ...) do {} while(0) | ||
31 | #endif | ||
32 | |||
33 | #define _MA_SIGNED 0x01 | ||
34 | #define _MA_HALF 0x02 | ||
35 | #define _MA_WORD 0x04 | ||
36 | #define _MA_DWORD 0x08 | ||
37 | #define _MA_SZ_MASK 0x0e | ||
38 | #define _MA_LOAD 0x10 | ||
39 | #define _MA_STORE 0x20 | ||
40 | #define _MA_UPDATE 0x40 | ||
41 | #define _MA_IMM 0x80 | ||
42 | |||
43 | #define _MA_LDxU _MA_LOAD | _MA_UPDATE | ||
44 | #define _MA_LDxI _MA_LOAD | _MA_IMM | ||
45 | #define _MA_STxU _MA_STORE | _MA_UPDATE | ||
46 | #define _MA_STxI _MA_STORE | _MA_IMM | ||
47 | |||
48 | static const uint8_t tbl_LDGRk_reg[0x40] = { | ||
49 | [0x02] = _MA_LOAD | _MA_HALF | _MA_SIGNED, /* LDSH @(GRi,GRj),GRk */ | ||
50 | [0x03] = _MA_LOAD | _MA_HALF, /* LDUH @(GRi,GRj),GRk */ | ||
51 | [0x04] = _MA_LOAD | _MA_WORD, /* LD @(GRi,GRj),GRk */ | ||
52 | [0x05] = _MA_LOAD | _MA_DWORD, /* LDD @(GRi,GRj),GRk */ | ||
53 | [0x12] = _MA_LDxU | _MA_HALF | _MA_SIGNED, /* LDSHU @(GRi,GRj),GRk */ | ||
54 | [0x13] = _MA_LDxU | _MA_HALF, /* LDUHU @(GRi,GRj),GRk */ | ||
55 | [0x14] = _MA_LDxU | _MA_WORD, /* LDU @(GRi,GRj),GRk */ | ||
56 | [0x15] = _MA_LDxU | _MA_DWORD, /* LDDU @(GRi,GRj),GRk */ | ||
57 | }; | ||
58 | |||
59 | static const uint8_t tbl_STGRk_reg[0x40] = { | ||
60 | [0x01] = _MA_STORE | _MA_HALF, /* STH @(GRi,GRj),GRk */ | ||
61 | [0x02] = _MA_STORE | _MA_WORD, /* ST @(GRi,GRj),GRk */ | ||
62 | [0x03] = _MA_STORE | _MA_DWORD, /* STD @(GRi,GRj),GRk */ | ||
63 | [0x11] = _MA_STxU | _MA_HALF, /* STHU @(GRi,GRj),GRk */ | ||
64 | [0x12] = _MA_STxU | _MA_WORD, /* STU @(GRi,GRj),GRk */ | ||
65 | [0x13] = _MA_STxU | _MA_DWORD, /* STDU @(GRi,GRj),GRk */ | ||
66 | }; | ||
67 | |||
68 | static const uint8_t tbl_LDSTGRk_imm[0x80] = { | ||
69 | [0x31] = _MA_LDxI | _MA_HALF | _MA_SIGNED, /* LDSHI @(GRi,d12),GRk */ | ||
70 | [0x32] = _MA_LDxI | _MA_WORD, /* LDI @(GRi,d12),GRk */ | ||
71 | [0x33] = _MA_LDxI | _MA_DWORD, /* LDDI @(GRi,d12),GRk */ | ||
72 | [0x36] = _MA_LDxI | _MA_HALF, /* LDUHI @(GRi,d12),GRk */ | ||
73 | [0x51] = _MA_STxI | _MA_HALF, /* STHI @(GRi,d12),GRk */ | ||
74 | [0x52] = _MA_STxI | _MA_WORD, /* STI @(GRi,d12),GRk */ | ||
75 | [0x53] = _MA_STxI | _MA_DWORD, /* STDI @(GRi,d12),GRk */ | ||
76 | }; | ||
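
handle_misalignment() below selects one of these tables from the major opcode in instruction bits 24:18 and, for the register-indexed forms, the sub-opcode in bits 11:6; GRk, GRi and the 12-bit displacement come from bits 30:25, 17:12 and 11:0 respectively. A small self-contained check of that field layout, building an LDI-style word for the 0x32 slot above (encoding inferred from the shifts in this file, not checked against the FRV manual):

    #include <stdio.h>

    int main(void)
    {
            unsigned int GRk = 8, GRi = 9, d12 = 0xffc;   /* -4 as a 12-bit field */
            unsigned int insn = (GRk << 25) | (0x32u << 18) | (GRi << 12) | d12;

            printf("major=%#x GRk=%u GRi=%u d12=%#x\n",
                   (insn >> 18) & 0x7f,                   /* 0x32: tbl_LDSTGRk_imm slot */
                   (insn >> 25) & 0x3f,
                   (insn >> 12) & 0x3f,
                   insn & 0xfff);
            return 0;
    }
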
77 | |||
78 | |||
79 | /*****************************************************************************/ | ||
80 | /* | ||
81 | * see if we can handle the exception by fixing up a misaligned memory access | ||
82 | */ | ||
83 | int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0) | ||
84 | { | ||
85 | unsigned long insn, addr, *greg; | ||
86 | int GRi, GRj, GRk, D12, op; | ||
87 | |||
88 | union { | ||
89 | uint64_t _64; | ||
90 | uint32_t _32[2]; | ||
91 | uint16_t _16; | ||
92 | uint8_t _8[8]; | ||
93 | } x; | ||
94 | |||
95 | if (!(esr0 & ESR0_EAV) || !(epcr0 & EPCR0_V) || !(ear0 & 7)) | ||
96 | return -EAGAIN; | ||
97 | |||
98 | epcr0 &= EPCR0_PC; | ||
99 | |||
100 | if (__frame->pc != epcr0) { | ||
101 | kdebug("MISALIGN: Execution not halted on excepting instruction\n"); | ||
102 | BUG(); | ||
103 | } | ||
104 | |||
105 | if (__get_user(insn, (unsigned long *) epcr0) < 0) | ||
106 | return -EFAULT; | ||
107 | |||
108 | /* determine the instruction type first */ | ||
109 | switch ((insn >> 18) & 0x7f) { | ||
110 | case 0x2: | ||
111 | /* LDx @(GRi,GRj),GRk */ | ||
112 | op = tbl_LDGRk_reg[(insn >> 6) & 0x3f]; | ||
113 | break; | ||
114 | |||
115 | case 0x3: | ||
116 | /* STx GRk,@(GRi,GRj) */ | ||
117 | op = tbl_STGRk_reg[(insn >> 6) & 0x3f]; | ||
118 | break; | ||
119 | |||
120 | default: | ||
121 | op = tbl_LDSTGRk_imm[(insn >> 18) & 0x7f]; | ||
122 | break; | ||
123 | } | ||
124 | |||
125 | if (!op) | ||
126 | return -EAGAIN; | ||
127 | |||
128 | kdebug("MISALIGN: pc=%08lx insn=%08lx ad=%08lx op=%02x\n", epcr0, insn, ear0, op); | ||
129 | |||
130 | memset(&x, 0xba, 8); | ||
131 | |||
132 | /* validate the instruction parameters */ | ||
133 | greg = (unsigned long *) &__frame->tbr; | ||
134 | |||
135 | GRi = (insn >> 12) & 0x3f; | ||
136 | GRk = (insn >> 25) & 0x3f; | ||
137 | |||
138 | if (GRi > 31 || GRk > 31) | ||
139 | return -ENOENT; | ||
140 | |||
141 | if (op & _MA_DWORD && GRk & 1) | ||
142 | return -EINVAL; | ||
143 | |||
144 | if (op & _MA_IMM) { | ||
145 | D12 = insn & 0xfff; | ||
146 | asm ("slli %0,#20,%0 ! srai %0,#20,%0" : "=r"(D12) : "0"(D12)); /* sign extend */ | ||
147 | addr = (GRi ? greg[GRi] : 0) + D12; | ||
148 | } | ||
149 | else { | ||
150 | GRj = (insn >> 0) & 0x3f; | ||
151 | if (GRj > 31) | ||
152 | return -ENOENT; | ||
153 | addr = (GRi ? greg[GRi] : 0) + (GRj ? greg[GRj] : 0); | ||
154 | } | ||
155 | |||
156 | if (addr != ear0) { | ||
157 | kdebug("MISALIGN: Calculated addr (%08lx) does not match EAR0 (%08lx)\n", | ||
158 | addr, ear0); | ||
159 | return -EFAULT; | ||
160 | } | ||
161 | |||
162 | /* check the address is okay */ | ||
163 | if (user_mode(__frame) && ___range_ok(ear0, 8) < 0) | ||
164 | return -EFAULT; | ||
165 | |||
166 | /* perform the memory op */ | ||
167 | if (op & _MA_STORE) { | ||
168 | /* perform a store */ | ||
169 | x._32[0] = 0; | ||
170 | if (GRk != 0) { | ||
171 | if (op & _MA_HALF) { | ||
172 | x._16 = greg[GRk]; | ||
173 | } | ||
174 | else { | ||
175 | x._32[0] = greg[GRk]; | ||
176 | } | ||
177 | } | ||
178 | if (op & _MA_DWORD) | ||
179 | x._32[1] = greg[GRk + 1]; | ||
180 | |||
181 | kdebug("MISALIGN: Store GR%d { %08x:%08x } -> %08lx (%dB)\n", | ||
182 | GRk, x._32[1], x._32[0], addr, op & _MA_SZ_MASK); | ||
183 | |||
184 | if (__memcpy_user((void *) addr, &x, op & _MA_SZ_MASK) != 0) | ||
185 | return -EFAULT; | ||
186 | } | ||
187 | else { | ||
188 | /* perform a load */ | ||
189 | if (__memcpy_user(&x, (void *) addr, op & _MA_SZ_MASK) != 0) | ||
190 | return -EFAULT; | ||
191 | |||
192 | if (op & _MA_HALF) { | ||
193 | if (op & _MA_SIGNED) | ||
194 | asm ("slli %0,#16,%0 ! srai %0,#16,%0" | ||
195 | : "=r"(x._32[0]) : "0"(x._16)); | ||
196 | else | ||
197 | asm ("sethi #0,%0" | ||
198 | : "=r"(x._32[0]) : "0"(x._16)); | ||
199 | } | ||
200 | |||
201 | kdebug("MISALIGN: Load %08lx (%dB) -> GR%d, { %08x:%08x }\n", | ||
202 | addr, op & _MA_SZ_MASK, GRk, x._32[1], x._32[0]); | ||
203 | |||
204 | if (GRk != 0) | ||
205 | greg[GRk] = x._32[0]; | ||
206 | if (op & _MA_DWORD) | ||
207 | greg[GRk + 1] = x._32[1]; | ||
208 | } | ||
209 | |||
210 | /* update the base pointer if required */ | ||
211 | if (op & _MA_UPDATE) | ||
212 | greg[GRi] = addr; | ||
213 | |||
214 | /* well... we've done that insn */ | ||
215 | __frame->pc = __frame->pc + 4; | ||
216 | |||
217 | return 0; | ||
218 | } /* end handle_misalignment() */ | ||
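
The two inline-asm sequences above (shift left, then arithmetic shift right) sign-extend the 12-bit displacement and a signed halfword load. In portable C the same extensions can be written with signed casts; a minimal standalone sketch (assumes arithmetic right shift of negative values, which GCC provides):

    #include <stdint.h>
    #include <stdio.h>

    static int32_t sext12(uint32_t v)        /* like "slli %0,#20 ! srai %0,#20" */
    {
            return (int32_t)(v << 20) >> 20;
    }

    static int32_t sext16(uint32_t v)        /* like "slli %0,#16 ! srai %0,#16" */
    {
            return (int32_t)(v << 16) >> 16;
    }

    int main(void)
    {
            printf("%d %d\n", sext12(0xffc), sext16(0x8001));  /* prints: -4 -32767 */
            return 0;
    }
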