diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/m68k/mm |
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/m68k/mm')
-rw-r--r-- | arch/m68k/mm/Makefile | 8 | ||||
-rw-r--r-- | arch/m68k/mm/fault.c | 219 | ||||
-rw-r--r-- | arch/m68k/mm/hwtest.c | 85 | ||||
-rw-r--r-- | arch/m68k/mm/init.c | 147 | ||||
-rw-r--r-- | arch/m68k/mm/kmap.c | 361 | ||||
-rw-r--r-- | arch/m68k/mm/memory.c | 471 | ||||
-rw-r--r-- | arch/m68k/mm/motorola.c | 285 | ||||
-rw-r--r-- | arch/m68k/mm/sun3kmap.c | 156 | ||||
-rw-r--r-- | arch/m68k/mm/sun3mmu.c | 102 |
9 files changed, 1834 insertions, 0 deletions
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile new file mode 100644 index 000000000000..90f1c735c110 --- /dev/null +++ b/arch/m68k/mm/Makefile | |||
@@ -0,0 +1,8 @@ | |||
#
# Makefile for the linux m68k-specific parts of the memory manager.
#

# Objects built for every m68k configuration.
obj-y		:= init.o fault.o hwtest.o

# MMU-specific objects: classic Motorola MMUs (020/030/040/060) vs. Sun-3.
obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o
obj-$(CONFIG_MMU_SUN3)		+= sun3kmap.o sun3mmu.o
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c new file mode 100644 index 000000000000..ac48b6d2aff6 --- /dev/null +++ b/arch/m68k/mm/fault.c | |||
@@ -0,0 +1,219 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/mm/fault.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Hamish Macdonald | ||
5 | */ | ||
6 | |||
7 | #include <linux/mman.h> | ||
8 | #include <linux/mm.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/ptrace.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/module.h> | ||
13 | |||
14 | #include <asm/setup.h> | ||
15 | #include <asm/traps.h> | ||
16 | #include <asm/system.h> | ||
17 | #include <asm/uaccess.h> | ||
18 | #include <asm/pgalloc.h> | ||
19 | |||
20 | extern void die_if_kernel(char *, struct pt_regs *, long); | ||
21 | extern const int frame_extra_sizes[]; /* in m68k/kernel/signal.c */ | ||
22 | |||
/*
 * Deliver the fault signal recorded in current->thread (signo, code,
 * faddr — stashed there by do_page_fault()) for the fault described
 * by @regs.
 *
 * Returns 1 after queueing a signal to a user task, or -1 when a
 * kernel-mode fault was redirected to an exception-table fixup handler.
 * Does not return at all for a kernel fault with no fixup: the kernel
 * oopses and the current task exits.
 */
int send_fault_sig(struct pt_regs *regs)
{
	siginfo_t siginfo = { 0, 0, 0, };

	siginfo.si_signo = current->thread.signo;
	siginfo.si_code = current->thread.code;
	siginfo.si_addr = (void *)current->thread.faddr;
#ifdef DEBUG
	printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code);
#endif

	if (user_mode(regs)) {
		/* User-mode fault: deliver the signal to the faulting task. */
		force_sig_info(siginfo.si_signo,
			       &siginfo, current);
	} else {
		const struct exception_table_entry *fixup;

		/* Are we prepared to handle this kernel fault? */
		if ((fixup = search_exception_tables(regs->pc))) {
			struct pt_regs *tregs;
			/* Create a new four word stack frame, discarding the old
			   one.  stkadj records by how much the frame shrank so
			   the exception return path can adjust the stack pointer;
			   the copied frame resumes at the fixup handler. */
			regs->stkadj = frame_extra_sizes[regs->format];
			tregs =	(struct pt_regs *)((ulong)regs + regs->stkadj);
			tregs->vector = regs->vector;
			tregs->format = 0;	/* plain four-word (format 0) frame */
			tregs->pc = fixup->fixup;
			tregs->sr = regs->sr;
			return -1;
		}

		//if (siginfo.si_signo == SIGBUS)
		//	force_sig_info(siginfo.si_signo,
		//		       &siginfo, current);

		/*
		 * Oops. The kernel tried to access some bad page. We'll have to
		 * terminate things with extreme prejudice.
		 */
		if ((unsigned long)siginfo.si_addr < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel access");
		printk(" at virtual address %p\n", siginfo.si_addr);
		die_if_kernel("Oops", regs, 0 /*error_code*/);
		do_exit(SIGKILL);
	}

	return 1;
}
73 | |||
/*
 * This routine handles page faults. It determines the problem, and
 * then passes it off to one of the appropriate routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
			      unsigned long error_code)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	int write, fault;

#ifdef DEBUG
	printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
		regs->sr, regs->pc, address, error_code,
		current->mm->pgd);
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto map_err;
	if (vma->vm_flags & VM_IO)
		goto acc_err;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto map_err;
	if (user_mode(regs)) {
		/* Accessing the stack below usp is always a bug.  The
		   "+ 256" is there due to some instructions doing
		   pre-decrement on the stack and that doesn't show up
		   until later.  */
		if (address + 256 < rdusp())
			goto map_err;
	}
	if (expand_stack(vma, address))
		goto map_err;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
#ifdef DEBUG
	printk("do_page_fault: good_area\n");
#endif
	write = 0;
	/* Decode the access (low two bits of error_code, see header
	   comment) and check it against the vma permissions. */
	switch (error_code & 3) {
		default:	/* 3: write, present */
			/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto acc_err;
			write++;
			break;
		case 1:		/* read, present */
			goto acc_err;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto acc_err;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

 survive:
	fault = handle_mm_fault(mm, vma, address, write);
#ifdef DEBUG
	printk("handle_mm_fault returns %d\n",fault);
#endif
	/* 1 = minor fault, 2 = major fault, 0 = bus error, else OOM. */
	switch (fault) {
	case 1:
		current->min_flt++;
		break;
	case 2:
		current->maj_flt++;
		break;
	case 0:
		goto bus_err;
	default:
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return 0;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		/* Never kill init: back off and retry the fault. */
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}

	printk("VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);

	/* Kernel-mode OOM falls through and is reported like any other
	   kernel fault (SIGBUS / fixup / oops in send_fault_sig()). */
no_context:
	current->thread.signo = SIGBUS;
	current->thread.faddr = address;
	return send_fault_sig(regs);

bus_err:
	current->thread.signo = SIGBUS;
	current->thread.code = BUS_ADRERR;
	current->thread.faddr = address;
	goto send_sig;

map_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_MAPERR;
	current->thread.faddr = address;
	goto send_sig;

acc_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_ACCERR;
	current->thread.faddr = address;

send_sig:
	up_read(&mm->mmap_sem);
	return send_fault_sig(regs);
}
diff --git a/arch/m68k/mm/hwtest.c b/arch/m68k/mm/hwtest.c new file mode 100644 index 000000000000..2c7dde3c6430 --- /dev/null +++ b/arch/m68k/mm/hwtest.c | |||
@@ -0,0 +1,85 @@ | |||
1 | /* Tests for presence or absence of hardware registers. | ||
2 | * This code was originally in atari/config.c, but I noticed | ||
3 | * that it was also in drivers/nubus/nubus.c and I wanted to | ||
4 | * use it in hp300/config.c, so it seemed sensible to pull it | ||
5 | * out into its own file. | ||
6 | * | ||
7 | * The test is for use when trying to read a hardware register | ||
8 | * that isn't present would cause a bus error. We set up a | ||
9 | * temporary handler so that this doesn't kill the kernel. | ||
10 | * | ||
11 | * There is a test-by-reading and a test-by-writing; I present | ||
12 | * them here complete with the comments from the original atari | ||
13 | * config.c... | ||
14 | * -- PMM <pmaydell@chiark.greenend.org.uk>, 05/1998 | ||
15 | */ | ||
16 | |||
17 | /* This function tests for the presence of an address, specially a | ||
18 | * hardware register address. It is called very early in the kernel | ||
19 | * initialization process, when the VBR register isn't set up yet. On | ||
20 | * an Atari, it still points to address 0, which is unmapped. So a bus | ||
21 | * error would cause another bus error while fetching the exception | ||
22 | * vector, and the CPU would do nothing at all. So we needed to set up | ||
23 | * a temporary VBR and a vector table for the duration of the test. | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | |||
/*
 * Probe whether the byte at *regp can be read without a bus error.
 * Returns 1 if the register is present, 0 if the access faulted.
 *
 * A temporary vector table (tmp_vectors) is installed via the VBR so
 * that a bus error lands on the local label Lberr1 instead of the real
 * handler; SP and the original VBR are restored on both paths.
 */
int hwreg_present( volatile void *regp )
{
	int ret = 0;
	long save_sp, save_vbr;
	long tmp_vectors[3];

	__asm__ __volatile__
		(	"movec %/vbr,%2\n\t"		/* save current VBR */
			"movel #Lberr1,%4@(8)\n\t"	/* bus-error vector (offset 8) -> Lberr1 */
			"movec %4,%/vbr\n\t"		/* switch to temporary vector table */
			"movel %/sp,%1\n\t"		/* save SP: a bus error may clobber it */
			"moveq #0,%0\n\t"		/* assume absent */
			"tstb %3@\n\t"			/* probe: read one byte */
			"nop\n\t"			/* let a pending bus error hit before... */
			"moveq #1,%0\n"			/* ...we declare the register present */
		"Lberr1:\n\t"
			"movel %1,%/sp\n\t"		/* restore SP */
			"movec %2,%/vbr"		/* restore original VBR */
		: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
		: "a" (regp), "a" (tmp_vectors)
		);

	return( ret );
}
EXPORT_SYMBOL(hwreg_present);
53 | |||
/* Basically the same, but writes a value into a word register, protected
 * by a bus error handler. Returns 1 if successful, 0 otherwise.
 */

int hwreg_write( volatile void *regp, unsigned short val )
{
	int ret;
	long save_sp, save_vbr;
	long tmp_vectors[3];

	__asm__ __volatile__
		(	"movec %/vbr,%2\n\t"		/* save current VBR */
			"movel #Lberr2,%4@(8)\n\t"	/* bus-error vector (offset 8) -> Lberr2 */
			"movec %4,%/vbr\n\t"		/* switch to temporary vector table */
			"movel %/sp,%1\n\t"		/* save SP: a bus error may clobber it */
			"moveq #0,%0\n\t"		/* assume failure */
			"movew %5,%3@\n\t"		/* probe: write the word */
			"nop  \n\t"	/* If this nop isn't present, 'ret' may already be
					 * loaded with 1 at the time the bus error
					 * happens! */
			"moveq #1,%0\n"
		"Lberr2:\n\t"
			"movel %1,%/sp\n\t"		/* restore SP */
			"movec %2,%/vbr"		/* restore original VBR */
		: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
		: "a" (regp), "a" (tmp_vectors), "g" (val)
		);

	return( ret );
}
EXPORT_SYMBOL(hwreg_write);
85 | |||
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c new file mode 100644 index 000000000000..c45beb955943 --- /dev/null +++ b/arch/m68k/mm/init.c | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/mm/init.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Hamish Macdonald | ||
5 | * | ||
6 | * Contains common initialization routines, specific init code moved | ||
7 | * to motorola.c and sun3mmu.c | ||
8 | */ | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <linux/signal.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/swap.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/bootmem.h> | ||
20 | |||
21 | #include <asm/setup.h> | ||
22 | #include <asm/uaccess.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/pgalloc.h> | ||
25 | #include <asm/system.h> | ||
26 | #include <asm/machdep.h> | ||
27 | #include <asm/io.h> | ||
28 | #ifdef CONFIG_ATARI | ||
29 | #include <asm/atari_stram.h> | ||
30 | #endif | ||
31 | #include <asm/tlb.h> | ||
32 | |||
33 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
34 | |||
35 | /* | ||
36 | * ZERO_PAGE is a special page that is used for zero-initialized | ||
37 | * data and COW. | ||
38 | */ | ||
39 | |||
40 | void *empty_zero_page; | ||
41 | |||
42 | void show_mem(void) | ||
43 | { | ||
44 | unsigned long i; | ||
45 | int free = 0, total = 0, reserved = 0, shared = 0; | ||
46 | int cached = 0; | ||
47 | |||
48 | printk("\nMem-info:\n"); | ||
49 | show_free_areas(); | ||
50 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | ||
51 | i = max_mapnr; | ||
52 | while (i-- > 0) { | ||
53 | total++; | ||
54 | if (PageReserved(mem_map+i)) | ||
55 | reserved++; | ||
56 | else if (PageSwapCache(mem_map+i)) | ||
57 | cached++; | ||
58 | else if (!page_count(mem_map+i)) | ||
59 | free++; | ||
60 | else | ||
61 | shared += page_count(mem_map+i) - 1; | ||
62 | } | ||
63 | printk("%d pages of RAM\n",total); | ||
64 | printk("%d free pages\n",free); | ||
65 | printk("%d reserved pages\n",reserved); | ||
66 | printk("%d pages shared\n",shared); | ||
67 | printk("%d pages swap cached\n",cached); | ||
68 | } | ||
69 | |||
70 | extern void init_pointer_table(unsigned long ptable); | ||
71 | |||
72 | /* References to section boundaries */ | ||
73 | |||
74 | extern char _text, _etext, _edata, __bss_start, _end; | ||
75 | extern char __init_begin, __init_end; | ||
76 | |||
77 | extern pmd_t *zero_pgtable; | ||
78 | |||
/*
 * Final memory setup: hand all bootmem pages to the page allocator,
 * count code/data/init pages for the boot banner and, on Motorola MMUs
 * (i.e. !CONFIG_SUN3), register the kernel pointer tables with the
 * pointer-table allocator in memory.c.
 */
void __init mem_init(void)
{
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	unsigned long tmp;
#ifndef CONFIG_SUN3
	int i;
#endif

	max_mapnr = num_physpages = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT);

#ifdef CONFIG_ATARI
	if (MACH_IS_ATARI)
		atari_stram_mem_init_hook();
#endif

	/* this will put all memory onto the freelists */
	totalram_pages = free_all_bootmem();

	/* Classify still-reserved pages by kernel section (text / init /
	   other) using the linker-provided section boundary symbols. */
	for (tmp = PAGE_OFFSET ; tmp < (unsigned long)high_memory; tmp += PAGE_SIZE) {
		if (PageReserved(virt_to_page(tmp))) {
			if (tmp >= (unsigned long)&_text
			    && tmp < (unsigned long)&_etext)
				codepages++;
			else if (tmp >= (unsigned long) &__init_begin
				 && tmp < (unsigned long) &__init_end)
				initpages++;
			else
				datapages++;
			continue;
		}
	}

#ifndef CONFIG_SUN3
	/* insert pointer tables allocated so far into the tablelist */
	init_pointer_table((unsigned long)kernel_pg_dir);
	for (i = 0; i < PTRS_PER_PGD; i++) {
		if (pgd_present(kernel_pg_dir[i]))
			init_pointer_table(__pgd_page(kernel_pg_dir[i]));
	}

	/* insert also pointer table that we used to unmap the zero page */
	if (zero_pgtable)
		init_pointer_table((unsigned long)zero_pgtable);
#endif

	printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10));
}
133 | |||
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Release the pages that held the initial ramdisk image back to the
 * page allocator once the initrd contents are no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	int pages = 0;
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
		pages++;
	}
	/* Report kilobytes, not a raw page count — the message says "k". */
	printk ("Freeing initrd memory: %dk freed\n", pages << (PAGE_SHIFT - 10));
}
#endif
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c new file mode 100644 index 000000000000..5dcb3fa35ea9 --- /dev/null +++ b/arch/m68k/mm/kmap.c | |||
@@ -0,0 +1,361 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/mm/kmap.c | ||
3 | * | ||
4 | * Copyright (C) 1997 Roman Hodek | ||
5 | * | ||
6 | * 10/01/99 cleaned up the code and changing to the same interface | ||
7 | * used by other architectures /Roman Zippel | ||
8 | */ | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | |||
18 | #include <asm/setup.h> | ||
19 | #include <asm/segment.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/io.h> | ||
23 | #include <asm/system.h> | ||
24 | |||
25 | #undef DEBUG | ||
26 | |||
27 | #define PTRTREESIZE (256*1024) | ||
28 | |||
29 | /* | ||
30 | * For 040/060 we can use the virtual memory area like other architectures, | ||
31 | * but for 020/030 we want to use early termination page descriptor and we | ||
32 | * can't mix this with normal page descriptors, so we have to copy that code | ||
33 | * (mm/vmalloc.c) and return appriorate aligned addresses. | ||
34 | */ | ||
35 | |||
36 | #ifdef CPU_M68040_OR_M68060_ONLY | ||
37 | |||
38 | #define IO_SIZE PAGE_SIZE | ||
39 | |||
/* 040/060 use ordinary page mappings, so the generic vmalloc-area
   allocator can do all the bookkeeping for us. */
static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}
44 | |||
45 | |||
/* Strip the in-page offset that __ioremap() added before handing the
   area back to vfree(). */
static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
50 | |||
51 | #else | ||
52 | |||
53 | #define IO_SIZE (256*1024) | ||
54 | |||
55 | static struct vm_struct *iolist; | ||
56 | |||
57 | static struct vm_struct *get_io_area(unsigned long size) | ||
58 | { | ||
59 | unsigned long addr; | ||
60 | struct vm_struct **p, *tmp, *area; | ||
61 | |||
62 | area = (struct vm_struct *)kmalloc(sizeof(*area), GFP_KERNEL); | ||
63 | if (!area) | ||
64 | return NULL; | ||
65 | addr = KMAP_START; | ||
66 | for (p = &iolist; (tmp = *p) ; p = &tmp->next) { | ||
67 | if (size + addr < (unsigned long)tmp->addr) | ||
68 | break; | ||
69 | if (addr > KMAP_END-size) | ||
70 | return NULL; | ||
71 | addr = tmp->size + (unsigned long)tmp->addr; | ||
72 | } | ||
73 | area->addr = (void *)addr; | ||
74 | area->size = size + IO_SIZE; | ||
75 | area->next = *p; | ||
76 | *p = area; | ||
77 | return area; | ||
78 | } | ||
79 | |||
80 | static inline void free_io_area(void *addr) | ||
81 | { | ||
82 | struct vm_struct **p, *tmp; | ||
83 | |||
84 | if (!addr) | ||
85 | return; | ||
86 | addr = (void *)((unsigned long)addr & -IO_SIZE); | ||
87 | for (p = &iolist ; (tmp = *p) ; p = &tmp->next) { | ||
88 | if (tmp->addr == addr) { | ||
89 | *p = tmp->next; | ||
90 | __iounmap(tmp->addr, tmp->size); | ||
91 | kfree(tmp); | ||
92 | return; | ||
93 | } | ||
94 | } | ||
95 | } | ||
96 | |||
97 | #endif | ||
98 | |||
/*
 * Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */
/* Rewritten by Andreas Schwab to remove all races. */

void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || size > physaddr + size)
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		/* 0x40000000-0x60000000 is returned 1:1 for serialized
		   no-cache requests; iounmap() special-cases this range. */
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void *)physaddr;
	}
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	/* Give the caller back the original in-chunk offset. */
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		/* 020/030 know only a single no-cache mode. */
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			/* NOTE(review): the vm area and any partially built
			   mapping are not torn down on this failure path. */
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

		if (CPU_IS_020_OR_030) {
			/* 020/030: one early-termination pointer descriptor
			   covers a whole PTRTREESIZE (256K) chunk. */
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			/* 040/060: normal page-sized PTEs. */
			pte_dir = pte_alloc_kernel(&init_mm, pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void *)retaddr;
}
223 | |||
/*
 * Unmap a ioremap()ed region again
 */
void iounmap(void *addr)
{
#ifdef CONFIG_AMIGA
	/* Amiga addresses in 0x40000000-0x60000000 were handed out 1:1 by
	   __ioremap() without any bookkeeping, so there is nothing to free. */
	if (MACH_IS_AMIGA &&
	    ((unsigned long)addr >= 0x40000000) &&
	    ((unsigned long)addr <= 0x60000000))
		return;
	free_io_area(addr);
#else
	free_io_area(addr);
#endif
}
238 | |||
/*
 * __iounmap unmaps nearly everything, so be careful
 * it doesn't free currently pointer/page tables anymore but it
 * wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			/* An early-termination pointer descriptor unmaps a
			   whole PTRTREESIZE chunk in one step. */
			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		/* Plain PTE mapping: clear one page at a time. */
		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
285 | |||
/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/* Translate the IOMAP_* request into CPU-specific descriptor bits. */
	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	/* Walk the range and rewrite the cache bits in each descriptor. */
	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			/* Early-termination descriptor: one entry covers a
			   whole PTRTREESIZE chunk. */
			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c new file mode 100644 index 000000000000..1453a6013721 --- /dev/null +++ b/arch/m68k/mm/memory.c | |||
@@ -0,0 +1,471 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/mm/memory.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Hamish Macdonald | ||
5 | */ | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | #include <linux/mm.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/pagemap.h> | ||
15 | |||
16 | #include <asm/setup.h> | ||
17 | #include <asm/segment.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/pgalloc.h> | ||
20 | #include <asm/system.h> | ||
21 | #include <asm/traps.h> | ||
22 | #include <asm/machdep.h> | ||
23 | |||
24 | |||
25 | /* ++andreas: {get,free}_pointer_table rewritten to use unused fields from | ||
26 | struct page instead of separately kmalloced struct. Stolen from | ||
27 | arch/sparc/mm/srmmu.c ... */ | ||
28 | |||
29 | typedef struct list_head ptable_desc; | ||
30 | static LIST_HEAD(ptable_list); | ||
31 | |||
32 | #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru)) | ||
33 | #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru)) | ||
34 | #define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index) | ||
35 | |||
36 | #define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t)) | ||
37 | |||
/*
 * Register one pointer-table slot with the ptable_list allocator.
 * Called at boot for the ptr tables already set up in head.S so that
 * get_pointer_table()/free_pointer_table() can manage the remaining
 * slots of the same page (each page holds 8 tables, one mark bit each).
 *
 * @ptable: kernel virtual address of the pointer table; the backing
 *          page's struct page lru/index fields are reused as the
 *          descriptor (see PD_PTABLE/PD_MARKBITS above).
 */
void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	/* first slot seen in this page: mark all slots free and enqueue
	 * the page on the allocator list */
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	/* clear this slot's bit: the table passed in is in use */
	PD_MARKBITS(dp) &= ~mask;
#ifdef DEBUG
	printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
#endif

	/* unreserve the page so it's possible to free that page */
	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
	set_page_count(PD_PAGE(dp), 1);

	return;
}
61 | |||
/*
 * Allocate one pointer table (PTABLE_SIZE bytes) for a user address
 * space, carving it out of a shared, noncacheable page that holds 8
 * such tables.  Returns NULL if a fresh page cannot be allocated.
 *
 * NOTE(review): if ptable_list were empty, dp would point at the list
 * head sentinel and PD_MARKBITS would read unrelated memory; callers
 * appear to rely on init_pointer_table() having populated the list
 * first — verify.
 */
pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose. Each
	 * page can hold 8 pointer tables. The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		/* front page has no free slots: allocate a new page */
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		flush_tlb_kernel_page(page);
		nocache_page(page);

		new = PD_PTABLE(page);
		/* slot 0 (bit 0) is handed out to the caller right away */
		PD_MARKBITS(new) = 0xfe;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	/* find the first free slot (lowest set bit) and its byte offset */
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_del(dp);
		list_add_tail(dp, &ptable_list);
	}
	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}
102 | |||
/*
 * Return a pointer table to the pool.  Marks its slot free; once all 8
 * slots of the backing page are free, the page is made cacheable again
 * and released.  Returns 1 if the backing page was freed, 0 otherwise.
 * Panics on a double free.
 */
int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		cache_page((void *)page);
		free_page (page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_del(dp);
		list_add(dp, &ptable_list);
	}
	return 0;
}
131 | |||
132 | #ifdef DEBUG_INVALID_PTOV | ||
133 | int mm_inv_cnt = 5; | ||
134 | #endif | ||
135 | |||
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */

/*
 * Translate a kernel virtual address in the contiguously mapped RAM
 * area to a physical address by walking the m68k_memory[] chunk table.
 * Returns -1 (all ones) when the address maps to no chunk.
 */
unsigned long mm_vtop(unsigned long vaddr)
{
	int i=0;
	unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;

	do {
		if (voff < m68k_memory[i].size) {
#ifdef DEBUGPV
			printk ("VTOP(%p)=%lx\n", vaddr,
				m68k_memory[i].addr + voff);
#endif
			return m68k_memory[i].addr + voff;
		}
		/* not in this chunk: skip its size and try the next one */
		voff -= m68k_memory[i].size;
	} while (++i < m68k_num_memory);

	/* As a special case allow `__pa(high_memory)'. */
	if (voff == 0)
		return m68k_memory[i-1].addr + m68k_memory[i-1].size;

	return -1;
}
#endif
164 | |||
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
/*
 * Inverse of mm_vtop(): translate a physical RAM address into its
 * kernel virtual address.  Returns -1 when the address lies in no
 * memory chunk (optionally logging the caller, see DEBUG_INVALID_PTOV).
 */
unsigned long mm_ptov (unsigned long paddr)
{
	int i = 0;
	unsigned long poff, voff = PAGE_OFFSET;

	do {
		poff = paddr - m68k_memory[i].addr;
		if (poff < m68k_memory[i].size) {
#ifdef DEBUGPV
			printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
#endif
			return poff + voff;
		}
		/* chunks are mapped back to back: advance virtual cursor */
		voff += m68k_memory[i].size;
	} while (++i < m68k_num_memory);

#ifdef DEBUG_INVALID_PTOV
	if (mm_inv_cnt > 0) {
		mm_inv_cnt--;
		printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
			paddr, __builtin_return_address(0));
	}
#endif
	return -1;
}
#endif
192 | |||
/* invalidate page in both caches */
/* CINVP discards all lines of the page containing paddr from both
 * caches WITHOUT writing dirty data back (see the ++roman note below);
 * '040/'060 only.  The leading nop is presumably a pipeline-sync
 * workaround — it appears in every 040 cache op in this file. */
static inline void clear040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}
203 | |||
/* invalidate page in i-cache */
/* Same as clear040() but only touches the instruction cache (%ic),
 * leaving data-cache lines (and their dirty data) alone. */
static inline void cleari040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cinvp %%ic,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}
214 | |||
/* push page in both caches */
/* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */
/* CPUSHP writes dirty lines of the page back to memory; on the '040
 * it also invalidates them, on the '060 data lines stay valid because
 * we run with the DPI bit set (see the comment block below). */
static inline void push040(unsigned long paddr)
{
	asm volatile (
		"nop\n\t"
		".chip 68040\n\t"
		"cpushp %%bc,(%0)\n\t"
		".chip 68k"
		: : "a" (paddr));
}
226 | |||
/* push and invalidate page in both caches, must disable ints
 * to avoid invalidating valid data */
static inline void pushcl040(unsigned long paddr)
{
	unsigned long flags;

	local_irq_save(flags);
	push040(paddr);
	/* '060: CPUSH left the data lines valid (DPI), so invalidate
	 * explicitly; irqs stay off so nothing refills the lines between
	 * the push and the invalidate */
	if (CPU_IS_060)
		clear040(paddr);
	local_irq_restore(flags);
}
239 | |||
240 | /* | ||
241 | * 040: Hit every page containing an address in the range paddr..paddr+len-1. | ||
242 | * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s). | ||
243 | * Hit every page until there is a page or less to go. Hit the next page, | ||
244 | * and the one after that if the range hits it. | ||
245 | */ | ||
246 | /* ++roman: A little bit more care is required here: The CINVP instruction | ||
247 | * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning | ||
248 | * and the end of the region must be treated differently if they are not | ||
249 | * exactly at the beginning or end of a page boundary. Else, maybe too much | ||
250 | * data becomes invalidated and thus lost forever. CPUSHP does what we need: | ||
251 | * it invalidates the page after pushing dirty data to memory. (Thanks to Jes | ||
252 | * for discovering the problem!) | ||
253 | */ | ||
254 | /* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set | ||
255 | * the DPI bit in the CACR; would it cause problems with temporarily changing | ||
256 | * this?). So we have to push first and then additionally to invalidate. | ||
257 | */ | ||
258 | |||
259 | |||
260 | /* | ||
261 | * cache_clear() semantics: Clear any cache entries for the area in question, | ||
262 | * without writing back dirty entries first. This is useful if the data will | ||
263 | * be overwritten anyway, e.g. by DMA to memory. The range is defined by a | ||
264 | * _physical_ address. | ||
265 | */ | ||
266 | |||
void cache_clear (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp;

		/*
		 * We need special treatment for the first page, in case it
		 * is not page-aligned. Page align the addresses to work
		 * around bug I17 in the 68060.
		 */
		if ((tmp = -paddr & (PAGE_SIZE - 1))) {
			/* partial first page: push+invalidate rather than
			 * invalidate only, or dirty data outside the range
			 * but inside the page would be lost */
			pushcl040(paddr & PAGE_MASK);
			if ((len -= tmp) <= 0)
				return;
			paddr += tmp;
		}
		tmp = PAGE_SIZE;
		paddr &= PAGE_MASK;
		/* pages wholly inside the range may be dropped outright */
		while ((len -= tmp) >= 0) {
			clear040(paddr);
			paddr += tmp;
		}
		if ((len += tmp))
			/* a page boundary gets crossed at the end */
			pushcl040(paddr);
	}
	else /* 68030 or 68020 */
		/* no per-page cache ops here: set the FLUSH_I_AND_D bits
		 * in CACR to clear both on-chip caches entirely */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	/* also flush any board-level L2 cache, if the machine has one */
	if(mach_l2_flush)
		mach_l2_flush(0);
#endif
}
304 | |||
305 | |||
306 | /* | ||
307 | * cache_push() semantics: Write back any dirty cache data in the given area, | ||
308 | * and invalidate the range in the instruction cache. It needs not (but may) | ||
309 | * invalidate those entries also in the data cache. The range is defined by a | ||
310 | * _physical_ address. | ||
311 | */ | ||
312 | |||
void cache_push (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp = PAGE_SIZE;

		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not on
		 * the '060!
		 */
		/* round the range out to whole pages: fold the sub-page
		 * start offset into len, then align paddr down */
		len += paddr & (PAGE_SIZE - 1);

		/*
		 * Work around bug I17 in the 68060 affecting some instruction
		 * lines not being invalidated properly.
		 */
		paddr &= PAGE_MASK;

		do {
			push040(paddr);
			paddr += tmp;
		} while ((len -= tmp) > 0);
	}
	/*
	 * 68030/68020 have no writeback cache. On the other hand,
	 * cache_push is actually a superset of cache_clear (the lines
	 * get written back and invalidated), so we should make sure
	 * to perform the corresponding actions. After all, this is getting
	 * called in places where we've just loaded code, or whatever, so
	 * flushing the icache is appropriate; flushing the dcache shouldn't
	 * be required.
	 */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	/* also flush any board-level L2 cache, if the machine has one */
	if(mach_l2_flush)
		mach_l2_flush(1);
#endif
}
356 | |||
/*
 * Translate a virtual address to a physical one by asking the MMU
 * hardware directly, per CPU type.  Used by flush_icache_range() for
 * addresses outside the kernel's 1:1 mapped RAM.  Returns 0 when the
 * translation fails.
 */
static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		mm_segment_t fs = get_fs();
		unsigned long paddr;

		/* probe with the kernel segment selected (restored below) */
		set_fs(get_ds());

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			"1: plpar (%0)\n"
			".chip 68k\n"
			"2:\n"
			".section .fixup,\"ax\"\n"
			"   .even\n"
			"3: sub.l %0,%0\n"
			"   jra 2b\n"
			".previous\n"
			".section __ex_table,\"a\"\n"
			"   .align 4\n"
			"   .long 1b,3b\n"
			".previous"
			: "=a" (paddr)
			: "0" (vaddr));
		set_fs(fs);
		return paddr;
	} else if (CPU_IS_040) {
		mm_segment_t fs = get_fs();
		unsigned long mmusr;

		set_fs(get_ds());

		/* PTESTR leaves the translation result in the MMUSR */
		asm volatile (".chip 68040\n\t"
			"ptestr (%1)\n\t"
			"movec %%mmusr, %0\n\t"
			".chip 68k"
			: "=r" (mmusr)
			: "a" (vaddr));
		set_fs(fs);

		/* MMU_R_040: resident — translation succeeded */
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		unsigned short mmusr;
		unsigned long *descaddr;

		/* '020/'030: PTESTR walks the tables; the descriptor
		 * address lands in descaddr, the status in mmusr */
		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			      "pmove %%psr,%1@"
			      : "=a&" (descaddr)
			      : "a" (&mmusr), "a" (vaddr));
		/* MMU_I/MMU_B/MMU_L set: translation failed */
		if (mmusr & (MMU_I|MMU_B|MMU_L))
			return 0;
		descaddr = phys_to_virt((unsigned long)descaddr);
		/* mask according to the table level the search ended at:
		 * early termination means a larger block maps the address */
		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
		}
	}
	return 0;
}
423 | |||
/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
/*
 * Write back and invalidate caches for [address, endaddr), e.g. after
 * loading code.  On '040/'060 this pushes page by page via CPUSHP,
 * using the cheap virt_to_phys() for the 1:1 mapped RAM area and the
 * MMU-probing virt_to_phys_slow() elsewhere; on '020/'030 it clears
 * the whole icache via CACR.
 */
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
	if (CPU_IS_040_OR_060) {
		address &= PAGE_MASK;

		if (address >= PAGE_OFFSET && address < (unsigned long)high_memory) {
			do {
				asm volatile ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpushp %%bc,(%0)\n\t"
					      ".chip 68k"
					      : : "a" (virt_to_phys((void *)address)));
				address += PAGE_SIZE;
			} while (address < endaddr);
		} else {
			do {
				asm volatile ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpushp %%bc,(%0)\n\t"
					      ".chip 68k"
					      : : "a" (virt_to_phys_slow(address)));
				address += PAGE_SIZE;
			} while (address < endaddr);
		}
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}
459 | |||
460 | |||
461 | #ifndef CONFIG_SINGLE_MEMORY_CHUNK | ||
462 | int mm_end_of_chunk (unsigned long addr, int len) | ||
463 | { | ||
464 | int i; | ||
465 | |||
466 | for (i = 0; i < m68k_num_memory; i++) | ||
467 | if (m68k_memory[i].addr + m68k_memory[i].size == addr + len) | ||
468 | return 1; | ||
469 | return 0; | ||
470 | } | ||
471 | #endif | ||
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c new file mode 100644 index 000000000000..d855fec26317 --- /dev/null +++ b/arch/m68k/mm/motorola.c | |||
@@ -0,0 +1,285 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/motorola.c | ||
3 | * | ||
4 | * Routines specific to the Motorola MMU, originally from: | ||
5 | * linux/arch/m68k/init.c | ||
6 | * which are Copyright (C) 1995 Hamish Macdonald | ||
7 | * | ||
8 | * Moved 8/20/1999 Sam Creasey | ||
9 | */ | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/signal.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/swap.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/bootmem.h> | ||
22 | |||
23 | #include <asm/setup.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/page.h> | ||
26 | #include <asm/pgalloc.h> | ||
27 | #include <asm/system.h> | ||
28 | #include <asm/machdep.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/dma.h> | ||
31 | #ifdef CONFIG_ATARI | ||
32 | #include <asm/atari_stram.h> | ||
33 | #endif | ||
34 | |||
35 | #undef DEBUG | ||
36 | |||
37 | #ifndef mm_cachebits | ||
38 | /* | ||
39 | * Bits to add to page descriptors for "normal" caching mode. | ||
40 | * For 68020/030 this is 0. | ||
41 | * For 68040, this is _PAGE_CACHE040 (cachable, copyback) | ||
42 | */ | ||
43 | unsigned long mm_cachebits; | ||
44 | EXPORT_SYMBOL(mm_cachebits); | ||
45 | #endif | ||
46 | |||
/*
 * Boot-time allocation of one page of kernel page-table entries.
 * The page is zeroed, pushed out to RAM, and remapped noncacheable
 * (see nocache_page()) before being handed out for use as a pte table.
 */
static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

	clear_page(ptablep);
	/* make sure the zeroed contents reach RAM before the page's own
	 * mapping is switched to noncacheable */
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}
60 | |||
61 | static pmd_t *last_pgtable __initdata = NULL; | ||
62 | pmd_t *zero_pgtable __initdata = NULL; | ||
63 | |||
/*
 * Boot-time allocator for kernel pointer (pmd) tables.  Tables are
 * carved PTRS_PER_PMD entries at a time out of a page: the first call
 * resumes from the last ptr-table page that head.S used, and a fresh
 * noncacheable page is taken whenever the current one is exhausted.
 */
static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	/* advance to the next table slot; if that crosses into a new
	 * page, allocate and prepare one (zeroed, noncacheable) */
	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}
101 | |||
/*
 * Map one physical memory chunk into kernel virtual space at the
 * running cursor 'virtaddr' (static, so successive calls lay chunks
 * out contiguously from PAGE_OFFSET).  On the '020/'030, aligned runs
 * are mapped with root-level (32MB) or pointer-level (256KB) early
 * termination descriptors; otherwise 4K ptes are used.  Returns the
 * new cursor, i.e. the virtual end of the mapped area.
 */
static unsigned long __init
map_chunk (unsigned long addr, long size)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	static unsigned long virtaddr = PAGE_OFFSET;
	unsigned long physaddr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/* fold the descriptor/cache-mode bits into the physical address */
	physaddr = (addr | m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				/* 32MB-aligned 32MB run: one root-level
				 * early-termination descriptor */
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				/* pointer-level early termination maps
				 * 256KB per descriptor */
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				/* first 256KB: use a real pte table so the
				 * page at virtual 0 can stay invalid
				 * (presumably to trap NULL dereferences) */
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				/* don't overwrite mappings head.S made */
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				/* keep the pte for virtual 0 invalid */
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif

	return virtaddr;
}
197 | |||
198 | /* | ||
199 | * paging_init() continues the virtual memory environment setup which | ||
200 | * was begun by the code in arch/head.S. | ||
201 | */ | ||
void __init paging_init(void)
{
	int chunk;
	unsigned long mem_avail = 0;
	unsigned long zones_size[3] = { 0, };

#ifdef DEBUG
	{
		extern unsigned long availmem;
		printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
			kernel_pg_dir, availmem, start_mem, end_mem);
	}
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		/* fold the cacheable/copyback bits into all 16 protection
		 * combinations so new ptes get them automatically */
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. It may allocate some memory for page
	 * tables and thus modify availmem.
	 */

	for (chunk = 0; chunk < m68k_num_memory; chunk++) {
		mem_avail = map_chunk (m68k_memory[chunk].addr,
				       m68k_memory[chunk].size);

	}

	flush_tlb_all();
#ifdef DEBUG
	printk ("memory available is %ldKB\n", mem_avail >> 10);
	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	/* zone 0: memory reachable by DMA (up to mach_max_dma_address);
	 * zone 1: the remainder up to high_memory */
	zones_size[0] = (mach_max_dma_address < (unsigned long)high_memory ?
			 (mach_max_dma_address+1) : (unsigned long)high_memory);
	zones_size[1] = (unsigned long)high_memory - zones_size[0];

	/* convert both zone bounds from addresses to page counts */
	zones_size[0] = (zones_size[0] - PAGE_OFFSET) >> PAGE_SHIFT;
	zones_size[1] >>= PAGE_SHIFT;

	free_area_init(zones_size);
}
269 | |||
270 | extern char __init_begin, __init_end; | ||
271 | |||
/*
 * Release the kernel's __init text/data: page by page, clear the
 * reserved flag, give the page a refcount of one, and return it to the
 * page allocator, accounting it in totalram_pages.
 */
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)&__init_begin;
	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
}
284 | |||
285 | |||
diff --git a/arch/m68k/mm/sun3kmap.c b/arch/m68k/mm/sun3kmap.c new file mode 100644 index 000000000000..7f0d86f3fe73 --- /dev/null +++ b/arch/m68k/mm/sun3kmap.c | |||
@@ -0,0 +1,156 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/mm/sun3kmap.c | ||
3 | * | ||
4 | * Copyright (C) 2002 Sam Creasey <sammy@sammy.net> | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file COPYING in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/vmalloc.h> | ||
15 | |||
16 | #include <asm/page.h> | ||
17 | #include <asm/pgtable.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <asm/sun3mmu.h> | ||
20 | |||
21 | #undef SUN3_KMAP_DEBUG | ||
22 | |||
23 | #ifdef SUN3_KMAP_DEBUG | ||
24 | extern void print_pte_vaddr(unsigned long vaddr); | ||
25 | #endif | ||
26 | |||
27 | extern void mmu_emu_map_pmeg (int context, int vaddr); | ||
28 | |||
29 | static inline void do_page_mapin(unsigned long phys, unsigned long virt, | ||
30 | unsigned long type) | ||
31 | { | ||
32 | unsigned long pte; | ||
33 | pte_t ptep; | ||
34 | |||
35 | ptep = pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL); | ||
36 | pte = pte_val(ptep); | ||
37 | pte |= type; | ||
38 | |||
39 | sun3_put_pte(virt, pte); | ||
40 | |||
41 | #ifdef SUN3_KMAP_DEBUG | ||
42 | print_pte_vaddr(virt); | ||
43 | #endif | ||
44 | |||
45 | } | ||
46 | |||
47 | static inline void do_pmeg_mapin(unsigned long phys, unsigned long virt, | ||
48 | unsigned long type, int pages) | ||
49 | { | ||
50 | |||
51 | if(sun3_get_segmap(virt & ~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG) | ||
52 | mmu_emu_map_pmeg(sun3_get_context(), virt); | ||
53 | |||
54 | while(pages) { | ||
55 | do_page_mapin(phys, virt, type); | ||
56 | phys += PAGE_SIZE; | ||
57 | virt += PAGE_SIZE; | ||
58 | pages--; | ||
59 | } | ||
60 | } | ||
61 | |||
/*
 * Map a physical range into kernel virtual space with the given sun3
 * page type (e.g. SUN3_PAGE_TYPE_IO).  Grabs a vmalloc-area slot and
 * fills in the hardware ptes one PMEG segment at a time.  Returns the
 * virtual address corresponding to 'phys', or NULL on failure.
 */
void *sun3_ioremap(unsigned long phys, unsigned long size,
		   unsigned long type)
{
	struct vm_struct *area;
	unsigned long offset, virt, ret;
	int pages;

	if(!size)
		return NULL;

	/* page align */
	offset = phys & (PAGE_SIZE-1);
	phys &= ~(PAGE_SIZE-1);

	size += offset;
	size = PAGE_ALIGN(size);
	if((area = get_vm_area(size, VM_IOREMAP)) == NULL)
		return NULL;

#ifdef SUN3_KMAP_DEBUG
	printk("ioremap: got virt %p size %lx(%lx)\n",
	       area->addr, size, area->size);
#endif

	pages = size / PAGE_SIZE;
	virt = (unsigned long)area->addr;
	/* caller gets the sub-page offset added back in */
	ret = virt + offset;

	while(pages) {
		int seg_pages;

		/* pages remaining in the current PMEG segment */
		seg_pages = (SUN3_PMEG_SIZE - (virt & SUN3_PMEG_MASK)) / PAGE_SIZE;
		if(seg_pages > pages)
			seg_pages = pages;

		do_pmeg_mapin(phys, virt, type, seg_pages);

		pages -= seg_pages;
		phys += seg_pages * PAGE_SIZE;
		virt += seg_pages * PAGE_SIZE;
	}

	return (void *)ret;

}
107 | |||
108 | |||
109 | void *__ioremap(unsigned long phys, unsigned long size, int cache) | ||
110 | { | ||
111 | |||
112 | return sun3_ioremap(phys, size, SUN3_PAGE_TYPE_IO); | ||
113 | |||
114 | } | ||
115 | |||
116 | void iounmap(void *addr) | ||
117 | { | ||
118 | vfree((void *)(PAGE_MASK & (unsigned long)addr)); | ||
119 | } | ||
120 | |||
121 | /* sun3_map_test(addr, val) -- Reads a byte from addr, storing to val, | ||
122 | * trapping the potential read fault. Returns 0 if the access faulted, | ||
123 | * 1 on success. | ||
124 | * | ||
125 | * This function is primarily used to check addresses on the VME bus. | ||
126 | * | ||
127 | * Mucking with the page fault handler seems a little hackish to me, but | ||
128 | * SunOS, NetBSD, and Mach all implemented this check in such a manner, | ||
129 | * so I figure we're allowed. | ||
130 | */ | ||
/*
 * Probe 'addr' with a single byte read (stored through 'val'); the
 * __ex_table entry redirects a fault at label 1 to label 3, so a bad
 * address yields ret = 0 instead of an oops.  Returns 1 on success,
 * 0 if the access faulted.
 */
int sun3_map_test(unsigned long addr, char *val)
{
	int ret = 0;

	__asm__ __volatile__
		(".globl _sun3_map_test_start\n"
		 "_sun3_map_test_start:\n"
		 "1: moveb (%2), (%0)\n"
		 "   moveq #1, %1\n"
		 "2:\n"
		 ".section .fixup,\"ax\"\n"
		 ".even\n"
		 "3: moveq #0, %1\n"
		 "   jmp 2b\n"
		 ".previous\n"
		 ".section __ex_table,\"a\"\n"
		 ".align 4\n"
		 ".long 1b,3b\n"
		 ".previous\n"
		 ".globl _sun3_map_test_end\n"
		 "_sun3_map_test_end:\n"
		 : "=a"(val), "=r"(ret)
		 : "a"(addr));

	return ret;
}
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c new file mode 100644 index 000000000000..a47be196a47c --- /dev/null +++ b/arch/m68k/mm/sun3mmu.c | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/mm/sun3mmu.c | ||
3 | * | ||
4 | * Implementations of mm routines specific to the sun3 MMU. | ||
5 | * | ||
6 | * Moved here 8/20/1999 Sam Creasey | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <linux/signal.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/swap.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | |||
20 | #include <asm/setup.h> | ||
21 | #include <asm/uaccess.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <asm/pgtable.h> | ||
24 | #include <asm/system.h> | ||
25 | #include <asm/machdep.h> | ||
26 | #include <asm/io.h> | ||
27 | |||
/* Sun3 MMU emulation bootstrap; defined elsewhere in the port --
 * presumably arch/m68k/sun3/mmu_emu.c, verify before relying on it. */
extern void mmu_emu_init (unsigned long bootmem_end);

/* Message format used when a bad pmd is found during pte allocation. */
const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

/* Number of physical pages; set up by earlier boot code -- TODO confirm
 * where it is assigned, it is only read here (in paging_init). */
extern unsigned long num_pages;
33 | |||
/* No init-section memory is reclaimed on the Sun3 MMU port; this is
 * a deliberate no-op stub so generic code can still call it. */
void free_initmem(void)
{
}
37 | |||
/* For the sun3 we try to follow the i386 paging_init() more closely */
/* start_mem and end_mem have PAGE_OFFSET added already */
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
/*
 * paging_init() -- build the initial kernel page tables for the Sun3,
 * mapping all physical memory linearly at PAGE_OFFSET, then hand the
 * end of the bootmem region to the MMU emulation layer and register
 * the memory zones with the page allocator.
 */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[3] = {0, 0, 0};
	unsigned long size;


#ifdef TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
	/* Allocate and clear the shared empty zero page. */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Start the linear mapping at the base of kernel virtual space. */
	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
	memset (kernel_pg_dir, 0, sizeof (kernel_pg_dir));

	/* One pte_t per physical page, rounded up to whole pages so the
	 * page tables themselves occupy an integral number of pages. */
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);

	/* Carve all page tables out of one contiguous bootmem chunk;
	 * bootmem_end marks the first page past them, page-aligned. */
	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	while (address < (unsigned long)high_memory) {
		/* The pgd entry stores the page table's PHYSICAL address. */
		pg_table = (pte_t *) __pa (next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va ((unsigned long) pg_table);
		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			/* Entries past the end of RAM are cleared so the
			 * trailing part of the last table stays invalid. */
			if (address >= (unsigned long)high_memory)
				pte_val (pte) = 0;
			set_pte (pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	/* Let the MMU emulation claim everything above the page tables. */
	mmu_emu_init(bootmem_end);

	/* Boot task has no user address space yet. */
	current->mm = NULL;

	/* memory sizing is a hack stolen from motorola.c.. hope it works for us */
	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
	zones_size[1] = 0;

	free_area_init(zones_size);

}
101 | |||
102 | |||