diff options
Diffstat (limited to 'arch/m68knommu/mm')
-rw-r--r--  arch/m68knommu/mm/Makefile |   5
-rw-r--r--  arch/m68knommu/mm/fault.c  |  57
-rw-r--r--  arch/m68knommu/mm/init.c   | 231
-rw-r--r--  arch/m68knommu/mm/kmap.c   |  56
-rw-r--r--  arch/m68knommu/mm/memory.c | 132
5 files changed, 481 insertions, 0 deletions
diff --git a/arch/m68knommu/mm/Makefile b/arch/m68knommu/mm/Makefile new file mode 100644 index 000000000000..fc91f254f51b --- /dev/null +++ b/arch/m68knommu/mm/Makefile | |||
@@ -0,0 +1,5 @@ | |||
#
# Makefile for the m68knommu-specific parts of the Linux memory manager.
#

obj-y += init.o fault.o memory.o kmap.o
diff --git a/arch/m68knommu/mm/fault.c b/arch/m68knommu/mm/fault.c new file mode 100644 index 000000000000..6f6673cb5829 --- /dev/null +++ b/arch/m68knommu/mm/fault.c | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * linux/arch/m68knommu/mm/fault.c | ||
3 | * | ||
4 | * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, | ||
5 | * Copyright (C) 2000 Lineo, Inc. (www.lineo.com) | ||
6 | * | ||
7 | * Based on: | ||
8 | * | ||
9 | * linux/arch/m68k/mm/fault.c | ||
10 | * | ||
11 | * Copyright (C) 1995 Hamish Macdonald | ||
12 | */ | ||
13 | |||
14 | #include <linux/mman.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/ptrace.h> | ||
18 | |||
19 | #include <asm/system.h> | ||
20 | #include <asm/pgtable.h> | ||
21 | |||
22 | extern void die_if_kernel(char *, struct pt_regs *, long); | ||
23 | |||
24 | /* | ||
25 | * This routine handles page faults. It determines the problem, and | ||
26 | * then passes it off to one of the appropriate routines. | ||
27 | * | ||
28 | * error_code: | ||
29 | * bit 0 == 0 means no page found, 1 means protection fault | ||
30 | * bit 1 == 0 means read, 1 means write | ||
31 | * | ||
32 | * If this routine detects a bad access, it returns 1, otherwise it | ||
33 | * returns 0. | ||
34 | */ | ||
35 | asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address, | ||
36 | unsigned long error_code) | ||
37 | { | ||
38 | #ifdef DEBUG | ||
39 | printk (KERN_DEBUG "regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n", | ||
40 | regs->sr, regs->pc, address, error_code); | ||
41 | #endif | ||
42 | |||
43 | /* | ||
44 | * Oops. The kernel tried to access some bad page. We'll have to | ||
45 | * terminate things with extreme prejudice. | ||
46 | */ | ||
47 | if ((unsigned long) address < PAGE_SIZE) { | ||
48 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); | ||
49 | } else | ||
50 | printk(KERN_ALERT "Unable to handle kernel access"); | ||
51 | printk(KERN_ALERT " at virtual address %08lx\n",address); | ||
52 | die_if_kernel("Oops", regs, error_code); | ||
53 | do_exit(SIGKILL); | ||
54 | |||
55 | return 1; | ||
56 | } | ||
57 | |||
diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c new file mode 100644 index 000000000000..89f0b554ffb7 --- /dev/null +++ b/arch/m68knommu/mm/init.c | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * linux/arch/m68knommu/mm/init.c | ||
3 | * | ||
4 | * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, | ||
5 | * Kenneth Albanowski <kjahds@kjahds.com>, | ||
6 | * Copyright (C) 2000 Lineo, Inc. (www.lineo.com) | ||
7 | * | ||
8 | * Based on: | ||
9 | * | ||
10 | * linux/arch/m68k/mm/init.c | ||
11 | * | ||
12 | * Copyright (C) 1995 Hamish Macdonald | ||
13 | * | ||
14 | * JAN/1999 -- hacked to support ColdFire (gerg@snapgear.com) | ||
15 | * DEC/2000 -- linux 2.4 support <davidm@snapgear.com> | ||
16 | */ | ||
17 | |||
18 | #include <linux/config.h> | ||
19 | #include <linux/signal.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/types.h> | ||
25 | #include <linux/ptrace.h> | ||
26 | #include <linux/mman.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/swap.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/highmem.h> | ||
31 | #include <linux/pagemap.h> | ||
32 | #include <linux/bootmem.h> | ||
33 | #include <linux/slab.h> | ||
34 | |||
35 | #include <asm/setup.h> | ||
36 | #include <asm/segment.h> | ||
37 | #include <asm/page.h> | ||
38 | #include <asm/pgtable.h> | ||
39 | #include <asm/system.h> | ||
40 | #include <asm/machdep.h> | ||
41 | |||
42 | #undef DEBUG | ||
43 | |||
44 | extern void die_if_kernel(char *,struct pt_regs *,long); | ||
45 | extern void free_initmem(void); | ||
46 | |||
47 | /* | ||
48 | * BAD_PAGE is the page that is used for page faults when linux | ||
49 | * is out-of-memory. Older versions of linux just did a | ||
50 | * do_exit(), but using this instead means there is less risk | ||
51 | * for a process dying in kernel mode, possibly leaving a inode | ||
52 | * unused etc.. | ||
53 | * | ||
54 | * BAD_PAGETABLE is the accompanying page-table: it is initialized | ||
55 | * to point to BAD_PAGE entries. | ||
56 | * | ||
57 | * ZERO_PAGE is a special page that is used for zero-initialized | ||
58 | * data and COW. | ||
59 | */ | ||
60 | static unsigned long empty_bad_page_table; | ||
61 | |||
62 | static unsigned long empty_bad_page; | ||
63 | |||
64 | unsigned long empty_zero_page; | ||
65 | |||
66 | extern unsigned long rom_length; | ||
67 | |||
68 | void show_mem(void) | ||
69 | { | ||
70 | unsigned long i; | ||
71 | int free = 0, total = 0, reserved = 0, shared = 0; | ||
72 | int cached = 0; | ||
73 | |||
74 | printk(KERN_INFO "\nMem-info:\n"); | ||
75 | show_free_areas(); | ||
76 | i = max_mapnr; | ||
77 | while (i-- > 0) { | ||
78 | total++; | ||
79 | if (PageReserved(mem_map+i)) | ||
80 | reserved++; | ||
81 | else if (PageSwapCache(mem_map+i)) | ||
82 | cached++; | ||
83 | else if (!page_count(mem_map+i)) | ||
84 | free++; | ||
85 | else | ||
86 | shared += page_count(mem_map+i) - 1; | ||
87 | } | ||
88 | printk(KERN_INFO "%d pages of RAM\n",total); | ||
89 | printk(KERN_INFO "%d free pages\n",free); | ||
90 | printk(KERN_INFO "%d reserved pages\n",reserved); | ||
91 | printk(KERN_INFO "%d pages shared\n",shared); | ||
92 | printk(KERN_INFO "%d pages swap cached\n",cached); | ||
93 | } | ||
94 | |||
95 | extern unsigned long memory_start; | ||
96 | extern unsigned long memory_end; | ||
97 | |||
98 | /* | ||
99 | * paging_init() continues the virtual memory environment setup which | ||
100 | * was begun by the code in arch/head.S. | ||
101 | * The parameters are pointers to where to stick the starting and ending | ||
102 | * addresses of available kernel virtual memory. | ||
103 | */ | ||
104 | void paging_init(void) | ||
105 | { | ||
106 | /* | ||
107 | * Make sure start_mem is page aligned, otherwise bootmem and | ||
108 | * page_alloc get different views of the world. | ||
109 | */ | ||
110 | #ifdef DEBUG | ||
111 | unsigned long start_mem = PAGE_ALIGN(memory_start); | ||
112 | #endif | ||
113 | unsigned long end_mem = memory_end & PAGE_MASK; | ||
114 | |||
115 | #ifdef DEBUG | ||
116 | printk (KERN_DEBUG "start_mem is %#lx\nvirtual_end is %#lx\n", | ||
117 | start_mem, end_mem); | ||
118 | #endif | ||
119 | |||
120 | /* | ||
121 | * Initialize the bad page table and bad page to point | ||
122 | * to a couple of allocated pages. | ||
123 | */ | ||
124 | empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); | ||
125 | empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); | ||
126 | empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); | ||
127 | memset((void *)empty_zero_page, 0, PAGE_SIZE); | ||
128 | |||
129 | /* | ||
130 | * Set up SFC/DFC registers (user data space). | ||
131 | */ | ||
132 | set_fs (USER_DS); | ||
133 | |||
134 | #ifdef DEBUG | ||
135 | printk (KERN_DEBUG "before free_area_init\n"); | ||
136 | |||
137 | printk (KERN_DEBUG "free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n", | ||
138 | start_mem, end_mem); | ||
139 | #endif | ||
140 | |||
141 | { | ||
142 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; | ||
143 | |||
144 | zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT; | ||
145 | zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT; | ||
146 | #ifdef CONFIG_HIGHMEM | ||
147 | zones_size[ZONE_HIGHMEM] = 0; | ||
148 | #endif | ||
149 | free_area_init(zones_size); | ||
150 | } | ||
151 | } | ||
152 | |||
153 | void mem_init(void) | ||
154 | { | ||
155 | int codek = 0, datak = 0, initk = 0; | ||
156 | unsigned long tmp; | ||
157 | extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end; | ||
158 | extern unsigned int _ramend, _rambase; | ||
159 | unsigned long len = _ramend - _rambase; | ||
160 | unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */ | ||
161 | unsigned long end_mem = memory_end; /* DAVIDM - this must not include kernel stack at top */ | ||
162 | |||
163 | #ifdef DEBUG | ||
164 | printk(KERN_DEBUG "Mem_init: start=%lx, end=%lx\n", start_mem, end_mem); | ||
165 | #endif | ||
166 | |||
167 | end_mem &= PAGE_MASK; | ||
168 | high_memory = (void *) end_mem; | ||
169 | |||
170 | start_mem = PAGE_ALIGN(start_mem); | ||
171 | max_mapnr = num_physpages = (((unsigned long) high_memory) - PAGE_OFFSET) >> PAGE_SHIFT; | ||
172 | |||
173 | /* this will put all memory onto the freelists */ | ||
174 | totalram_pages = free_all_bootmem(); | ||
175 | |||
176 | codek = (&_etext - &_stext) >> 10; | ||
177 | datak = (&_ebss - &_sdata) >> 10; | ||
178 | initk = (&__init_begin - &__init_end) >> 10; | ||
179 | |||
180 | tmp = nr_free_pages() << PAGE_SHIFT; | ||
181 | printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n", | ||
182 | tmp >> 10, | ||
183 | len >> 10, | ||
184 | (rom_length > 0) ? ((rom_length >> 10) - codek) : 0, | ||
185 | rom_length >> 10, | ||
186 | codek, | ||
187 | datak | ||
188 | ); | ||
189 | } | ||
190 | |||
191 | |||
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the pages holding the initial ramdisk image to the page
 * allocator once the initrd contents are no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	int pages;

	for (pages = 0; start < end; start += PAGE_SIZE, pages++) {
		struct page *page = virt_to_page(start);

		ClearPageReserved(page);
		set_page_count(page, 1);
		free_page(start);
		totalram_pages++;
	}
	printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages);
}
#endif
206 | |||
/*
 * Release the memory occupied by the kernel's .init sections back to
 * the page allocator.  Only whole pages lying entirely inside the
 * init region are freed, so partial boundary pages are kept.
 */
void
free_initmem()
{
#ifdef CONFIG_RAMKERNEL
	extern char __init_begin, __init_end;
	unsigned long addr;

	/*
	 * The following code should be cool even if these sections
	 * are not page aligned.
	 *
	 * NOTE(review): the "<" test below skips a final page whose end
	 * lands exactly on __init_end; "<=" would free that page too --
	 * confirm whether that boundary page is intentionally kept.
	 */
	for (addr = PAGE_ALIGN((unsigned long)(&__init_begin));
	     addr + PAGE_SIZE < (unsigned long)(&__init_end);
	     addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);

		ClearPageReserved(page);
		set_page_count(page, 1);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_NOTICE "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
		(addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
		(int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
		(int)(addr - PAGE_SIZE));
#endif
}
231 | |||
diff --git a/arch/m68knommu/mm/kmap.c b/arch/m68knommu/mm/kmap.c new file mode 100644 index 000000000000..04213e1c1e57 --- /dev/null +++ b/arch/m68knommu/mm/kmap.c | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * linux/arch/m68knommu/mm/kmap.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Lineo, <davidm@snapgear.com> | ||
5 | * Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/config.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/vmalloc.h> | ||
15 | |||
16 | #include <asm/setup.h> | ||
17 | #include <asm/segment.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/pgalloc.h> | ||
20 | #include <asm/io.h> | ||
21 | #include <asm/system.h> | ||
22 | |||
23 | #undef DEBUG | ||
24 | |||
/*
 * Map some physical address range into the kernel address space.
 * With no MMU the mapping is the identity: size and cacheflag are
 * ignored and the physical address is handed back unchanged.
 */
void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	return (void *) physaddr;
}
32 | |||
/*
 * Unmap a ioremap()ed region again.  Nothing to do without an MMU.
 */
void iounmap(void *addr)
{
}
39 | |||
/*
 * __iounmap unmaps nearly everything, so be careful.
 * It no longer frees the current pointer/page tables, but that
 * facility wasn't used anyway and might be added back later.
 * A no-op here since there is no MMU.
 */
void __iounmap(void *addr, unsigned long size)
{
}
48 | |||
/*
 * Set a new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may
 * already be in the cache.  No cache control is performed here.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
}
diff --git a/arch/m68knommu/mm/memory.c b/arch/m68knommu/mm/memory.c new file mode 100644 index 000000000000..0eef72915e61 --- /dev/null +++ b/arch/m68knommu/mm/memory.c | |||
@@ -0,0 +1,132 @@ | |||
1 | /* | ||
2 | * linux/arch/m68knommu/mm/memory.c | ||
3 | * | ||
4 | * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>, | ||
5 | * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com) | ||
6 | * | ||
7 | * Based on: | ||
8 | * | ||
9 | * linux/arch/m68k/mm/memory.c | ||
10 | * | ||
11 | * Copyright (C) 1995 Hamish Macdonald | ||
12 | */ | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/string.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/slab.h> | ||
20 | |||
21 | #include <asm/setup.h> | ||
22 | #include <asm/segment.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/pgtable.h> | ||
25 | #include <asm/system.h> | ||
26 | #include <asm/traps.h> | ||
27 | #include <asm/io.h> | ||
28 | |||
/*
 * cache_clear() semantics: Clear any cache entries for the area in
 * question, without writing back dirty entries first.  This is useful
 * if the data will be overwritten anyway, e.g. by DMA to memory.  The
 * range is defined by a _physical_ address.  Nothing to do here.
 */
void cache_clear (unsigned long paddr, int len)
{
}
39 | |||
40 | |||
/*
 * Define cache invalidate functions.  The ColdFire 5407 is really
 * the only processor that needs to do some work here.  Anything
 * that has separate data and instruction caches will be a problem.
 */
#ifdef CONFIG_M5407

static __inline__ void cache_invalidate_lines(unsigned long paddr, int len)
{
	unsigned long sset, eset;

	/* First and one-past-last 16-byte cache set covering the range. */
	sset = (paddr & 0x00000ff0);
	eset = ((paddr + len) & 0x00000ff0) + 0x10;

	/* Walk all four ways (d0 = 0..3) of each set and invalidate. */
	__asm__ __volatile__ (
		"nop\n\t"
		"clrl %%d0\n\t"
		"1:\n\t"
		"movel %0,%%a0\n\t"
		"addl %%d0,%%a0\n\t"
		"2:\n\t"
		".word 0xf4e8\n\t"
		"addl #0x10,%%a0\n\t"
		"cmpl %1,%%a0\n\t"
		"blt 2b\n\t"
		"addql #1,%%d0\n\t"
		"cmpil #4,%%d0\n\t"
		"bne 1b"
		: : "a" (sset), "a" (eset) : "d0", "a0" );
}

#else
/* Nothing to invalidate on other parts. */
#define cache_invalidate_lines(a,b)
#endif
75 | |||
76 | |||
/*
 * cache_push() semantics: Write back any dirty cache data in the given
 * area, and invalidate the range in the instruction cache.  It need
 * not (but may) invalidate those entries also in the data cache.  The
 * range is defined by a _physical_ address.
 */
void cache_push (unsigned long paddr, int len)
{
	cache_invalidate_lines(paddr, len);
}
88 | |||
89 | |||
/*
 * cache_push_v() semantics: Write back any dirty cache data in the
 * given area, and invalidate those entries at least in the instruction
 * cache.  This is intended to be used after data has been written that
 * can be executed as code later.  The range is defined by a
 * _user_mode_ _virtual_ address (or, more exactly, the space is
 * defined by the %sfc/%dfc register).
 */
void cache_push_v (unsigned long vaddr, int len)
{
	cache_invalidate_lines(vaddr, len);
}
102 | |||
/*
 * Map some physical address range into the kernel address space.
 * The code is copied and adapted from map_chunk().  With no MMU this
 * is the identity mapping: size, nocacheflag and memavailp are
 * ignored and paddr is returned unchanged.
 */
unsigned long kernel_map(unsigned long paddr, unsigned long size,
			 int nocacheflag, unsigned long *memavailp )
{
	return paddr;
}
112 | |||
113 | |||
/*
 * Decide whether an address lies outside allocated kernel RAM.
 *
 * What we are really trying to do is determine if addr is in an
 * allocated kernel memory region.  If not, assume we cannot free it
 * or otherwise de-allocate it.  Ideally we could restrict this to
 * really being in a ROM or flash, but that would need to be done on
 * a board by board basis, not globally.
 */
int is_in_rom(unsigned long addr)
{
	extern unsigned long _ramstart, _ramend;

	/* Anything outside [_ramstart, _ramend) is treated as ROM. */
	if (addr < _ramstart || addr >= _ramend)
		return 1;

	/* Default case, not in ROM */
	return 0;
}
132 | |||