Diffstat (limited to 'arch/m68k/mm')
-rw-r--r--  arch/m68k/mm/Makefile    |  13
-rw-r--r--  arch/m68k/mm/Makefile_mm |   8
-rw-r--r--  arch/m68k/mm/Makefile_no |   5
-rw-r--r--  arch/m68k/mm/init.c      | 153
-rw-r--r--  arch/m68k/mm/init_mm.c   | 150
-rw-r--r--  arch/m68k/mm/init_no.c   | 193
-rw-r--r--  arch/m68k/mm/kmap.c      | 368
-rw-r--r--  arch/m68k/mm/kmap_mm.c   | 367
-rw-r--r--  arch/m68k/mm/kmap_no.c   |  45
9 files changed, 780 insertions(+), 522 deletions(-)
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index 5eaa43c4cb3c..b60270e4954b 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -1,8 +1,5 @@
-#
-# Makefile for the linux m68k-specific parts of the memory manager.
-#
-
-obj-y		:= cache.o init.o fault.o hwtest.o
-
-obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o
-obj-$(CONFIG_MMU_SUN3)		+= sun3kmap.o sun3mmu.o
+ifdef CONFIG_MMU
+include arch/m68k/mm/Makefile_mm
+else
+include arch/m68k/mm/Makefile_no
+endif
diff --git a/arch/m68k/mm/Makefile_mm b/arch/m68k/mm/Makefile_mm
new file mode 100644
index 000000000000..5eaa43c4cb3c
--- /dev/null
+++ b/arch/m68k/mm/Makefile_mm
@@ -0,0 +1,8 @@
+#
+# Makefile for the linux m68k-specific parts of the memory manager.
+#
+
+obj-y		:= cache.o init.o fault.o hwtest.o
+
+obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o
+obj-$(CONFIG_MMU_SUN3)		+= sun3kmap.o sun3mmu.o
diff --git a/arch/m68k/mm/Makefile_no b/arch/m68k/mm/Makefile_no
new file mode 100644
index 000000000000..b54ab6b4b523
--- /dev/null
+++ b/arch/m68k/mm/Makefile_no
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux m68knommu specific parts of the memory manager.
+#
+
+obj-y += init.o kmap.o
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 8bc842554e5b..27b5ce089a34 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -1,150 +1,5 @@
-/*
- * linux/arch/m68k/mm/init.c
- *
- * Copyright (C) 1995 Hamish Macdonald
- *
- * Contains common initialization routines, specific init code moved
- * to motorola.c and sun3mmu.c
- */
-
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/gfp.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/system.h>
-#include <asm/machdep.h>
-#include <asm/io.h>
-#ifdef CONFIG_ATARI
-#include <asm/atari_stram.h>
-#endif
-#include <asm/sections.h>
-#include <asm/tlb.h>
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-pg_data_t pg_data_map[MAX_NUMNODES];
-EXPORT_SYMBOL(pg_data_map);
-
-int m68k_virt_to_node_shift;
-
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-pg_data_t *pg_data_table[65];
-EXPORT_SYMBOL(pg_data_table);
-#endif
-
-void __init m68k_setup_node(int node)
-{
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-	struct mem_info *info = m68k_memory + node;
-	int i, end;
-
-	i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
-	end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
-	for (; i <= end; i++) {
-		if (pg_data_table[i])
-			printk("overlap at %u for chunk %u\n", i, node);
-		pg_data_table[i] = pg_data_map + node;
-	}
-#endif
-	pg_data_map[node].bdata = bootmem_node_data + node;
-	node_set_online(node);
-}
-
-
-/*
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-
-void *empty_zero_page;
-EXPORT_SYMBOL(empty_zero_page);
-
-extern void init_pointer_table(unsigned long ptable);
-
-/* References to section boundaries */
-
-extern pmd_t *zero_pgtable;
-
-void __init mem_init(void)
-{
-	pg_data_t *pgdat;
-	int codepages = 0;
-	int datapages = 0;
-	int initpages = 0;
-	int i;
-
-#ifdef CONFIG_ATARI
-	if (MACH_IS_ATARI)
-		atari_stram_mem_init_hook();
-#endif
-
-	/* this will put all memory onto the freelists */
-	totalram_pages = num_physpages = 0;
-	for_each_online_pgdat(pgdat) {
-		num_physpages += pgdat->node_present_pages;
-
-		totalram_pages += free_all_bootmem_node(pgdat);
-		for (i = 0; i < pgdat->node_spanned_pages; i++) {
-			struct page *page = pgdat->node_mem_map + i;
-			char *addr = page_to_virt(page);
-
-			if (!PageReserved(page))
-				continue;
-			if (addr >= _text &&
-			    addr < _etext)
-				codepages++;
-			else if (addr >= __init_begin &&
-				 addr < __init_end)
-				initpages++;
-			else
-				datapages++;
-		}
-	}
-
-#ifndef CONFIG_SUN3
-	/* insert pointer tables allocated so far into the tablelist */
-	init_pointer_table((unsigned long)kernel_pg_dir);
-	for (i = 0; i < PTRS_PER_PGD; i++) {
-		if (pgd_present(kernel_pg_dir[i]))
-			init_pointer_table(__pgd_page(kernel_pg_dir[i]));
-	}
-
-	/* insert also pointer table that we used to unmap the zero page */
-	if (zero_pgtable)
-		init_pointer_table((unsigned long)zero_pgtable);
-#endif
-
-	printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
-	       nr_free_pages() << (PAGE_SHIFT-10),
-	       totalram_pages << (PAGE_SHIFT-10),
-	       codepages << (PAGE_SHIFT-10),
-	       datapages << (PAGE_SHIFT-10),
-	       initpages << (PAGE_SHIFT-10));
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	int pages = 0;
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-		pages++;
-	}
-	printk ("Freeing initrd memory: %dk freed\n", pages);
-}
-#endif
+#ifdef CONFIG_MMU
+#include "init_mm.c"
+#else
+#include "init_no.c"
+#endif
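
The wrapper left behind in init.c is a common kernel idiom: instead of teaching
the build system about two file variants, the stub #includes one of two
complete .c implementations, so both configurations produce the same object
name (init.o) with no new Makefile rules. A minimal standalone sketch of the
same technique, with hypothetical file and macro names (not from this patch):

	/* greet.c - thin dispatch unit; build with e.g. "cc -DUSE_FANCY -c greet.c".
	 * greet_fancy.c and greet_plain.c are hypothetical sibling files, each
	 * defining the full void greet(void). Including .c files keeps a single
	 * translation unit, so the two variants can never collide at link time. */
	#ifdef USE_FANCY
	#include "greet_fancy.c"
	#else
	#include "greet_plain.c"
	#endif
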
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
new file mode 100644
index 000000000000..8bc842554e5b
--- /dev/null
+++ b/arch/m68k/mm/init_mm.c
@@ -0,0 +1,150 @@
+/*
+ * linux/arch/m68k/mm/init.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+ *
+ * Contains common initialization routines, specific init code moved
+ * to motorola.c and sun3mmu.c
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#ifdef CONFIG_ATARI
+#include <asm/atari_stram.h>
+#endif
+#include <asm/sections.h>
+#include <asm/tlb.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+pg_data_t pg_data_map[MAX_NUMNODES];
+EXPORT_SYMBOL(pg_data_map);
+
+int m68k_virt_to_node_shift;
+
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+pg_data_t *pg_data_table[65];
+EXPORT_SYMBOL(pg_data_table);
+#endif
+
+void __init m68k_setup_node(int node)
+{
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+	struct mem_info *info = m68k_memory + node;
+	int i, end;
+
+	i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
+	end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
+	for (; i <= end; i++) {
+		if (pg_data_table[i])
+			printk("overlap at %u for chunk %u\n", i, node);
+		pg_data_table[i] = pg_data_map + node;
+	}
+#endif
+	pg_data_map[node].bdata = bootmem_node_data + node;
+	node_set_online(node);
+}
+
+
+/*
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+
+void *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+extern void init_pointer_table(unsigned long ptable);
+
+/* References to section boundaries */
+
+extern pmd_t *zero_pgtable;
+
+void __init mem_init(void)
+{
+	pg_data_t *pgdat;
+	int codepages = 0;
+	int datapages = 0;
+	int initpages = 0;
+	int i;
+
+#ifdef CONFIG_ATARI
+	if (MACH_IS_ATARI)
+		atari_stram_mem_init_hook();
+#endif
+
+	/* this will put all memory onto the freelists */
+	totalram_pages = num_physpages = 0;
+	for_each_online_pgdat(pgdat) {
+		num_physpages += pgdat->node_present_pages;
+
+		totalram_pages += free_all_bootmem_node(pgdat);
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			struct page *page = pgdat->node_mem_map + i;
+			char *addr = page_to_virt(page);
+
+			if (!PageReserved(page))
+				continue;
+			if (addr >= _text &&
+			    addr < _etext)
+				codepages++;
+			else if (addr >= __init_begin &&
+				 addr < __init_end)
+				initpages++;
+			else
+				datapages++;
+		}
+	}
+
+#ifndef CONFIG_SUN3
+	/* insert pointer tables allocated so far into the tablelist */
+	init_pointer_table((unsigned long)kernel_pg_dir);
+	for (i = 0; i < PTRS_PER_PGD; i++) {
+		if (pgd_present(kernel_pg_dir[i]))
+			init_pointer_table(__pgd_page(kernel_pg_dir[i]));
+	}
+
+	/* insert also pointer table that we used to unmap the zero page */
+	if (zero_pgtable)
+		init_pointer_table((unsigned long)zero_pgtable);
+#endif
+
+	printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
+	       nr_free_pages() << (PAGE_SHIFT-10),
+	       totalram_pages << (PAGE_SHIFT-10),
+	       codepages << (PAGE_SHIFT-10),
+	       datapages << (PAGE_SHIFT-10),
+	       initpages << (PAGE_SHIFT-10));
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	int pages = 0;
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		init_page_count(virt_to_page(start));
+		free_page(start);
+		totalram_pages++;
+		pages++;
+	}
+	printk ("Freeing initrd memory: %dk freed\n", pages);
+}
+#endif
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
new file mode 100644
index 000000000000..8a6653f56bd8
--- /dev/null
+++ b/arch/m68k/mm/init_no.c
@@ -0,0 +1,193 @@
+/*
+ * linux/arch/m68knommu/mm/init.c
+ *
+ * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
+ *                    Kenneth Albanowski <kjahds@kjahds.com>,
+ * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
+ *
+ * Based on:
+ *
+ * linux/arch/m68k/mm/init.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+ *
+ * JAN/1999 -- hacked to support ColdFire (gerg@snapgear.com)
+ * DEC/2000 -- linux 2.4 support <davidm@snapgear.com>
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+
+#undef DEBUG
+
+extern void die_if_kernel(char *,struct pt_regs *,long);
+extern void free_initmem(void);
+
+/*
+ * BAD_PAGE is the page that is used for page faults when linux
+ * is out-of-memory. Older versions of linux just did a
+ * do_exit(), but using this instead means there is less risk
+ * for a process dying in kernel mode, possibly leaving a inode
+ * unused etc..
+ *
+ * BAD_PAGETABLE is the accompanying page-table: it is initialized
+ * to point to BAD_PAGE entries.
+ *
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+static unsigned long empty_bad_page_table;
+
+static unsigned long empty_bad_page;
+
+unsigned long empty_zero_page;
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ * The parameters are pointers to where to stick the starting and ending
+ * addresses of available kernel virtual memory.
+ */
+void __init paging_init(void)
+{
+	/*
+	 * Make sure start_mem is page aligned, otherwise bootmem and
+	 * page_alloc get different views of the world.
+	 */
+#ifdef DEBUG
+	unsigned long start_mem = PAGE_ALIGN(memory_start);
+#endif
+	unsigned long end_mem = memory_end & PAGE_MASK;
+
+#ifdef DEBUG
+	printk (KERN_DEBUG "start_mem is %#lx\nvirtual_end is %#lx\n",
+		start_mem, end_mem);
+#endif
+
+	/*
+	 * Initialize the bad page table and bad page to point
+	 * to a couple of allocated pages.
+	 */
+	empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+	empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+	memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+	/*
+	 * Set up SFC/DFC registers (user data space).
+	 */
+	set_fs (USER_DS);
+
+#ifdef DEBUG
+	printk (KERN_DEBUG "before free_area_init\n");
+
+	printk (KERN_DEBUG "free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
+		start_mem, end_mem);
+#endif
+
+	{
+		unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+		zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
+		free_area_init(zones_size);
+	}
+}
+
+void __init mem_init(void)
+{
+	int codek = 0, datak = 0, initk = 0;
+	unsigned long tmp;
+	extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end;
+	extern unsigned int _ramend, _rambase;
+	unsigned long len = _ramend - _rambase;
+	unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
+	unsigned long end_mem = memory_end; /* DAVIDM - this must not include kernel stack at top */
+
+	pr_debug("Mem_init: start=%lx, end=%lx\n", start_mem, end_mem);
+
+	end_mem &= PAGE_MASK;
+	high_memory = (void *) end_mem;
+
+	start_mem = PAGE_ALIGN(start_mem);
+	max_mapnr = num_physpages = (((unsigned long) high_memory) - PAGE_OFFSET) >> PAGE_SHIFT;
+
+	/* this will put all memory onto the freelists */
+	totalram_pages = free_all_bootmem();
+
+	codek = (&_etext - &_stext) >> 10;
+	datak = (&_ebss - &_sdata) >> 10;
+	initk = (&__init_begin - &__init_end) >> 10;
+
+	tmp = nr_free_pages() << PAGE_SHIFT;
+	printk(KERN_INFO "Memory available: %luk/%luk RAM, (%dk kernel code, %dk data)\n",
+		tmp >> 10,
+		len >> 10,
+		codek,
+		datak
+		);
+}
+
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	int pages = 0;
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		init_page_count(virt_to_page(start));
+		free_page(start);
+		totalram_pages++;
+		pages++;
+	}
+	printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024));
+}
+#endif
+
+void
+free_initmem()
+{
+#ifdef CONFIG_RAMKERNEL
+	unsigned long addr;
+	extern char __init_begin, __init_end;
+	/*
+	 * The following code should be cool even if these sections
+	 * are not page aligned.
+	 */
+	addr = PAGE_ALIGN((unsigned long)(&__init_begin));
+	/* next to check that the page we free is not a partial page */
+	for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(addr));
+		init_page_count(virt_to_page(addr));
+		free_page(addr);
+		totalram_pages++;
+	}
+	printk(KERN_NOTICE "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
+			(addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
+			(int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
+			(int)(addr - PAGE_SIZE));
+#endif
+}
+
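
Both free_initrd_mem() and free_initmem() in init_no.c use the same per-page
idiom of this kernel era for handing boot-reserved pages to the page
allocator: clear PG_reserved, reset the refcount, free the page, and fix up
the accounting. (Note in passing that mem_init() above computes initk from
&__init_begin - &__init_end, a negative value that is never printed; the
quirk is carried over verbatim from the pre-merge m68knommu file.) A sketch
of the shared idiom against the old interfaces seen above, with a
hypothetical helper name; later kernels fold this into free_reserved_area():

	/* Hand a physically contiguous, boot-reserved virtual range back to
	 * the page allocator; returns the number of pages freed. */
	static unsigned long free_boot_range(unsigned long start, unsigned long end)
	{
		unsigned long pages = 0;

		for (start = PAGE_ALIGN(start); start + PAGE_SIZE <= end;
		     start += PAGE_SIZE) {
			ClearPageReserved(virt_to_page(start)); /* allow freeing */
			init_page_count(virt_to_page(start));   /* refcount back to 1 */
			free_page(start);                       /* into the buddy lists */
			totalram_pages++;                       /* fix up accounting */
			pages++;
		}
		return pages;
	}
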
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 69345849454b..a373d136b2b2 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -1,367 +1,5 @@
-/*
- * linux/arch/m68k/mm/kmap.c
- *
- * Copyright (C) 1997 Roman Hodek
- *
- * 10/01/99 cleaned up the code and changing to the same interface
- *	    used by other architectures /Roman Zippel
- */
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-#undef DEBUG
-
-#define PTRTREESIZE	(256*1024)
-
-/*
- * For 040/060 we can use the virtual memory area like other architectures,
- * but for 020/030 we want to use early termination page descriptor and we
- * can't mix this with normal page descriptors, so we have to copy that code
- * (mm/vmalloc.c) and return appriorate aligned addresses.
- */
-
-#ifdef CPU_M68040_OR_M68060_ONLY
-
-#define IO_SIZE		PAGE_SIZE
-
-static inline struct vm_struct *get_io_area(unsigned long size)
-{
-	return get_vm_area(size, VM_IOREMAP);
-}
-
-
-static inline void free_io_area(void *addr)
-{
-	vfree((void *)(PAGE_MASK & (unsigned long)addr));
-}
-
-#else
-
-#define IO_SIZE		(256*1024)
-
-static struct vm_struct *iolist;
-
-static struct vm_struct *get_io_area(unsigned long size)
-{
-	unsigned long addr;
-	struct vm_struct **p, *tmp, *area;
-
-	area = kmalloc(sizeof(*area), GFP_KERNEL);
-	if (!area)
-		return NULL;
-	addr = KMAP_START;
-	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
-		if (size + addr < (unsigned long)tmp->addr)
-			break;
-		if (addr > KMAP_END-size) {
-			kfree(area);
-			return NULL;
-		}
-		addr = tmp->size + (unsigned long)tmp->addr;
-	}
-	area->addr = (void *)addr;
-	area->size = size + IO_SIZE;
-	area->next = *p;
-	*p = area;
-	return area;
-}
-
-static inline void free_io_area(void *addr)
-{
-	struct vm_struct **p, *tmp;
-
-	if (!addr)
-		return;
-	addr = (void *)((unsigned long)addr & -IO_SIZE);
-	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
-		if (tmp->addr == addr) {
-			*p = tmp->next;
-			__iounmap(tmp->addr, tmp->size);
-			kfree(tmp);
-			return;
-		}
-	}
-}
-
-#endif
-
-/*
- * Map some physical address range into the kernel address space.
- */
-/* Rewritten by Andreas Schwab to remove all races. */
-
-void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
-{
-	struct vm_struct *area;
-	unsigned long virtaddr, retaddr;
-	long offset;
-	pgd_t *pgd_dir;
-	pmd_t *pmd_dir;
-	pte_t *pte_dir;
-
-	/*
-	 * Don't allow mappings that wrap..
-	 */
-	if (!size || physaddr > (unsigned long)(-size))
-		return NULL;
-
-#ifdef CONFIG_AMIGA
-	if (MACH_IS_AMIGA) {
-		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
-		    && (cacheflag == IOMAP_NOCACHE_SER))
-			return (void __iomem *)physaddr;
-	}
-#endif
-
-#ifdef DEBUG
-	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
-#endif
-	/*
-	 * Mappings have to be aligned
-	 */
-	offset = physaddr & (IO_SIZE - 1);
-	physaddr &= -IO_SIZE;
-	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
-
-	/*
-	 * Ok, go for it..
-	 */
-	area = get_io_area(size);
-	if (!area)
-		return NULL;
-
-	virtaddr = (unsigned long)area->addr;
-	retaddr = virtaddr + offset;
-#ifdef DEBUG
-	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
-#endif
-
-	/*
-	 * add cache and table flags to physical address
-	 */
-	if (CPU_IS_040_OR_060) {
-		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
-			     _PAGE_ACCESSED | _PAGE_DIRTY);
-		switch (cacheflag) {
-		case IOMAP_FULL_CACHING:
-			physaddr |= _PAGE_CACHE040;
-			break;
-		case IOMAP_NOCACHE_SER:
-		default:
-			physaddr |= _PAGE_NOCACHE_S;
-			break;
-		case IOMAP_NOCACHE_NONSER:
-			physaddr |= _PAGE_NOCACHE;
-			break;
-		case IOMAP_WRITETHROUGH:
-			physaddr |= _PAGE_CACHE040W;
-			break;
-		}
-	} else {
-		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
-		switch (cacheflag) {
-		case IOMAP_NOCACHE_SER:
-		case IOMAP_NOCACHE_NONSER:
-		default:
-			physaddr |= _PAGE_NOCACHE030;
-			break;
-		case IOMAP_FULL_CACHING:
-		case IOMAP_WRITETHROUGH:
-			break;
-		}
-	}
-
-	while ((long)size > 0) {
-#ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
-			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
-#endif
-		pgd_dir = pgd_offset_k(virtaddr);
-		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
-		if (!pmd_dir) {
-			printk("ioremap: no mem for pmd_dir\n");
-			return NULL;
-		}
-
-		if (CPU_IS_020_OR_030) {
-			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-			physaddr += PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
-			size -= PTRTREESIZE;
-		} else {
-			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
-			if (!pte_dir) {
-				printk("ioremap: no mem for pte_dir\n");
-				return NULL;
-			}
-
-			pte_val(*pte_dir) = physaddr;
-			virtaddr += PAGE_SIZE;
-			physaddr += PAGE_SIZE;
-			size -= PAGE_SIZE;
-		}
-	}
-#ifdef DEBUG
-	printk("\n");
-#endif
-	flush_tlb_all();
-
-	return (void __iomem *)retaddr;
-}
-EXPORT_SYMBOL(__ioremap);
-
-/*
- * Unmap a ioremap()ed region again
- */
-void iounmap(void __iomem *addr)
-{
-#ifdef CONFIG_AMIGA
-	if ((!MACH_IS_AMIGA) ||
-	    (((unsigned long)addr < 0x40000000) ||
-	     ((unsigned long)addr > 0x60000000)))
-			free_io_area((__force void *)addr);
-#else
-	free_io_area((__force void *)addr);
-#endif
-}
-EXPORT_SYMBOL(iounmap);
-
-/*
- * __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
- */
-void __iounmap(void *addr, unsigned long size)
-{
-	unsigned long virtaddr = (unsigned long)addr;
-	pgd_t *pgd_dir;
-	pmd_t *pmd_dir;
-	pte_t *pte_dir;
-
-	while ((long)size > 0) {
-		pgd_dir = pgd_offset_k(virtaddr);
-		if (pgd_bad(*pgd_dir)) {
-			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
-			pgd_clear(pgd_dir);
-			return;
-		}
-		pmd_dir = pmd_offset(pgd_dir, virtaddr);
-
-		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
-			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
-
-			if (pmd_type == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = 0;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
-				continue;
-			} else if (pmd_type == 0)
-				continue;
-		}
-
-		if (pmd_bad(*pmd_dir)) {
-			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
-			pmd_clear(pmd_dir);
-			return;
-		}
-		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
-
-		pte_val(*pte_dir) = 0;
-		virtaddr += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	}
-
-	flush_tlb_all();
-}
-
-/*
- * Set new cache mode for some kernel address space.
- * The caller must push data for that range itself, if such data may already
- * be in the cache.
- */
-void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
-{
-	unsigned long virtaddr = (unsigned long)addr;
-	pgd_t *pgd_dir;
-	pmd_t *pmd_dir;
-	pte_t *pte_dir;
-
-	if (CPU_IS_040_OR_060) {
-		switch (cmode) {
-		case IOMAP_FULL_CACHING:
-			cmode = _PAGE_CACHE040;
-			break;
-		case IOMAP_NOCACHE_SER:
-		default:
-			cmode = _PAGE_NOCACHE_S;
-			break;
-		case IOMAP_NOCACHE_NONSER:
-			cmode = _PAGE_NOCACHE;
-			break;
-		case IOMAP_WRITETHROUGH:
-			cmode = _PAGE_CACHE040W;
-			break;
-		}
-	} else {
-		switch (cmode) {
-		case IOMAP_NOCACHE_SER:
-		case IOMAP_NOCACHE_NONSER:
-		default:
-			cmode = _PAGE_NOCACHE030;
-			break;
-		case IOMAP_FULL_CACHING:
-		case IOMAP_WRITETHROUGH:
-			cmode = 0;
-		}
-	}
-
-	while ((long)size > 0) {
-		pgd_dir = pgd_offset_k(virtaddr);
-		if (pgd_bad(*pgd_dir)) {
-			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
-			pgd_clear(pgd_dir);
-			return;
-		}
-		pmd_dir = pmd_offset(pgd_dir, virtaddr);
-
-		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
-
-			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
-							 _CACHEMASK040) | cmode;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
-				continue;
-			}
-		}
-
-		if (pmd_bad(*pmd_dir)) {
-			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
-			pmd_clear(pmd_dir);
-			return;
-		}
-		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
-
-		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
-		virtaddr += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	}
-
-	flush_tlb_all();
-}
-EXPORT_SYMBOL(kernel_set_cachemode);
+#ifdef CONFIG_MMU
+#include "kmap_mm.c"
+#else
+#include "kmap_no.c"
+#endif
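
On 020/030 each pmd slot holds an early-termination descriptor that maps
PTRTREESIZE (256 KiB) in one go, so __ioremap() above rounds every request to
IO_SIZE chunks before allocating a region: the low bits of the physical
address become an offset, the start is rounded down, and the length is
rounded up. A small standalone illustration of that arithmetic, with
illustrative values (note that for a power-of-two IO_SIZE, "x & -IO_SIZE"
is the same mask as "x & ~(IO_SIZE - 1)"):

	/* Worked example of the alignment arithmetic in __ioremap(),
	 * using the 020/030 chunk size of 256 KiB. */
	#include <stdio.h>

	#define IO_SIZE (256 * 1024UL)

	int main(void)
	{
		unsigned long physaddr = 0x40010000UL;           /* requested start */
		unsigned long size     = 0x00030000UL;           /* requested length */

		unsigned long offset = physaddr & (IO_SIZE - 1); /* 0x10000 into chunk */
		physaddr &= -IO_SIZE;                            /* round down: 0x40000000 */
		size = (size + offset + IO_SIZE - 1) & -IO_SIZE; /* round up: 0x40000 */

		/* The mapping covers [0x40000000, 0x40040000); the caller gets
		 * the chunk-aligned virtual base plus the preserved offset. */
		printf("map %#lx..%#lx, return base+%#lx\n",
		       physaddr, physaddr + size, offset);
		return 0;
	}
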
diff --git a/arch/m68k/mm/kmap_mm.c b/arch/m68k/mm/kmap_mm.c
new file mode 100644
index 000000000000..69345849454b
--- /dev/null
+++ b/arch/m68k/mm/kmap_mm.c
@@ -0,0 +1,367 @@
+/*
+ * linux/arch/m68k/mm/kmap.c
+ *
+ * Copyright (C) 1997 Roman Hodek
+ *
+ * 10/01/99 cleaned up the code and changing to the same interface
+ *	    used by other architectures /Roman Zippel
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#undef DEBUG
+
+#define PTRTREESIZE	(256*1024)
+
+/*
+ * For 040/060 we can use the virtual memory area like other architectures,
+ * but for 020/030 we want to use early termination page descriptor and we
+ * can't mix this with normal page descriptors, so we have to copy that code
+ * (mm/vmalloc.c) and return appriorate aligned addresses.
+ */
+
+#ifdef CPU_M68040_OR_M68060_ONLY
+
+#define IO_SIZE		PAGE_SIZE
+
+static inline struct vm_struct *get_io_area(unsigned long size)
+{
+	return get_vm_area(size, VM_IOREMAP);
+}
+
+
+static inline void free_io_area(void *addr)
+{
+	vfree((void *)(PAGE_MASK & (unsigned long)addr));
+}
+
+#else
+
+#define IO_SIZE		(256*1024)
+
+static struct vm_struct *iolist;
+
+static struct vm_struct *get_io_area(unsigned long size)
+{
+	unsigned long addr;
+	struct vm_struct **p, *tmp, *area;
+
+	area = kmalloc(sizeof(*area), GFP_KERNEL);
+	if (!area)
+		return NULL;
+	addr = KMAP_START;
+	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
+		if (size + addr < (unsigned long)tmp->addr)
+			break;
+		if (addr > KMAP_END-size) {
+			kfree(area);
+			return NULL;
+		}
+		addr = tmp->size + (unsigned long)tmp->addr;
+	}
+	area->addr = (void *)addr;
+	area->size = size + IO_SIZE;
+	area->next = *p;
+	*p = area;
+	return area;
+}
+
+static inline void free_io_area(void *addr)
+{
+	struct vm_struct **p, *tmp;
+
+	if (!addr)
+		return;
+	addr = (void *)((unsigned long)addr & -IO_SIZE);
+	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
+		if (tmp->addr == addr) {
+			*p = tmp->next;
+			__iounmap(tmp->addr, tmp->size);
+			kfree(tmp);
+			return;
+		}
+	}
+}
+
+#endif
+
+/*
+ * Map some physical address range into the kernel address space.
+ */
+/* Rewritten by Andreas Schwab to remove all races. */
+
+void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+{
+	struct vm_struct *area;
+	unsigned long virtaddr, retaddr;
+	long offset;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	/*
+	 * Don't allow mappings that wrap..
+	 */
+	if (!size || physaddr > (unsigned long)(-size))
+		return NULL;
+
+#ifdef CONFIG_AMIGA
+	if (MACH_IS_AMIGA) {
+		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
+		    && (cacheflag == IOMAP_NOCACHE_SER))
+			return (void __iomem *)physaddr;
+	}
+#endif
+
+#ifdef DEBUG
+	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
+#endif
+	/*
+	 * Mappings have to be aligned
+	 */
+	offset = physaddr & (IO_SIZE - 1);
+	physaddr &= -IO_SIZE;
+	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
+
+	/*
+	 * Ok, go for it..
+	 */
+	area = get_io_area(size);
+	if (!area)
+		return NULL;
+
+	virtaddr = (unsigned long)area->addr;
+	retaddr = virtaddr + offset;
+#ifdef DEBUG
+	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
+#endif
+
+	/*
+	 * add cache and table flags to physical address
+	 */
+	if (CPU_IS_040_OR_060) {
+		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
+			     _PAGE_ACCESSED | _PAGE_DIRTY);
+		switch (cacheflag) {
+		case IOMAP_FULL_CACHING:
+			physaddr |= _PAGE_CACHE040;
+			break;
+		case IOMAP_NOCACHE_SER:
+		default:
+			physaddr |= _PAGE_NOCACHE_S;
+			break;
+		case IOMAP_NOCACHE_NONSER:
+			physaddr |= _PAGE_NOCACHE;
+			break;
+		case IOMAP_WRITETHROUGH:
+			physaddr |= _PAGE_CACHE040W;
+			break;
+		}
+	} else {
+		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+		switch (cacheflag) {
+		case IOMAP_NOCACHE_SER:
+		case IOMAP_NOCACHE_NONSER:
+		default:
+			physaddr |= _PAGE_NOCACHE030;
+			break;
+		case IOMAP_FULL_CACHING:
+		case IOMAP_WRITETHROUGH:
+			break;
+		}
+	}
+
+	while ((long)size > 0) {
+#ifdef DEBUG
+		if (!(virtaddr & (PTRTREESIZE-1)))
+			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
+#endif
+		pgd_dir = pgd_offset_k(virtaddr);
+		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
+		if (!pmd_dir) {
+			printk("ioremap: no mem for pmd_dir\n");
+			return NULL;
+		}
+
+		if (CPU_IS_020_OR_030) {
+			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
+			physaddr += PTRTREESIZE;
+			virtaddr += PTRTREESIZE;
+			size -= PTRTREESIZE;
+		} else {
+			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
+			if (!pte_dir) {
+				printk("ioremap: no mem for pte_dir\n");
+				return NULL;
+			}
+
+			pte_val(*pte_dir) = physaddr;
+			virtaddr += PAGE_SIZE;
+			physaddr += PAGE_SIZE;
+			size -= PAGE_SIZE;
+		}
+	}
+#ifdef DEBUG
+	printk("\n");
+#endif
+	flush_tlb_all();
+
+	return (void __iomem *)retaddr;
+}
+EXPORT_SYMBOL(__ioremap);
+
+/*
+ * Unmap a ioremap()ed region again
+ */
+void iounmap(void __iomem *addr)
+{
+#ifdef CONFIG_AMIGA
+	if ((!MACH_IS_AMIGA) ||
+	    (((unsigned long)addr < 0x40000000) ||
+	     ((unsigned long)addr > 0x60000000)))
+			free_io_area((__force void *)addr);
+#else
+	free_io_area((__force void *)addr);
+#endif
+}
+EXPORT_SYMBOL(iounmap);
+
+/*
+ * __iounmap unmaps nearly everything, so be careful
+ * it doesn't free currently pointer/page tables anymore but it
+ * wans't used anyway and might be added later.
+ */
+void __iounmap(void *addr, unsigned long size)
+{
+	unsigned long virtaddr = (unsigned long)addr;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	while ((long)size > 0) {
+		pgd_dir = pgd_offset_k(virtaddr);
+		if (pgd_bad(*pgd_dir)) {
+			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
+			pgd_clear(pgd_dir);
+			return;
+		}
+		pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+		if (CPU_IS_020_OR_030) {
+			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
+
+			if (pmd_type == _PAGE_PRESENT) {
+				pmd_dir->pmd[pmd_off] = 0;
+				virtaddr += PTRTREESIZE;
+				size -= PTRTREESIZE;
+				continue;
+			} else if (pmd_type == 0)
+				continue;
+		}
+
+		if (pmd_bad(*pmd_dir)) {
+			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
+			pmd_clear(pmd_dir);
+			return;
+		}
+		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
+
+		pte_val(*pte_dir) = 0;
+		virtaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	flush_tlb_all();
+}
+
+/*
+ * Set new cache mode for some kernel address space.
+ * The caller must push data for that range itself, if such data may already
+ * be in the cache.
+ */
+void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
+{
+	unsigned long virtaddr = (unsigned long)addr;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	if (CPU_IS_040_OR_060) {
+		switch (cmode) {
+		case IOMAP_FULL_CACHING:
+			cmode = _PAGE_CACHE040;
+			break;
+		case IOMAP_NOCACHE_SER:
+		default:
+			cmode = _PAGE_NOCACHE_S;
+			break;
+		case IOMAP_NOCACHE_NONSER:
+			cmode = _PAGE_NOCACHE;
+			break;
+		case IOMAP_WRITETHROUGH:
+			cmode = _PAGE_CACHE040W;
+			break;
+		}
+	} else {
+		switch (cmode) {
+		case IOMAP_NOCACHE_SER:
+		case IOMAP_NOCACHE_NONSER:
+		default:
+			cmode = _PAGE_NOCACHE030;
+			break;
+		case IOMAP_FULL_CACHING:
+		case IOMAP_WRITETHROUGH:
+			cmode = 0;
+		}
+	}
+
+	while ((long)size > 0) {
+		pgd_dir = pgd_offset_k(virtaddr);
+		if (pgd_bad(*pgd_dir)) {
+			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
+			pgd_clear(pgd_dir);
+			return;
+		}
+		pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+		if (CPU_IS_020_OR_030) {
+			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+
+			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
+							 _CACHEMASK040) | cmode;
+				virtaddr += PTRTREESIZE;
+				size -= PTRTREESIZE;
+				continue;
+			}
+		}
+
+		if (pmd_bad(*pmd_dir)) {
+			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
+			pmd_clear(pmd_dir);
+			return;
+		}
+		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
+
+		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
+		virtaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	flush_tlb_all();
+}
+EXPORT_SYMBOL(kernel_set_cachemode);
diff --git a/arch/m68k/mm/kmap_no.c b/arch/m68k/mm/kmap_no.c
new file mode 100644
index 000000000000..ece8d5ad4e6c
--- /dev/null
+++ b/arch/m68k/mm/kmap_no.c
@@ -0,0 +1,45 @@
+/*
+ * linux/arch/m68knommu/mm/kmap.c
+ *
+ * Copyright (C) 2000 Lineo, <davidm@snapgear.com>
+ * Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#undef DEBUG
+
+/*
+ * Map some physical address range into the kernel address space.
+ */
+void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+{
+	return (void *)physaddr;
+}
+
+/*
+ * Unmap a ioremap()ed region again.
+ */
+void iounmap(void *addr)
+{
+}
+
+/*
+ * Set new cache mode for some kernel address space.
+ * The caller must push data for that range itself, if such data may already
+ * be in the cache.
+ */
+void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
+{
+}
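
Without an MMU there is no page-table indirection to set up, so the three
entry points in kmap_no.c collapse to an identity map and two no-ops, and
driver code written against the ioremap() interface runs unchanged. A sketch
of what a driver's round-trip reduces to, given the definitions above (the
device base address is hypothetical; the cache flag is accepted but ignored):

	static void probe_device(void)
	{
		void *regs = __ioremap(0xfffe0000UL, 0x100, 0); /* cacheflag unused */
		/* regs == (void *)0xfffe0000: accesses go straight to the bus */
		iounmap(regs);          /* empty function: nothing to tear down */
	}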