aboutsummaryrefslogtreecommitdiffstats
path: root/arch/m68k/mm
diff options
context:
space:
mode:
Diffstat (limited to 'arch/m68k/mm')
-rw-r--r--arch/m68k/mm/Makefile8
-rw-r--r--arch/m68k/mm/cache.c24
-rw-r--r--arch/m68k/mm/init_mm.c36
-rw-r--r--arch/m68k/mm/kmap.c3
-rw-r--r--arch/m68k/mm/mcfmmu.c198
-rw-r--r--arch/m68k/mm/memory.c8
6 files changed, 267 insertions, 10 deletions
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index 09cadf1058d5..cfbf3205724a 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -4,6 +4,8 @@
4 4
5obj-y := init.o 5obj-y := init.o
6 6
7obj-$(CONFIG_MMU) += cache.o fault.o hwtest.o 7obj-$(CONFIG_MMU) += cache.o fault.o
8obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o 8obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o hwtest.o
9obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o 9obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o hwtest.o
10obj-$(CONFIG_MMU_COLDFIRE) += kmap.o memory.o mcfmmu.o
11
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index 5437fff5fe07..95d0bf66e2e2 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -74,8 +74,16 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
74/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */ 74/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
75void flush_icache_range(unsigned long address, unsigned long endaddr) 75void flush_icache_range(unsigned long address, unsigned long endaddr)
76{ 76{
77 77 if (CPU_IS_COLDFIRE) {
78 if (CPU_IS_040_OR_060) { 78 unsigned long start, end;
79 start = address & ICACHE_SET_MASK;
80 end = endaddr & ICACHE_SET_MASK;
81 if (start > end) {
82 flush_cf_icache(0, end);
83 end = ICACHE_MAX_ADDR;
84 }
85 flush_cf_icache(start, end);
86 } else if (CPU_IS_040_OR_060) {
79 address &= PAGE_MASK; 87 address &= PAGE_MASK;
80 88
81 do { 89 do {
@@ -100,7 +108,17 @@ EXPORT_SYMBOL(flush_icache_range);
100void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, 108void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
101 unsigned long addr, int len) 109 unsigned long addr, int len)
102{ 110{
103 if (CPU_IS_040_OR_060) { 111 if (CPU_IS_COLDFIRE) {
112 unsigned long start, end;
113 start = addr & ICACHE_SET_MASK;
114 end = (addr + len) & ICACHE_SET_MASK;
115 if (start > end) {
116 flush_cf_icache(0, end);
117 end = ICACHE_MAX_ADDR;
118 }
119 flush_cf_icache(start, end);
120
121 } else if (CPU_IS_040_OR_060) {
104 asm volatile ("nop\n\t" 122 asm volatile ("nop\n\t"
105 ".chip 68040\n\t" 123 ".chip 68040\n\t"
106 "cpushp %%bc,(%0)\n\t" 124 "cpushp %%bc,(%0)\n\t"
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index bbe525434ccb..89f3b203814b 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -24,6 +24,7 @@
24#include <asm/page.h> 24#include <asm/page.h>
25#include <asm/pgalloc.h> 25#include <asm/pgalloc.h>
26#include <asm/system.h> 26#include <asm/system.h>
27#include <asm/traps.h>
27#include <asm/machdep.h> 28#include <asm/machdep.h>
28#include <asm/io.h> 29#include <asm/io.h>
29#ifdef CONFIG_ATARI 30#ifdef CONFIG_ATARI
@@ -75,6 +76,38 @@ extern void init_pointer_table(unsigned long ptable);
75 76
76extern pmd_t *zero_pgtable; 77extern pmd_t *zero_pgtable;
77 78
79#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
80#define VECTORS &vectors[0]
81#else
82#define VECTORS _ramvec
83#endif
84
85void __init print_memmap(void)
86{
87#define UL(x) ((unsigned long) (x))
88#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
89#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
90#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)
91
92 pr_notice("Virtual kernel memory layout:\n"
93 " vector : 0x%08lx - 0x%08lx (%4ld KiB)\n"
94 " kmap : 0x%08lx - 0x%08lx (%4ld MiB)\n"
95 " vmalloc : 0x%08lx - 0x%08lx (%4ld MiB)\n"
96 " lowmem : 0x%08lx - 0x%08lx (%4ld MiB)\n"
97 " .init : 0x%p" " - 0x%p" " (%4d KiB)\n"
98 " .text : 0x%p" " - 0x%p" " (%4d KiB)\n"
99 " .data : 0x%p" " - 0x%p" " (%4d KiB)\n"
100 " .bss : 0x%p" " - 0x%p" " (%4d KiB)\n",
101 MLK(VECTORS, VECTORS + 256),
102 MLM(KMAP_START, KMAP_END),
103 MLM(VMALLOC_START, VMALLOC_END),
104 MLM(PAGE_OFFSET, (unsigned long)high_memory),
105 MLK_ROUNDUP(__init_begin, __init_end),
106 MLK_ROUNDUP(_stext, _etext),
107 MLK_ROUNDUP(_sdata, _edata),
108 MLK_ROUNDUP(_sbss, _ebss));
109}
110
78void __init mem_init(void) 111void __init mem_init(void)
79{ 112{
80 pg_data_t *pgdat; 113 pg_data_t *pgdat;
@@ -106,7 +139,7 @@ void __init mem_init(void)
106 } 139 }
107 } 140 }
108 141
109#ifndef CONFIG_SUN3 142#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
110 /* insert pointer tables allocated so far into the tablelist */ 143 /* insert pointer tables allocated so far into the tablelist */
111 init_pointer_table((unsigned long)kernel_pg_dir); 144 init_pointer_table((unsigned long)kernel_pg_dir);
112 for (i = 0; i < PTRS_PER_PGD; i++) { 145 for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -125,6 +158,7 @@ void __init mem_init(void)
125 codepages << (PAGE_SHIFT-10), 158 codepages << (PAGE_SHIFT-10),
126 datapages << (PAGE_SHIFT-10), 159 datapages << (PAGE_SHIFT-10),
127 initpages << (PAGE_SHIFT-10)); 160 initpages << (PAGE_SHIFT-10));
161 print_memmap();
128} 162}
129 163
130#ifdef CONFIG_BLK_DEV_INITRD 164#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 69345849454b..1cc2bed4c3dd 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -171,7 +171,8 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
171 break; 171 break;
172 } 172 }
173 } else { 173 } else {
174 physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY); 174 physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
175 _PAGE_DIRTY | _PAGE_READWRITE);
175 switch (cacheflag) { 176 switch (cacheflag) {
176 case IOMAP_NOCACHE_SER: 177 case IOMAP_NOCACHE_SER:
177 case IOMAP_NOCACHE_NONSER: 178 case IOMAP_NOCACHE_NONSER:
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
new file mode 100644
index 000000000000..babd5a97cdcb
--- /dev/null
+++ b/arch/m68k/mm/mcfmmu.c
@@ -0,0 +1,198 @@
1/*
2 * Based upon linux/arch/m68k/mm/sun3mmu.c
3 * Based upon linux/arch/ppc/mm/mmu_context.c
4 *
5 * Implementations of mm routines specific to the Coldfire MMU.
6 *
7 * Copyright (c) 2008 Freescale Semiconductor, Inc.
8 */
9
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/mm.h>
13#include <linux/init.h>
14#include <linux/string.h>
15#include <linux/bootmem.h>
16
17#include <asm/setup.h>
18#include <asm/page.h>
19#include <asm/pgtable.h>
20#include <asm/mmu_context.h>
21#include <asm/mcf_pgalloc.h>
22#include <asm/tlbflush.h>
23
/* True for kernel-mapped addresses (vmalloc through the kmap window). */
#define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))

mm_context_t next_mmu_context;		/* next context number to try handing out */
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];	/* bitmap of allocated contexts */
atomic_t nr_free_contexts;		/* contexts still available for allocation */
struct mm_struct *context_mm[LAST_CONTEXT+1];	/* which mm owns each context */
extern unsigned long num_pages;		/* total lowmem pages, set by platform setup */
31
/*
 * Release memory used only during init.  Intentionally a no-op here:
 * the ColdFire MMU port does not reclaim the init sections (TODO).
 */
void free_initmem(void)
{
}
35
36/*
37 * ColdFire paging_init derived from sun3.
38 */
39void __init paging_init(void)
40{
41 pgd_t *pg_dir;
42 pte_t *pg_table;
43 unsigned long address, size;
44 unsigned long next_pgtable, bootmem_end;
45 unsigned long zones_size[MAX_NR_ZONES];
46 enum zone_type zone;
47 int i;
48
49 empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
50 memset((void *) empty_zero_page, 0, PAGE_SIZE);
51
52 pg_dir = swapper_pg_dir;
53 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
54
55 size = num_pages * sizeof(pte_t);
56 size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
57 next_pgtable = (unsigned long) alloc_bootmem_pages(size);
58
59 bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
60 pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
61
62 address = PAGE_OFFSET;
63 while (address < (unsigned long)high_memory) {
64 pg_table = (pte_t *) next_pgtable;
65 next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
66 pgd_val(*pg_dir) = (unsigned long) pg_table;
67 pg_dir++;
68
69 /* now change pg_table to kernel virtual addresses */
70 for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
71 pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
72 if (address >= (unsigned long) high_memory)
73 pte_val(pte) = 0;
74
75 set_pte(pg_table, pte);
76 address += PAGE_SIZE;
77 }
78 }
79
80 current->mm = NULL;
81
82 for (zone = 0; zone < MAX_NR_ZONES; zone++)
83 zones_size[zone] = 0x0;
84 zones_size[ZONE_DMA] = num_pages;
85 free_area_init(zones_size);
86}
87
/*
 * Handle a ColdFire TLB miss.
 *
 * @regs:           trap-time registers (PC is used for instruction misses)
 * @write:          non-zero if the faulting access was a write
 * @dtlb:           non-zero for a data-TLB miss, zero for an ITLB miss
 * @extension_word: longword offset added to the PC for ITLB misses
 *
 * Walks the page tables for the faulting address and, when a present
 * pte with sufficient permissions is found, loads it into the hardware
 * TLB through the MMUTR/MMUDR/MMUOR registers.  Returns 0 on success,
 * -1 when the miss must be escalated to the full page-fault path.
 */
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

	/* Data misses latch the faulting address in MMUAR; instruction
	 * misses reconstruct it from the PC plus the extension word. */
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	/* Kernel-mapped addresses resolve through init_mm's tables. */
	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
		: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		/* Write to a read-only page: let the fault handler decide
		 * (could be COW or a genuine protection violation). */
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	/* Keep still-clean user pages write-protected so the first write
	 * faults and the dirty bit gets recorded above. */
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

	/* Load the TLB tag: virtual page, ASID, pte attribute bits, valid. */
	mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
		(((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
		>> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);

	/* Load the TLB data: physical page, attributes, 8 KiB page size. */
	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	/* Commit the entry to the appropriate TLB. */
	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}
155
156/*
157 * Initialize the context management stuff.
158 * The following was taken from arch/ppc/mmu_context.c
159 */
160void __init mmu_context_init(void)
161{
162 /*
163 * Some processors have too few contexts to reserve one for
164 * init_mm, and require using context 0 for a normal task.
165 * Other processors reserve the use of context zero for the kernel.
166 * This code assumes FIRST_CONTEXT < 32.
167 */
168 context_map[0] = (1 << FIRST_CONTEXT) - 1;
169 next_mmu_context = FIRST_CONTEXT;
170 atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
171}
172
173/*
174 * Steal a context from a task that has one at the moment.
175 * This is only used on 8xx and 4xx and we presently assume that
176 * they don't do SMP. If they do then thicfpgalloc.hs will have to check
177 * whether the MM we steal is in use.
178 * We also assume that this is only used on systems that don't
179 * use an MMU hash table - this is true for 8xx and 4xx.
180 * This isn't an LRU system, it just frees up each context in
181 * turn (sort-of pseudo-random replacement :). This would be the
182 * place to implement an LRU scheme if anyone was motivated to do it.
183 * -- paulus
184 */
185void steal_context(void)
186{
187 struct mm_struct *mm;
188 /*
189 * free up context `next_mmu_context'
190 * if we shouldn't free context 0, don't...
191 */
192 if (next_mmu_context < FIRST_CONTEXT)
193 next_mmu_context = FIRST_CONTEXT;
194 mm = context_mm[next_mmu_context];
195 flush_tlb_mm(mm);
196 destroy_context(mm);
197}
198
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 34c77ce24fba..a5dbb74fe1de 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -203,7 +203,9 @@ static inline void pushcl040(unsigned long paddr)
203 203
204void cache_clear (unsigned long paddr, int len) 204void cache_clear (unsigned long paddr, int len)
205{ 205{
206 if (CPU_IS_040_OR_060) { 206 if (CPU_IS_COLDFIRE) {
207 flush_cf_bcache(0, DCACHE_MAX_ADDR);
208 } else if (CPU_IS_040_OR_060) {
207 int tmp; 209 int tmp;
208 210
209 /* 211 /*
@@ -250,7 +252,9 @@ EXPORT_SYMBOL(cache_clear);
250 252
251void cache_push (unsigned long paddr, int len) 253void cache_push (unsigned long paddr, int len)
252{ 254{
253 if (CPU_IS_040_OR_060) { 255 if (CPU_IS_COLDFIRE) {
256 flush_cf_bcache(0, DCACHE_MAX_ADDR);
257 } else if (CPU_IS_040_OR_060) {
254 int tmp = PAGE_SIZE; 258 int tmp = PAGE_SIZE;
255 259
256 /* 260 /*