Diffstat (limited to 'arch/arm26/mm/memc.c')
-rw-r--r-- | arch/arm26/mm/memc.c | 202
1 file changed, 202 insertions, 0 deletions
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
new file mode 100644
index 000000000000..8e8a2bb2487d
--- /dev/null
+++ b/arch/arm26/mm/memc.c
@@ -0,0 +1,202 @@
/*
 * linux/arch/arm26/mm/memc.c
 *
 * Copyright (C) 1998-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Page table sludge for older ARM processor architectures.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/memory.h>
#include <asm/hardware.h>

#include <asm/map.h>

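/* One MEMC table: 256 word-sized entries, kept immediately before each page directory */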
#define MEMC_TABLE_SIZE (256*sizeof(unsigned long))

kmem_cache_t *pte_cache, *pgd_cache;
int page_nr;

/*
 * Allocate space for a page table and a MEMC table.
 * Note that we place the MEMC table before the page directory.
 * This means we can easily get to both tightly-associated
 * data structures with a single pointer.
 */
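/*
 * Layout of one pgd_cache object:
 *
 *   base (from kmem_cache_alloc)   ->  +------------------------+
 *                                      |  MEMC table            |  MEMC_TABLE_SIZE bytes
 *   base + MEMC_TABLE_SIZE         ->  +------------------------+
 *   (returned by alloc_pgd_table)      |  page directory        |  PTRS_PER_PGD * sizeof(pgd_t) bytes
 *                                      +------------------------+
 */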
static inline pgd_t *alloc_pgd_table(void)
{
	void *pg2k = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	if (pg2k)
		pg2k += MEMC_TABLE_SIZE;

	return (pgd_t *)pg2k;
}

/*
 * Free a page table.  This function is the counterpart to get_pgd_slow
 * below, not alloc_pgd_table above.
 */
void free_pgd_slow(pgd_t *pgd)
{
	unsigned long tbl = (unsigned long)pgd;

	tbl -= MEMC_TABLE_SIZE;

	kmem_cache_free(pgd_cache, (void *)tbl);
}

/*
 * Allocate a new pgd and fill it in ready for use
 *
 * A new task's pgd is completely empty (all pages !present) except for:
 *
 * o The machine vectors at virtual address 0x0
 * o The vmalloc region at the top of the address space
 *
 */
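/* First page directory entry above the user area, i.e. where the kernel (vmalloc) entries start */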
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = alloc_pgd_table();
	if (!new_pgd)
		goto no_pgd;

	/*
	 * This lock is here just to satisfy pmd_alloc and pte_lock.
	 * FIXME: I bet we could avoid taking it pretty much altogether
	 */
	spin_lock(&mm->page_table_lock);

	/*
	 * On ARM, the first page must always be allocated since it
	 * contains the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc_kernel(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	init_pgd = pgd_offset(&init_mm, 0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);

	/*
	 * The page table entries are zeroed when the table is created
	 * (see the cache_ctor functions below).  Now we need to plonk the
	 * kernel (vmalloc) area at the end of the address space.  We copy
	 * this from the init thread, just like the init_pte we copied
	 * above...
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	spin_unlock(&mm->page_table_lock);

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pgd_slow(new_pgd);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pgd_slow(new_pgd);
	return NULL;

no_pgd:
	return NULL;
}

/*
 * No special code is required here.
 */
void setup_mm_for_reboot(char mode)
{
}

/*
 * This contains the code to set up the memory map on an ARM2/ARM250/ARM3
 *  o swapper_pg_dir = 0x0207d000
 *  o kernel proper starts at 0x02080000
 *  o create (allocate) a pte to contain the machine vectors
 *  o populate the pte (points to 0x02078000) (FIXME - is it zeroed?)
 *  o populate the init task's page directory (pgd) with the new pte
 *  o zero the rest of the init task's pgdir (FIXME - what about vmalloc?!)
 */
void __init memtable_init(struct meminfo *mi)
{
	pte_t *pte;
	int i;

	page_nr = max_low_pfn;

	pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
	pte[0] = mk_pte_phys(PAGE_OFFSET + SCREEN_SIZE, PAGE_READONLY);
	pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);

	for (i = 1; i < PTRS_PER_PGD; i++)
		pgd_val(swapper_pg_dir[i]) = 0;
}

void __init iotable_init(struct map_desc *io_desc)
{
	/* nothing to do */
}

/*
 * We never have holes in the memmap
 */
void __init create_memmap_holes(struct meminfo *mi)
{
}

static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
{
	memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
}

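/*
 * Clear only the user part of a new page directory: the kernel entries
 * are copied from init_mm in get_pgd_slow(), and the MEMC table at the
 * front of the object is filled in by cpu_memc_update_all().
 */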
static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
{
	memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
}

void __init pgtable_cache_init(void)
{
	pte_cache = kmem_cache_create("pte-cache",
				sizeof(pte_t) * PTRS_PER_PTE,
				0, 0, pte_cache_ctor, NULL);
	if (!pte_cache)
		BUG();

	pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE +
				sizeof(pgd_t) * PTRS_PER_PGD,
				0, 0, pgd_cache_ctor, NULL);
	if (!pgd_cache)
		BUG();
}