author    Guan Xuetao <gxt@mprc.pku.edu.cn>    2011-01-15 05:16:59 -0500
committer Guan Xuetao <gxt@mprc.pku.edu.cn>    2011-03-16 21:19:08 -0400
commit    b50f1704e9c441c58cf6dc05e72953ca30e1d4d2 (patch)
tree      bfd7f81c849aa42f6355d9fb383f2167c5f0e087 /arch/unicore32/mm
parent    f73670e8a55c11d47c28dca35dc4bc7dfbd4e6eb (diff)
unicore32 core architecture: mm related: generic codes
This patch adds the generic code for memory management.
Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/unicore32/mm')
-rw-r--r--  arch/unicore32/mm/Kconfig   |  50
-rw-r--r--  arch/unicore32/mm/Makefile  |  15
-rw-r--r--  arch/unicore32/mm/init.c    | 517
-rw-r--r--  arch/unicore32/mm/iomap.c   |  56
-rw-r--r--  arch/unicore32/mm/ioremap.c | 261
-rw-r--r--  arch/unicore32/mm/mm.h      |  39
6 files changed, 938 insertions(+), 0 deletions(-)
diff --git a/arch/unicore32/mm/Kconfig b/arch/unicore32/mm/Kconfig
new file mode 100644
index 000000000000..5f77fb3c63be
--- /dev/null
+++ b/arch/unicore32/mm/Kconfig
@@ -0,0 +1,50 @@
1 | comment "Processor Type" | ||
2 | |||
3 | # Select CPU types depending on the architecture selected. This selects | ||
4 | # which CPUs we support in the kernel image, and the compiler instruction | ||
5 | # optimiser behaviour. | ||
6 | |||
7 | config CPU_UCV2 | ||
8 | def_bool y | ||
9 | |||
10 | comment "Processor Features" | ||
11 | |||
12 | config CPU_ICACHE_DISABLE | ||
13 | bool "Disable I-Cache (I-bit)" | ||
14 | help | ||
15 | Say Y here to disable the processor instruction cache. Unless | ||
16 | you have a reason not to or are unsure, say N. | ||
17 | |||
18 | config CPU_DCACHE_DISABLE | ||
19 | bool "Disable D-Cache (D-bit)" | ||
20 | help | ||
21 | Say Y here to disable the processor data cache. Unless | ||
22 | you have a reason not to or are unsure, say N. | ||
23 | |||
24 | config CPU_DCACHE_WRITETHROUGH | ||
25 | bool "Force write through D-cache" | ||
26 | help | ||
27 | Say Y here to use the data cache in writethrough mode. Unless you | ||
28 | specifically require this or are unsure, say N. | ||
29 | |||
30 | config CPU_DCACHE_LINE_DISABLE | ||
31 | bool "Disable D-cache line ops" | ||
32 | default y | ||
33 | help | ||
34 | Say Y here to disable the data cache line operations. | ||
35 | |||
36 | config CPU_TLB_SINGLE_ENTRY_DISABLE | ||
37 | bool "Disable TLB single entry ops" | ||
38 | default y | ||
39 | help | ||
40 | Say Y here to disable the TLB single entry operations. | ||
41 | |||
42 | config SWIOTLB | ||
43 | def_bool y | ||
44 | |||
45 | config IOMMU_HELPER | ||
46 | def_bool SWIOTLB | ||
47 | |||
48 | config NEED_SG_DMA_LENGTH | ||
49 | def_bool SWIOTLB | ||
50 | |||
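With the defaults above, and assuming the top-level arch/unicore32 Kconfig sources this file, the generated .config fragment for a typical build would read roughly as follows (a sketch for illustration, not part of the patch):

        CONFIG_CPU_UCV2=y
        # CONFIG_CPU_ICACHE_DISABLE is not set
        # CONFIG_CPU_DCACHE_DISABLE is not set
        # CONFIG_CPU_DCACHE_WRITETHROUGH is not set
        CONFIG_CPU_DCACHE_LINE_DISABLE=y
        CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE=y
        CONFIG_SWIOTLB=y
        CONFIG_IOMMU_HELPER=y
        CONFIG_NEED_SG_DMA_LENGTH=y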
diff --git a/arch/unicore32/mm/Makefile b/arch/unicore32/mm/Makefile
new file mode 100644
index 000000000000..f3ff41039f51
--- /dev/null
+++ b/arch/unicore32/mm/Makefile
@@ -0,0 +1,15 @@
1 | # | ||
2 | # Makefile for the linux unicore-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := extable.o fault.o init.o pgd.o mmu.o | ||
6 | obj-y += iomap.o flush.o ioremap.o | ||
7 | |||
8 | obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o | ||
9 | |||
10 | obj-$(CONFIG_MODULES) += proc-syms.o | ||
11 | |||
12 | obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o | ||
13 | |||
14 | obj-$(CONFIG_CPU_UCV2) += cache-ucv2.o tlb-ucv2.o proc-ucv2.o | ||
15 | |||
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
new file mode 100644
index 000000000000..3dbe3709b69d
--- /dev/null
+++ b/arch/unicore32/mm/init.c
@@ -0,0 +1,517 @@
1 | /* | ||
2 | * linux/arch/unicore32/mm/init.c | ||
3 | * | ||
4 | * Copyright (C) 2010 GUAN Xue-tao | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/swap.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/bootmem.h> | ||
15 | #include <linux/mman.h> | ||
16 | #include <linux/nodemask.h> | ||
17 | #include <linux/initrd.h> | ||
18 | #include <linux/highmem.h> | ||
19 | #include <linux/gfp.h> | ||
20 | #include <linux/memblock.h> | ||
21 | #include <linux/sort.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | |||
24 | #include <asm/sections.h> | ||
25 | #include <asm/setup.h> | ||
26 | #include <asm/sizes.h> | ||
27 | #include <asm/tlb.h> | ||
28 | #include <mach/map.h> | ||
29 | |||
30 | #include "mm.h" | ||
31 | |||
32 | static unsigned long phys_initrd_start __initdata = 0x01000000; | ||
33 | static unsigned long phys_initrd_size __initdata = SZ_8M; | ||
34 | |||
35 | static int __init early_initrd(char *p) | ||
36 | { | ||
37 | unsigned long start, size; | ||
38 | char *endp; | ||
39 | |||
40 | start = memparse(p, &endp); | ||
41 | if (*endp == ',') { | ||
42 | size = memparse(endp + 1, NULL); | ||
43 | |||
44 | phys_initrd_start = start; | ||
45 | phys_initrd_size = size; | ||
46 | } | ||
47 | return 0; | ||
48 | } | ||
49 | early_param("initrd", early_initrd); | ||
50 | |||
51 | /* | ||
52 | * This keeps memory configuration data used by a couple of memory | ||
53 | * initialization functions, as well as by show_mem() for skipping | ||
54 | * holes in the memory map. It is populated by uc32_add_memory(). | ||
55 | */ | ||
56 | struct meminfo meminfo; | ||
57 | |||
58 | void show_mem(void) | ||
59 | { | ||
60 | int free = 0, total = 0, reserved = 0; | ||
61 | int shared = 0, cached = 0, slab = 0, i; | ||
62 | struct meminfo *mi = &meminfo; | ||
63 | |||
64 | printk(KERN_DEFAULT "Mem-info:\n"); | ||
65 | show_free_areas(); | ||
66 | |||
67 | for_each_bank(i, mi) { | ||
68 | struct membank *bank = &mi->bank[i]; | ||
69 | unsigned int pfn1, pfn2; | ||
70 | struct page *page, *end; | ||
71 | |||
72 | pfn1 = bank_pfn_start(bank); | ||
73 | pfn2 = bank_pfn_end(bank); | ||
74 | |||
75 | page = pfn_to_page(pfn1); | ||
76 | end = pfn_to_page(pfn2 - 1) + 1; | ||
77 | |||
78 | do { | ||
79 | total++; | ||
80 | if (PageReserved(page)) | ||
81 | reserved++; | ||
82 | else if (PageSwapCache(page)) | ||
83 | cached++; | ||
84 | else if (PageSlab(page)) | ||
85 | slab++; | ||
86 | else if (!page_count(page)) | ||
87 | free++; | ||
88 | else | ||
89 | shared += page_count(page) - 1; | ||
90 | page++; | ||
91 | } while (page < end); | ||
92 | } | ||
93 | |||
94 | printk(KERN_DEFAULT "%d pages of RAM\n", total); | ||
95 | printk(KERN_DEFAULT "%d free pages\n", free); | ||
96 | printk(KERN_DEFAULT "%d reserved pages\n", reserved); | ||
97 | printk(KERN_DEFAULT "%d slab pages\n", slab); | ||
98 | printk(KERN_DEFAULT "%d pages shared\n", shared); | ||
99 | printk(KERN_DEFAULT "%d pages swap cached\n", cached); | ||
100 | } | ||
101 | |||
102 | static void __init find_limits(unsigned long *min, unsigned long *max_low, | ||
103 | unsigned long *max_high) | ||
104 | { | ||
105 | struct meminfo *mi = &meminfo; | ||
106 | int i; | ||
107 | |||
108 | *min = -1UL; | ||
109 | *max_low = *max_high = 0; | ||
110 | |||
111 | for_each_bank(i, mi) { | ||
112 | struct membank *bank = &mi->bank[i]; | ||
113 | unsigned long start, end; | ||
114 | |||
115 | start = bank_pfn_start(bank); | ||
116 | end = bank_pfn_end(bank); | ||
117 | |||
118 | if (*min > start) | ||
119 | *min = start; | ||
120 | if (*max_high < end) | ||
121 | *max_high = end; | ||
122 | if (bank->highmem) | ||
123 | continue; | ||
124 | if (*max_low < end) | ||
125 | *max_low = end; | ||
126 | } | ||
127 | } | ||
128 | |||
129 | static void __init uc32_bootmem_init(unsigned long start_pfn, | ||
130 | unsigned long end_pfn) | ||
131 | { | ||
132 | struct memblock_region *reg; | ||
133 | unsigned int boot_pages; | ||
134 | phys_addr_t bitmap; | ||
135 | pg_data_t *pgdat; | ||
136 | |||
137 | /* | ||
138 | * Allocate the bootmem bitmap page. This must be in a region | ||
139 | * of memory which has already been mapped. | ||
140 | */ | ||
141 | boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); | ||
142 | bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES, | ||
143 | __pfn_to_phys(end_pfn)); | ||
144 | |||
145 | /* | ||
146 | * Initialise the bootmem allocator, handing the | ||
147 | * memory banks over to bootmem. | ||
148 | */ | ||
149 | node_set_online(0); | ||
150 | pgdat = NODE_DATA(0); | ||
151 | init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn); | ||
152 | |||
153 | /* Free the lowmem regions from memblock into bootmem. */ | ||
154 | for_each_memblock(memory, reg) { | ||
155 | unsigned long start = memblock_region_memory_base_pfn(reg); | ||
156 | unsigned long end = memblock_region_memory_end_pfn(reg); | ||
157 | |||
158 | if (end >= end_pfn) | ||
159 | end = end_pfn; | ||
160 | if (start >= end) | ||
161 | break; | ||
162 | |||
163 | free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT); | ||
164 | } | ||
165 | |||
166 | /* Reserve the lowmem memblock reserved regions in bootmem. */ | ||
167 | for_each_memblock(reserved, reg) { | ||
168 | unsigned long start = memblock_region_reserved_base_pfn(reg); | ||
169 | unsigned long end = memblock_region_reserved_end_pfn(reg); | ||
170 | |||
171 | if (end >= end_pfn) | ||
172 | end = end_pfn; | ||
173 | if (start >= end) | ||
174 | break; | ||
175 | |||
176 | reserve_bootmem(__pfn_to_phys(start), | ||
177 | (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT); | ||
178 | } | ||
179 | } | ||
180 | |||
181 | static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low, | ||
182 | unsigned long max_high) | ||
183 | { | ||
184 | unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; | ||
185 | struct memblock_region *reg; | ||
186 | |||
187 | /* | ||
188 | * initialise the zones. | ||
189 | */ | ||
190 | memset(zone_size, 0, sizeof(zone_size)); | ||
191 | |||
192 | /* | ||
193 | * The memory size has already been determined. If we need | ||
194 | * to do anything fancy with the allocation of this memory | ||
195 | * to the zones, now is the time to do it. | ||
196 | */ | ||
197 | zone_size[0] = max_low - min; | ||
198 | |||
199 | /* | ||
200 | * Calculate the size of the holes. | ||
201 | * holes = node_size - sum(bank_sizes) | ||
202 | */ | ||
203 | memcpy(zhole_size, zone_size, sizeof(zhole_size)); | ||
204 | for_each_memblock(memory, reg) { | ||
205 | unsigned long start = memblock_region_memory_base_pfn(reg); | ||
206 | unsigned long end = memblock_region_memory_end_pfn(reg); | ||
207 | |||
208 | if (start < max_low) { | ||
209 | unsigned long low_end = min(end, max_low); | ||
210 | zhole_size[0] -= low_end - start; | ||
211 | } | ||
212 | } | ||
213 | |||
214 | /* | ||
215 | * Adjust the sizes according to any special requirements for | ||
216 | * this machine type. | ||
217 | */ | ||
218 | arch_adjust_zones(zone_size, zhole_size); | ||
219 | |||
220 | free_area_init_node(0, zone_size, min, zhole_size); | ||
221 | } | ||
222 | |||
223 | int pfn_valid(unsigned long pfn) | ||
224 | { | ||
225 | return memblock_is_memory(pfn << PAGE_SHIFT); | ||
226 | } | ||
227 | EXPORT_SYMBOL(pfn_valid); | ||
228 | |||
229 | static void uc32_memory_present(void) | ||
230 | { | ||
231 | } | ||
232 | |||
233 | static int __init meminfo_cmp(const void *_a, const void *_b) | ||
234 | { | ||
235 | const struct membank *a = _a, *b = _b; | ||
236 | long cmp = bank_pfn_start(a) - bank_pfn_start(b); | ||
237 | return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; | ||
238 | } | ||
239 | |||
240 | void __init uc32_memblock_init(struct meminfo *mi) | ||
241 | { | ||
242 | int i; | ||
243 | |||
244 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), | ||
245 | meminfo_cmp, NULL); | ||
246 | |||
247 | memblock_init(); | ||
248 | for (i = 0; i < mi->nr_banks; i++) | ||
249 | memblock_add(mi->bank[i].start, mi->bank[i].size); | ||
250 | |||
251 | /* Register the kernel text, kernel data and initrd with memblock. */ | ||
252 | memblock_reserve(__pa(_text), _end - _text); | ||
253 | |||
254 | #ifdef CONFIG_BLK_DEV_INITRD | ||
255 | if (phys_initrd_size) { | ||
256 | memblock_reserve(phys_initrd_start, phys_initrd_size); | ||
257 | |||
258 | /* Now convert initrd to virtual addresses */ | ||
259 | initrd_start = __phys_to_virt(phys_initrd_start); | ||
260 | initrd_end = initrd_start + phys_initrd_size; | ||
261 | } | ||
262 | #endif | ||
263 | |||
264 | uc32_mm_memblock_reserve(); | ||
265 | |||
266 | memblock_analyze(); | ||
267 | memblock_dump_all(); | ||
268 | } | ||
269 | |||
270 | void __init bootmem_init(void) | ||
271 | { | ||
272 | unsigned long min, max_low, max_high; | ||
273 | |||
274 | max_low = max_high = 0; | ||
275 | |||
276 | find_limits(&min, &max_low, &max_high); | ||
277 | |||
278 | uc32_bootmem_init(min, max_low); | ||
279 | |||
280 | #ifdef CONFIG_SWIOTLB | ||
281 | swiotlb_init(1); | ||
282 | #endif | ||
283 | /* | ||
284 | * Sparsemem tries to allocate bootmem in memory_present(), | ||
285 | * so must be done after the fixed reservations | ||
286 | */ | ||
287 | uc32_memory_present(); | ||
288 | |||
289 | /* | ||
290 | * sparse_init() needs the bootmem allocator up and running. | ||
291 | */ | ||
292 | sparse_init(); | ||
293 | |||
294 | /* | ||
295 | * Now free the memory - free_area_init_node needs | ||
296 | * the sparse mem_map arrays initialized by sparse_init() | ||
297 | * for memmap_init_zone(), otherwise all PFNs are invalid. | ||
298 | */ | ||
299 | uc32_bootmem_free(min, max_low, max_high); | ||
300 | |||
301 | high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1; | ||
302 | |||
303 | /* | ||
304 | * This doesn't seem to be used by the Linux memory manager any | ||
305 | * more, but is used by ll_rw_block. If we can get rid of it, we | ||
306 | * also get rid of some of the stuff above as well. | ||
307 | * | ||
308 | * Note: max_low_pfn and max_pfn reflect the number of _pages_ in | ||
309 | * the system, not the maximum PFN. | ||
310 | */ | ||
311 | max_low_pfn = max_low - PHYS_PFN_OFFSET; | ||
312 | max_pfn = max_high - PHYS_PFN_OFFSET; | ||
313 | } | ||
314 | |||
315 | static inline int free_area(unsigned long pfn, unsigned long end, char *s) | ||
316 | { | ||
317 | unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); | ||
318 | |||
319 | for (; pfn < end; pfn++) { | ||
320 | struct page *page = pfn_to_page(pfn); | ||
321 | ClearPageReserved(page); | ||
322 | init_page_count(page); | ||
323 | __free_page(page); | ||
324 | pages++; | ||
325 | } | ||
326 | |||
327 | if (size && s) | ||
328 | printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); | ||
329 | |||
330 | return pages; | ||
331 | } | ||
332 | |||
333 | static inline void | ||
334 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) | ||
335 | { | ||
336 | struct page *start_pg, *end_pg; | ||
337 | unsigned long pg, pgend; | ||
338 | |||
339 | /* | ||
340 | * Convert start_pfn/end_pfn to a struct page pointer. | ||
341 | */ | ||
342 | start_pg = pfn_to_page(start_pfn - 1) + 1; | ||
343 | end_pg = pfn_to_page(end_pfn); | ||
344 | |||
345 | /* | ||
346 | * Convert to physical addresses, and | ||
347 | * round start upwards and end downwards. | ||
348 | */ | ||
349 | pg = PAGE_ALIGN(__pa(start_pg)); | ||
350 | pgend = __pa(end_pg) & PAGE_MASK; | ||
351 | |||
352 | /* | ||
353 | * If there are free pages between these, | ||
354 | * free the section of the memmap array. | ||
355 | */ | ||
356 | if (pg < pgend) | ||
357 | free_bootmem(pg, pgend - pg); | ||
358 | } | ||
359 | |||
360 | /* | ||
361 | * The mem_map array can get very big. Free the unused area of the memory map. | ||
362 | */ | ||
363 | static void __init free_unused_memmap(struct meminfo *mi) | ||
364 | { | ||
365 | unsigned long bank_start, prev_bank_end = 0; | ||
366 | unsigned int i; | ||
367 | |||
368 | /* | ||
369 | * This relies on each bank being in address order. | ||
370 | * The banks are sorted previously in bootmem_init(). | ||
371 | */ | ||
372 | for_each_bank(i, mi) { | ||
373 | struct membank *bank = &mi->bank[i]; | ||
374 | |||
375 | bank_start = bank_pfn_start(bank); | ||
376 | |||
377 | /* | ||
378 | * If we had a previous bank, and there is a space | ||
379 | * between the current bank and the previous, free it. | ||
380 | */ | ||
381 | if (prev_bank_end && prev_bank_end < bank_start) | ||
382 | free_memmap(prev_bank_end, bank_start); | ||
383 | |||
384 | /* | ||
385 | * Align up here since the VM subsystem insists that the | ||
386 | * memmap entries are valid from the bank end aligned to | ||
387 | * MAX_ORDER_NR_PAGES. | ||
388 | */ | ||
389 | prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); | ||
390 | } | ||
391 | } | ||
392 | |||
393 | /* | ||
394 | * mem_init() marks the free areas in the mem_map and tells us how much | ||
395 | * memory is free. This is done after various parts of the system have | ||
396 | * claimed their memory after the kernel image. | ||
397 | */ | ||
398 | void __init mem_init(void) | ||
399 | { | ||
400 | unsigned long reserved_pages, free_pages; | ||
401 | struct memblock_region *reg; | ||
402 | int i; | ||
403 | |||
404 | max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; | ||
405 | |||
406 | /* this will put all unused low memory onto the freelists */ | ||
407 | free_unused_memmap(&meminfo); | ||
408 | |||
409 | totalram_pages += free_all_bootmem(); | ||
410 | |||
411 | reserved_pages = free_pages = 0; | ||
412 | |||
413 | for_each_bank(i, &meminfo) { | ||
414 | struct membank *bank = &meminfo.bank[i]; | ||
415 | unsigned int pfn1, pfn2; | ||
416 | struct page *page, *end; | ||
417 | |||
418 | pfn1 = bank_pfn_start(bank); | ||
419 | pfn2 = bank_pfn_end(bank); | ||
420 | |||
421 | page = pfn_to_page(pfn1); | ||
422 | end = pfn_to_page(pfn2 - 1) + 1; | ||
423 | |||
424 | do { | ||
425 | if (PageReserved(page)) | ||
426 | reserved_pages++; | ||
427 | else if (!page_count(page)) | ||
428 | free_pages++; | ||
429 | page++; | ||
430 | } while (page < end); | ||
431 | } | ||
432 | |||
433 | /* | ||
434 | * Since our memory may not be contiguous, calculate the | ||
435 | * real number of pages we have in this system | ||
436 | */ | ||
437 | printk(KERN_INFO "Memory:"); | ||
438 | num_physpages = 0; | ||
439 | for_each_memblock(memory, reg) { | ||
440 | unsigned long pages = memblock_region_memory_end_pfn(reg) - | ||
441 | memblock_region_memory_base_pfn(reg); | ||
442 | num_physpages += pages; | ||
443 | printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); | ||
444 | } | ||
445 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | ||
446 | |||
447 | printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", | ||
448 | nr_free_pages() << (PAGE_SHIFT-10), | ||
449 | free_pages << (PAGE_SHIFT-10), | ||
450 | reserved_pages << (PAGE_SHIFT-10), | ||
451 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
452 | |||
453 | printk(KERN_NOTICE "Virtual kernel memory layout:\n" | ||
454 | " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
455 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
456 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
457 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
458 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
459 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
460 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n", | ||
461 | |||
462 | VECTORS_BASE, VECTORS_BASE + PAGE_SIZE, | ||
463 | DIV_ROUND_UP(PAGE_SIZE, SZ_1K), | ||
464 | VMALLOC_START, VMALLOC_END, | ||
465 | DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M), | ||
466 | PAGE_OFFSET, (unsigned long)high_memory, | ||
467 | DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M), | ||
468 | MODULES_VADDR, MODULES_END, | ||
469 | DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M), | ||
470 | |||
471 | __init_begin, __init_end, | ||
472 | DIV_ROUND_UP((__init_end - __init_begin), SZ_1K), | ||
473 | _stext, _etext, | ||
474 | DIV_ROUND_UP((_etext - _stext), SZ_1K), | ||
475 | _sdata, _edata, | ||
476 | DIV_ROUND_UP((_edata - _sdata), SZ_1K)); | ||
477 | |||
478 | BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); | ||
479 | BUG_ON(TASK_SIZE > MODULES_VADDR); | ||
480 | |||
481 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | ||
482 | /* | ||
483 | * On a machine this small we won't get | ||
484 | * anywhere without overcommit, so turn | ||
485 | * it on by default. | ||
486 | */ | ||
487 | sysctl_overcommit_memory = OVERCOMMIT_ALWAYS; | ||
488 | } | ||
489 | } | ||
490 | |||
491 | void free_initmem(void) | ||
492 | { | ||
493 | totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), | ||
494 | __phys_to_pfn(__pa(__init_end)), | ||
495 | "init"); | ||
496 | } | ||
497 | |||
498 | #ifdef CONFIG_BLK_DEV_INITRD | ||
499 | |||
500 | static int keep_initrd; | ||
501 | |||
502 | void free_initrd_mem(unsigned long start, unsigned long end) | ||
503 | { | ||
504 | if (!keep_initrd) | ||
505 | totalram_pages += free_area(__phys_to_pfn(__pa(start)), | ||
506 | __phys_to_pfn(__pa(end)), | ||
507 | "initrd"); | ||
508 | } | ||
509 | |||
510 | static int __init keepinitrd_setup(char *__unused) | ||
511 | { | ||
512 | keep_initrd = 1; | ||
513 | return 1; | ||
514 | } | ||
515 | |||
516 | __setup("keepinitrd", keepinitrd_setup); | ||
517 | #endif | ||
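For reference, the early_initrd() parser near the top of init.c takes the physical start address and size as a comma-separated pair, both parsed with memparse(), so a boot command line overriding the built-in defaults (0x01000000, SZ_8M) could look like this hypothetical example:

        initrd=0x02000000,16M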
diff --git a/arch/unicore32/mm/iomap.c b/arch/unicore32/mm/iomap.c
new file mode 100644
index 000000000000..a7e1a3d2e069
--- /dev/null
+++ b/arch/unicore32/mm/iomap.c
@@ -0,0 +1,56 @@
1 | /* | ||
2 | * linux/arch/unicore32/mm/iomap.c | ||
3 | * | ||
4 | * Code specific to PKUnity SoC and UniCore ISA | ||
5 | * | ||
6 | * Copyright (C) 2001-2010 GUAN Xue-tao | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * Map IO port and PCI memory spaces so that {read,write}[bwl] can | ||
13 | * be used to access this memory. | ||
14 | */ | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/pci.h> | ||
17 | #include <linux/ioport.h> | ||
18 | #include <linux/io.h> | ||
19 | |||
20 | #ifdef __io | ||
21 | void __iomem *ioport_map(unsigned long port, unsigned int nr) | ||
22 | { | ||
23 | /* we map the PC legacy 64K I/O port space to PCI I/O space at 0x80030000 */ | ||
24 | return (void __iomem *) (unsigned long) | ||
25 | io_p2v((port & 0xffff) + PKUNITY_PCILIO_BASE); | ||
26 | } | ||
27 | EXPORT_SYMBOL(ioport_map); | ||
28 | |||
29 | void ioport_unmap(void __iomem *addr) | ||
30 | { | ||
31 | } | ||
32 | EXPORT_SYMBOL(ioport_unmap); | ||
33 | #endif | ||
34 | |||
35 | #ifdef CONFIG_PCI | ||
36 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | ||
37 | { | ||
38 | resource_size_t start = pci_resource_start(dev, bar); | ||
39 | resource_size_t len = pci_resource_len(dev, bar); | ||
40 | unsigned long flags = pci_resource_flags(dev, bar); | ||
41 | |||
42 | if (!len || !start) | ||
43 | return NULL; | ||
44 | if (maxlen && len > maxlen) | ||
45 | len = maxlen; | ||
46 | if (flags & IORESOURCE_IO) | ||
47 | return ioport_map(start, len); | ||
48 | if (flags & IORESOURCE_MEM) { | ||
49 | if (flags & IORESOURCE_CACHEABLE) | ||
50 | return ioremap(start, len); | ||
51 | return ioremap_nocache(start, len); | ||
52 | } | ||
53 | return NULL; | ||
54 | } | ||
55 | EXPORT_SYMBOL(pci_iomap); | ||
56 | #endif | ||
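A driver on this platform uses the generic iomap API exactly as on other architectures; the sketch below is a hypothetical PCI probe routine (the device, BAR number and register offset are assumptions, not part of this patch) whose mapping request goes through the pci_iomap() implementation above:

        #include <linux/pci.h>
        #include <linux/io.h>

        static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        {
                void __iomem *regs;
                int ret;

                ret = pci_enable_device(pdev);
                if (ret)
                        return ret;

                /* Map BAR 0; an IORESOURCE_MEM BAR ends up in ioremap_nocache() above. */
                regs = pci_iomap(pdev, 0, 0);
                if (!regs) {
                        pci_disable_device(pdev);
                        return -ENOMEM;
                }

                /* 0x10 is an assumed status-register offset on the imaginary device. */
                dev_info(&pdev->dev, "status: %#x\n", readl(regs + 0x10));

                pci_iounmap(pdev, regs);
                pci_disable_device(pdev);
                return 0;
        }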
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
new file mode 100644
index 000000000000..b7a605597b08
--- /dev/null
+++ b/arch/unicore32/mm/ioremap.c
@@ -0,0 +1,261 @@
1 | /* | ||
2 | * linux/arch/unicore32/mm/ioremap.c | ||
3 | * | ||
4 | * Code specific to PKUnity SoC and UniCore ISA | ||
5 | * | ||
6 | * Copyright (C) 2001-2010 GUAN Xue-tao | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * | ||
13 | * Re-map IO memory to kernel address space so that we can access it. | ||
14 | * | ||
15 | * This allows a driver to remap an arbitrary region of bus memory into | ||
16 | * virtual space. One should *only* use readl, writel, memcpy_toio and | ||
17 | * so on with such remapped areas. | ||
18 | * | ||
19 | * Because UniCore only has a 32-bit address space we can't address the | ||
20 | * whole of the (physical) PCI space at once. PCI huge-mode addressing | ||
21 | * allows us to circumvent this restriction by splitting PCI space into | ||
22 | * two 2GB chunks and mapping only one at a time into processor memory. | ||
23 | * We use MMU protection domains to trap any attempt to access the bank | ||
24 | * that is not currently mapped. (This isn't fully implemented yet.) | ||
25 | */ | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/vmalloc.h> | ||
30 | #include <linux/io.h> | ||
31 | |||
32 | #include <asm/cputype.h> | ||
33 | #include <asm/cacheflush.h> | ||
34 | #include <asm/mmu_context.h> | ||
35 | #include <asm/pgalloc.h> | ||
36 | #include <asm/tlbflush.h> | ||
37 | #include <asm/sizes.h> | ||
38 | |||
39 | #include <mach/map.h> | ||
40 | #include "mm.h" | ||
41 | |||
42 | /* | ||
43 | * Used by ioremap() and iounmap() code to mark (super)section-mapped | ||
44 | * I/O regions in vm_struct->flags field. | ||
45 | */ | ||
46 | #define VM_UNICORE_SECTION_MAPPING 0x80000000 | ||
47 | |||
48 | int ioremap_page(unsigned long virt, unsigned long phys, | ||
49 | const struct mem_type *mtype) | ||
50 | { | ||
51 | return ioremap_page_range(virt, virt + PAGE_SIZE, phys, | ||
52 | __pgprot(mtype->prot_pte)); | ||
53 | } | ||
54 | EXPORT_SYMBOL(ioremap_page); | ||
55 | |||
56 | /* | ||
57 | * Section support is unsafe on SMP - If you iounmap and ioremap a region, | ||
58 | * the other CPUs will not see this change until their next context switch. | ||
59 | * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs | ||
60 | * which requires the new ioremap'd region to be referenced, the CPU will | ||
61 | * reference the _old_ region. | ||
62 | * | ||
63 | * Note that get_vm_area_caller() allocates a guard 4K page, so we need to | ||
64 | * mask the size back to 4MB aligned or we will overflow in the loop below. | ||
65 | */ | ||
66 | static void unmap_area_sections(unsigned long virt, unsigned long size) | ||
67 | { | ||
68 | unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1)); | ||
69 | pgd_t *pgd; | ||
70 | |||
71 | flush_cache_vunmap(addr, end); | ||
72 | pgd = pgd_offset_k(addr); | ||
73 | do { | ||
74 | pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr); | ||
75 | |||
76 | pmd = *pmdp; | ||
77 | if (!pmd_none(pmd)) { | ||
78 | /* | ||
79 | * Clear the PMD from the page table, and | ||
80 | * increment the kvm sequence so others | ||
81 | * notice this change. | ||
82 | * | ||
83 | * Note: this is still racy on SMP machines. | ||
84 | */ | ||
85 | pmd_clear(pmdp); | ||
86 | |||
87 | /* | ||
88 | * Free the page table, if there was one. | ||
89 | */ | ||
90 | if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) | ||
91 | pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); | ||
92 | } | ||
93 | |||
94 | addr += PGDIR_SIZE; | ||
95 | pgd++; | ||
96 | } while (addr < end); | ||
97 | |||
98 | flush_tlb_kernel_range(virt, end); | ||
99 | } | ||
100 | |||
101 | static int | ||
102 | remap_area_sections(unsigned long virt, unsigned long pfn, | ||
103 | size_t size, const struct mem_type *type) | ||
104 | { | ||
105 | unsigned long addr = virt, end = virt + size; | ||
106 | pgd_t *pgd; | ||
107 | |||
108 | /* | ||
109 | * Remove and free any PTE-based mapping, and | ||
110 | * sync the current kernel mapping. | ||
111 | */ | ||
112 | unmap_area_sections(virt, size); | ||
113 | |||
114 | pgd = pgd_offset_k(addr); | ||
115 | do { | ||
116 | pmd_t *pmd = pmd_offset((pud_t *)pgd, addr); | ||
117 | |||
118 | set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect)); | ||
119 | pfn += SZ_4M >> PAGE_SHIFT; | ||
120 | flush_pmd_entry(pmd); | ||
121 | |||
122 | addr += PGDIR_SIZE; | ||
123 | pgd++; | ||
124 | } while (addr < end); | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn, | ||
130 | unsigned long offset, size_t size, unsigned int mtype, void *caller) | ||
131 | { | ||
132 | const struct mem_type *type; | ||
133 | int err; | ||
134 | unsigned long addr; | ||
135 | struct vm_struct *area; | ||
136 | |||
137 | /* | ||
138 | * High mappings must be section aligned | ||
139 | */ | ||
140 | if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK)) | ||
141 | return NULL; | ||
142 | |||
143 | /* | ||
144 | * Don't allow RAM to be mapped | ||
145 | */ | ||
146 | if (pfn_valid(pfn)) { | ||
147 | printk(KERN_WARNING "BUG: Your driver calls ioremap() on\n" | ||
148 | "system memory. This leads to architecturally\n" | ||
149 | "unpredictable behaviour, and ioremap() will fail in\n" | ||
150 | "the next kernel release. Please fix your driver.\n"); | ||
151 | WARN_ON(1); | ||
152 | } | ||
153 | |||
154 | type = get_mem_type(mtype); | ||
155 | if (!type) | ||
156 | return NULL; | ||
157 | |||
158 | /* | ||
159 | * Page align the mapping size, taking account of any offset. | ||
160 | */ | ||
161 | size = PAGE_ALIGN(offset + size); | ||
162 | |||
163 | area = get_vm_area_caller(size, VM_IOREMAP, caller); | ||
164 | if (!area) | ||
165 | return NULL; | ||
166 | addr = (unsigned long)area->addr; | ||
167 | |||
168 | if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) { | ||
169 | area->flags |= VM_UNICORE_SECTION_MAPPING; | ||
170 | err = remap_area_sections(addr, pfn, size, type); | ||
171 | } else | ||
172 | err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), | ||
173 | __pgprot(type->prot_pte)); | ||
174 | |||
175 | if (err) { | ||
176 | vunmap((void *)addr); | ||
177 | return NULL; | ||
178 | } | ||
179 | |||
180 | flush_cache_vmap(addr, addr + size); | ||
181 | return (void __iomem *) (offset + addr); | ||
182 | } | ||
183 | |||
184 | void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size, | ||
185 | unsigned int mtype, void *caller) | ||
186 | { | ||
187 | unsigned long last_addr; | ||
188 | unsigned long offset = phys_addr & ~PAGE_MASK; | ||
189 | unsigned long pfn = __phys_to_pfn(phys_addr); | ||
190 | |||
191 | /* | ||
192 | * Don't allow wraparound or zero size | ||
193 | */ | ||
194 | last_addr = phys_addr + size - 1; | ||
195 | if (!size || last_addr < phys_addr) | ||
196 | return NULL; | ||
197 | |||
198 | return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller); | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * Remap an arbitrary physical address space into the kernel virtual | ||
203 | * address space. Needed when the kernel wants to access high addresses | ||
204 | * directly. | ||
205 | * | ||
206 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
207 | * have to convert them into an offset in a page-aligned mapping, but the | ||
208 | * caller shouldn't need to know that small detail. | ||
209 | */ | ||
210 | void __iomem * | ||
211 | __uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | ||
212 | unsigned int mtype) | ||
213 | { | ||
214 | return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, | ||
215 | __builtin_return_address(0)); | ||
216 | } | ||
217 | EXPORT_SYMBOL(__uc32_ioremap_pfn); | ||
218 | |||
219 | void __iomem * | ||
220 | __uc32_ioremap(unsigned long phys_addr, size_t size) | ||
221 | { | ||
222 | return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE, | ||
223 | __builtin_return_address(0)); | ||
224 | } | ||
225 | EXPORT_SYMBOL(__uc32_ioremap); | ||
226 | |||
227 | void __iomem * | ||
228 | __uc32_ioremap_cached(unsigned long phys_addr, size_t size) | ||
229 | { | ||
230 | return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED, | ||
231 | __builtin_return_address(0)); | ||
232 | } | ||
233 | EXPORT_SYMBOL(__uc32_ioremap_cached); | ||
234 | |||
235 | void __uc32_iounmap(volatile void __iomem *io_addr) | ||
236 | { | ||
237 | void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); | ||
238 | struct vm_struct **p, *tmp; | ||
239 | |||
240 | /* | ||
241 | * If this is a section-based mapping, we need to handle it | ||
242 | * specially, as the VM subsystem does not know how to handle | ||
243 | * such a beast. We need the lock here because we must clear | ||
244 | * all the mappings before the area can be reclaimed | ||
245 | * by someone else. | ||
246 | */ | ||
247 | write_lock(&vmlist_lock); | ||
248 | for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) { | ||
249 | if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) { | ||
250 | if (tmp->flags & VM_UNICORE_SECTION_MAPPING) { | ||
251 | unmap_area_sections((unsigned long)tmp->addr, | ||
252 | tmp->size); | ||
253 | } | ||
254 | break; | ||
255 | } | ||
256 | } | ||
257 | write_unlock(&vmlist_lock); | ||
258 | |||
259 | vunmap(addr); | ||
260 | } | ||
261 | EXPORT_SYMBOL(__uc32_iounmap); | ||
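Drivers normally reach these entry points through the ioremap()/iounmap() wrappers in the architecture's <asm/io.h>; the minimal sketch below calls the UniCore-specific helpers directly to map a device register window (the physical address, size and register offset are made-up values for illustration):

        #include <linux/io.h>
        #include <asm/sizes.h>

        static void __iomem *regs;

        static int example_map(void)
        {
                /* 0xee000000/SZ_4K are assumed values for an on-chip peripheral. */
                regs = __uc32_ioremap(0xee000000, SZ_4K);
                if (!regs)
                        return -ENOMEM;

                writel(0, regs + 0x08);         /* hypothetical control register */
                return 0;
        }

        static void example_unmap(void)
        {
                __uc32_iounmap(regs);
        }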
diff --git a/arch/unicore32/mm/mm.h b/arch/unicore32/mm/mm.h
new file mode 100644
index 000000000000..3296bca0f1f7
--- /dev/null
+++ b/arch/unicore32/mm/mm.h
@@ -0,0 +1,39 @@
1 | /* | ||
2 | * linux/arch/unicore32/mm/mm.h | ||
3 | * | ||
4 | * Code specific to PKUnity SoC and UniCore ISA | ||
5 | * | ||
6 | * Copyright (C) 2001-2010 GUAN Xue-tao | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | /* the upper-most page table pointer */ | ||
13 | extern pmd_t *top_pmd; | ||
14 | extern int sysctl_overcommit_memory; | ||
15 | |||
16 | #define TOP_PTE(x) pte_offset_kernel(top_pmd, x) | ||
17 | |||
18 | static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt) | ||
19 | { | ||
20 | return pmd_offset((pud_t *)pgd, virt); | ||
21 | } | ||
22 | |||
23 | static inline pmd_t *pmd_off_k(unsigned long virt) | ||
24 | { | ||
25 | return pmd_off(pgd_offset_k(virt), virt); | ||
26 | } | ||
27 | |||
28 | struct mem_type { | ||
29 | unsigned int prot_pte; | ||
30 | unsigned int prot_l1; | ||
31 | unsigned int prot_sect; | ||
32 | }; | ||
33 | |||
34 | const struct mem_type *get_mem_type(unsigned int type); | ||
35 | |||
36 | extern void __flush_dcache_page(struct address_space *, struct page *); | ||
37 | |||
38 | void __init bootmem_init(void); | ||
39 | void uc32_mm_memblock_reserve(void); | ||