Diffstat (limited to 'arch/microblaze')
-rw-r--r--   arch/microblaze/Kconfig               |  24
-rw-r--r--   arch/microblaze/include/asm/fixmap.h  |   8
-rw-r--r--   arch/microblaze/include/asm/highmem.h |  96
-rw-r--r--   arch/microblaze/mm/Makefile           |   1
-rw-r--r--   arch/microblaze/mm/highmem.c          |  88
-rw-r--r--   arch/microblaze/mm/init.c             |  68
6 files changed, 272 insertions, 13 deletions
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 3267cc5065d5..86ae27871f41 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -159,20 +159,18 @@ config XILINX_UNCACHED_SHADOW
 	  The feature requires the design to define the RAM memory controller
 	  window to be twice as large as the actual physical memory.
 
-config HIGHMEM_START_BOOL
-	bool "Set high memory pool address"
-	depends on ADVANCED_OPTIONS && HIGHMEM
-	help
-	  This option allows you to set the base address of the kernel virtual
-	  area used to map high memory pages. This can be useful in
-	  optimizing the layout of kernel virtual memory.
-
-	  Say N here unless you know what you are doing.
-
-config HIGHMEM_START
-	hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
+config HIGHMEM
+	bool "High memory support"
 	depends on MMU
-	default "0xfe000000"
+	help
+	  The address space of Microblaze processors is only 4 Gigabytes large
+	  and it has to accommodate user address space, kernel address
+	  space as well as some memory mapped IO. That means that, if you
+	  have a large amount of physical memory and/or IO, not all of the
+	  memory can be "permanently mapped" by the kernel. The physical
+	  memory that is not permanently mapped is called "high memory".
+
+	  If unsure, say n.
 
 config LOWMEM_SIZE_BOOL
 	bool "Set maximum low memory"
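
Note (illustration, not part of the patch): a back-of-the-envelope sketch of the split the help text above describes. The 768 MB lowmem ceiling and 1 GB of DDR are assumptions standing in for a typical CONFIG_LOWMEM_SIZE and a hypothetical board; only memory above the lowmem window ends up as "high memory".

/* Illustrative userspace sketch only; values are assumptions, not defaults from the patch. */
#include <stdio.h>

int main(void)
{
	unsigned long long ram    = 1024ULL << 20;	/* hypothetical 1 GB of DDR on the board */
	unsigned long long lowmem =  768ULL << 20;	/* assumed lowmem ceiling (stand-in for CONFIG_LOWMEM_SIZE) */
	unsigned long long high   = ram > lowmem ? ram - lowmem : 0;

	printf("lowmem: %llu MB, highmem: %llu MB\n",
	       (ram - high) >> 20, high >> 20);
	return 0;
}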
diff --git a/arch/microblaze/include/asm/fixmap.h b/arch/microblaze/include/asm/fixmap.h
index dd89754f0a8d..f2b312e10b10 100644
--- a/arch/microblaze/include/asm/fixmap.h
+++ b/arch/microblaze/include/asm/fixmap.h
@@ -21,6 +21,10 @@
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 #include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
 
 #define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
 
@@ -44,6 +48,10 @@
  */
 enum fixed_addresses {
 	FIX_HOLE,
+#ifdef CONFIG_HIGHMEM
+	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
+	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1,
+#endif
 	__end_of_fixed_addresses
 };
 
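
Note (sketch, not from the patch): the enum above only reserves indices; each index becomes one page allocated downwards from FIXADDR_TOP, mirroring the usual __fix_to_virt() definition. So the FIX_KMAP_BEGIN..FIX_KMAP_END range reserves KM_TYPE_NR * num_possible_cpus() per-CPU kmap_atomic pages just below the top of the address space. A minimal standalone version of that conversion, assuming 4K pages:

#define PAGE_SHIFT	12			/* assumes 4K pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))

/* Index-to-address mapping for fixmap slots: one page per index, growing down. */
static inline unsigned long fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - (idx << PAGE_SHIFT);
}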
diff --git a/arch/microblaze/include/asm/highmem.h b/arch/microblaze/include/asm/highmem.h
new file mode 100644
index 000000000000..2446a73140ac
--- /dev/null
+++ b/arch/microblaze/include/asm/highmem.h
@@ -0,0 +1,96 @@
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ *		      Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <asm/fixmap.h>
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+/*
+ * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte
+ * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP
+ * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
+ * in case of 16K/64K/256K page sizes.
+ */
+
+#define PKMAP_ORDER	PTE_SHIFT
+#define LAST_PKMAP	(1 << PKMAP_ORDER)
+
+#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
+								& PMD_MASK)
+
+#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)
+#define PKMAP_NR(virt)	((virt - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);
+
+static inline void *kmap(struct page *page)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_high(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+	BUG_ON(in_interrupt());
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+
+static inline void *__kmap_atomic(struct page *page)
+{
+	return kmap_atomic_prot(page, kmap_prot);
+}
+
+static inline struct page *kmap_atomic_to_page(void *ptr)
+{
+	unsigned long idx, vaddr = (unsigned long) ptr;
+	pte_t *pte;
+
+	if (vaddr < FIXADDR_START)
+		return virt_to_page(ptr);
+
+	idx = virt_to_fix(vaddr);
+	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+	return pte_page(*pte);
+}
+
+#define flush_cache_kmaps()	{ flush_icache(); flush_dcache(); }
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
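
Note (hypothetical caller, not part of the patch): how a driver would use the kmap()/kunmap() pair declared above to touch a page that may live in high memory. copy_to_ram_page() and its arguments are made-up names for illustration.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Copy a buffer into a page that may sit above the lowmem direct map. */
static void copy_to_ram_page(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap(page);	/* may sleep; plain page_address() for lowmem pages */

	memcpy(vaddr, src, len < PAGE_SIZE ? len : PAGE_SIZE);
	kunmap(page);			/* drop the mapping again (no-op for lowmem pages) */
}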
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 09c49ed87235..7313bd8acbb7 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -5,3 +5,4 @@
 obj-y := consistent.o init.o
 
 obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
new file mode 100644
index 000000000000..7d78838e8bfa
--- /dev/null
+++ b/arch/microblaze/mm/highmem.c
@@ -0,0 +1,88 @@
+/*
+ * highmem.c: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ *		      Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terrabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ *
+ * Reworked for PowerPC by various contributors. Moved from
+ * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+#include <asm/tlbflush.h>
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+
+	unsigned long vaddr;
+	int idx, type;
+
+	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+
+	type = kmap_atomic_idx_push();
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+	local_flush_tlb_page(NULL, vaddr);
+
+	return (void *) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	int type;
+
+	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
+		pagefault_enable();
+		return;
+	}
+
+	type = kmap_atomic_idx();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned int idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_page(NULL, vaddr);
+	}
+#endif
+	kmap_atomic_idx_pop();
+	pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
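
Note (hypothetical caller, not part of the patch): for contrast with the sleeping kmap() path, a sketch of an atomic-context user of the code above. The generic linux/highmem.h of this era wraps the arch's __kmap_atomic()/__kunmap_atomic() into kmap_atomic()/kunmap_atomic(); clear_ram_page() is a made-up name.

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Zero a possibly-highmem page from a context that must not sleep.
 * The mapping comes from a small per-CPU stack of fixmap slots, so it
 * must be dropped before returning, and nested mappings must be
 * released in reverse (LIFO) order.
 */
static void clear_ram_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* pushes one per-CPU fixmap slot */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* pops it again */
}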
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 43b3f604bafe..95297b13dd9e 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -49,6 +49,53 @@ unsigned long memory_size;
 EXPORT_SYMBOL(memory_size);
 unsigned long lowmem_size;
 
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+EXPORT_SYMBOL(kmap_pte);
+pgprot_t kmap_prot;
+EXPORT_SYMBOL(kmap_prot);
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
+			vaddr), vaddr);
+}
+
+static void __init highmem_init(void)
+{
+	pr_debug("%x\n", (u32)PKMAP_BASE);
+	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
+	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
+
+	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
+	kmap_prot = PAGE_KERNEL;
+}
+
+static unsigned long highmem_setup(void)
+{
+	unsigned long pfn;
+	unsigned long reservedpages = 0;
+
+	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
+		struct page *page = pfn_to_page(pfn);
+
+		/* FIXME not sure about */
+		if (memblock_is_reserved(pfn << PAGE_SHIFT))
+			continue;
+		ClearPageReserved(page);
+		init_page_count(page);
+		__free_page(page);
+		totalhigh_pages++;
+		reservedpages++;
+	}
+	totalram_pages += totalhigh_pages;
+	printk(KERN_INFO "High memory: %luk\n",
+			totalhigh_pages << (PAGE_SHIFT-10));
+
+	return reservedpages;
+}
+#endif /* CONFIG_HIGHMEM */
+
 /*
  * paging_init() sets up the page tables - in fact we've already done this.
  */
@@ -66,7 +113,14 @@ static void __init paging_init(void)
 	/* Clean every zones */
 	memset(zones_size, 0, sizeof(zones_size));
 
+#ifdef CONFIG_HIGHMEM
+	highmem_init();
+
+	zones_size[ZONE_DMA] = max_low_pfn;
+	zones_size[ZONE_HIGHMEM] = max_pfn;
+#else
 	zones_size[ZONE_DMA] = max_pfn;
+#endif
 
 	/* We don't have holes in memory map */
 	free_area_init_nodes(zones_size);
@@ -241,6 +295,10 @@ void __init mem_init(void)
 		}
 	}
 
+#ifdef CONFIG_HIGHMEM
+	reservedpages -= highmem_setup();
+#endif
+
 	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
 	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
 	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
@@ -259,6 +317,10 @@ void __init mem_init(void)
 #ifdef CONFIG_MMU
 	pr_info("Kernel virtual memory layout:\n");
 	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
+#ifdef CONFIG_HIGHMEM
+	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
+		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
+#endif /* CONFIG_HIGHMEM */
 	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
 		ioremap_bot, ioremap_base);
 	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
@@ -346,7 +408,9 @@ asmlinkage void __init mmu_init(void)
 
 	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
 		lowmem_size = CONFIG_LOWMEM_SIZE;
+#ifndef CONFIG_HIGHMEM
 		memory_size = lowmem_size;
+#endif
 	}
 
 	mm_cmdline_setup(); /* FIXME parse args from command line - not used */
@@ -375,7 +439,11 @@ asmlinkage void __init mmu_init(void)
 	mapin_ram();
 
 	/* Extend vmalloc and ioremap area as big as possible */
+#ifdef CONFIG_HIGHMEM
+	ioremap_base = ioremap_bot = PKMAP_BASE;
+#else
 	ioremap_base = ioremap_bot = FIXADDR_START;
+#endif
 
 	/* Initialize the context management stuff */
 	mmu_context_init();
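
Note (back-of-the-envelope check, not from the patch beyond the PKMAP_BASE formula): where the "highmem PTEs" window printed by mem_init() lands with 4K pages. FIXADDR_SIZE below is a made-up placeholder for however many fixmap slots the kernel actually reserves, and LAST_PKMAP = 1024 assumes one full pte table of 4K entries.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12				/* assumes 4K pages */
#define PAGE_SIZE	((uint32_t)1 << PAGE_SHIFT)
#define PMD_MASK	(~(((uint32_t)1 << 22) - 1))	/* one pte table maps 4 MB */
#define FIXADDR_TOP	((uint32_t)-PAGE_SIZE)
#define FIXADDR_SIZE	(8 * PAGE_SIZE)			/* hypothetical: 8 fixmap slots of one page each */
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
#define LAST_PKMAP	1024				/* 1 << PTE_SHIFT with 4K pages */
#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) & PMD_MASK)

int main(void)
{
	/* Mirrors the layout lines mem_init() prints under CONFIG_MMU. */
	printf("  * 0x%08x..0x%08x  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
	printf("  * 0x%08x..0x%08x  : highmem PTEs\n",
	       PKMAP_BASE, PKMAP_BASE + (LAST_PKMAP << PAGE_SHIFT));
	return 0;
}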