author     Linus Torvalds <torvalds@linux-foundation.org>    2012-03-27 21:20:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-03-27 21:20:56 -0400
commit     30eebb54b13ef198a3f1a143ee9dd68f295c60de (patch)
tree       64a489015c8f5def32820ac069534c6f7297ab70 /arch/microblaze
parent     9e4db1c3eed55c22328d8022c2c80adb3093833f (diff)
parent     e02db0aa3e1976ae4e23a66077d252a2f3ba74c7 (diff)
Merge branch 'next' of git://git.monstr.eu/linux-2.6-microblaze
Pull arch/microblaze fixes from Michal Simek
* 'next' of git://git.monstr.eu/linux-2.6-microblaze:
microblaze: Handle TLB skip size dynamically
microblaze: Introduce TLB skip size
microblaze: Improve TLB calculation for small systems
microblaze: Extend space for compiled-in FDT to 32kB
microblaze: Clear all MSR flags on the first kernel instruction
microblaze: Use node name instead of compatible string
microblaze: Fix mapin_ram function
microblaze: Highmem support
microblaze: Use active regions
microblaze: Show more detailed information about memory
microblaze: Introduce fixmap
microblaze: mm: Fix lowmem max memory size limits
microblaze: mm: Use ZONE_DMA instead of ZONE_NORMAL
microblaze: trivial: Fix typo fault in timer.c
microblaze: Use vsprintf extention %pf with builtin_return_address
microblaze: Add PVR version string for MB 8.20.b and 8.30.a
microblaze: Fix makefile to work with latest toolchain
microblaze: Fix typo in early_printk.c
Diffstat (limited to 'arch/microblaze')
23 files changed, 710 insertions, 95 deletions
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 11060fa87da3..ac22dc7f4cab 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,6 +1,7 @@ | |||
1 | config MICROBLAZE | 1 | config MICROBLAZE |
2 | def_bool y | 2 | def_bool y |
3 | select HAVE_MEMBLOCK | 3 | select HAVE_MEMBLOCK |
4 | select HAVE_MEMBLOCK_NODE_MAP | ||
4 | select HAVE_FUNCTION_TRACER | 5 | select HAVE_FUNCTION_TRACER |
5 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 6 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
6 | select HAVE_FUNCTION_GRAPH_TRACER | 7 | select HAVE_FUNCTION_GRAPH_TRACER |
@@ -28,6 +29,12 @@ config SWAP | |||
28 | config RWSEM_GENERIC_SPINLOCK | 29 | config RWSEM_GENERIC_SPINLOCK |
29 | def_bool y | 30 | def_bool y |
30 | 31 | ||
32 | config ZONE_DMA | ||
33 | def_bool y | ||
34 | |||
35 | config ARCH_POPULATES_NODE_MAP | ||
36 | def_bool y | ||
37 | |||
31 | config RWSEM_XCHGADD_ALGORITHM | 38 | config RWSEM_XCHGADD_ALGORITHM |
32 | bool | 39 | bool |
33 | 40 | ||
@@ -153,20 +160,18 @@ config XILINX_UNCACHED_SHADOW | |||
153 | The feature requires the design to define the RAM memory controller | 160 | The feature requires the design to define the RAM memory controller |
154 | window to be twice as large as the actual physical memory. | 161 | window to be twice as large as the actual physical memory. |
155 | 162 | ||
156 | config HIGHMEM_START_BOOL | 163 | config HIGHMEM |
157 | bool "Set high memory pool address" | 164 | bool "High memory support" |
158 | depends on ADVANCED_OPTIONS && HIGHMEM | 165 | depends on MMU |
159 | help | 166 | help |
160 | This option allows you to set the base address of the kernel virtual | 167 | The address space of Microblaze processors is only 4 Gigabytes large |
161 | area used to map high memory pages. This can be useful in | 168 | and it has to accommodate user address space, kernel address |
162 | optimizing the layout of kernel virtual memory. | 169 | space as well as some memory mapped IO. That means that, if you |
170 | have a large amount of physical memory and/or IO, not all of the | ||
171 | memory can be "permanently mapped" by the kernel. The physical | ||
172 | memory that is not permanently mapped is called "high memory". | ||
163 | 173 | ||
164 | Say N here unless you know what you are doing. | 174 | If unsure, say n. |
165 | |||
166 | config HIGHMEM_START | ||
167 | hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL | ||
168 | depends on MMU | ||
169 | default "0xfe000000" | ||
170 | 175 | ||
171 | config LOWMEM_SIZE_BOOL | 176 | config LOWMEM_SIZE_BOOL |
172 | bool "Set maximum low memory" | 177 | bool "Set maximum low memory" |
@@ -255,6 +260,10 @@ config MICROBLAZE_32K_PAGES | |||
255 | 260 | ||
256 | endchoice | 261 | endchoice |
257 | 262 | ||
263 | config KERNEL_PAD | ||
264 | hex "Kernel PAD for unpacking" if ADVANCED_OPTIONS | ||
265 | default "0x80000" if MMU | ||
266 | |||
258 | endmenu | 267 | endmenu |
259 | 268 | ||
260 | source "mm/Kconfig" | 269 | source "mm/Kconfig" |
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 0c796cf81586..34940c828def 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -8,7 +8,7 @@ obj-y += linked_dtb.o | |||
8 | 8 | ||
9 | targets := linux.bin linux.bin.gz simpleImage.% | 9 | targets := linux.bin linux.bin.gz simpleImage.% |
10 | 10 | ||
11 | OBJCOPYFLAGS := -O binary | 11 | OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary |
12 | 12 | ||
13 | # Ensure system.dtb exists | 13 | # Ensure system.dtb exists |
14 | $(obj)/linked_dtb.o: $(obj)/system.dtb | 14 | $(obj)/linked_dtb.o: $(obj)/system.dtb |
diff --git a/arch/microblaze/include/asm/fixmap.h b/arch/microblaze/include/asm/fixmap.h
new file mode 100644
index 000000000000..f2b312e10b10
--- /dev/null
+++ b/arch/microblaze/include/asm/fixmap.h
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * fixmap.h: compile-time virtual memory allocation | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1998 Ingo Molnar | ||
9 | * | ||
10 | * Copyright 2008 Freescale Semiconductor Inc. | ||
11 | * Port to powerpc added by Kumar Gala | ||
12 | * | ||
13 | * Copyright 2011 Michal Simek <monstr@monstr.eu> | ||
14 | * Copyright 2011 PetaLogix Qld Pty Ltd | ||
15 | * Port to Microblaze | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_FIXMAP_H | ||
19 | #define _ASM_FIXMAP_H | ||
20 | |||
21 | #ifndef __ASSEMBLY__ | ||
22 | #include <linux/kernel.h> | ||
23 | #include <asm/page.h> | ||
24 | #ifdef CONFIG_HIGHMEM | ||
25 | #include <linux/threads.h> | ||
26 | #include <asm/kmap_types.h> | ||
27 | #endif | ||
28 | |||
29 | #define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE)) | ||
30 | |||
31 | /* | ||
32 | * Here we define all the compile-time 'special' virtual | ||
33 | * addresses. The point is to have a constant address at | ||
34 | * compile time, but to set the physical address only | ||
35 | * in the boot process. We allocate these special addresses | ||
36 | * from the end of virtual memory (0xfffff000) backwards. | ||
37 | * Also this lets us do fail-safe vmalloc(), we | ||
38 | * can guarantee that these special addresses and | ||
39 | * vmalloc()-ed addresses never overlap. | ||
40 | * | ||
41 | * these 'compile-time allocated' memory buffers are | ||
42 | * fixed-size 4k pages. (or larger if used with an increment | ||
43 | * highger than 1) use fixmap_set(idx,phys) to associate | ||
44 | * physical memory with fixmap indices. | ||
45 | * | ||
46 | * TLB entries of such buffers will not be flushed across | ||
47 | * task switches. | ||
48 | */ | ||
49 | enum fixed_addresses { | ||
50 | FIX_HOLE, | ||
51 | #ifdef CONFIG_HIGHMEM | ||
52 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | ||
53 | FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1, | ||
54 | #endif | ||
55 | __end_of_fixed_addresses | ||
56 | }; | ||
57 | |||
58 | extern void __set_fixmap(enum fixed_addresses idx, | ||
59 | phys_addr_t phys, pgprot_t flags); | ||
60 | |||
61 | #define set_fixmap(idx, phys) \ | ||
62 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
63 | /* | ||
64 | * Some hardware wants to get fixmapped without caching. | ||
65 | */ | ||
66 | #define set_fixmap_nocache(idx, phys) \ | ||
67 | __set_fixmap(idx, phys, PAGE_KERNEL_CI) | ||
68 | |||
69 | #define clear_fixmap(idx) \ | ||
70 | __set_fixmap(idx, 0, __pgprot(0)) | ||
71 | |||
72 | #define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | ||
73 | #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) | ||
74 | |||
75 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
76 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
77 | |||
78 | extern void __this_fixmap_does_not_exist(void); | ||
79 | |||
80 | /* | ||
81 | * 'index to address' translation. If anyone tries to use the idx | ||
82 | * directly without tranlation, we catch the bug with a NULL-deference | ||
83 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
84 | */ | ||
85 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
86 | { | ||
87 | /* | ||
88 | * this branch gets completely eliminated after inlining, | ||
89 | * except when someone tries to use fixaddr indices in an | ||
90 | * illegal way. (such as mixing up address types or using | ||
91 | * out-of-range indices). | ||
92 | * | ||
93 | * If it doesn't get removed, the linker will complain | ||
94 | * loudly with a reasonably clear error message.. | ||
95 | */ | ||
96 | if (idx >= __end_of_fixed_addresses) | ||
97 | __this_fixmap_does_not_exist(); | ||
98 | |||
99 | return __fix_to_virt(idx); | ||
100 | } | ||
101 | |||
102 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
103 | { | ||
104 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
105 | return __virt_to_fix(vaddr); | ||
106 | } | ||
107 | |||
108 | #endif /* !__ASSEMBLY__ */ | ||
109 | #endif | ||
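The index-to-address arithmetic in this new header is compact; the following standalone sketch (not part of the patch; 4 KB pages and a 32-bit address space assumed, constants redefined locally) shows the values it produces:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the header's constants; uint32_t models the 32-bit
 * MicroBlaze address space and PAGE_SHIFT assumes 4 KB pages. */
#define PAGE_SHIFT  12
#define PAGE_SIZE   ((uint32_t)1 << PAGE_SHIFT)
#define FIXADDR_TOP ((uint32_t)-PAGE_SIZE)              /* 0xfffff000 */

/* Mirrors __fix_to_virt(): fixmap slots grow downward from FIXADDR_TOP. */
static uint32_t fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - ((uint32_t)idx << PAGE_SHIFT);
}

int main(void)
{
	unsigned int idx;

	for (idx = 0; idx < 4; idx++)
		printf("fixmap slot %u -> 0x%08lx\n", idx,
		       (unsigned long)fix_to_virt(idx));
	return 0;
}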
diff --git a/arch/microblaze/include/asm/highmem.h b/arch/microblaze/include/asm/highmem.h
new file mode 100644
index 000000000000..2446a73140ac
--- /dev/null
+++ b/arch/microblaze/include/asm/highmem.h
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * highmem.h: virtual kernel memory mappings for high memory | ||
3 | * | ||
4 | * Used in CONFIG_HIGHMEM systems for memory pages which | ||
5 | * are not addressable by direct kernel virtual addresses. | ||
6 | * | ||
7 | * Copyright (C) 1999 Gerhard Wichert, Siemens AG | ||
8 | * Gerhard.Wichert@pdb.siemens.de | ||
9 | * | ||
10 | * | ||
11 | * Redesigned the x86 32-bit VM architecture to deal with | ||
12 | * up to 16 Terabyte physical memory. With current x86 CPUs | ||
13 | * we now support up to 64 Gigabytes physical RAM. | ||
14 | * | ||
15 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | ||
16 | */ | ||
17 | #ifndef _ASM_HIGHMEM_H | ||
18 | #define _ASM_HIGHMEM_H | ||
19 | |||
20 | #ifdef __KERNEL__ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/uaccess.h> | ||
25 | #include <asm/fixmap.h> | ||
26 | |||
27 | extern pte_t *kmap_pte; | ||
28 | extern pgprot_t kmap_prot; | ||
29 | extern pte_t *pkmap_page_table; | ||
30 | |||
31 | /* | ||
32 | * Right now we initialize only a single pte table. It can be extended | ||
33 | * easily, subsequent pte tables have to be allocated in one physical | ||
34 | * chunk of RAM. | ||
35 | */ | ||
36 | /* | ||
37 | * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte | ||
38 | * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP | ||
39 | * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP | ||
40 | * in case of 16K/64K/256K page sizes. | ||
41 | */ | ||
42 | |||
43 | #define PKMAP_ORDER PTE_SHIFT | ||
44 | #define LAST_PKMAP (1 << PKMAP_ORDER) | ||
45 | |||
46 | #define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ | ||
47 | & PMD_MASK) | ||
48 | |||
49 | #define LAST_PKMAP_MASK (LAST_PKMAP - 1) | ||
50 | #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT) | ||
51 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) | ||
52 | |||
53 | extern void *kmap_high(struct page *page); | ||
54 | extern void kunmap_high(struct page *page); | ||
55 | extern void *kmap_atomic_prot(struct page *page, pgprot_t prot); | ||
56 | extern void __kunmap_atomic(void *kvaddr); | ||
57 | |||
58 | static inline void *kmap(struct page *page) | ||
59 | { | ||
60 | might_sleep(); | ||
61 | if (!PageHighMem(page)) | ||
62 | return page_address(page); | ||
63 | return kmap_high(page); | ||
64 | } | ||
65 | |||
66 | static inline void kunmap(struct page *page) | ||
67 | { | ||
68 | BUG_ON(in_interrupt()); | ||
69 | if (!PageHighMem(page)) | ||
70 | return; | ||
71 | kunmap_high(page); | ||
72 | } | ||
73 | |||
74 | static inline void *__kmap_atomic(struct page *page) | ||
75 | { | ||
76 | return kmap_atomic_prot(page, kmap_prot); | ||
77 | } | ||
78 | |||
79 | static inline struct page *kmap_atomic_to_page(void *ptr) | ||
80 | { | ||
81 | unsigned long idx, vaddr = (unsigned long) ptr; | ||
82 | pte_t *pte; | ||
83 | |||
84 | if (vaddr < FIXADDR_START) | ||
85 | return virt_to_page(ptr); | ||
86 | |||
87 | idx = virt_to_fix(vaddr); | ||
88 | pte = kmap_pte - (idx - FIX_KMAP_BEGIN); | ||
89 | return pte_page(*pte); | ||
90 | } | ||
91 | |||
92 | #define flush_cache_kmaps() { flush_icache(); flush_dcache(); } | ||
93 | |||
94 | #endif /* __KERNEL__ */ | ||
95 | |||
96 | #endif /* _ASM_HIGHMEM_H */ | ||
diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h
index 8d6a654ceffb..1f9edddf7f4b 100644
--- a/arch/microblaze/include/asm/mmu.h
+++ b/arch/microblaze/include/asm/mmu.h
@@ -56,6 +56,12 @@ typedef struct _SEGREG { | |||
56 | 56 | ||
57 | extern void _tlbie(unsigned long va); /* invalidate a TLB entry */ | 57 | extern void _tlbie(unsigned long va); /* invalidate a TLB entry */ |
58 | extern void _tlbia(void); /* invalidate all TLB entries */ | 58 | extern void _tlbia(void); /* invalidate all TLB entries */ |
59 | |||
60 | /* | ||
61 | * tlb_skip size stores actual number skipped TLBs from TLB0 - every directy TLB | ||
62 | * mapping has to increase tlb_skip size. | ||
63 | */ | ||
64 | extern u32 tlb_skip; | ||
59 | # endif /* __ASSEMBLY__ */ | 65 | # endif /* __ASSEMBLY__ */ |
60 | 66 | ||
61 | /* | 67 | /* |
@@ -69,6 +75,12 @@ extern void _tlbia(void); /* invalidate all TLB entries */ | |||
69 | 75 | ||
70 | # define MICROBLAZE_TLB_SIZE 64 | 76 | # define MICROBLAZE_TLB_SIZE 64 |
71 | 77 | ||
78 | /* For cases when you want to skip some TLB entries */ | ||
79 | # define MICROBLAZE_TLB_SKIP 0 | ||
80 | |||
81 | /* Use the last TLB for temporary access to LMB */ | ||
82 | # define MICROBLAZE_LMB_TLB_ID 63 | ||
83 | |||
72 | /* | 84 | /* |
73 | * TLB entries are defined by a "high" tag portion and a "low" data | 85 | * TLB entries are defined by a "high" tag portion and a "low" data |
74 | * portion. The data portion is 32-bits. | 86 | * portion. The data portion is 32-bits. |
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index a25e6b5e2ad4..352cc2352bd5 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -135,8 +135,8 @@ extern unsigned long min_low_pfn; | |||
135 | extern unsigned long max_pfn; | 135 | extern unsigned long max_pfn; |
136 | 136 | ||
137 | extern unsigned long memory_start; | 137 | extern unsigned long memory_start; |
138 | extern unsigned long memory_end; | ||
139 | extern unsigned long memory_size; | 138 | extern unsigned long memory_size; |
139 | extern unsigned long lowmem_size; | ||
140 | 140 | ||
141 | extern int page_is_ram(unsigned long pfn); | 141 | extern int page_is_ram(unsigned long pfn); |
142 | 142 | ||
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 44dc67aa0277..3ef7b9cafeca 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -94,8 +94,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | |||
94 | /* Start and end of the vmalloc area. */ | 94 | /* Start and end of the vmalloc area. */ |
95 | /* Make sure to map the vmalloc area above the pinned kernel memory area | 95 | /* Make sure to map the vmalloc area above the pinned kernel memory area |
96 | of 32Mb. */ | 96 | of 32Mb. */ |
97 | #define VMALLOC_START (CONFIG_KERNEL_START + \ | 97 | #define VMALLOC_START (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE) |
98 | max(32 * 1024 * 1024UL, memory_size)) | ||
99 | #define VMALLOC_END ioremap_bot | 98 | #define VMALLOC_END ioremap_bot |
100 | 99 | ||
101 | #endif /* __ASSEMBLY__ */ | 100 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 6c72ed7eba98..9f195c094731 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -39,7 +39,8 @@ extern void of_platform_reset_gpio_probe(void); | |||
39 | void time_init(void); | 39 | void time_init(void); |
40 | void init_IRQ(void); | 40 | void init_IRQ(void); |
41 | void machine_early_init(const char *cmdline, unsigned int ram, | 41 | void machine_early_init(const char *cmdline, unsigned int ram, |
42 | unsigned int fdt, unsigned int msr); | 42 | unsigned int fdt, unsigned int msr, unsigned int tlb0, |
43 | unsigned int tlb1); | ||
43 | 44 | ||
44 | void machine_restart(char *cmd); | 45 | void machine_restart(char *cmd); |
45 | void machine_shutdown(void); | 46 | void machine_shutdown(void); |
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 5a433cbaafb3..01228d2b1351 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -83,6 +83,7 @@ void default_idle(void); | |||
83 | void free_init_pages(char *what, unsigned long begin, unsigned long end); | 83 | void free_init_pages(char *what, unsigned long begin, unsigned long end); |
84 | void free_initmem(void); | 84 | void free_initmem(void); |
85 | extern char *klimit; | 85 | extern char *klimit; |
86 | extern unsigned long kernel_tlb; | ||
86 | extern void ret_from_fork(void); | 87 | extern void ret_from_fork(void); |
87 | 88 | ||
88 | extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); | 89 | extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); |
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 072b0077abf9..ef25f7538d4a 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -80,7 +80,7 @@ extern unsigned long search_exception_table(unsigned long); | |||
80 | static inline int ___range_ok(unsigned long addr, unsigned long size) | 80 | static inline int ___range_ok(unsigned long addr, unsigned long size) |
81 | { | 81 | { |
82 | return ((addr < memory_start) || | 82 | return ((addr < memory_start) || |
83 | ((addr + size) > memory_end)); | 83 | ((addr + size - 1) > (memory_start + memory_size - 1))); |
84 | } | 84 | } |
85 | 85 | ||
86 | #define __range_ok(addr, size) \ | 86 | #define __range_ok(addr, size) \ |
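The ___range_ok change above trades the old memory_end comparison for an end-inclusive one. A standalone sketch of one property of the new form, whether or not it was the motivation here: it stays correct when RAM reaches the top of the 32-bit address space. Layout values are hypothetical, uint32_t stands in for the 32-bit address space, and the helpers are renamed range_bad_* because a non-zero result means the range is rejected:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical layout values for the demo (not taken from the patch). */
static uint32_t memory_start;
static uint32_t memory_size;

/* Old form: compares one-past-the-end against a separately tracked
 * memory_end, which wraps to 0 when RAM ends at 0xffffffff. */
static int range_bad_old(uint32_t addr, uint32_t size)
{
	uint32_t memory_end = memory_start + memory_size;
	return (addr < memory_start) || ((addr + size) > memory_end);
}

/* New form from the patch: compares last byte against last valid byte. */
static int range_bad_new(uint32_t addr, uint32_t size)
{
	return (addr < memory_start) ||
	       ((addr + size - 1) > (memory_start + memory_size - 1));
}

int main(void)
{
	/* RAM that stops below 4 GB: both forms agree, even for an access
	 * ending at the very last byte of RAM. */
	memory_start = 0x80000000u; memory_size = 0x10000000u;
	printf("last-byte access: old=%d new=%d\n",
	       range_bad_old(0x8ffffff0u, 0x10), range_bad_new(0x8ffffff0u, 0x10));

	/* RAM that ends exactly at 0xffffffff: memory_end wraps to 0 and the
	 * old form rejects a perfectly valid access; the new form does not. */
	memory_start = 0xf0000000u; memory_size = 0x10000000u;
	printf("top-of-address-space RAM: old=%d new=%d\n",
	       range_bad_old(0xf0001000u, 0x100), range_bad_new(0xf0001000u, 0x100));
	return 0;
}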
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 54194b28574a..eab6abf5652e 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -35,6 +35,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = { | |||
35 | {"8.00.b", 0x13}, | 35 | {"8.00.b", 0x13}, |
36 | {"8.10.a", 0x14}, | 36 | {"8.10.a", 0x14}, |
37 | {"8.20.a", 0x15}, | 37 | {"8.20.a", 0x15}, |
38 | {"8.20.b", 0x16}, | ||
39 | {"8.30.a", 0x17}, | ||
38 | {NULL, 0}, | 40 | {NULL, 0}, |
39 | }; | 41 | }; |
40 | 42 | ||
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index 8356e47631c4..ec485876d0d0 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -171,10 +171,24 @@ void __init remap_early_printk(void) | |||
171 | { | 171 | { |
172 | if (!early_console_initialized || !early_console) | 172 | if (!early_console_initialized || !early_console) |
173 | return; | 173 | return; |
174 | printk(KERN_INFO "early_printk_console remaping from 0x%x to ", | 174 | printk(KERN_INFO "early_printk_console remapping from 0x%x to ", |
175 | base_addr); | 175 | base_addr); |
176 | base_addr = (u32) ioremap(base_addr, PAGE_SIZE); | 176 | base_addr = (u32) ioremap(base_addr, PAGE_SIZE); |
177 | printk(KERN_CONT "0x%x\n", base_addr); | 177 | printk(KERN_CONT "0x%x\n", base_addr); |
178 | |||
179 | /* | ||
180 | * Early console is on the top of skipped TLB entries | ||
181 | * decrease tlb_skip size ensure that hardcoded TLB entry will be | ||
182 | * used by generic algorithm | ||
183 | * FIXME check if early console mapping is on the top by rereading | ||
184 | * TLB entry and compare baseaddr | ||
185 | * mts rtlbx, (tlb_skip - 1) | ||
186 | * nop | ||
187 | * mfs rX, rtlblo | ||
188 | * nop | ||
189 | * cmp rX, orig_base_addr | ||
190 | */ | ||
191 | tlb_skip -= 1; | ||
178 | } | 192 | } |
179 | 193 | ||
180 | void __init disable_early_printk(void) | 194 | void __init disable_early_printk(void) |
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 77320b8fc16a..98b17f9f904b 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -63,9 +63,7 @@ ENTRY(_start) | |||
63 | real_start: | 63 | real_start: |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | mfs r1, rmsr | 66 | mts rmsr, r0 |
67 | andi r1, r1, ~2 | ||
68 | mts rmsr, r1 | ||
69 | /* | 67 | /* |
70 | * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc' | 68 | * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc' |
71 | * if the msrclr instruction is not enabled. We use this to detect | 69 | * if the msrclr instruction is not enabled. We use this to detect |
@@ -73,6 +71,7 @@ real_start: | |||
73 | * r8 == 0 - msr instructions are implemented | 71 | * r8 == 0 - msr instructions are implemented |
74 | * r8 != 0 - msr instructions are not implemented | 72 | * r8 != 0 - msr instructions are not implemented |
75 | */ | 73 | */ |
74 | mfs r1, rmsr | ||
76 | msrclr r8, 0 /* clear nothing - just read msr for test */ | 75 | msrclr r8, 0 /* clear nothing - just read msr for test */ |
77 | cmpu r8, r8, r1 /* r1 must contain msr reg content */ | 76 | cmpu r8, r8, r1 /* r1 must contain msr reg content */ |
78 | 77 | ||
@@ -96,7 +95,7 @@ big_endian: | |||
96 | _prepare_copy_fdt: | 95 | _prepare_copy_fdt: |
97 | or r11, r0, r0 /* incremment */ | 96 | or r11, r0, r0 /* incremment */ |
98 | ori r4, r0, TOPHYS(_fdt_start) | 97 | ori r4, r0, TOPHYS(_fdt_start) |
99 | ori r3, r0, (0x4000 - 4) | 98 | ori r3, r0, (0x8000 - 4) |
100 | _copy_fdt: | 99 | _copy_fdt: |
101 | lw r12, r7, r11 /* r12 = r7 + r11 */ | 100 | lw r12, r7, r11 /* r12 = r7 + r11 */ |
102 | sw r12, r4, r11 /* addr[r4 + r11] = r12 */ | 101 | sw r12, r4, r11 /* addr[r4 + r11] = r12 */ |
@@ -150,6 +149,7 @@ _copy_bram: | |||
150 | _invalidate: | 149 | _invalidate: |
151 | mts rtlbx, r3 | 150 | mts rtlbx, r3 |
152 | mts rtlbhi, r0 /* flush: ensure V is clear */ | 151 | mts rtlbhi, r0 /* flush: ensure V is clear */ |
152 | mts rtlblo, r0 | ||
153 | bgtid r3, _invalidate /* loop for all entries */ | 153 | bgtid r3, _invalidate /* loop for all entries */ |
154 | addik r3, r3, -1 | 154 | addik r3, r3, -1 |
155 | /* sync */ | 155 | /* sync */ |
@@ -169,6 +169,53 @@ _invalidate: | |||
169 | addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ | 169 | addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ |
170 | tophys(r4,r3) /* Load the kernel physical address */ | 170 | tophys(r4,r3) /* Load the kernel physical address */ |
171 | 171 | ||
172 | /* start to do TLB calculation */ | ||
173 | addik r12, r0, _end | ||
174 | rsub r12, r3, r12 | ||
175 | addik r12, r12, CONFIG_KERNEL_PAD /* that's the pad */ | ||
176 | |||
177 | or r9, r0, r0 /* TLB0 = 0 */ | ||
178 | or r10, r0, r0 /* TLB1 = 0 */ | ||
179 | |||
180 | addik r11, r12, -0x1000000 | ||
181 | bgei r11, GT16 /* size is greater than 16MB */ | ||
182 | addik r11, r12, -0x0800000 | ||
183 | bgei r11, GT8 /* size is greater than 8MB */ | ||
184 | addik r11, r12, -0x0400000 | ||
185 | bgei r11, GT4 /* size is greater than 4MB */ | ||
186 | /* size is less than 4MB */ | ||
187 | addik r11, r12, -0x0200000 | ||
188 | bgei r11, GT2 /* size is greater than 2MB */ | ||
189 | addik r9, r0, 0x0100000 /* TLB0 must be 1MB */ | ||
190 | addik r11, r12, -0x0100000 | ||
191 | bgei r11, GT1 /* size is greater than 1MB */ | ||
192 | /* TLB1 is 0 which is setup above */ | ||
193 | bri tlb_end | ||
194 | GT4: /* r11 contains the rest - will be either 1 or 4 */ | ||
195 | ori r9, r0, 0x400000 /* TLB0 is 4MB */ | ||
196 | bri TLB1 | ||
197 | GT16: /* TLB0 is 16MB */ | ||
198 | addik r9, r0, 0x1000000 /* means TLB0 is 16MB */ | ||
199 | TLB1: | ||
200 | /* must be used r2 because of substract if failed */ | ||
201 | addik r2, r11, -0x0400000 | ||
202 | bgei r2, GT20 /* size is greater than 16MB */ | ||
203 | /* size is >16MB and <20MB */ | ||
204 | addik r11, r11, -0x0100000 | ||
205 | bgei r11, GT17 /* size is greater than 17MB */ | ||
206 | /* kernel is >16MB and < 17MB */ | ||
207 | GT1: | ||
208 | addik r10, r0, 0x0100000 /* means TLB1 is 1MB */ | ||
209 | bri tlb_end | ||
210 | GT2: /* TLB0 is 0 and TLB1 will be 4MB */ | ||
211 | GT17: /* TLB1 is 4MB - kernel size <20MB */ | ||
212 | addik r10, r0, 0x0400000 /* means TLB1 is 4MB */ | ||
213 | bri tlb_end | ||
214 | GT8: /* TLB0 is still zero that's why I can use only TLB1 */ | ||
215 | GT20: /* TLB1 is 16MB - kernel size >20MB */ | ||
216 | addik r10, r0, 0x1000000 /* means TLB1 is 16MB */ | ||
217 | tlb_end: | ||
218 | |||
172 | /* | 219 | /* |
173 | * Configure and load two entries into TLB slots 0 and 1. | 220 | * Configure and load two entries into TLB slots 0 and 1. |
174 | * In case we are pinning TLBs, these are reserved in by the | 221 | * In case we are pinning TLBs, these are reserved in by the |
@@ -178,28 +225,81 @@ _invalidate: | |||
178 | andi r4,r4,0xfffffc00 /* Mask off the real page number */ | 225 | andi r4,r4,0xfffffc00 /* Mask off the real page number */ |
179 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ | 226 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ |
180 | 227 | ||
228 | /* | ||
229 | * TLB0 is always used - check if is not zero (r9 stores TLB0 value) | ||
230 | * if is use TLB1 value and clear it (r10 stores TLB1 value) | ||
231 | */ | ||
232 | bnei r9, tlb0_not_zero | ||
233 | add r9, r10, r0 | ||
234 | add r10, r0, r0 | ||
235 | tlb0_not_zero: | ||
236 | |||
237 | /* look at the code below */ | ||
238 | ori r30, r0, 0x200 | ||
239 | andi r29, r9, 0x100000 | ||
240 | bneid r29, 1f | ||
241 | addik r30, r30, 0x80 | ||
242 | andi r29, r9, 0x400000 | ||
243 | bneid r29, 1f | ||
244 | addik r30, r30, 0x80 | ||
245 | andi r29, r9, 0x1000000 | ||
246 | bneid r29, 1f | ||
247 | addik r30, r30, 0x80 | ||
248 | 1: | ||
181 | andi r3,r3,0xfffffc00 /* Mask off the effective page number */ | 249 | andi r3,r3,0xfffffc00 /* Mask off the effective page number */ |
182 | ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) | 250 | ori r3,r3,(TLB_VALID) |
251 | or r3, r3, r30 | ||
183 | 252 | ||
184 | mts rtlbx,r0 /* TLB slow 0 */ | 253 | /* Load tlb_skip size value which is index to first unused TLB entry */ |
254 | lwi r11, r0, TOPHYS(tlb_skip) | ||
255 | mts rtlbx,r11 /* TLB slow 0 */ | ||
185 | 256 | ||
186 | mts rtlblo,r4 /* Load the data portion of the entry */ | 257 | mts rtlblo,r4 /* Load the data portion of the entry */ |
187 | mts rtlbhi,r3 /* Load the tag portion of the entry */ | 258 | mts rtlbhi,r3 /* Load the tag portion of the entry */ |
188 | 259 | ||
189 | addik r4, r4, 0x01000000 /* Map next 16 M entries */ | 260 | /* Increase tlb_skip size */ |
190 | addik r3, r3, 0x01000000 | 261 | addik r11, r11, 1 |
262 | swi r11, r0, TOPHYS(tlb_skip) | ||
263 | |||
264 | /* TLB1 can be zeroes that's why we not setup it */ | ||
265 | beqi r10, jump_over2 | ||
266 | |||
267 | /* look at the code below */ | ||
268 | ori r30, r0, 0x200 | ||
269 | andi r29, r10, 0x100000 | ||
270 | bneid r29, 1f | ||
271 | addik r30, r30, 0x80 | ||
272 | andi r29, r10, 0x400000 | ||
273 | bneid r29, 1f | ||
274 | addik r30, r30, 0x80 | ||
275 | andi r29, r10, 0x1000000 | ||
276 | bneid r29, 1f | ||
277 | addik r30, r30, 0x80 | ||
278 | 1: | ||
279 | addk r4, r4, r9 /* previous addr + TLB0 size */ | ||
280 | addk r3, r3, r9 | ||
191 | 281 | ||
192 | ori r6,r0,1 /* TLB slot 1 */ | 282 | andi r3,r3,0xfffffc00 /* Mask off the effective page number */ |
193 | mts rtlbx,r6 | 283 | ori r3,r3,(TLB_VALID) |
284 | or r3, r3, r30 | ||
285 | |||
286 | lwi r11, r0, TOPHYS(tlb_skip) | ||
287 | mts rtlbx, r11 /* r11 is used from TLB0 */ | ||
194 | 288 | ||
195 | mts rtlblo,r4 /* Load the data portion of the entry */ | 289 | mts rtlblo,r4 /* Load the data portion of the entry */ |
196 | mts rtlbhi,r3 /* Load the tag portion of the entry */ | 290 | mts rtlbhi,r3 /* Load the tag portion of the entry */ |
197 | 291 | ||
292 | /* Increase tlb_skip size */ | ||
293 | addik r11, r11, 1 | ||
294 | swi r11, r0, TOPHYS(tlb_skip) | ||
295 | |||
296 | jump_over2: | ||
198 | /* | 297 | /* |
199 | * Load a TLB entry for LMB, since we need access to | 298 | * Load a TLB entry for LMB, since we need access to |
200 | * the exception vectors, using a 4k real==virtual mapping. | 299 | * the exception vectors, using a 4k real==virtual mapping. |
201 | */ | 300 | */ |
202 | ori r6,r0,3 /* TLB slot 3 */ | 301 | /* Use temporary TLB_ID for LMB - clear this temporary mapping later */ |
302 | ori r6, r0, MICROBLAZE_LMB_TLB_ID | ||
203 | mts rtlbx,r6 | 303 | mts rtlbx,r6 |
204 | 304 | ||
205 | ori r4,r0,(TLB_WR | TLB_EX) | 305 | ori r4,r0,(TLB_WR | TLB_EX) |
@@ -238,8 +338,8 @@ start_here: | |||
238 | * Please see $(ARCH)/mach-$(SUBARCH)/setup.c for | 338 | * Please see $(ARCH)/mach-$(SUBARCH)/setup.c for |
239 | * the function. | 339 | * the function. |
240 | */ | 340 | */ |
241 | addik r9, r0, machine_early_init | 341 | addik r11, r0, machine_early_init |
242 | brald r15, r9 | 342 | brald r15, r11 |
243 | nop | 343 | nop |
244 | 344 | ||
245 | #ifndef CONFIG_MMU | 345 | #ifndef CONFIG_MMU |
@@ -268,8 +368,7 @@ start_here: | |||
268 | 368 | ||
269 | /* Load up the kernel context */ | 369 | /* Load up the kernel context */ |
270 | kernel_load_context: | 370 | kernel_load_context: |
271 | # Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away. | 371 | ori r5, r0, MICROBLAZE_LMB_TLB_ID |
272 | ori r5,r0,3 | ||
273 | mts rtlbx,r5 | 372 | mts rtlbx,r5 |
274 | nop | 373 | nop |
275 | mts rtlbhi,r0 | 374 | mts rtlbhi,r0 |
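The TLB sizing block added to head.S is easier to follow in C. Below is a sketch of the selection logic as I read the assembly (not part of the patch); it picks two pinned-entry page sizes out of 1 MB, 4 MB and 16 MB to cover the kernel image plus CONFIG_KERNEL_PAD, and folds in the swap of a zero TLB0 with TLB1 that head.S performs just before programming the entries:

#include <stdio.h>

#define MB(x) ((x) * 0x100000u)

/* C rendering of the TLB0/TLB1 size selection added to head.S. */
static void pick_kernel_tlbs(unsigned int size, unsigned int *tlb0,
			     unsigned int *tlb1)
{
	*tlb0 = *tlb1 = 0;

	if (size >= MB(16)) {
		*tlb0 = MB(16);
		size -= MB(16);		/* remainder decides TLB1 below */
	} else if (size >= MB(8)) {
		*tlb1 = MB(16);		/* GT8/GT20: one 16MB entry is enough */
		goto swap;
	} else if (size >= MB(4)) {
		*tlb0 = MB(4);
		size -= MB(4);
	} else if (size >= MB(2)) {
		*tlb1 = MB(4);		/* GT2/GT17 */
		goto swap;
	} else {
		*tlb0 = MB(1);
		if (size >= MB(1))
			*tlb1 = MB(1);	/* GT1 */
		goto swap;
	}

	/* second entry for the 4MB/16MB first-entry cases */
	if (size >= MB(4))
		*tlb1 = MB(16);
	else if (size >= MB(1))
		*tlb1 = MB(4);
	else
		*tlb1 = MB(1);
swap:
	/* head.S always programs TLB0 first, so make sure it is non-zero */
	if (!*tlb0) {
		*tlb0 = *tlb1;
		*tlb1 = 0;
	}
}

int main(void)
{
	unsigned int sizes[] = { MB(1), MB(3), MB(6), MB(12), MB(18), MB(30) };
	unsigned int i, t0, t1;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		pick_kernel_tlbs(sizes[i], &t0, &t1);
		printf("kernel+pad %2uMB -> TLB0 %2uMB, TLB1 %2uMB\n",
		       sizes[i] / MB(1), t0 / MB(1), t1 / MB(1));
	}
	return 0;
}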
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index e62be8379604..aa510f450ac6 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -820,19 +820,26 @@ ex_handler_done: | |||
820 | * Upon exit, we reload everything and RFI. | 820 | * Upon exit, we reload everything and RFI. |
821 | * A common place to load the TLB. | 821 | * A common place to load the TLB. |
822 | */ | 822 | */ |
823 | .section .data | ||
824 | .align 4 | ||
825 | .global tlb_skip | ||
826 | tlb_skip: | ||
827 | .long MICROBLAZE_TLB_SKIP | ||
823 | tlb_index: | 828 | tlb_index: |
824 | .long 1 /* MS: storing last used tlb index */ | 829 | /* MS: storing last used tlb index */ |
830 | .long MICROBLAZE_TLB_SIZE/2 | ||
831 | .previous | ||
825 | finish_tlb_load: | 832 | finish_tlb_load: |
826 | /* MS: load the last used TLB index. */ | 833 | /* MS: load the last used TLB index. */ |
827 | lwi r5, r0, TOPHYS(tlb_index) | 834 | lwi r5, r0, TOPHYS(tlb_index) |
828 | addik r5, r5, 1 /* MS: inc tlb_index -> use next one */ | 835 | addik r5, r5, 1 /* MS: inc tlb_index -> use next one */ |
829 | 836 | ||
830 | /* MS: FIXME this is potential fault, because this is mask not count */ | 837 | /* MS: FIXME this is potential fault, because this is mask not count */ |
831 | andi r5, r5, (MICROBLAZE_TLB_SIZE-1) | 838 | andi r5, r5, MICROBLAZE_TLB_SIZE - 1 |
832 | ori r6, r0, 1 | 839 | ori r6, r0, 1 |
833 | cmp r31, r5, r6 | 840 | cmp r31, r5, r6 |
834 | blti r31, ex12 | 841 | blti r31, ex12 |
835 | addik r5, r6, 1 | 842 | lwi r5, r0, TOPHYS(tlb_skip) |
836 | ex12: | 843 | ex12: |
837 | /* MS: save back current TLB index */ | 844 | /* MS: save back current TLB index */ |
838 | swi r5, r0, TOPHYS(tlb_index) | 845 | swi r5, r0, TOPHYS(tlb_index) |
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index ad120672cee5..6c54d4dcdec3 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -151,8 +151,8 @@ void __init init_IRQ(void) | |||
151 | #ifdef CONFIG_SELFMOD_INTC | 151 | #ifdef CONFIG_SELFMOD_INTC |
152 | selfmod_function((int *) arr_func, intc_baseaddr); | 152 | selfmod_function((int *) arr_func, intc_baseaddr); |
153 | #endif | 153 | #endif |
154 | printk(KERN_INFO "XPS intc #0 at 0x%08x, num_irq=%d, edge=0x%x\n", | 154 | printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n", |
155 | intc_baseaddr, nr_irq, intr_mask); | 155 | intc->name, intc_baseaddr, nr_irq, intr_mask); |
156 | 156 | ||
157 | /* | 157 | /* |
158 | * Disable all external interrupts until they are | 158 | * Disable all external interrupts until they are |
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
index 206da3da361f..1dafddeb8a0b 100644
--- a/arch/microblaze/kernel/misc.S
+++ b/arch/microblaze/kernel/misc.S
@@ -29,16 +29,16 @@ | |||
29 | .type _tlbia, @function | 29 | .type _tlbia, @function |
30 | .align 4; | 30 | .align 4; |
31 | _tlbia: | 31 | _tlbia: |
32 | addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */ | 32 | lwi r12, r0, tlb_skip; |
33 | /* isync */ | 33 | /* isync */ |
34 | _tlbia_1: | 34 | _tlbia_1: |
35 | mts rtlbx, r12 | 35 | mts rtlbx, r12 |
36 | nop | 36 | nop |
37 | mts rtlbhi, r0 /* flush: ensure V is clear */ | 37 | mts rtlbhi, r0 /* flush: ensure V is clear */ |
38 | nop | 38 | nop |
39 | addik r11, r12, -2 | 39 | rsubi r11, r12, MICROBLAZE_TLB_SIZE - 1 |
40 | bneid r11, _tlbia_1 /* loop for all entries */ | 40 | bneid r11, _tlbia_1 /* loop for all entries */ |
41 | addik r12, r12, -1 | 41 | addik r12, r12, 1 |
42 | /* sync */ | 42 | /* sync */ |
43 | rtsd r15, 8 | 43 | rtsd r15, 8 |
44 | nop | 44 | nop |
@@ -75,7 +75,7 @@ early_console_reg_tlb_alloc: | |||
75 | * Load a TLB entry for the UART, so that microblaze_progress() can use | 75 | * Load a TLB entry for the UART, so that microblaze_progress() can use |
76 | * the UARTs nice and early. We use a 4k real==virtual mapping. | 76 | * the UARTs nice and early. We use a 4k real==virtual mapping. |
77 | */ | 77 | */ |
78 | ori r4, r0, MICROBLAZE_TLB_SIZE - 1 | 78 | lwi r4, r0, tlb_skip |
79 | mts rtlbx, r4 /* TLB slot 63 */ | 79 | mts rtlbx, r4 /* TLB slot 63 */ |
80 | 80 | ||
81 | or r4,r5,r0 | 81 | or r4,r5,r0 |
@@ -89,6 +89,11 @@ early_console_reg_tlb_alloc: | |||
89 | nop | 89 | nop |
90 | mts rtlbhi,r5 /* Load the tag portion of the entry */ | 90 | mts rtlbhi,r5 /* Load the tag portion of the entry */ |
91 | nop | 91 | nop |
92 | |||
93 | lwi r5, r0, tlb_skip | ||
94 | addik r5, r5, 1 | ||
95 | swi r5, r0, tlb_skip | ||
96 | |||
92 | rtsd r15, 8 | 97 | rtsd r15, 8 |
93 | nop | 98 | nop |
94 | 99 | ||
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 70e6d0b41ab4..9f79fb3bbfa0 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -95,8 +95,11 @@ inline unsigned get_romfs_len(unsigned *addr) | |||
95 | } | 95 | } |
96 | #endif /* CONFIG_MTD_UCLINUX_EBSS */ | 96 | #endif /* CONFIG_MTD_UCLINUX_EBSS */ |
97 | 97 | ||
98 | unsigned long kernel_tlb; | ||
99 | |||
98 | void __init machine_early_init(const char *cmdline, unsigned int ram, | 100 | void __init machine_early_init(const char *cmdline, unsigned int ram, |
99 | unsigned int fdt, unsigned int msr) | 101 | unsigned int fdt, unsigned int msr, unsigned int tlb0, |
102 | unsigned int tlb1) | ||
100 | { | 103 | { |
101 | unsigned long *src, *dst; | 104 | unsigned long *src, *dst; |
102 | unsigned int offset = 0; | 105 | unsigned int offset = 0; |
@@ -143,6 +146,12 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, | |||
143 | setup_early_printk(NULL); | 146 | setup_early_printk(NULL); |
144 | #endif | 147 | #endif |
145 | 148 | ||
149 | /* setup kernel_tlb after BSS cleaning | ||
150 | * Maybe worth to move to asm code */ | ||
151 | kernel_tlb = tlb0 + tlb1; | ||
152 | /* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0, | ||
153 | tlb1, kernel_tlb); */ | ||
154 | |||
146 | printk("Ramdisk addr 0x%08x, ", ram); | 155 | printk("Ramdisk addr 0x%08x, ", ram); |
147 | if (fdt) | 156 | if (fdt) |
148 | printk("FDT at 0x%08x\n", fdt); | 157 | printk("FDT at 0x%08x\n", fdt); |
@@ -197,6 +206,19 @@ static int microblaze_debugfs_init(void) | |||
197 | return of_debugfs_root == NULL; | 206 | return of_debugfs_root == NULL; |
198 | } | 207 | } |
199 | arch_initcall(microblaze_debugfs_init); | 208 | arch_initcall(microblaze_debugfs_init); |
209 | |||
210 | static int __init debugfs_tlb(void) | ||
211 | { | ||
212 | struct dentry *d; | ||
213 | |||
214 | if (!of_debugfs_root) | ||
215 | return -ENODEV; | ||
216 | |||
217 | d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip); | ||
218 | if (!d) | ||
219 | return -ENOMEM; | ||
220 | } | ||
221 | device_initcall(debugfs_tlb); | ||
200 | #endif | 222 | #endif |
201 | 223 | ||
202 | static int dflt_bus_notify(struct notifier_block *nb, | 224 | static int dflt_bus_notify(struct notifier_block *nb, |
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 3cb0bf640135..cadfd5608afb 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -79,7 +79,7 @@ static inline void microblaze_timer0_start_periodic(unsigned long load_val) | |||
79 | * !PWMA - disable pwm | 79 | * !PWMA - disable pwm |
80 | * TINT - clear interrupt status | 80 | * TINT - clear interrupt status |
81 | * ENT- enable timer itself | 81 | * ENT- enable timer itself |
82 | * EINT - enable interrupt | 82 | * ENIT - enable interrupt |
83 | * !LOAD - clear the bit to let go | 83 | * !LOAD - clear the bit to let go |
84 | * ARHT - auto reload | 84 | * ARHT - auto reload |
85 | * !CAPT - no external trigger | 85 | * !CAPT - no external trigger |
@@ -274,8 +274,8 @@ void __init time_init(void) | |||
274 | #ifdef CONFIG_SELFMOD_TIMER | 274 | #ifdef CONFIG_SELFMOD_TIMER |
275 | selfmod_function((int *) arr_func, timer_baseaddr); | 275 | selfmod_function((int *) arr_func, timer_baseaddr); |
276 | #endif | 276 | #endif |
277 | printk(KERN_INFO "XPS timer #0 at 0x%08x, irq=%d\n", | 277 | printk(KERN_INFO "%s #0 at 0x%08x, irq=%d\n", |
278 | timer_baseaddr, irq); | 278 | timer->name, timer_baseaddr, irq); |
279 | 279 | ||
280 | /* If there is clock-frequency property than use it */ | 280 | /* If there is clock-frequency property than use it */ |
281 | prop = of_get_property(timer, "clock-frequency", NULL); | 281 | prop = of_get_property(timer, "clock-frequency", NULL); |
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index ac0e1a5d4782..109e9d86ade4 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -44,7 +44,7 @@ SECTIONS { | |||
44 | __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) { | 44 | __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) { |
45 | _fdt_start = . ; /* place for fdt blob */ | 45 | _fdt_start = . ; /* place for fdt blob */ |
46 | *(__fdt_blob) ; /* Any link-placed DTB */ | 46 | *(__fdt_blob) ; /* Any link-placed DTB */ |
47 | . = _fdt_start + 0x4000; /* Pad up to 16kbyte */ | 47 | . = _fdt_start + 0x8000; /* Pad up to 32kbyte */ |
48 | _fdt_end = . ; | 48 | _fdt_end = . ; |
49 | } | 49 | } |
50 | 50 | ||
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 09c49ed87235..7313bd8acbb7 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -5,3 +5,4 @@ | |||
5 | obj-y := consistent.o init.o | 5 | obj-y := consistent.o init.o |
6 | 6 | ||
7 | obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o | 7 | obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o |
8 | obj-$(CONFIG_HIGHMEM) += highmem.o | ||
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
new file mode 100644
index 000000000000..7d78838e8bfa
--- /dev/null
+++ b/arch/microblaze/mm/highmem.c
@@ -0,0 +1,88 @@ | |||
1 | /* | ||
2 | * highmem.c: virtual kernel memory mappings for high memory | ||
3 | * | ||
4 | * PowerPC version, stolen from the i386 version. | ||
5 | * | ||
6 | * Used in CONFIG_HIGHMEM systems for memory pages which | ||
7 | * are not addressable by direct kernel virtual addresses. | ||
8 | * | ||
9 | * Copyright (C) 1999 Gerhard Wichert, Siemens AG | ||
10 | * Gerhard.Wichert@pdb.siemens.de | ||
11 | * | ||
12 | * | ||
13 | * Redesigned the x86 32-bit VM architecture to deal with | ||
14 | * up to 16 Terrabyte physical memory. With current x86 CPUs | ||
15 | * we now support up to 64 Gigabytes physical RAM. | ||
16 | * | ||
17 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | ||
18 | * | ||
19 | * Reworked for PowerPC by various contributors. Moved from | ||
20 | * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp. | ||
21 | */ | ||
22 | |||
23 | #include <linux/highmem.h> | ||
24 | #include <linux/module.h> | ||
25 | |||
26 | /* | ||
27 | * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap | ||
28 | * gives a more generic (and caching) interface. But kmap_atomic can | ||
29 | * be used in IRQ contexts, so in some (very limited) cases we need | ||
30 | * it. | ||
31 | */ | ||
32 | #include <asm/tlbflush.h> | ||
33 | |||
34 | void *kmap_atomic_prot(struct page *page, pgprot_t prot) | ||
35 | { | ||
36 | |||
37 | unsigned long vaddr; | ||
38 | int idx, type; | ||
39 | |||
40 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ | ||
41 | pagefault_disable(); | ||
42 | if (!PageHighMem(page)) | ||
43 | return page_address(page); | ||
44 | |||
45 | |||
46 | type = kmap_atomic_idx_push(); | ||
47 | idx = type + KM_TYPE_NR*smp_processor_id(); | ||
48 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
49 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
50 | BUG_ON(!pte_none(*(kmap_pte-idx))); | ||
51 | #endif | ||
52 | set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); | ||
53 | local_flush_tlb_page(NULL, vaddr); | ||
54 | |||
55 | return (void *) vaddr; | ||
56 | } | ||
57 | EXPORT_SYMBOL(kmap_atomic_prot); | ||
58 | |||
59 | void __kunmap_atomic(void *kvaddr) | ||
60 | { | ||
61 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | ||
62 | int type; | ||
63 | |||
64 | if (vaddr < __fix_to_virt(FIX_KMAP_END)) { | ||
65 | pagefault_enable(); | ||
66 | return; | ||
67 | } | ||
68 | |||
69 | type = kmap_atomic_idx(); | ||
70 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
71 | { | ||
72 | unsigned int idx; | ||
73 | |||
74 | idx = type + KM_TYPE_NR * smp_processor_id(); | ||
75 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | ||
76 | |||
77 | /* | ||
78 | * force other mappings to Oops if they'll try to access | ||
79 | * this pte without first remap it | ||
80 | */ | ||
81 | pte_clear(&init_mm, vaddr, kmap_pte-idx); | ||
82 | local_flush_tlb_page(NULL, vaddr); | ||
83 | } | ||
84 | #endif | ||
85 | kmap_atomic_idx_pop(); | ||
86 | pagefault_enable(); | ||
87 | } | ||
88 | EXPORT_SYMBOL(__kunmap_atomic); | ||
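For context, a hypothetical kernel-style caller of the atomic kmap pair added above (illustration only, not part of this series), clearing a page that may live in highmem:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper: map a possibly-highmem page, zero it, and drop
 * the temporary mapping again. */
static void zero_possibly_highmem_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);   /* routes to kmap_atomic_prot() above */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);              /* routes to __kunmap_atomic() above */
}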
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 565d193c7ebf..ce80823051ba 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/pgalloc.h> | 24 | #include <asm/pgalloc.h> |
25 | #include <asm/sections.h> | 25 | #include <asm/sections.h> |
26 | #include <asm/tlb.h> | 26 | #include <asm/tlb.h> |
27 | #include <asm/fixmap.h> | ||
27 | 28 | ||
28 | /* Use for MMU and noMMU because of PCI generic code */ | 29 | /* Use for MMU and noMMU because of PCI generic code */ |
29 | int mem_init_done; | 30 | int mem_init_done; |
@@ -44,9 +45,56 @@ char *klimit = _end; | |||
44 | */ | 45 | */ |
45 | unsigned long memory_start; | 46 | unsigned long memory_start; |
46 | EXPORT_SYMBOL(memory_start); | 47 | EXPORT_SYMBOL(memory_start); |
47 | unsigned long memory_end; /* due to mm/nommu.c */ | ||
48 | unsigned long memory_size; | 48 | unsigned long memory_size; |
49 | EXPORT_SYMBOL(memory_size); | 49 | EXPORT_SYMBOL(memory_size); |
50 | unsigned long lowmem_size; | ||
51 | |||
52 | #ifdef CONFIG_HIGHMEM | ||
53 | pte_t *kmap_pte; | ||
54 | EXPORT_SYMBOL(kmap_pte); | ||
55 | pgprot_t kmap_prot; | ||
56 | EXPORT_SYMBOL(kmap_prot); | ||
57 | |||
58 | static inline pte_t *virt_to_kpte(unsigned long vaddr) | ||
59 | { | ||
60 | return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), | ||
61 | vaddr), vaddr); | ||
62 | } | ||
63 | |||
64 | static void __init highmem_init(void) | ||
65 | { | ||
66 | pr_debug("%x\n", (u32)PKMAP_BASE); | ||
67 | map_page(PKMAP_BASE, 0, 0); /* XXX gross */ | ||
68 | pkmap_page_table = virt_to_kpte(PKMAP_BASE); | ||
69 | |||
70 | kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); | ||
71 | kmap_prot = PAGE_KERNEL; | ||
72 | } | ||
73 | |||
74 | static unsigned long highmem_setup(void) | ||
75 | { | ||
76 | unsigned long pfn; | ||
77 | unsigned long reservedpages = 0; | ||
78 | |||
79 | for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) { | ||
80 | struct page *page = pfn_to_page(pfn); | ||
81 | |||
82 | /* FIXME not sure about */ | ||
83 | if (memblock_is_reserved(pfn << PAGE_SHIFT)) | ||
84 | continue; | ||
85 | ClearPageReserved(page); | ||
86 | init_page_count(page); | ||
87 | __free_page(page); | ||
88 | totalhigh_pages++; | ||
89 | reservedpages++; | ||
90 | } | ||
91 | totalram_pages += totalhigh_pages; | ||
92 | printk(KERN_INFO "High memory: %luk\n", | ||
93 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
94 | |||
95 | return reservedpages; | ||
96 | } | ||
97 | #endif /* CONFIG_HIGHMEM */ | ||
50 | 98 | ||
51 | /* | 99 | /* |
52 | * paging_init() sets up the page tables - in fact we've already done this. | 100 | * paging_init() sets up the page tables - in fact we've already done this. |
@@ -54,17 +102,28 @@ EXPORT_SYMBOL(memory_size); | |||
54 | static void __init paging_init(void) | 102 | static void __init paging_init(void) |
55 | { | 103 | { |
56 | unsigned long zones_size[MAX_NR_ZONES]; | 104 | unsigned long zones_size[MAX_NR_ZONES]; |
105 | #ifdef CONFIG_MMU | ||
106 | int idx; | ||
107 | |||
108 | /* Setup fixmaps */ | ||
109 | for (idx = 0; idx < __end_of_fixed_addresses; idx++) | ||
110 | clear_fixmap(idx); | ||
111 | #endif | ||
57 | 112 | ||
58 | /* Clean every zones */ | 113 | /* Clean every zones */ |
59 | memset(zones_size, 0, sizeof(zones_size)); | 114 | memset(zones_size, 0, sizeof(zones_size)); |
60 | 115 | ||
61 | /* | 116 | #ifdef CONFIG_HIGHMEM |
62 | * old: we can DMA to/from any address.put all page into ZONE_DMA | 117 | highmem_init(); |
63 | * We use only ZONE_NORMAL | ||
64 | */ | ||
65 | zones_size[ZONE_NORMAL] = max_mapnr; | ||
66 | 118 | ||
67 | free_area_init(zones_size); | 119 | zones_size[ZONE_DMA] = max_low_pfn; |
120 | zones_size[ZONE_HIGHMEM] = max_pfn; | ||
121 | #else | ||
122 | zones_size[ZONE_DMA] = max_pfn; | ||
123 | #endif | ||
124 | |||
125 | /* We don't have holes in memory map */ | ||
126 | free_area_init_nodes(zones_size); | ||
68 | } | 127 | } |
69 | 128 | ||
70 | void __init setup_memory(void) | 129 | void __init setup_memory(void) |
@@ -78,32 +137,31 @@ void __init setup_memory(void) | |||
78 | /* Find main memory where is the kernel */ | 137 | /* Find main memory where is the kernel */ |
79 | for_each_memblock(memory, reg) { | 138 | for_each_memblock(memory, reg) { |
80 | memory_start = (u32)reg->base; | 139 | memory_start = (u32)reg->base; |
81 | memory_end = (u32) reg->base + reg->size; | 140 | lowmem_size = reg->size; |
82 | if ((memory_start <= (u32)_text) && | 141 | if ((memory_start <= (u32)_text) && |
83 | ((u32)_text <= memory_end)) { | 142 | ((u32)_text <= (memory_start + lowmem_size - 1))) { |
84 | memory_size = memory_end - memory_start; | 143 | memory_size = lowmem_size; |
85 | PAGE_OFFSET = memory_start; | 144 | PAGE_OFFSET = memory_start; |
86 | printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, " | 145 | printk(KERN_INFO "%s: Main mem: 0x%x, " |
87 | "size 0x%08x\n", __func__, (u32) memory_start, | 146 | "size 0x%08x\n", __func__, (u32) memory_start, |
88 | (u32) memory_end, (u32) memory_size); | 147 | (u32) memory_size); |
89 | break; | 148 | break; |
90 | } | 149 | } |
91 | } | 150 | } |
92 | 151 | ||
93 | if (!memory_start || !memory_end) { | 152 | if (!memory_start || !memory_size) { |
94 | panic("%s: Missing memory setting 0x%08x-0x%08x\n", | 153 | panic("%s: Missing memory setting 0x%08x, size=0x%08x\n", |
95 | __func__, (u32) memory_start, (u32) memory_end); | 154 | __func__, (u32) memory_start, (u32) memory_size); |
96 | } | 155 | } |
97 | 156 | ||
98 | /* reservation of region where is the kernel */ | 157 | /* reservation of region where is the kernel */ |
99 | kernel_align_start = PAGE_DOWN((u32)_text); | 158 | kernel_align_start = PAGE_DOWN((u32)_text); |
100 | /* ALIGN can be remove because _end in vmlinux.lds.S is align */ | 159 | /* ALIGN can be remove because _end in vmlinux.lds.S is align */ |
101 | kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; | 160 | kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; |
102 | memblock_reserve(kernel_align_start, kernel_align_size); | 161 | printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n", |
103 | printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n", | ||
104 | __func__, kernel_align_start, kernel_align_start | 162 | __func__, kernel_align_start, kernel_align_start |
105 | + kernel_align_size, kernel_align_size); | 163 | + kernel_align_size, kernel_align_size); |
106 | 164 | memblock_reserve(kernel_align_start, kernel_align_size); | |
107 | #endif | 165 | #endif |
108 | /* | 166 | /* |
109 | * Kernel: | 167 | * Kernel: |
@@ -120,11 +178,13 @@ void __init setup_memory(void) | |||
120 | min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ | 178 | min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ |
121 | /* RAM is assumed contiguous */ | 179 | /* RAM is assumed contiguous */ |
122 | num_physpages = max_mapnr = memory_size >> PAGE_SHIFT; | 180 | num_physpages = max_mapnr = memory_size >> PAGE_SHIFT; |
123 | max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT; | 181 | max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT; |
182 | max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT; | ||
124 | 183 | ||
125 | printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr); | 184 | printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr); |
126 | printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); | 185 | printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); |
127 | printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); | 186 | printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); |
187 | printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn); | ||
128 | 188 | ||
129 | /* | 189 | /* |
130 | * Find an area to use for the bootmem bitmap. | 190 | * Find an area to use for the bootmem bitmap. |
@@ -137,15 +197,39 @@ void __init setup_memory(void) | |||
137 | PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); | 197 | PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); |
138 | memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); | 198 | memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); |
139 | 199 | ||
200 | /* Add active regions with valid PFNs */ | ||
201 | for_each_memblock(memory, reg) { | ||
202 | unsigned long start_pfn, end_pfn; | ||
203 | |||
204 | start_pfn = memblock_region_memory_base_pfn(reg); | ||
205 | end_pfn = memblock_region_memory_end_pfn(reg); | ||
206 | memblock_set_node(start_pfn << PAGE_SHIFT, | ||
207 | (end_pfn - start_pfn) << PAGE_SHIFT, 0); | ||
208 | } | ||
209 | |||
140 | /* free bootmem is whole main memory */ | 210 | /* free bootmem is whole main memory */ |
141 | free_bootmem(memory_start, memory_size); | 211 | free_bootmem_with_active_regions(0, max_low_pfn); |
142 | 212 | ||
143 | /* reserve allocate blocks */ | 213 | /* reserve allocate blocks */ |
144 | for_each_memblock(reserved, reg) { | 214 | for_each_memblock(reserved, reg) { |
145 | pr_debug("reserved - 0x%08x-0x%08x\n", | 215 | unsigned long top = reg->base + reg->size - 1; |
146 | (u32) reg->base, (u32) reg->size); | 216 | |
147 | reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); | 217 | pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n", |
218 | (u32) reg->base, (u32) reg->size, top, | ||
219 | memory_start + lowmem_size - 1); | ||
220 | |||
221 | if (top <= (memory_start + lowmem_size - 1)) { | ||
222 | reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); | ||
223 | } else if (reg->base < (memory_start + lowmem_size - 1)) { | ||
224 | unsigned long trunc_size = memory_start + lowmem_size - | ||
225 | reg->base; | ||
226 | reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); | ||
227 | } | ||
148 | } | 228 | } |
229 | |||
230 | /* XXX need to clip this if using highmem? */ | ||
231 | sparse_memory_present_with_active_regions(0); | ||
232 | |||
149 | #ifdef CONFIG_MMU | 233 | #ifdef CONFIG_MMU |
150 | init_bootmem_done = 1; | 234 | init_bootmem_done = 1; |
151 | #endif | 235 | #endif |
@@ -190,13 +274,58 @@ void free_initmem(void) | |||
190 | 274 | ||
191 | void __init mem_init(void) | 275 | void __init mem_init(void) |
192 | { | 276 | { |
193 | high_memory = (void *)__va(memory_end); | 277 | pg_data_t *pgdat; |
278 | unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; | ||
279 | |||
280 | high_memory = (void *)__va(memory_start + lowmem_size - 1); | ||
281 | |||
194 | /* this will put all memory onto the freelists */ | 282 | /* this will put all memory onto the freelists */ |
195 | totalram_pages += free_all_bootmem(); | 283 | totalram_pages += free_all_bootmem(); |
196 | 284 | ||
197 | printk(KERN_INFO "Memory: %luk/%luk available\n", | 285 | for_each_online_pgdat(pgdat) { |
198 | nr_free_pages() << (PAGE_SHIFT-10), | 286 | unsigned long i; |
199 | num_physpages << (PAGE_SHIFT-10)); | 287 | struct page *page; |
288 | |||
289 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | ||
290 | if (!pfn_valid(pgdat->node_start_pfn + i)) | ||
291 | continue; | ||
292 | page = pgdat_page_nr(pgdat, i); | ||
293 | if (PageReserved(page)) | ||
294 | reservedpages++; | ||
295 | } | ||
296 | } | ||
297 | |||
298 | #ifdef CONFIG_HIGHMEM | ||
299 | reservedpages -= highmem_setup(); | ||
300 | #endif | ||
301 | |||
302 | codesize = (unsigned long)&_sdata - (unsigned long)&_stext; | ||
303 | datasize = (unsigned long)&_edata - (unsigned long)&_sdata; | ||
304 | initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; | ||
305 | bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; | ||
306 | |||
307 | pr_info("Memory: %luk/%luk available (%luk kernel code, " | ||
308 | "%luk reserved, %luk data, %luk bss, %luk init)\n", | ||
309 | nr_free_pages() << (PAGE_SHIFT-10), | ||
310 | num_physpages << (PAGE_SHIFT-10), | ||
311 | codesize >> 10, | ||
312 | reservedpages << (PAGE_SHIFT-10), | ||
313 | datasize >> 10, | ||
314 | bsssize >> 10, | ||
315 | initsize >> 10); | ||
316 | |||
317 | #ifdef CONFIG_MMU | ||
318 | pr_info("Kernel virtual memory layout:\n"); | ||
319 | pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); | ||
320 | #ifdef CONFIG_HIGHMEM | ||
321 | pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", | ||
322 | PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); | ||
323 | #endif /* CONFIG_HIGHMEM */ | ||
324 | pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", | ||
325 | ioremap_bot, ioremap_base); | ||
326 | pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", | ||
327 | (unsigned long)VMALLOC_START, VMALLOC_END); | ||
328 | #endif | ||
200 | mem_init_done = 1; | 329 | mem_init_done = 1; |
201 | } | 330 | } |
202 | 331 | ||
@@ -226,7 +355,6 @@ static void mm_cmdline_setup(void) | |||
226 | maxmem = memparse(p, &p); | 355 | maxmem = memparse(p, &p); |
227 | if (maxmem && memory_size > maxmem) { | 356 | if (maxmem && memory_size > maxmem) { |
228 | memory_size = maxmem; | 357 | memory_size = maxmem; |
229 | memory_end = memory_start + memory_size; | ||
230 | memblock.memory.regions[0].size = memory_size; | 358 | memblock.memory.regions[0].size = memory_size; |
231 | } | 359 | } |
232 | } | 360 | } |
@@ -270,15 +398,26 @@ asmlinkage void __init mmu_init(void) | |||
270 | machine_restart(NULL); | 398 | machine_restart(NULL); |
271 | } | 399 | } |
272 | 400 | ||
273 | if ((u32) memblock.memory.regions[0].size < 0x1000000) { | 401 | if ((u32) memblock.memory.regions[0].size < 0x400000) { |
274 | printk(KERN_EMERG "Memory must be greater than 16MB\n"); | 402 | printk(KERN_EMERG "Memory must be greater than 4MB\n"); |
403 | machine_restart(NULL); | ||
404 | } | ||
405 | |||
406 | if ((u32) memblock.memory.regions[0].size < kernel_tlb) { | ||
407 | printk(KERN_EMERG "Kernel size is greater than memory node\n"); | ||
275 | machine_restart(NULL); | 408 | machine_restart(NULL); |
276 | } | 409 | } |
410 | |||
277 | /* Find main memory where the kernel is */ | 411 | /* Find main memory where the kernel is */ |
278 | memory_start = (u32) memblock.memory.regions[0].base; | 412 | memory_start = (u32) memblock.memory.regions[0].base; |
279 | memory_end = (u32) memblock.memory.regions[0].base + | 413 | lowmem_size = memory_size = (u32) memblock.memory.regions[0].size; |
280 | (u32) memblock.memory.regions[0].size; | 414 | |
281 | memory_size = memory_end - memory_start; | 415 | if (lowmem_size > CONFIG_LOWMEM_SIZE) { |
416 | lowmem_size = CONFIG_LOWMEM_SIZE; | ||
417 | #ifndef CONFIG_HIGHMEM | ||
418 | memory_size = lowmem_size; | ||
419 | #endif | ||
420 | } | ||
282 | 421 | ||
283 | mm_cmdline_setup(); /* FIXME parse args from command line - not used */ | 422 | mm_cmdline_setup(); /* FIXME parse args from command line - not used */ |
284 | 423 | ||
@@ -305,15 +444,20 @@ asmlinkage void __init mmu_init(void) | |||
305 | /* Map in all of RAM starting at CONFIG_KERNEL_START */ | 444 | /* Map in all of RAM starting at CONFIG_KERNEL_START */ |
306 | mapin_ram(); | 445 | mapin_ram(); |
307 | 446 | ||
308 | #ifdef CONFIG_HIGHMEM_START_BOOL | 447 | /* Extend vmalloc and ioremap area as big as possible */ |
309 | ioremap_base = CONFIG_HIGHMEM_START; | 448 | #ifdef CONFIG_HIGHMEM |
449 | ioremap_base = ioremap_bot = PKMAP_BASE; | ||
310 | #else | 450 | #else |
311 | ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */ | 451 | ioremap_base = ioremap_bot = FIXADDR_START; |
312 | #endif /* CONFIG_HIGHMEM_START_BOOL */ | 452 | #endif |
313 | ioremap_bot = ioremap_base; | ||
314 | 453 | ||
315 | /* Initialize the context management stuff */ | 454 | /* Initialize the context management stuff */ |
316 | mmu_context_init(); | 455 | mmu_context_init(); |
456 | |||
457 | /* Shortly after that, the entire linear mapping will be available */ | ||
458 | /* This will also cause that unflatten device tree will be allocated | ||
459 | * inside 768MB limit */ | ||
460 | memblock_set_current_limit(memory_start + lowmem_size - 1); | ||
317 | } | 461 | } |
318 | 462 | ||
319 | /* This is only called until mem_init is done. */ | 463 | /* This is only called until mem_init is done. */ |
@@ -324,11 +468,11 @@ void __init *early_get_page(void) | |||
324 | p = alloc_bootmem_pages(PAGE_SIZE); | 468 | p = alloc_bootmem_pages(PAGE_SIZE); |
325 | } else { | 469 | } else { |
326 | /* | 470 | /* |
327 | * Mem start + 32MB -> here is limit | 471 | * Mem start + kernel_tlb -> here is limit |
328 | * because of mem mapping from head.S | 472 | * because of mem mapping from head.S |
329 | */ | 473 | */ |
330 | p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, | 474 | p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, |
331 | memory_start + 0x2000000)); | 475 | memory_start + kernel_tlb)); |
332 | } | 476 | } |
333 | return p; | 477 | return p; |
334 | } | 478 | } |
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 59bf2335a4ce..d1c06d07fed8 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
38 | #include <asm/mmu.h> | 38 | #include <asm/mmu.h> |
39 | #include <asm/sections.h> | 39 | #include <asm/sections.h> |
40 | #include <asm/fixmap.h> | ||
40 | 41 | ||
41 | #define flush_HPTE(X, va, pg) _tlbie(va) | 42 | #define flush_HPTE(X, va, pg) _tlbie(va) |
42 | 43 | ||
@@ -44,11 +45,6 @@ unsigned long ioremap_base; | |||
44 | unsigned long ioremap_bot; | 45 | unsigned long ioremap_bot; |
45 | EXPORT_SYMBOL(ioremap_bot); | 46 | EXPORT_SYMBOL(ioremap_bot); |
46 | 47 | ||
47 | /* The maximum lowmem defaults to 768Mb, but this can be configured to | ||
48 | * another value. | ||
49 | */ | ||
50 | #define MAX_LOW_MEM CONFIG_LOWMEM_SIZE | ||
51 | |||
52 | #ifndef CONFIG_SMP | 48 | #ifndef CONFIG_SMP |
53 | struct pgtable_cache_struct quicklists; | 49 | struct pgtable_cache_struct quicklists; |
54 | #endif | 50 | #endif |
@@ -80,7 +76,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size, | |||
80 | !(p >= virt_to_phys((unsigned long)&__bss_stop) && | 76 | !(p >= virt_to_phys((unsigned long)&__bss_stop) && |
81 | p < virt_to_phys((unsigned long)__bss_stop))) { | 77 | p < virt_to_phys((unsigned long)__bss_stop))) { |
82 | printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT | 78 | printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT |
83 | " is RAM lr %p\n", (unsigned long)p, | 79 | " is RAM lr %pf\n", (unsigned long)p, |
84 | __builtin_return_address(0)); | 80 | __builtin_return_address(0)); |
85 | return NULL; | 81 | return NULL; |
86 | } | 82 | } |
@@ -171,7 +167,7 @@ void __init mapin_ram(void) | |||
171 | 167 | ||
172 | v = CONFIG_KERNEL_START; | 168 | v = CONFIG_KERNEL_START; |
173 | p = memory_start; | 169 | p = memory_start; |
174 | for (s = 0; s < memory_size; s += PAGE_SIZE) { | 170 | for (s = 0; s < lowmem_size; s += PAGE_SIZE) { |
175 | f = _PAGE_PRESENT | _PAGE_ACCESSED | | 171 | f = _PAGE_PRESENT | _PAGE_ACCESSED | |
176 | _PAGE_SHARED | _PAGE_HWEXEC; | 172 | _PAGE_SHARED | _PAGE_HWEXEC; |
177 | if ((char *) v < _stext || (char *) v >= _etext) | 173 | if ((char *) v < _stext || (char *) v >= _etext) |
@@ -254,3 +250,13 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |||
254 | } | 250 | } |
255 | return pte; | 251 | return pte; |
256 | } | 252 | } |
253 | |||
254 | void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) | ||
255 | { | ||
256 | unsigned long address = __fix_to_virt(idx); | ||
257 | |||
258 | if (idx >= __end_of_fixed_addresses) | ||
259 | BUG(); | ||
260 | |||
261 | map_page(address, phys, pgprot_val(flags)); | ||
262 | } | ||