author     Chris Zankel <chris@zankel.net>  2016-08-03 18:30:00 -0400
committer  Chris Zankel <chris@zankel.net>  2016-08-03 18:30:00 -0400
commit     9e8511ff7e3f18df7d202da06c9704d940fa68f9 (patch)
tree       e6fe856effe9a41693d58b947b18e0345fe9dc9b
parent     523d939ef98fd712632d93a5a2b588e477a7565e (diff)
parent     d8d2f7f64592f3e8c51dac6d20aed044dca4009a (diff)
Merge tag 'xtensa-for-next-20160731' of git://github.com/jcmvbkbc/linux-xtensa into for_next
Xtensa improvements for 4.8:

- add new kernel memory layouts for MMUv3 cores: with 256MB and 512MB KSEG
  size, starting at physical address other than 0;
- make kernel load address configurable;
- clean up kernel memory layout macros;
- drop sysmem early allocator and switch to memblock;
- enable kmemleak and memory reservation from the device tree;
- wire up new syscalls: userfaultfd, membarrier, mlock2, copy_file_range,
  preadv2 and pwritev2.
-rw-r--r--  Documentation/xtensa/mmu.txt | 173
-rw-r--r--  arch/xtensa/Kconfig | 90
-rw-r--r--  arch/xtensa/boot/boot-elf/boot.lds.S | 2
-rw-r--r--  arch/xtensa/boot/boot-elf/bootstrap.S | 7
-rw-r--r--  arch/xtensa/boot/boot-uboot/Makefile | 10
-rw-r--r--  arch/xtensa/include/asm/bitops.h | 2
-rw-r--r--  arch/xtensa/include/asm/cacheasm.h | 11
-rw-r--r--  arch/xtensa/include/asm/fixmap.h | 5
-rw-r--r--  arch/xtensa/include/asm/highmem.h | 5
-rw-r--r--  arch/xtensa/include/asm/initialize_mmu.h | 44
-rw-r--r--  arch/xtensa/include/asm/kmem_layout.h | 74
-rw-r--r--  arch/xtensa/include/asm/page.h | 27
-rw-r--r--  arch/xtensa/include/asm/pgtable.h | 7
-rw-r--r--  arch/xtensa/include/asm/processor.h | 2
-rw-r--r--  arch/xtensa/include/asm/sysmem.h | 21
-rw-r--r--  arch/xtensa/include/asm/vectors.h | 67
-rw-r--r--  arch/xtensa/include/uapi/asm/types.h | 3
-rw-r--r--  arch/xtensa/include/uapi/asm/unistd.h | 15
-rw-r--r--  arch/xtensa/kernel/entry.S | 5
-rw-r--r--  arch/xtensa/kernel/head.S | 2
-rw-r--r--  arch/xtensa/kernel/setup.c | 49
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/xtensa/mm/init.c | 279
23 files changed, 449 insertions, 455 deletions
diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt
index 0312fe66475c..222a2c6748e6 100644
--- a/Documentation/xtensa/mmu.txt
+++ b/Documentation/xtensa/mmu.txt
@@ -3,15 +3,8 @@ MMUv3 initialization sequence.
 The code in the initialize_mmu macro sets up MMUv3 memory mapping
 identically to MMUv2 fixed memory mapping. Depending on
 CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX symbol this code is
-located in one of the following address ranges:
-
-    0xF0000000..0xFFFFFFFF (will keep same address in MMU v2 layout;
-                            typically ROM)
-    0x00000000..0x07FFFFFF (system RAM; this code is actually linked
-                            at 0xD0000000..0xD7FFFFFF [cached]
-                            or 0xD8000000..0xDFFFFFFF [uncached];
-                            in any case, initially runs elsewhere
-                            than linked, so have to be careful)
+located in addresses it was linked for (symbol undefined), or not
+(symbol defined), so it needs to be position-independent.
 
 The code has the following assumptions:
  This code fragment is run only on an MMU v3.
@@ -28,24 +21,26 @@ TLB setup proceeds along the following steps.
   PA = physical address (two upper nibbles of it);
   pc = physical range that contains this code;
 
-After step 2, we jump to virtual address in 0x40000000..0x5fffffff
-that corresponds to next instruction to execute in this code.
-After step 4, we jump to intended (linked) address of this code.
-
-      Step 0     Step1     Step 2     Step3     Step 4     Step5
- ============  =====  ============  =====  ============  =====
-   VA      PA   PA      VA      PA   PA      VA      PA   PA
- ------    --   --    ------    --   --    ------    --   --
- E0..FF -> E0  -> E0  E0..FF -> E0        F0..FF -> F0  -> F0
- C0..DF -> C0  -> C0  C0..DF -> C0        E0..EF -> F0  -> F0
- A0..BF -> A0  -> A0  A0..BF -> A0        D8..DF -> 00  -> 00
- 80..9F -> 80  -> 80  80..9F -> 80        D0..D7 -> 00  -> 00
- 60..7F -> 60  -> 60  60..7F -> 60
- 40..5F -> 40         40..5F -> pc  -> pc 40..5F -> pc
- 20..3F -> 20  -> 20  20..3F -> 20
- 00..1F -> 00  -> 00  00..1F -> 00
-
-The default location of IO peripherals is above 0xf0000000. This may change
+After step 2, we jump to virtual address in the range 0x40000000..0x5fffffff
+or 0x00000000..0x1fffffff, depending on whether the kernel was loaded below
+0x40000000 or above. That address corresponds to next instruction to execute
+in this code. After step 4, we jump to intended (linked) address of this code.
+The scheme below assumes that the kernel is loaded below 0x40000000.
+
+      Step0     Step1     Step2     Step3          Step4     Step5
+      =====     =====     =====     =====          =====     =====
+  VA      PA        PA        PA        PA      VA      PA        PA
+ ------    --        --        --        --    ------    --        --
+ E0..FF -> E0     -> E0     -> E0               F0..FF -> F0     -> F0
+ C0..DF -> C0     -> C0     -> C0               E0..EF -> F0     -> F0
+ A0..BF -> A0     -> A0     -> A0               D8..DF -> 00     -> 00
+ 80..9F -> 80     -> 80     -> 80               D0..D7 -> 00     -> 00
+ 60..7F -> 60     -> 60     -> 60
+ 40..5F -> 40               -> pc     -> pc     40..5F -> pc
+ 20..3F -> 20     -> 20     -> 20
+ 00..1F -> 00     -> 00     -> 00
+
+The default location of IO peripherals is above 0xf0000000. This may be changed
 using a "ranges" property in a device tree simple-bus node. See ePAPR 1.1, §6.5
 for details on the syntax and semantic of simple-bus nodes. The following
 limitations apply:
@@ -62,3 +57,127 @@ limitations apply:
62 57
636. The IO area covers the entire 256MB segment of parent-bus-address; the 586. The IO area covers the entire 256MB segment of parent-bus-address; the
64 "ranges" triplet length field is ignored 59 "ranges" triplet length field is ignored
60
61
62MMUv3 address space layouts.
63============================
64
65Default MMUv2-compatible layout.
66
67 Symbol VADDR Size
68+------------------+
69| Userspace | 0x00000000 TASK_SIZE
70+------------------+ 0x40000000
71+------------------+
72| Page table | 0x80000000
73+------------------+ 0x80400000
74+------------------+
75| KMAP area | PKMAP_BASE PTRS_PER_PTE *
76| | DCACHE_N_COLORS *
77| | PAGE_SIZE
78| | (4MB * DCACHE_N_COLORS)
79+------------------+
80| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
81| | NR_CPUS *
82| | DCACHE_N_COLORS *
83| | PAGE_SIZE
84+------------------+ FIXADDR_TOP 0xbffff000
85+------------------+
86| VMALLOC area | VMALLOC_START 0xc0000000 128MB - 64KB
87+------------------+ VMALLOC_END
88| Cache aliasing | TLBTEMP_BASE_1 0xc7ff0000 DCACHE_WAY_SIZE
89| remap area 1 |
90+------------------+
91| Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE
92| remap area 2 |
93+------------------+
94+------------------+
95| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xd0000000 128MB
96+------------------+
97| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xd8000000 128MB
98+------------------+
99| Cached KIO | XCHAL_KIO_CACHED_VADDR 0xe0000000 256MB
100+------------------+
101| Uncached KIO | XCHAL_KIO_BYPASS_VADDR 0xf0000000 256MB
102+------------------+
103
104
105256MB cached + 256MB uncached layout.
106
107 Symbol VADDR Size
108+------------------+
109| Userspace | 0x00000000 TASK_SIZE
110+------------------+ 0x40000000
111+------------------+
112| Page table | 0x80000000
113+------------------+ 0x80400000
114+------------------+
115| KMAP area | PKMAP_BASE PTRS_PER_PTE *
116| | DCACHE_N_COLORS *
117| | PAGE_SIZE
118| | (4MB * DCACHE_N_COLORS)
119+------------------+
120| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
121| | NR_CPUS *
122| | DCACHE_N_COLORS *
123| | PAGE_SIZE
124+------------------+ FIXADDR_TOP 0x9ffff000
125+------------------+
126| VMALLOC area | VMALLOC_START 0xa0000000 128MB - 64KB
127+------------------+ VMALLOC_END
128| Cache aliasing | TLBTEMP_BASE_1 0xa7ff0000 DCACHE_WAY_SIZE
129| remap area 1 |
130+------------------+
131| Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE
132| remap area 2 |
133+------------------+
134+------------------+
135| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xb0000000 256MB
136+------------------+
137| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 256MB
138+------------------+
139+------------------+
140| Cached KIO | XCHAL_KIO_CACHED_VADDR 0xe0000000 256MB
141+------------------+
142| Uncached KIO | XCHAL_KIO_BYPASS_VADDR 0xf0000000 256MB
143+------------------+
144
145
146512MB cached + 512MB uncached layout.
147
148 Symbol VADDR Size
149+------------------+
150| Userspace | 0x00000000 TASK_SIZE
151+------------------+ 0x40000000
152+------------------+
153| Page table | 0x80000000
154+------------------+ 0x80400000
155+------------------+
156| KMAP area | PKMAP_BASE PTRS_PER_PTE *
157| | DCACHE_N_COLORS *
158| | PAGE_SIZE
159| | (4MB * DCACHE_N_COLORS)
160+------------------+
161| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
162| | NR_CPUS *
163| | DCACHE_N_COLORS *
164| | PAGE_SIZE
165+------------------+ FIXADDR_TOP 0x8ffff000
166+------------------+
167| VMALLOC area | VMALLOC_START 0x90000000 128MB - 64KB
168+------------------+ VMALLOC_END
169| Cache aliasing | TLBTEMP_BASE_1 0x97ff0000 DCACHE_WAY_SIZE
170| remap area 1 |
171+------------------+
172| Cache aliasing | TLBTEMP_BASE_2 DCACHE_WAY_SIZE
173| remap area 2 |
174+------------------+
175+------------------+
176| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xa0000000 512MB
177+------------------+
178| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 512MB
179+------------------+
180| Cached KIO | XCHAL_KIO_CACHED_VADDR 0xe0000000 256MB
181+------------------+
182| Uncached KIO | XCHAL_KIO_BYPASS_VADDR 0xf0000000 256MB
183+------------------+
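
The KMAP and atomic KMAP rows in the tables above are given as products rather
than fixed sizes because they scale with cache colouring and CPU count. A quick
host-side calculation with example parameters (the DCACHE_N_COLORS, KM_TYPE_NR
and NR_CPUS values below are illustrative, not taken from any particular core):

#include <stdio.h>

/* Example parameters; the real values depend on the core variant
 * (dcache way size / page size) and the kernel configuration. */
#define PAGE_SIZE        4096ul
#define PTRS_PER_PTE     1024ul
#define DCACHE_N_COLORS  4ul
#define KM_TYPE_NR       20ul
#define NR_CPUS          2ul

int main(void)
{
	unsigned long kmap = PTRS_PER_PTE * DCACHE_N_COLORS * PAGE_SIZE;
	unsigned long atomic_kmap = KM_TYPE_NR * NR_CPUS *
				    DCACHE_N_COLORS * PAGE_SIZE;

	/* Matches the "(4MB * DCACHE_N_COLORS)" note in the tables above. */
	printf("KMAP area:        %lu MB\n", kmap >> 20);
	printf("Atomic KMAP area: %lu kB\n", atomic_kmap >> 10);
	return 0;
}
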
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 64336f666fb6..3f6659c53023 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -13,16 +13,19 @@ config XTENSA
13 select GENERIC_IRQ_SHOW 13 select GENERIC_IRQ_SHOW
14 select GENERIC_PCI_IOMAP 14 select GENERIC_PCI_IOMAP
15 select GENERIC_SCHED_CLOCK 15 select GENERIC_SCHED_CLOCK
16 select HAVE_DEBUG_KMEMLEAK
16 select HAVE_DMA_API_DEBUG 17 select HAVE_DMA_API_DEBUG
17 select HAVE_EXIT_THREAD 18 select HAVE_EXIT_THREAD
18 select HAVE_FUNCTION_TRACER 19 select HAVE_FUNCTION_TRACER
19 select HAVE_FUTEX_CMPXCHG if !MMU 20 select HAVE_FUTEX_CMPXCHG if !MMU
20 select HAVE_HW_BREAKPOINT if PERF_EVENTS 21 select HAVE_HW_BREAKPOINT if PERF_EVENTS
21 select HAVE_IRQ_TIME_ACCOUNTING 22 select HAVE_IRQ_TIME_ACCOUNTING
23 select HAVE_MEMBLOCK
22 select HAVE_OPROFILE 24 select HAVE_OPROFILE
23 select HAVE_PERF_EVENTS 25 select HAVE_PERF_EVENTS
24 select IRQ_DOMAIN 26 select IRQ_DOMAIN
25 select MODULES_USE_ELF_RELA 27 select MODULES_USE_ELF_RELA
28 select NO_BOOTMEM
26 select PERF_USE_VMALLOC 29 select PERF_USE_VMALLOC
27 select VIRT_TO_BUS 30 select VIRT_TO_BUS
28 help 31 help
@@ -236,6 +239,69 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
236 239
237 If in doubt, say Y. 240 If in doubt, say Y.
238 241
242config KSEG_PADDR
243 hex "Physical address of the KSEG mapping"
244 depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
245 default 0x00000000
246 help
247 This is the physical address where KSEG is mapped. Please refer to
248 the chosen KSEG layout help for the required address alignment.
249 Unpacked kernel image (including vectors) must be located completely
250 within KSEG.
251 Physical memory below this address is not available to linux.
252
253 If unsure, leave the default value here.
254
255config KERNEL_LOAD_ADDRESS
256 hex "Kernel load address"
257 default 0x00003000
258 help
259 This is the address where the kernel is loaded.
260 It is virtual address for MMUv2 configurations and physical address
261 for all other configurations.
262
263 If unsure, leave the default value here.
264
265config VECTORS_OFFSET
266 hex "Kernel vectors offset"
267 default 0x00003000
268 help
269 This is the offset of the kernel image from the relocatable vectors
270 base.
271
272 If unsure, leave the default value here.
273
274choice
275 prompt "KSEG layout"
276 depends on MMU
277 default XTENSA_KSEG_MMU_V2
278
279config XTENSA_KSEG_MMU_V2
280 bool "MMUv2: 128MB cached + 128MB uncached"
281 help
282 MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
283 at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
284 without cache.
285 KSEG_PADDR must be aligned to 128MB.
286
287config XTENSA_KSEG_256M
288 bool "256MB cached + 256MB uncached"
289 depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
290 help
291 TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
292 with cache and to 0xc0000000 without cache.
293 KSEG_PADDR must be aligned to 256MB.
294
295config XTENSA_KSEG_512M
296 bool "512MB cached + 512MB uncached"
297 depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
298 help
299 TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
300 with cache and to 0xc0000000 without cache.
301 KSEG_PADDR must be aligned to 256MB.
302
303endchoice
304
239config HIGHMEM 305config HIGHMEM
240 bool "High Memory Support" 306 bool "High Memory Support"
241 depends on MMU 307 depends on MMU
@@ -331,7 +397,7 @@ config XTENSA_PLATFORM_XT2000
331config XTENSA_PLATFORM_XTFPGA 397config XTENSA_PLATFORM_XTFPGA
332 bool "XTFPGA" 398 bool "XTFPGA"
333 select ETHOC if ETHERNET 399 select ETHOC if ETHERNET
334 select PLATFORM_WANT_DEFAULT_MEM 400 select PLATFORM_WANT_DEFAULT_MEM if !MMU
335 select SERIAL_CONSOLE 401 select SERIAL_CONSOLE
336 select XTENSA_CALIBRATE_CCOUNT 402 select XTENSA_CALIBRATE_CCOUNT
337 help 403 help
@@ -369,6 +435,7 @@ config USE_OF
369 bool "Flattened Device Tree support" 435 bool "Flattened Device Tree support"
370 select OF 436 select OF
371 select OF_EARLY_FLATTREE 437 select OF_EARLY_FLATTREE
438 select OF_RESERVED_MEM
372 help 439 help
373 Include support for flattened device tree machine descriptions. 440 Include support for flattened device tree machine descriptions.
374 441
@@ -439,16 +506,9 @@ config DEFAULT_MEM_START
439 default 0x00000000 if MMU 506 default 0x00000000 if MMU
440 default 0x60000000 if !MMU 507 default 0x60000000 if !MMU
441 help 508 help
442 This is a fallback start address of the default memory area, it is 509 This is the base address of the default memory area.
443 used when no physical memory size is passed through DTB or through 510 Default memory area has platform-specific meaning, it may be used
444 boot parameter from bootloader. 511 for e.g. early cache initialization.
445
446 In noMMU configuration the following parameters are derived from it:
447 - kernel load address;
448 - kernel entry point address;
449 - relocatable vectors base address;
450 - uBoot load address;
451 - TASK_SIZE.
452 512
453 If unsure, leave the default value here. 513 If unsure, leave the default value here.
454 514
@@ -457,11 +517,9 @@ config DEFAULT_MEM_SIZE
457 depends on PLATFORM_WANT_DEFAULT_MEM 517 depends on PLATFORM_WANT_DEFAULT_MEM
458 default 0x04000000 518 default 0x04000000
459 help 519 help
460 This is a fallback size of the default memory area, it is used when 520 This is the size of the default memory area.
461 no physical memory size is passed through DTB or through boot 521 Default memory area has platform-specific meaning, it may be used
462 parameter from bootloader. 522 for e.g. early cache initialization.
463
464 It's also used for TASK_SIZE calculation in noMMU configuration.
465 523
466 If unsure, leave the default value here. 524 If unsure, leave the default value here.
467 525
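
The help texts above carry two constraints that are easy to violate: KSEG_PADDR
must be aligned to the alignment of the chosen KSEG layout, and the unpacked
kernel image (including vectors) must sit entirely inside KSEG. A compile-time
sketch of those checks, modelled on the #error test the new kmem_layout.h
performs; the EXAMPLE_* values and the second check are illustrative only:

/* Example values for the 256MB layout; compile with -c to test. */
#define EXAMPLE_KSEG_PADDR          0x10000000ul  /* must be 256MB-aligned */
#define EXAMPLE_KERNEL_LOAD_ADDRESS 0x10003000ul  /* physical for MMUv3 */
#define EXAMPLE_KSEG_ALIGNMENT      0x10000000ul
#define EXAMPLE_KSEG_SIZE           0x10000000ul

#if EXAMPLE_KSEG_PADDR & (EXAMPLE_KSEG_ALIGNMENT - 1)
#error KSEG physical address is not aligned to the KSEG alignment
#endif

#if EXAMPLE_KERNEL_LOAD_ADDRESS < EXAMPLE_KSEG_PADDR || \
    EXAMPLE_KERNEL_LOAD_ADDRESS >= EXAMPLE_KSEG_PADDR + EXAMPLE_KSEG_SIZE
#error Kernel load address falls outside the KSEG window
#endif

typedef int example_kseg_checks_ok;  /* keep the translation unit non-empty */
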
diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S
index e54f2c9df63a..a30993054e9c 100644
--- a/arch/xtensa/boot/boot-elf/boot.lds.S
+++ b/arch/xtensa/boot/boot-elf/boot.lds.S
@@ -23,7 +23,7 @@ SECTIONS
23 *(.ResetVector.text) 23 *(.ResetVector.text)
24 } 24 }
25 25
26 .image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS) 26 .image KERNELOFFSET: AT (CONFIG_KERNEL_LOAD_ADDRESS)
27 { 27 {
28 _image_start = .; 28 _image_start = .;
29 *(image) 29 *(image)
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index e6bf313613cf..b6aa85328ac0 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -35,7 +35,12 @@ _ResetVector:
35 35
36 .align 4 36 .align 4
37RomInitAddr: 37RomInitAddr:
38 .word LOAD_MEMORY_ADDRESS 38#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
39 XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
40 .word CONFIG_KERNEL_LOAD_ADDRESS
41#else
42 .word KERNELOFFSET
43#endif
39RomBootParam: 44RomBootParam:
40 .word _bootparam 45 .word _bootparam
41_bootparam: 46_bootparam:
diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile
index 403fcf23405c..0f4c417b4196 100644
--- a/arch/xtensa/boot/boot-uboot/Makefile
+++ b/arch/xtensa/boot/boot-uboot/Makefile
@@ -4,15 +4,7 @@
4# for more details. 4# for more details.
5# 5#
6 6
7ifdef CONFIG_MMU 7UIMAGE_LOADADDR = $(CONFIG_KERNEL_LOAD_ADDRESS)
8ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
9UIMAGE_LOADADDR = 0x00003000
10else
11UIMAGE_LOADADDR = 0xd0003000
12endif
13else
14UIMAGE_LOADADDR = $(shell printf "0x%x" $$(( ${CONFIG_DEFAULT_MEM_START} + 0x3000 )) )
15endif
16UIMAGE_COMPRESSION = gzip 8UIMAGE_COMPRESSION = gzip
17 9
18$(obj)/../uImage: vmlinux.bin.gz FORCE 10$(obj)/../uImage: vmlinux.bin.gz FORCE
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 3f44fa2a53e9..d3490189792b 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -48,7 +48,7 @@ static inline int ffz(unsigned long x)
48 * __ffs: Find first bit set in word. Return 0 for bit 0 48 * __ffs: Find first bit set in word. Return 0 for bit 0
49 */ 49 */
50 50
51static inline int __ffs(unsigned long x) 51static inline unsigned long __ffs(unsigned long x)
52{ 52{
53 return 31 - __cntlz(x & -x); 53 return 31 - __cntlz(x & -x);
54} 54}
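
The only functional change to bitops.h is the return type of __ffs(): generic
kernel code expects __ffs() to return unsigned long, so the int prototype
produced type mismatches against common code (this rationale is inferred from
the change itself, not spelled out in the diff). For reference, the same
find-first-set semantics in portable C, using a compiler builtin in place of
Xtensa's __cntlz:

#include <stdio.h>

/* Index of the lowest set bit, 0 for bit 0; undefined for x == 0,
 * just like the kernel helper. */
static inline unsigned long my_ffs(unsigned long x)
{
	return (unsigned long)__builtin_ctzl(x);
}

int main(void)
{
	printf("my_ffs(0x40) = %lu\n", my_ffs(0x40));  /* 6 */
	printf("my_ffs(0x01) = %lu\n", my_ffs(0x01));  /* 0 */
	return 0;
}
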
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
index e0f9e1109c83..2041abb10a23 100644
--- a/arch/xtensa/include/asm/cacheasm.h
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -69,26 +69,23 @@
69 .endm 69 .endm
70 70
71 71
72#if XCHAL_DCACHE_LINE_LOCKABLE
73
74 .macro ___unlock_dcache_all ar at 72 .macro ___unlock_dcache_all ar at
75 73
76#if XCHAL_DCACHE_SIZE 74#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
77 __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH 75 __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
78#endif 76#endif
79 77
80 .endm 78 .endm
81 79
82#endif
83
84#if XCHAL_ICACHE_LINE_LOCKABLE
85 80
86 .macro ___unlock_icache_all ar at 81 .macro ___unlock_icache_all ar at
87 82
83#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
88 __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH 84 __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
85#endif
89 86
90 .endm 87 .endm
91#endif 88
92 89
93 .macro ___flush_invalidate_dcache_all ar at 90 .macro ___flush_invalidate_dcache_all ar at
94 91
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 62b507deea9d..0d30403b6c95 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -59,6 +59,11 @@ enum fixed_addresses {
59 */ 59 */
60static __always_inline unsigned long fix_to_virt(const unsigned int idx) 60static __always_inline unsigned long fix_to_virt(const unsigned int idx)
61{ 61{
62 /* Check if this memory layout is broken because fixmap overlaps page
63 * table.
64 */
65 BUILD_BUG_ON(FIXADDR_START <
66 XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
62 BUILD_BUG_ON(idx >= __end_of_fixed_addresses); 67 BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
63 return __fix_to_virt(idx); 68 return __fix_to_virt(idx);
64} 69}
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 01cef6b40829..6e070db1022e 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -68,6 +68,11 @@ void kunmap_high(struct page *page);
68 68
69static inline void *kmap(struct page *page) 69static inline void *kmap(struct page *page)
70{ 70{
71 /* Check if this memory layout is broken because PKMAP overlaps
72 * page table.
73 */
74 BUILD_BUG_ON(PKMAP_BASE <
75 XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
71 BUG_ON(in_interrupt()); 76 BUG_ON(in_interrupt());
72 if (!PageHighMem(page)) 77 if (!PageHighMem(page))
73 return page_address(page); 78 return page_address(page);
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 7a1e075969a3..42410f253597 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -77,13 +77,16 @@
77 77
78 .align 4 78 .align 4
791: movi a2, 0x10000000 791: movi a2, 0x10000000
80 movi a3, 0x18000000 80
81 add a2, a2, a0 81#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
829: bgeu a2, a3, 9b /* PC is out of the expected range */ 82#define TEMP_MAPPING_VADDR 0x40000000
83#else
84#define TEMP_MAPPING_VADDR 0x00000000
85#endif
83 86
84 /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */ 87 /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
85 88
86 movi a2, 0x40000000 | XCHAL_SPANNING_WAY 89 movi a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
87 idtlb a2 90 idtlb a2
88 iitlb a2 91 iitlb a2
89 isync 92 isync
@@ -95,14 +98,14 @@
95 srli a3, a0, 27 98 srli a3, a0, 27
96 slli a3, a3, 27 99 slli a3, a3, 27
97 addi a3, a3, CA_BYPASS 100 addi a3, a3, CA_BYPASS
98 addi a7, a2, -1 101 addi a7, a2, 5 - XCHAL_SPANNING_WAY
99 wdtlb a3, a7 102 wdtlb a3, a7
100 witlb a3, a7 103 witlb a3, a7
101 isync 104 isync
102 105
103 slli a4, a0, 5 106 slli a4, a0, 5
104 srli a4, a4, 5 107 srli a4, a4, 5
105 addi a5, a2, -6 108 addi a5, a2, -XCHAL_SPANNING_WAY
106 add a4, a4, a5 109 add a4, a4, a5
107 jx a4 110 jx a4
108 111
@@ -116,35 +119,48 @@
116 add a5, a5, a4 119 add a5, a5, a4
117 bne a5, a2, 3b 120 bne a5, a2, 3b
118 121
119 /* Step 4: Setup MMU with the old V2 mappings. */ 122 /* Step 4: Setup MMU with the requested static mappings. */
123
120 movi a6, 0x01000000 124 movi a6, 0x01000000
121 wsr a6, ITLBCFG 125 wsr a6, ITLBCFG
122 wsr a6, DTLBCFG 126 wsr a6, DTLBCFG
123 isync 127 isync
124 128
125 movi a5, 0xd0000005 129 movi a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
126 movi a4, CA_WRITEBACK 130 movi a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
127 wdtlb a4, a5 131 wdtlb a4, a5
128 witlb a4, a5 132 witlb a4, a5
129 133
130 movi a5, 0xd8000005 134 movi a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
131 movi a4, CA_BYPASS 135 movi a4, XCHAL_KSEG_PADDR + CA_BYPASS
132 wdtlb a4, a5 136 wdtlb a4, a5
133 witlb a4, a5 137 witlb a4, a5
134 138
135 movi a5, XCHAL_KIO_CACHED_VADDR + 6 139#ifdef CONFIG_XTENSA_KSEG_512M
140 movi a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
141 movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
142 wdtlb a4, a5
143 witlb a4, a5
144
145 movi a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
146 movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
147 wdtlb a4, a5
148 witlb a4, a5
149#endif
150
151 movi a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
136 movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK 152 movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
137 wdtlb a4, a5 153 wdtlb a4, a5
138 witlb a4, a5 154 witlb a4, a5
139 155
140 movi a5, XCHAL_KIO_BYPASS_VADDR + 6 156 movi a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
141 movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS 157 movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
142 wdtlb a4, a5 158 wdtlb a4, a5
143 witlb a4, a5 159 witlb a4, a5
144 160
145 isync 161 isync
146 162
147 /* Jump to self, using MMU v2 mappings. */ 163 /* Jump to self, using final mappings. */
148 movi a4, 1f 164 movi a4, 1f
149 jx a4 165 jx a4
150 166
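
Step 4 of the macro now derives every wdtlb/witlb pair from the kmem_layout.h
constants instead of the hard-coded 0xd0000005-style immediates. A host-side
sketch that only computes and prints the (entry, attribute) register values the
macro would write for the default layout; the CA_WRITEBACK/CA_BYPASS encodings
below are placeholders, since the real ones come from the variant headers:

#include <stdio.h>

/* Default MMUv2-compatible layout from kmem_layout.h. */
#define XCHAL_KSEG_CACHED_VADDR 0xd0000000u
#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000u
#define XCHAL_KSEG_PADDR        0x00000000u
#define XCHAL_KSEG_TLB_WAY      5u

/* Placeholder cache-attribute encodings, for illustration only. */
#define CA_WRITEBACK 0x7u
#define CA_BYPASS    0x3u

static void show(const char *what, unsigned int vaddr, unsigned int way,
		 unsigned int paddr, unsigned int ca)
{
	/* wdtlb/witlb take an "entry" (vaddr + way) and an attribute word. */
	printf("%-12s entry=0x%08x attr=0x%08x\n", what, vaddr + way, paddr + ca);
}

int main(void)
{
	show("KSEG cached", XCHAL_KSEG_CACHED_VADDR, XCHAL_KSEG_TLB_WAY,
	     XCHAL_KSEG_PADDR, CA_WRITEBACK);
	show("KSEG bypass", XCHAL_KSEG_BYPASS_VADDR, XCHAL_KSEG_TLB_WAY,
	     XCHAL_KSEG_PADDR, CA_BYPASS);
	return 0;
}
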
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
new file mode 100644
index 000000000000..561f8729bcde
--- /dev/null
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -0,0 +1,74 @@
1/*
2 * Kernel virtual memory layout definitions.
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License. See the file "COPYING" in the main directory of
6 * this archive for more details.
7 *
8 * Copyright (C) 2016 Cadence Design Systems Inc.
9 */
10
11#ifndef _XTENSA_KMEM_LAYOUT_H
12#define _XTENSA_KMEM_LAYOUT_H
13
14#include <asm/types.h>
15
16#ifdef CONFIG_MMU
17
18/*
19 * Fixed TLB translations in the processor.
20 */
21
22#define XCHAL_PAGE_TABLE_VADDR __XTENSA_UL_CONST(0x80000000)
23#define XCHAL_PAGE_TABLE_SIZE __XTENSA_UL_CONST(0x00400000)
24
25#if defined(CONFIG_XTENSA_KSEG_MMU_V2)
26
27#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
28#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
29#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
30#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x08000000)
31#define XCHAL_KSEG_TLB_WAY 5
32#define XCHAL_KIO_TLB_WAY 6
33
34#elif defined(CONFIG_XTENSA_KSEG_256M)
35
36#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xb0000000)
37#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
38#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x10000000)
39#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
40#define XCHAL_KSEG_TLB_WAY 6
41#define XCHAL_KIO_TLB_WAY 6
42
43#elif defined(CONFIG_XTENSA_KSEG_512M)
44
45#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xa0000000)
46#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
47#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x20000000)
48#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
49#define XCHAL_KSEG_TLB_WAY 6
50#define XCHAL_KIO_TLB_WAY 6
51
52#else
53#error Unsupported KSEG configuration
54#endif
55
56#ifdef CONFIG_KSEG_PADDR
57#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(CONFIG_KSEG_PADDR)
58#else
59#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
60#endif
61
62#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1)
63#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
64#endif
65
66#else
67
68#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
69#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
70#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
71
72#endif
73
74#endif
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index ad38500471fa..976b1d70edbc 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -15,15 +15,7 @@
15#include <asm/types.h> 15#include <asm/types.h>
16#include <asm/cache.h> 16#include <asm/cache.h>
17#include <platform/hardware.h> 17#include <platform/hardware.h>
18 18#include <asm/kmem_layout.h>
19/*
20 * Fixed TLB translations in the processor.
21 */
22
23#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
24#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
25#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
26#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
27 19
28/* 20/*
29 * PAGE_SHIFT determines the page size 21 * PAGE_SHIFT determines the page size
@@ -35,10 +27,13 @@
35 27
36#ifdef CONFIG_MMU 28#ifdef CONFIG_MMU
37#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR 29#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
38#define MAX_MEM_PFN XCHAL_KSEG_SIZE 30#define PHYS_OFFSET XCHAL_KSEG_PADDR
31#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \
32 PHYS_PFN(XCHAL_KSEG_SIZE))
39#else 33#else
40#define PAGE_OFFSET __XTENSA_UL_CONST(0) 34#define PAGE_OFFSET PLATFORM_DEFAULT_MEM_START
41#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) 35#define PHYS_OFFSET PLATFORM_DEFAULT_MEM_START
36#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
42#endif 37#endif
43 38
44#define PGTABLE_START 0x80000000 39#define PGTABLE_START 0x80000000
@@ -167,10 +162,12 @@ void copy_user_highpage(struct page *to, struct page *from,
167 * addresses. 162 * addresses.
168 */ 163 */
169 164
170#define ARCH_PFN_OFFSET (PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT) 165#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
171 166
172#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) 167#define __pa(x) \
173#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) 168 ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
169#define __va(x) \
170 ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
174#define pfn_valid(pfn) \ 171#define pfn_valid(pfn) \
175 ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) 172 ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
176 173
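
With PHYS_OFFSET in play, __pa()/__va() are no longer a plain subtraction of
PAGE_OFFSET; they translate by the constant (PHYS_OFFSET - PAGE_OFFSET). A
standalone sketch of the new arithmetic, here with the 256MB layout and a
non-zero KSEG physical base chosen purely as an example:

#include <stdio.h>

#define PAGE_OFFSET 0xb0000000ul  /* XCHAL_KSEG_CACHED_VADDR, 256MB layout */
#define PHYS_OFFSET 0x10000000ul  /* example CONFIG_KSEG_PADDR */

#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))

int main(void)
{
	unsigned long kva = 0xb0003000ul;  /* e.g. start of the kernel image */
	unsigned long pa = __pa(kva);

	printf("__pa(0x%08lx) = 0x%08lx\n", kva, pa);
	printf("__va(0x%08lx) = %p\n", pa, __va(pa));
	return 0;
}
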
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index fb02fdc5ecee..8aa0e0d9cbb2 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -13,6 +13,7 @@
13 13
14#include <asm-generic/pgtable-nopmd.h> 14#include <asm-generic/pgtable-nopmd.h>
15#include <asm/page.h> 15#include <asm/page.h>
16#include <asm/kmem_layout.h>
16 17
17/* 18/*
18 * We only use two ring levels, user and kernel space. 19 * We only use two ring levels, user and kernel space.
@@ -68,9 +69,9 @@
68 * Virtual memory area. We keep a distance to other memory regions to be 69 * Virtual memory area. We keep a distance to other memory regions to be
69 * on the safe side. We also use this area for cache aliasing. 70 * on the safe side. We also use this area for cache aliasing.
70 */ 71 */
71#define VMALLOC_START 0xC0000000 72#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000)
72#define VMALLOC_END 0xC7FEFFFF 73#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF)
73#define TLBTEMP_BASE_1 0xC7FF0000 74#define TLBTEMP_BASE_1 (VMALLOC_END + 1)
74#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE) 75#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
75#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE 76#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
76#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE) 77#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index d2e40d39c615..b42d68bfe3cf 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -37,7 +37,7 @@
37#ifdef CONFIG_MMU 37#ifdef CONFIG_MMU
38#define TASK_SIZE __XTENSA_UL_CONST(0x40000000) 38#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
39#else 39#else
40#define TASK_SIZE (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) 40#define TASK_SIZE __XTENSA_UL_CONST(0xffffffff)
41#endif 41#endif
42 42
43#define STACK_TOP TASK_SIZE 43#define STACK_TOP TASK_SIZE
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
index c015c5c8e3f7..552cdfd8590e 100644
--- a/arch/xtensa/include/asm/sysmem.h
+++ b/arch/xtensa/include/asm/sysmem.h
@@ -11,27 +11,8 @@
11#ifndef _XTENSA_SYSMEM_H 11#ifndef _XTENSA_SYSMEM_H
12#define _XTENSA_SYSMEM_H 12#define _XTENSA_SYSMEM_H
13 13
14#define SYSMEM_BANKS_MAX 31 14#include <linux/memblock.h>
15 15
16struct meminfo {
17 unsigned long start;
18 unsigned long end;
19};
20
21/*
22 * Bank array is sorted by .start.
23 * Banks don't overlap and there's at least one page gap
24 * between adjacent bank entries.
25 */
26struct sysmem_info {
27 int nr_banks;
28 struct meminfo bank[SYSMEM_BANKS_MAX];
29};
30
31extern struct sysmem_info sysmem;
32
33int add_sysmem_bank(unsigned long start, unsigned long end);
34int mem_reserve(unsigned long, unsigned long, int);
35void bootmem_init(void); 16void bootmem_init(void);
36void zones_init(void); 17void zones_init(void);
37 18
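
sysmem.h shrinks to a thin wrapper around memblock: the private bank array and
its add_sysmem_bank()/mem_reserve() bookkeeping are dropped in favour of the
generic allocator. The correspondence, kernel-context only and written against
the memblock calls that actually appear in the setup.c and init.c hunks below
(intervals become base + size):

#include <linux/init.h>
#include <linux/memblock.h>

/* add_sysmem_bank(start, end) used to register a RAM bank. */
static int __init example_add(unsigned long start, unsigned long end)
{
	return memblock_add(start, end - start);
}

/* mem_reserve(start, end, must_exist): the must_exist flag is gone,
 * memblock_reserve() simply records the reserved range. */
static int __init example_reserve(unsigned long start, unsigned long end)
{
	return memblock_reserve(start, end - start);
}
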
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 288c776736d3..77d41cc7a688 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -20,6 +20,7 @@
20 20
21#include <variant/core.h> 21#include <variant/core.h>
22#include <platform/hardware.h> 22#include <platform/hardware.h>
23#include <asm/kmem_layout.h>
23 24
24#if XCHAL_HAVE_PTP_MMU 25#if XCHAL_HAVE_PTP_MMU
25#define XCHAL_KIO_CACHED_VADDR 0xe0000000 26#define XCHAL_KIO_CACHED_VADDR 0xe0000000
@@ -47,61 +48,42 @@ static inline unsigned long xtensa_get_kio_paddr(void)
47 48
48#if defined(CONFIG_MMU) 49#if defined(CONFIG_MMU)
49 50
50/* Will Become VECBASE */ 51#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
51#define VIRTUAL_MEMORY_ADDRESS 0xD0000000
52
53/* Image Virtual Start Address */ 52/* Image Virtual Start Address */
54#define KERNELOFFSET 0xD0003000 53#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
55 54 CONFIG_KERNEL_LOAD_ADDRESS - \
56#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY 55 XCHAL_KSEG_PADDR)
57 /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */
58 #define LOAD_MEMORY_ADDRESS 0x00003000
59#else 56#else
60 /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */ 57#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
61 #define LOAD_MEMORY_ADDRESS 0xD0003000
62#endif 58#endif
63 59
64#define RESET_VECTOR1_VADDR (VIRTUAL_MEMORY_ADDRESS + \
65 XCHAL_RESET_VECTOR1_PADDR)
66
67#else /* !defined(CONFIG_MMU) */ 60#else /* !defined(CONFIG_MMU) */
68 /* MMU Not being used - Virtual == Physical */ 61 /* MMU Not being used - Virtual == Physical */
69 62
70 /* VECBASE */ 63/* Location of the start of the kernel text, _start */
71 #define VIRTUAL_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x2000) 64#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
72 65
73 /* Location of the start of the kernel text, _start */
74 #define KERNELOFFSET (PLATFORM_DEFAULT_MEM_START + 0x3000)
75
76 /* Loaded just above possibly live vectors */
77 #define LOAD_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x3000)
78
79#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
80 66
81#endif /* CONFIG_MMU */ 67#endif /* CONFIG_MMU */
82 68
83#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) 69#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
84 70#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
85/* Used to set VECBASE register */
86#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS
87 71
88#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE 72#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
89 73
90#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) 74#define VECTOR_VADDR(offset) (VECBASE_VADDR + offset)
91#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS)
92#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
93#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
94#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
95#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
96#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
97#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
98#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
99
100#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS)
101 75
102#define NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS) 76#define USER_VECTOR_VADDR VECTOR_VADDR(XCHAL_USER_VECOFS)
103 77#define KERNEL_VECTOR_VADDR VECTOR_VADDR(XCHAL_KERNEL_VECOFS)
104#define INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS) 78#define DOUBLEEXC_VECTOR_VADDR VECTOR_VADDR(XCHAL_DOUBLEEXC_VECOFS)
79#define WINDOW_VECTORS_VADDR VECTOR_VADDR(XCHAL_WINDOW_OF4_VECOFS)
80#define INTLEVEL2_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL2_VECOFS)
81#define INTLEVEL3_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL3_VECOFS)
82#define INTLEVEL4_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL4_VECOFS)
83#define INTLEVEL5_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL5_VECOFS)
84#define INTLEVEL6_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL6_VECOFS)
85#define INTLEVEL7_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL7_VECOFS)
86#define DEBUG_VECTOR_VADDR VECTOR_VADDR(XCHAL_DEBUG_VECOFS)
105 87
106/* 88/*
107 * These XCHAL_* #defines from varian/core.h 89 * These XCHAL_* #defines from varian/core.h
@@ -109,7 +91,6 @@ static inline unsigned long xtensa_get_kio_paddr(void)
109 * constants are defined above and should be used. 91 * constants are defined above and should be used.
110 */ 92 */
111#undef XCHAL_VECBASE_RESET_VADDR 93#undef XCHAL_VECBASE_RESET_VADDR
112#undef XCHAL_RESET_VECTOR0_VADDR
113#undef XCHAL_USER_VECTOR_VADDR 94#undef XCHAL_USER_VECTOR_VADDR
114#undef XCHAL_KERNEL_VECTOR_VADDR 95#undef XCHAL_KERNEL_VECTOR_VADDR
115#undef XCHAL_DOUBLEEXC_VECTOR_VADDR 96#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
@@ -119,9 +100,8 @@ static inline unsigned long xtensa_get_kio_paddr(void)
119#undef XCHAL_INTLEVEL4_VECTOR_VADDR 100#undef XCHAL_INTLEVEL4_VECTOR_VADDR
120#undef XCHAL_INTLEVEL5_VECTOR_VADDR 101#undef XCHAL_INTLEVEL5_VECTOR_VADDR
121#undef XCHAL_INTLEVEL6_VECTOR_VADDR 102#undef XCHAL_INTLEVEL6_VECTOR_VADDR
122#undef XCHAL_DEBUG_VECTOR_VADDR
123#undef XCHAL_NMI_VECTOR_VADDR
124#undef XCHAL_INTLEVEL7_VECTOR_VADDR 103#undef XCHAL_INTLEVEL7_VECTOR_VADDR
104#undef XCHAL_DEBUG_VECTOR_VADDR
125 105
126#else 106#else
127 107
@@ -134,6 +114,7 @@ static inline unsigned long xtensa_get_kio_paddr(void)
134#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR 114#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
135#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR 115#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
136#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR 116#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
117#define INTLEVEL7_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
137#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR 118#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
138 119
139#endif 120#endif
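
With the hard-coded 0xD0003000/0xD0000000 constants gone, both the kernel image
VA and the vectors base fall out of the configuration. A standalone sketch of
the new arithmetic for the MMUv3 spanning-way case; plugging in the default
Kconfig values reproduces the old constants:

#include <stdio.h>

/* Default MMUv2-compatible layout and default Kconfig values. */
#define XCHAL_KSEG_CACHED_VADDR    0xd0000000ul
#define XCHAL_KSEG_PADDR           0x00000000ul
#define CONFIG_KERNEL_LOAD_ADDRESS 0x00003000ul
#define CONFIG_VECTORS_OFFSET      0x00003000ul

#define KERNELOFFSET  (XCHAL_KSEG_CACHED_VADDR + \
		       CONFIG_KERNEL_LOAD_ADDRESS - XCHAL_KSEG_PADDR)
#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)

int main(void)
{
	/* Prints 0xd0003000 and 0xd0000000, the previously hard-coded values. */
	printf("KERNELOFFSET  = 0x%08lx\n", KERNELOFFSET);
	printf("VECBASE_VADDR = 0x%08lx\n", VECBASE_VADDR);
	return 0;
}
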
diff --git a/arch/xtensa/include/uapi/asm/types.h b/arch/xtensa/include/uapi/asm/types.h
index 87ec7ae73cb1..2efc921506c4 100644
--- a/arch/xtensa/include/uapi/asm/types.h
+++ b/arch/xtensa/include/uapi/asm/types.h
@@ -18,7 +18,8 @@
18# define __XTENSA_UL_CONST(x) x 18# define __XTENSA_UL_CONST(x) x
19#else 19#else
20# define __XTENSA_UL(x) ((unsigned long)(x)) 20# define __XTENSA_UL(x) ((unsigned long)(x))
21# define __XTENSA_UL_CONST(x) x##UL 21# define ___XTENSA_UL_CONST(x) x##UL
22# define __XTENSA_UL_CONST(x) ___XTENSA_UL_CONST(x)
22#endif 23#endif
23 24
24#ifndef __ASSEMBLY__ 25#ifndef __ASSEMBLY__
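
The extra ___XTENSA_UL_CONST level matters once __XTENSA_UL_CONST() is handed
another macro, e.g. CONFIG_KSEG_PADDR in the new kmem_layout.h: token pasting
happens before argument expansion, so a single-level x##UL would glue "UL" onto
the unexpanded macro name. A minimal illustration with generic names:

#include <stdio.h>

#define VALUE 0x10000000

/* Single level: VALUE is pasted before being expanded -> the undefined
 * identifier VALUEUL (a compile error if actually used). */
#define BAD_UL(x)   x##UL

/* Two levels: x expands to 0x10000000 first, then UL is pasted on. */
#define _GOOD_UL(x) x##UL
#define GOOD_UL(x)  _GOOD_UL(x)

int main(void)
{
	printf("GOOD_UL(VALUE) = 0x%08lx\n", GOOD_UL(VALUE));
	/* printf("%lu\n", BAD_UL(VALUE));   would fail: 'VALUEUL' undeclared */
	return 0;
}
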
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index b95c30594355..de9b14b2d348 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -754,7 +754,20 @@ __SYSCALL(340, sys_bpf, 3)
754#define __NR_execveat 341 754#define __NR_execveat 341
755__SYSCALL(341, sys_execveat, 5) 755__SYSCALL(341, sys_execveat, 5)
756 756
757#define __NR_syscall_count 342 757#define __NR_userfaultfd 342
758__SYSCALL(342, sys_userfaultfd, 1)
759#define __NR_membarrier 343
760__SYSCALL(343, sys_membarrier, 2)
761#define __NR_mlock2 344
762__SYSCALL(344, sys_mlock2, 3)
763#define __NR_copy_file_range 345
764__SYSCALL(345, sys_copy_file_range, 6)
765#define __NR_preadv2 346
766__SYSCALL(346, sys_preadv2, 6)
767#define __NR_pwritev2 347
768__SYSCALL(347, sys_pwritev2, 6)
769
770#define __NR_syscall_count 348
758 771
759/* 772/*
760 * sysxtensa syscall handler 773 * sysxtensa syscall handler
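
The six new entries are straight pass-throughs to the generic implementations,
so userspace can reach them with syscall(2) before libc grows wrappers. A
hedged example for membarrier, using number 343 from the table above (on other
architectures, or with current headers, use __NR_membarrier instead of the
hard-coded constant):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define XTENSA_NR_membarrier 343  /* from the table above */
#define MEMBARRIER_CMD_QUERY 0    /* linux/membarrier.h */

int main(void)
{
	long mask = syscall(XTENSA_NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

	if (mask < 0)
		perror("membarrier");
	else
		printf("supported membarrier commands: 0x%lx\n", mask);
	return 0;
}
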
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index fe8f7e7efb9d..fa04d9d368a7 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1632,10 +1632,11 @@ ENTRY(fast_second_level_miss)
1632 * The messy computation for 'pteval' above really simplifies 1632 * The messy computation for 'pteval' above really simplifies
1633 * into the following: 1633 * into the following:
1634 * 1634 *
1635 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY 1635 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
1636 * | PAGE_DIRECTORY
1636 */ 1637 */
1637 1638
1638 movi a1, (-PAGE_OFFSET) & 0xffffffff 1639 movi a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
1639 add a0, a0, a1 # pmdval - PAGE_OFFSET 1640 add a0, a0, a1 # pmdval - PAGE_OFFSET
1640 extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK 1641 extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
1641 xor a0, a0, a1 1642 xor a0, a0, a1
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index bc4f4bf05099..23ce62e60435 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -113,7 +113,7 @@ ENTRY(_startup)
113 movi a0, 0 113 movi a0, 0
114 114
115#if XCHAL_HAVE_VECBASE 115#if XCHAL_HAVE_VECBASE
116 movi a2, VECBASE_RESET_VADDR 116 movi a2, VECBASE_VADDR
117 wsr a2, vecbase 117 wsr a2, vecbase
118#endif 118#endif
119 119
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 9735691f37f1..393206b6aabc 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright (C) 1995 Linus Torvalds 8 * Copyright (C) 1995 Linus Torvalds
9 * Copyright (C) 2001 - 2005 Tensilica Inc. 9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
10 * 11 *
11 * Chris Zankel <chris@zankel.net> 12 * Chris Zankel <chris@zankel.net>
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> 13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -24,6 +25,7 @@
24#include <linux/percpu.h> 25#include <linux/percpu.h>
25#include <linux/clk-provider.h> 26#include <linux/clk-provider.h>
26#include <linux/cpu.h> 27#include <linux/cpu.h>
28#include <linux/of.h>
27#include <linux/of_fdt.h> 29#include <linux/of_fdt.h>
28#include <linux/of_platform.h> 30#include <linux/of_platform.h>
29 31
@@ -114,7 +116,7 @@ static int __init parse_tag_mem(const bp_tag_t *tag)
114 if (mi->type != MEMORY_TYPE_CONVENTIONAL) 116 if (mi->type != MEMORY_TYPE_CONVENTIONAL)
115 return -1; 117 return -1;
116 118
117 return add_sysmem_bank(mi->start, mi->end); 119 return memblock_add(mi->start, mi->end - mi->start);
118} 120}
119 121
120__tagtable(BP_TAG_MEMORY, parse_tag_mem); 122__tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -188,7 +190,6 @@ static int __init parse_bootparam(const bp_tag_t* tag)
188} 190}
189 191
190#ifdef CONFIG_OF 192#ifdef CONFIG_OF
191bool __initdata dt_memory_scan = false;
192 193
193#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY 194#if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
194unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR; 195unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
@@ -228,11 +229,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
228 229
229void __init early_init_dt_add_memory_arch(u64 base, u64 size) 230void __init early_init_dt_add_memory_arch(u64 base, u64 size)
230{ 231{
231 if (!dt_memory_scan)
232 return;
233
234 size &= PAGE_MASK; 232 size &= PAGE_MASK;
235 add_sysmem_bank(base, base + size); 233 memblock_add(base, size);
236} 234}
237 235
238void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) 236void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -242,9 +240,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
242 240
243void __init early_init_devtree(void *params) 241void __init early_init_devtree(void *params)
244{ 242{
245 if (sysmem.nr_banks == 0)
246 dt_memory_scan = true;
247
248 early_init_dt_scan(params); 243 early_init_dt_scan(params);
249 of_scan_flat_dt(xtensa_dt_io_area, NULL); 244 of_scan_flat_dt(xtensa_dt_io_area, NULL);
250 245
@@ -278,12 +273,6 @@ void __init init_arch(bp_tag_t *bp_start)
278 early_init_devtree(dtb_start); 273 early_init_devtree(dtb_start);
279#endif 274#endif
280 275
281 if (sysmem.nr_banks == 0) {
282 add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
283 PLATFORM_DEFAULT_MEM_START +
284 PLATFORM_DEFAULT_MEM_SIZE);
285 }
286
287#ifdef CONFIG_CMDLINE_BOOL 276#ifdef CONFIG_CMDLINE_BOOL
288 if (!command_line[0]) 277 if (!command_line[0])
289 strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE); 278 strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
@@ -453,6 +442,10 @@ static int __init check_s32c1i(void)
453early_initcall(check_s32c1i); 442early_initcall(check_s32c1i);
454#endif /* CONFIG_S32C1I_SELFTEST */ 443#endif /* CONFIG_S32C1I_SELFTEST */
455 444
445static inline int mem_reserve(unsigned long start, unsigned long end)
446{
447 return memblock_reserve(start, end - start);
448}
456 449
457void __init setup_arch(char **cmdline_p) 450void __init setup_arch(char **cmdline_p)
458{ 451{
@@ -464,54 +457,54 @@ void __init setup_arch(char **cmdline_p)
464#ifdef CONFIG_BLK_DEV_INITRD 457#ifdef CONFIG_BLK_DEV_INITRD
465 if (initrd_start < initrd_end) { 458 if (initrd_start < initrd_end) {
466 initrd_is_mapped = mem_reserve(__pa(initrd_start), 459 initrd_is_mapped = mem_reserve(__pa(initrd_start),
467 __pa(initrd_end), 0) == 0; 460 __pa(initrd_end)) == 0;
468 initrd_below_start_ok = 1; 461 initrd_below_start_ok = 1;
469 } else { 462 } else {
470 initrd_start = 0; 463 initrd_start = 0;
471 } 464 }
472#endif 465#endif
473 466
474 mem_reserve(__pa(&_stext),__pa(&_end), 1); 467 mem_reserve(__pa(&_stext), __pa(&_end));
475 468
476 mem_reserve(__pa(&_WindowVectors_text_start), 469 mem_reserve(__pa(&_WindowVectors_text_start),
477 __pa(&_WindowVectors_text_end), 0); 470 __pa(&_WindowVectors_text_end));
478 471
479 mem_reserve(__pa(&_DebugInterruptVector_literal_start), 472 mem_reserve(__pa(&_DebugInterruptVector_literal_start),
480 __pa(&_DebugInterruptVector_text_end), 0); 473 __pa(&_DebugInterruptVector_text_end));
481 474
482 mem_reserve(__pa(&_KernelExceptionVector_literal_start), 475 mem_reserve(__pa(&_KernelExceptionVector_literal_start),
483 __pa(&_KernelExceptionVector_text_end), 0); 476 __pa(&_KernelExceptionVector_text_end));
484 477
485 mem_reserve(__pa(&_UserExceptionVector_literal_start), 478 mem_reserve(__pa(&_UserExceptionVector_literal_start),
486 __pa(&_UserExceptionVector_text_end), 0); 479 __pa(&_UserExceptionVector_text_end));
487 480
488 mem_reserve(__pa(&_DoubleExceptionVector_literal_start), 481 mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
489 __pa(&_DoubleExceptionVector_text_end), 0); 482 __pa(&_DoubleExceptionVector_text_end));
490 483
491#if XCHAL_EXCM_LEVEL >= 2 484#if XCHAL_EXCM_LEVEL >= 2
492 mem_reserve(__pa(&_Level2InterruptVector_text_start), 485 mem_reserve(__pa(&_Level2InterruptVector_text_start),
493 __pa(&_Level2InterruptVector_text_end), 0); 486 __pa(&_Level2InterruptVector_text_end));
494#endif 487#endif
495#if XCHAL_EXCM_LEVEL >= 3 488#if XCHAL_EXCM_LEVEL >= 3
496 mem_reserve(__pa(&_Level3InterruptVector_text_start), 489 mem_reserve(__pa(&_Level3InterruptVector_text_start),
497 __pa(&_Level3InterruptVector_text_end), 0); 490 __pa(&_Level3InterruptVector_text_end));
498#endif 491#endif
499#if XCHAL_EXCM_LEVEL >= 4 492#if XCHAL_EXCM_LEVEL >= 4
500 mem_reserve(__pa(&_Level4InterruptVector_text_start), 493 mem_reserve(__pa(&_Level4InterruptVector_text_start),
501 __pa(&_Level4InterruptVector_text_end), 0); 494 __pa(&_Level4InterruptVector_text_end));
502#endif 495#endif
503#if XCHAL_EXCM_LEVEL >= 5 496#if XCHAL_EXCM_LEVEL >= 5
504 mem_reserve(__pa(&_Level5InterruptVector_text_start), 497 mem_reserve(__pa(&_Level5InterruptVector_text_start),
505 __pa(&_Level5InterruptVector_text_end), 0); 498 __pa(&_Level5InterruptVector_text_end));
506#endif 499#endif
507#if XCHAL_EXCM_LEVEL >= 6 500#if XCHAL_EXCM_LEVEL >= 6
508 mem_reserve(__pa(&_Level6InterruptVector_text_start), 501 mem_reserve(__pa(&_Level6InterruptVector_text_start),
509 __pa(&_Level6InterruptVector_text_end), 0); 502 __pa(&_Level6InterruptVector_text_end));
510#endif 503#endif
511 504
512#ifdef CONFIG_SMP 505#ifdef CONFIG_SMP
513 mem_reserve(__pa(&_SecondaryResetVector_text_start), 506 mem_reserve(__pa(&_SecondaryResetVector_text_start),
514 __pa(&_SecondaryResetVector_text_end), 0); 507 __pa(&_SecondaryResetVector_text_end));
515#endif 508#endif
516 parse_early_param(); 509 parse_early_param();
517 bootmem_init(); 510 bootmem_init();
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index c417cbe4ec87..72cfe3587dd8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -30,10 +30,6 @@ jiffies = jiffies_64 + 4;
30jiffies = jiffies_64; 30jiffies = jiffies_64;
31#endif 31#endif
32 32
33#ifndef KERNELOFFSET
34#define KERNELOFFSET 0xd0003000
35#endif
36
37/* Note: In the following macros, it would be nice to specify only the 33/* Note: In the following macros, it would be nice to specify only the
38 vector name and section kind and construct "sym" and "section" using 34 vector name and section kind and construct "sym" and "section" using
39 CPP concatenation, but that does not work reliably. Concatenating a 35 CPP concatenation, but that does not work reliably. Concatenating a
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 9a9a5935bd36..80e4cfb2471a 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -8,7 +8,7 @@
8 * for more details. 8 * for more details.
9 * 9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc. 10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 * Copyright (C) 2014 Cadence Design Systems Inc. 11 * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
12 * 12 *
13 * Chris Zankel <chris@zankel.net> 13 * Chris Zankel <chris@zankel.net>
14 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> 14 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -25,284 +25,43 @@
25#include <linux/mman.h> 25#include <linux/mman.h>
26#include <linux/nodemask.h> 26#include <linux/nodemask.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/of_fdt.h>
28 29
29#include <asm/bootparam.h> 30#include <asm/bootparam.h>
30#include <asm/page.h> 31#include <asm/page.h>
31#include <asm/sections.h> 32#include <asm/sections.h>
32#include <asm/sysmem.h> 33#include <asm/sysmem.h>
33 34
34struct sysmem_info sysmem __initdata;
35
36static void __init sysmem_dump(void)
37{
38 unsigned i;
39
40 pr_debug("Sysmem:\n");
41 for (i = 0; i < sysmem.nr_banks; ++i)
42 pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n",
43 sysmem.bank[i].start, sysmem.bank[i].end,
44 (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
45}
46
47/*
48 * Find bank with maximal .start such that bank.start <= start
49 */
50static inline struct meminfo * __init find_bank(unsigned long start)
51{
52 unsigned i;
53 struct meminfo *it = NULL;
54
55 for (i = 0; i < sysmem.nr_banks; ++i)
56 if (sysmem.bank[i].start <= start)
57 it = sysmem.bank + i;
58 else
59 break;
60 return it;
61}
62
63/*
64 * Move all memory banks starting at 'from' to a new place at 'to',
65 * adjust nr_banks accordingly.
66 * Both 'from' and 'to' must be inside the sysmem.bank.
67 *
68 * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
69 */
70static int __init move_banks(struct meminfo *to, struct meminfo *from)
71{
72 unsigned n = sysmem.nr_banks - (from - sysmem.bank);
73
74 if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
75 return -ENOMEM;
76 if (to != from)
77 memmove(to, from, n * sizeof(struct meminfo));
78 sysmem.nr_banks += to - from;
79 return 0;
80}
81
82/*
83 * Add new bank to sysmem. Resulting sysmem is the union of bytes of the
84 * original sysmem and the new bank.
85 *
86 * Returns: 0 (success), < 0 (error)
87 */
88int __init add_sysmem_bank(unsigned long start, unsigned long end)
89{
90 unsigned i;
91 struct meminfo *it = NULL;
92 unsigned long sz;
93 unsigned long bank_sz = 0;
94
95 if (start == end ||
96 (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
97 pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
98 start, end - start);
99 return -EINVAL;
100 }
101
102 start = PAGE_ALIGN(start);
103 end &= PAGE_MASK;
104 sz = end - start;
105
106 it = find_bank(start);
107
108 if (it)
109 bank_sz = it->end - it->start;
110
111 if (it && bank_sz >= start - it->start) {
112 if (end - it->start > bank_sz)
113 it->end = end;
114 else
115 return 0;
116 } else {
117 if (!it)
118 it = sysmem.bank;
119 else
120 ++it;
121
122 if (it - sysmem.bank < sysmem.nr_banks &&
123 it->start - start <= sz) {
124 it->start = start;
125 if (it->end - it->start < sz)
126 it->end = end;
127 else
128 return 0;
129 } else {
130 if (move_banks(it + 1, it) < 0) {
131 pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
132 start, end - start);
133 return -EINVAL;
134 }
135 it->start = start;
136 it->end = end;
137 return 0;
138 }
139 }
140 sz = it->end - it->start;
141 for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
142 if (sysmem.bank[i].start - it->start <= sz) {
143 if (sz < sysmem.bank[i].end - it->start)
144 it->end = sysmem.bank[i].end;
145 } else {
146 break;
147 }
148
149 move_banks(it + 1, sysmem.bank + i);
150 return 0;
151}
152
153/*
154 * mem_reserve(start, end, must_exist)
155 *
156 * Reserve some memory from the memory pool.
157 * If must_exist is set and a part of the region being reserved does not exist
158 * memory map is not altered.
159 *
160 * Parameters:
161 * start Start of region,
162 * end End of region,
163 * must_exist Must exist in memory pool.
164 *
165 * Returns:
166 * 0 (success)
167 * < 0 (error)
168 */
169
170int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
171{
172 struct meminfo *it;
173 struct meminfo *rm = NULL;
174 unsigned long sz;
175 unsigned long bank_sz = 0;
176
177 start = start & PAGE_MASK;
178 end = PAGE_ALIGN(end);
179 sz = end - start;
180 if (!sz)
181 return -EINVAL;
182
183 it = find_bank(start);
184
185 if (it)
186 bank_sz = it->end - it->start;
187
188 if ((!it || end - it->start > bank_sz) && must_exist) {
189 pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
190 start, end);
191 return -EINVAL;
192 }
193
194 if (it && start - it->start <= bank_sz) {
195 if (start == it->start) {
196 if (end - it->start < bank_sz) {
197 it->start = end;
198 return 0;
199 } else {
200 rm = it;
201 }
202 } else {
203 it->end = start;
204 if (end - it->start < bank_sz)
205 return add_sysmem_bank(end,
206 it->start + bank_sz);
207 ++it;
208 }
209 }
210
211 if (!it)
212 it = sysmem.bank;
213
214 for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
215 if (it->end - start <= sz) {
216 if (!rm)
217 rm = it;
218 } else {
219 if (it->start - start < sz)
220 it->start = end;
221 break;
222 }
223 }
224
225 if (rm)
226 move_banks(rm, it);
227
228 return 0;
229}
230
231
232/* 35/*
233 * Initialize the bootmem system and give it all low memory we have available. 36 * Initialize the bootmem system and give it all low memory we have available.
234 */ 37 */
235 38
236void __init bootmem_init(void) 39void __init bootmem_init(void)
237{ 40{
238 unsigned long pfn; 41 /* Reserve all memory below PHYS_OFFSET, as memory
239 unsigned long bootmap_start, bootmap_size;
240 int i;
241
242 /* Reserve all memory below PLATFORM_DEFAULT_MEM_START, as memory
243 * accounting doesn't work for pages below that address. 42 * accounting doesn't work for pages below that address.
244 * 43 *
245 * If PLATFORM_DEFAULT_MEM_START is zero reserve page at address 0: 44 * If PHYS_OFFSET is zero reserve page at address 0:
246 * successfull allocations should never return NULL. 45 * successfull allocations should never return NULL.
247 */ 46 */
248 if (PLATFORM_DEFAULT_MEM_START) 47 if (PHYS_OFFSET)
249 mem_reserve(0, PLATFORM_DEFAULT_MEM_START, 0); 48 memblock_reserve(0, PHYS_OFFSET);
250 else 49 else
251 mem_reserve(0, 1, 0); 50 memblock_reserve(0, 1);
252 51
253 sysmem_dump(); 52 early_init_fdt_scan_reserved_mem();
254 max_low_pfn = max_pfn = 0;
255 min_low_pfn = ~0;
256
257 for (i=0; i < sysmem.nr_banks; i++) {
258 pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
259 if (pfn < min_low_pfn)
260 min_low_pfn = pfn;
261 pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
262 if (pfn > max_pfn)
263 max_pfn = pfn;
264 }
265 53
266 if (min_low_pfn > max_pfn) 54 if (!memblock_phys_mem_size())
267 panic("No memory found!\n"); 55 panic("No memory found!\n");
268 56
269 max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ? 57 min_low_pfn = PFN_UP(memblock_start_of_DRAM());
270 max_pfn : MAX_MEM_PFN >> PAGE_SHIFT; 58 min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
59 max_pfn = PFN_DOWN(memblock_end_of_DRAM());
60 max_low_pfn = min(max_pfn, MAX_LOW_PFN);
271 61
272 /* Find an area to use for the bootmem bitmap. */ 62 memblock_set_current_limit(PFN_PHYS(max_low_pfn));
273
274 bootmap_size = bootmem_bootmap_pages(max_low_pfn - min_low_pfn);
275 bootmap_size <<= PAGE_SHIFT;
276 bootmap_start = ~0;
277
278 for (i=0; i<sysmem.nr_banks; i++)
279 if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
280 bootmap_start = sysmem.bank[i].start;
281 break;
282 }
283
284 if (bootmap_start == ~0UL)
285 panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
286
287 /* Reserve the bootmem bitmap area */
288
289 mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
290 bootmap_size = init_bootmem_node(NODE_DATA(0),
291 bootmap_start >> PAGE_SHIFT,
292 min_low_pfn,
293 max_low_pfn);
294
295 /* Add all remaining memory pieces into the bootmem map */
296
297 for (i = 0; i < sysmem.nr_banks; i++) {
298 if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
299 unsigned long end = min(max_low_pfn << PAGE_SHIFT,
300 sysmem.bank[i].end);
301 free_bootmem(sysmem.bank[i].start,
302 end - sysmem.bank[i].start);
303 }
304 }
305 63
64 memblock_dump_all();
306} 65}
307 66
308 67
@@ -344,7 +103,7 @@ void __init mem_init(void)
344 " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" 103 " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
345#endif 104#endif
346#ifdef CONFIG_MMU 105#ifdef CONFIG_MMU
347 " vmalloc : 0x%08x - 0x%08x (%5u MB)\n" 106 " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
348#endif 107#endif
349 " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n", 108 " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n",
350#ifdef CONFIG_HIGHMEM 109#ifdef CONFIG_HIGHMEM
@@ -395,16 +154,16 @@ static void __init parse_memmap_one(char *p)
395 switch (*p) { 154 switch (*p) {
396 case '@': 155 case '@':
397 start_at = memparse(p + 1, &p); 156 start_at = memparse(p + 1, &p);
398 add_sysmem_bank(start_at, start_at + mem_size); 157 memblock_add(start_at, mem_size);
399 break; 158 break;
400 159
401 case '$': 160 case '$':
402 start_at = memparse(p + 1, &p); 161 start_at = memparse(p + 1, &p);
403 mem_reserve(start_at, start_at + mem_size, 0); 162 memblock_reserve(start_at, mem_size);
404 break; 163 break;
405 164
406 case 0: 165 case 0:
407 mem_reserve(mem_size, 0, 0); 166 memblock_reserve(mem_size, -mem_size);
408 break; 167 break;
409 168
410 default: 169 default: