Diffstat (limited to 'arch')
 arch/sh/Kconfig              | 13 ++++++++++++-
 arch/sh/boot/Makefile        |  6 ++++--
 arch/sh/include/asm/page.h   | 10 ++++++++++
 arch/sh/kernel/vmlinux.lds.S |  2 +-
 arch/sh/mm/init.c            |  8 ++++----
 5 files changed, 31 insertions(+), 8 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 2d39594bcdd6..5629e2099130 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -645,7 +645,7 @@ config CRASH_DUMP
 	  a specially reserved region and then later executed after
 	  a crash by kdump/kexec. The crash dump kernel must be compiled
 	  to a memory address not used by the main kernel using
-	  MEMORY_START.
+	  PHYSICAL_START.
 
 	  For more details see Documentation/kdump/kdump.txt
 
@@ -656,6 +656,17 @@ config KEXEC_JUMP
 	  Jump between original kernel and kexeced kernel and invoke
 	  code via KEXEC
 
+config PHYSICAL_START
+	hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
+	default MEMORY_START
+	---help---
+	  This gives the physical address where the kernel is loaded
+	  and is ordinarily the same as MEMORY_START.
+
+	  Different values are primarily used in the case of kexec on panic
+	  where the fail safe kernel needs to run at a different address
+	  than the panic-ed kernel.
+
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
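
A minimal sketch of how the new option is meant to be used, assuming the
0x0c000000 MEMORY_START default from arch/sh/boot/Makefile below; the
0x0d000000 crash-kernel address is hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int memory_start = 0x0c000000; /* CONFIG_MEMORY_START default */
	unsigned int main_start   = memory_start; /* PHYSICAL_START defaults to MEMORY_START */
	unsigned int crash_start  = 0x0d000000; /* hypothetical CONFIG_PHYSICAL_START for the crash kernel */

	/* The crash kernel is built to run above the main kernel, so the
	 * memory of a panicked kernel stays intact for kdump to capture. */
	printf("main kernel  loads at 0x%08x\n", main_start);
	printf("crash kernel loads at 0x%08x (+0x%08x)\n",
	       crash_start, crash_start - memory_start);
	return 0;
}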
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index ba515d800245..e4ea31a62c55 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -19,6 +19,7 @@ CONFIG_MEMORY_START ?= 0x0c000000
 CONFIG_BOOT_LINK_OFFSET	?= 0x00800000
 CONFIG_ZERO_PAGE_OFFSET	?= 0x00001000
 CONFIG_ENTRY_OFFSET	?= 0x00001000
+CONFIG_PHYSICAL_START	?= $(CONFIG_MEMORY_START)
 
 suffix-y := bin
 suffix-$(CONFIG_KERNEL_GZIP)	:= gz
@@ -48,7 +49,7 @@ $(obj)/romimage/vmlinux: $(obj)/zImage FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/romimage $@
 
 KERNEL_MEMORY	:= $(shell /bin/bash -c 'printf "0x%08x" \
-		     $$[$(CONFIG_MEMORY_START) & 0x1fffffff]')
+		     $$[$(CONFIG_PHYSICAL_START) & 0x1fffffff]')
 
 KERNEL_LOAD	:= $(shell /bin/bash -c 'printf "0x%08x" \
 		     $$[$(CONFIG_PAGE_OFFSET) + \
@@ -114,4 +115,5 @@ $(obj)/uImage: $(obj)/uImage.$(suffix-y)
 	@echo '  Image $@ is ready'
 
 export CONFIG_PAGE_OFFSET CONFIG_MEMORY_START CONFIG_BOOT_LINK_OFFSET \
-       CONFIG_ZERO_PAGE_OFFSET CONFIG_ENTRY_OFFSET KERNEL_MEMORY suffix-y
+       CONFIG_PHYSICAL_START CONFIG_ZERO_PAGE_OFFSET CONFIG_ENTRY_OFFSET \
+       KERNEL_MEMORY suffix-y
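
A sketch of the KERNEL_MEMORY arithmetic above, on the assumption that the
0x1fffffff mask is there to reduce an address to sh's 29-bit physical space;
the P1-segment input value is hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int physical_start = 0x8c000000; /* hypothetical P1-segment address */

	/* Same arithmetic as the $$[...] bash expression in the Makefile */
	unsigned int kernel_memory = physical_start & 0x1fffffff;

	printf("KERNEL_MEMORY = 0x%08x\n", kernel_memory); /* 0x0c000000 */
	return 0;
}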
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 822d6084195b..0dca9a5c6be6 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -113,6 +113,16 @@ typedef struct page *pgtable_t;
 #define __MEMORY_SIZE		CONFIG_MEMORY_SIZE
 
 /*
+ * PHYSICAL_OFFSET is the offset in physical memory where the base
+ * of the kernel is loaded.
+ */
+#ifdef CONFIG_PHYSICAL_START
+#define PHYSICAL_OFFSET		(CONFIG_PHYSICAL_START - __MEMORY_START)
+#else
+#define PHYSICAL_OFFSET		0
+#endif
+
+/*
  * PAGE_OFFSET is the virtual address of the start of kernel address
  * space.
  */
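
The macro can be exercised standalone; a sketch, assuming the same
hypothetical 0x0d000000 load address as above (drop the
CONFIG_PHYSICAL_START define to see the offset fall back to 0, i.e. the
old behaviour):

#include <stdio.h>

#define __MEMORY_START		0x0c000000
#define CONFIG_PHYSICAL_START	0x0d000000	/* hypothetical; comment out to test the #else path */

#ifdef CONFIG_PHYSICAL_START
#define PHYSICAL_OFFSET	(CONFIG_PHYSICAL_START - __MEMORY_START)
#else
#define PHYSICAL_OFFSET	0
#endif

int main(void)
{
	printf("PHYSICAL_OFFSET = 0x%08x\n", (unsigned int)PHYSICAL_OFFSET);
	return 0;
}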
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 731c10ce67b5..c98905f71e28 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -23,7 +23,7 @@ OUTPUT_ARCH(sh)
 ENTRY(_start)
 SECTIONS
 {
-	. = PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
+	. = PAGE_OFFSET + MEMORY_OFFSET + PHYSICAL_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
 
 	_text = .;			/* Text and read-only data */
 
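
A sketch of the adjusted link address, with every value hypothetical except
CONFIG_ZERO_PAGE_OFFSET (the 0x00001000 default from arch/sh/boot/Makefile);
PAGE_OFFSET and MEMORY_OFFSET really come from asm/page.h at build time:

#include <stdio.h>

int main(void)
{
	unsigned int page_offset      = 0x80000000; /* assumed virtual base */
	unsigned int memory_offset    = 0x0c000000; /* assumed RAM offset */
	unsigned int physical_offset  = 0x01000000; /* 0x0d000000 - 0x0c000000 */
	unsigned int zero_page_offset = 0x00001000; /* Makefile default */

	/* Kernel text now starts PHYSICAL_OFFSET higher than before */
	printf("link base = 0x%08x\n",
	       page_offset + memory_offset + physical_offset + zero_page_offset);
	return 0;
}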
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 58a93fb3d965..c9dbace35b16 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -287,6 +287,8 @@ static void __init do_init_bootmem(void)
 static void __init early_reserve_mem(void)
 {
 	unsigned long start_pfn;
+	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
+	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
 
 	/*
 	 * Partially used pages are not usable - thus
@@ -300,15 +302,13 @@ static void __init early_reserve_mem(void)
 	 * this catches the (definitely buggy) case of us accidentally
 	 * initializing the bootmem allocator with an invalid RAM area.
 	 */
-	memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
-			 (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
-			 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
 
 	/*
 	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
 	 */
 	if (CONFIG_ZERO_PAGE_OFFSET != 0)
-		memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
 
 	/*
 	 * Handle additional early reservations
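
A sketch of the resulting early reservations, with the same hypothetical
numbers as above and a made-up value standing in for PFN_PHYS(start_pfn),
the end of the kernel image:

#include <stdio.h>

int main(void)
{
	unsigned int memory_start     = 0x0c000000; /* __MEMORY_START */
	unsigned int physical_offset  = 0x01000000; /* PHYSICAL_OFFSET */
	unsigned int zero_page_offset = 0x00001000; /* CONFIG_ZERO_PAGE_OFFSET */
	unsigned int kernel_end       = 0x0d400000; /* made-up PFN_PHYS(start_pfn) */

	unsigned int zero_base = memory_start + physical_offset;
	unsigned int start     = zero_base + zero_page_offset;

	/* First memblock_reserve(): the kernel image itself */
	printf("reserve 0x%08x - 0x%08x (kernel)\n", start, kernel_end);

	/* Second memblock_reserve(): pages below the zero page, now taken
	 * relative to zero_base instead of __MEMORY_START */
	printf("reserve 0x%08x - 0x%08x (below zero page)\n",
	       zero_base, zero_base + zero_page_offset);
	return 0;
}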