Diffstat (limited to 'arch/hexagon')
-rw-r--r--  arch/hexagon/Kconfig                   |  6
-rw-r--r--  arch/hexagon/include/asm/hexagon_vm.h  | 47
-rw-r--r--  arch/hexagon/include/asm/mem-layout.h  | 21
-rw-r--r--  arch/hexagon/include/asm/page.h        |  5
-rw-r--r--  arch/hexagon/kernel/head.S             | 22
-rw-r--r--  arch/hexagon/kernel/setup.c            |  2
-rw-r--r--  arch/hexagon/kernel/vmlinux.lds.S      | 10
-rw-r--r--  arch/hexagon/mm/init.c                 | 33
8 files changed, 91 insertions(+), 55 deletions(-)
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index e4decc6b8947..dd89a7245ac4 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -33,6 +33,7 @@ config HEXAGON
 	  Qualcomm Hexagon is a processor architecture designed for high
 	  performance and low power across a wide variety of applications.
 
+
 config HEXAGON_ARCH_V1
 	bool
 
@@ -45,6 +46,11 @@ config HEXAGON_ARCH_V3
 config HEXAGON_ARCH_V4
 	bool
 
+config HEXAGON_PHYS_OFFSET
+	def_bool y
+	---help---
+	  Platforms that don't load the kernel at zero set this.
+
 config FRAME_POINTER
 	def_bool y
 
diff --git a/arch/hexagon/include/asm/hexagon_vm.h b/arch/hexagon/include/asm/hexagon_vm.h
index c144bee6cabe..6b81e4d5ecb1 100644
--- a/arch/hexagon/include/asm/hexagon_vm.h
+++ b/arch/hexagon/include/asm/hexagon_vm.h
@@ -31,10 +31,26 @@
  * for tracing/debugging.
  */
 
-/*
- * Lets make this stuff visible only if configured,
- * so we can unconditionally include the file.
- */
+#define HVM_TRAP1_VMVERSION		0
+#define HVM_TRAP1_VMRTE			1
+#define HVM_TRAP1_VMSETVEC		2
+#define HVM_TRAP1_VMSETIE		3
+#define HVM_TRAP1_VMGETIE		4
+#define HVM_TRAP1_VMINTOP		5
+#define HVM_TRAP1_VMCLRMAP		10
+#define HVM_TRAP1_VMNEWMAP		11
+#define HVM_TRAP1_FORMERLY_VMWIRE	12
+#define HVM_TRAP1_VMCACHE		13
+#define HVM_TRAP1_VMGETTIME		14
+#define HVM_TRAP1_VMSETTIME		15
+#define HVM_TRAP1_VMWAIT		16
+#define HVM_TRAP1_VMYIELD		17
+#define HVM_TRAP1_VMSTART		18
+#define HVM_TRAP1_VMSTOP		19
+#define HVM_TRAP1_VMVPID		20
+#define HVM_TRAP1_VMSETREGS		21
+#define HVM_TRAP1_VMGETREGS		22
+#define HVM_TRAP1_VMTIMEROP		24
 
 #ifndef __ASSEMBLY__
 
@@ -175,25 +191,6 @@ static inline long __vmintop_clear(long i)
 
 #else /* Only assembly code should reference these */
 
-#define HVM_TRAP1_VMRTE			1
-#define HVM_TRAP1_VMSETVEC		2
-#define HVM_TRAP1_VMSETIE		3
-#define HVM_TRAP1_VMGETIE		4
-#define HVM_TRAP1_VMINTOP		5
-#define HVM_TRAP1_VMCLRMAP		10
-#define HVM_TRAP1_VMNEWMAP		11
-#define HVM_TRAP1_FORMERLY_VMWIRE	12
-#define HVM_TRAP1_VMCACHE		13
-#define HVM_TRAP1_VMGETTIME		14
-#define HVM_TRAP1_VMSETTIME		15
-#define HVM_TRAP1_VMWAIT		16
-#define HVM_TRAP1_VMYIELD		17
-#define HVM_TRAP1_VMSTART		18
-#define HVM_TRAP1_VMSTOP		19
-#define HVM_TRAP1_VMVPID		20
-#define HVM_TRAP1_VMSETREGS		21
-#define HVM_TRAP1_VMGETREGS		22
-
 #endif /* __ASSEMBLY__ */
 
 /*
@@ -224,6 +221,8 @@ static inline long __vmintop_clear(long i)
 #define HVM_VMEST_UM_MSK	1
 #define HVM_VMEST_IE_SFT	30
 #define HVM_VMEST_IE_MSK	1
+#define HVM_VMEST_SS_SFT	29
+#define HVM_VMEST_SS_MSK	1
 #define HVM_VMEST_EVENTNUM_SFT	16
 #define HVM_VMEST_EVENTNUM_MSK	0xff
 #define HVM_VMEST_CAUSE_SFT	0
@@ -260,6 +259,8 @@ static inline long __vmintop_clear(long i)
 #define HVM_GE_C_INVI	0x15
 #define HVM_GE_C_PRIVI	0x1B
 #define HVM_GE_C_XMAL	0x1C
+#define HVM_GE_C_WREG	0x1D
+#define HVM_GE_C_PCAL	0x1E
 #define HVM_GE_C_RMAL	0x20
 #define HVM_GE_C_WMAL	0x21
 #define HVM_GE_C_RPROT	0x22
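
The new HVM_VMEST_SS_* pair adds a single-step bit to the guest event-status word, next to the existing IE and EVENTNUM fields. As a minimal, standalone C sketch (not part of the patch; the event-status value is made up for illustration), the shift/mask pairs decode such a word like this:

#include <stdio.h>

#define HVM_VMEST_IE_SFT	30
#define HVM_VMEST_IE_MSK	1
#define HVM_VMEST_SS_SFT	29
#define HVM_VMEST_SS_MSK	1
#define HVM_VMEST_EVENTNUM_SFT	16
#define HVM_VMEST_EVENTNUM_MSK	0xff

int main(void)
{
	unsigned long vmest = 0xe0050000UL;	/* arbitrary example value */

	/* Each field is (word >> shift) & mask */
	unsigned long ie = (vmest >> HVM_VMEST_IE_SFT) & HVM_VMEST_IE_MSK;
	unsigned long ss = (vmest >> HVM_VMEST_SS_SFT) & HVM_VMEST_SS_MSK;
	unsigned long ev = (vmest >> HVM_VMEST_EVENTNUM_SFT) & HVM_VMEST_EVENTNUM_MSK;

	printf("IE=%lu SS=%lu event=%lu\n", ie, ss, ev);	/* IE=1 SS=1 event=5 */
	return 0;
}
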
diff --git a/arch/hexagon/include/asm/mem-layout.h b/arch/hexagon/include/asm/mem-layout.h
index af16e977c55e..1426cd71a1d3 100644
--- a/arch/hexagon/include/asm/mem-layout.h
+++ b/arch/hexagon/include/asm/mem-layout.h
@@ -32,16 +32,25 @@
 #define PAGE_OFFSET _AC(0xc0000000, UL)
 
 /*
- * LOAD_ADDRESS is the physical/linear address of where in memory
- * the kernel gets loaded. The 12 least significant bits must be zero (0)
- * due to limitations on setting the EVB
- *
+ * Compiling for a platform that needs a crazy physical offset
+ * (like if the memory starts at 1GB and up) means we need
+ * an actual PHYS_OFFSET.  Should be set up in head.S.
  */
 
-#ifndef LOAD_ADDRESS
-#define LOAD_ADDRESS 0x00000000
+#ifdef CONFIG_HEXAGON_PHYS_OFFSET
+#ifndef __ASSEMBLY__
+extern unsigned long __phys_offset;
+#endif
+#define PHYS_OFFSET __phys_offset
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET 0
 #endif
 
+#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
+
 #define TASK_SIZE	(PAGE_OFFSET)
 
 /* not sure how these are used yet */
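
With CONFIG_HEXAGON_PHYS_OFFSET enabled, PHYS_OFFSET resolves to the runtime variable __phys_offset that head.S fills in; without it, the fallback collapses to 0 and the PFN offsets follow. A standalone sketch of that selection logic (not part of the patch; the PAGE_SHIFT value and the offset are assumptions for illustration):

#include <stdio.h>

#define PAGE_SHIFT 14				/* assumed: 16K pages */
#define CONFIG_HEXAGON_PHYS_OFFSET 1		/* pretend the Kconfig option is set */

#ifdef CONFIG_HEXAGON_PHYS_OFFSET
unsigned long __phys_offset;			/* filled in once at boot by head.S */
#define PHYS_OFFSET __phys_offset
#endif

#ifndef PHYS_OFFSET
#define PHYS_OFFSET 0				/* platforms loaded at physical zero */
#endif

#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET

int main(void)
{
	__phys_offset = 0x40000000UL;		/* pretend RAM starts at 1GB */

	printf("PHYS_OFFSET     = 0x%08lx\n", (unsigned long)PHYS_OFFSET);
	printf("ARCH_PFN_OFFSET = 0x%08lx\n", (unsigned long)ARCH_PFN_OFFSET);
	return 0;
}
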
diff --git a/arch/hexagon/include/asm/page.h b/arch/hexagon/include/asm/page.h
index 692adc213429..de1b2871d09c 100644
--- a/arch/hexagon/include/asm/page.h
+++ b/arch/hexagon/include/asm/page.h
@@ -96,8 +96,8 @@ typedef struct page *pgtable_t;
  * MIPS says they're only used during mem_init.
  * also, check if we need a PHYS_OFFSET.
  */
-#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))
 
 /* The "page frame" descriptor is defined in linux/mm.h */
 struct page;
@@ -147,6 +147,7 @@ static inline void clear_page(void *page)
  */
 #define kern_addr_valid(addr)   (1)
 
+#include <asm/mem-layout.h>
 #include <asm-generic/memory_model.h>
 /* XXX Todo: implement assembly-optimized version of getorder. */
 #include <asm-generic/getorder.h>
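
The reworked __pa()/__va() pair rebases between PAGE_OFFSET and PHYS_OFFSET, so the two macros stay inverses of each other even when RAM does not start at physical zero. A standalone round-trip check (not part of the patch; the PHYS_OFFSET and address values are made up):

#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET	0xc0000000UL
#define PHYS_OFFSET	0x40000000UL	/* illustrative: RAM starting at 1GB */

#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))

int main(void)
{
	unsigned long vaddr = 0xc0100000UL;	/* a kernel virtual address */
	unsigned long paddr = __pa(vaddr);

	printf("__pa(0x%08lx) = 0x%08lx\n", vaddr, paddr);	/* 0x40100000 */
	assert((unsigned long)__va(paddr) == vaddr);		/* round trip holds */
	return 0;
}
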
diff --git a/arch/hexagon/kernel/head.S b/arch/hexagon/kernel/head.S
index d859402c73ba..477320c455f7 100644
--- a/arch/hexagon/kernel/head.S
+++ b/arch/hexagon/kernel/head.S
@@ -43,14 +43,21 @@ ENTRY(stext)
 	 * Symbol is kernel segment address, but we need
 	 * the logical/physical address.
 	 */
-	r24 = asl(r24, #2)
-	r24 = lsr(r24, #2)
+	r25 = pc;
+	r2.h = #0xffc0;
+	r2.l = #0x0000;
+	r25 = and(r2,r25);	/* R25 holds PHYS_OFFSET now */
+	r1.h = #HI(PAGE_OFFSET);
+	r1.l = #LO(PAGE_OFFSET);
+	r24 = sub(r24,r1);	/* swapper_pg_dir - PAGE_OFFSET */
+	r24 = add(r24,r25);	/* + PHYS_OFFSET */
 
-	r0 = r24
+	r0 = r24;	/* aka __pa(swapper_pg_dir) */
 
 	/*
-	 * Initialize a 16MB PTE to make the virtual and physical
+	 * Initialize page dir to make the virtual and physical
 	 * addresses where the kernel was loaded be identical.
+	 * Done in 4MB chunks.
 	 */
 #define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X	\
 		  | __HEXAGON_C_WB_L2 << 6	\
@@ -143,6 +150,13 @@ __head_s_vaddr_target:
 	r2 = sub(r2,r0);
 	call memset;
 
+	/* Set PHYS_OFFSET; should still be in R25 */
+#ifdef CONFIG_HEXAGON_PHYS_OFFSET
+	r0.l = #LO(__phys_offset);
+	r0.h = #HI(__phys_offset);
+	memw(r0) = r25;
+#endif
+
 	/* Time to make the doughnuts. */
 	call start_kernel
 
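
The new stext sequence recovers PHYS_OFFSET at boot by masking the current PC down to a 4MB boundary (0xffc00000) and then rebases swapper_pg_dir from its link-time address. A standalone C sketch of the same arithmetic (not part of the patch; the PC and swapper_pg_dir values are made-up inputs):

#include <stdio.h>

#define PAGE_OFFSET	0xc0000000UL

int main(void)
{
	unsigned long pc = 0x40000040UL;		/* where stext happens to be running */
	unsigned long swapper_pg_dir = 0xc0248000UL;	/* link-time (virtual) address */

	unsigned long phys_offset = pc & 0xffc00000UL;	/* what r25 ends up holding */
	unsigned long pa_pgdir = swapper_pg_dir - PAGE_OFFSET + phys_offset;	/* r24 */

	printf("PHYS_OFFSET          = 0x%08lx\n", phys_offset);	/* 0x40000000 */
	printf("__pa(swapper_pg_dir) = 0x%08lx\n", pa_pgdir);		/* 0x40248000 */
	return 0;
}
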
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index 94a387835008..2e2304f7b7ee 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -68,6 +68,8 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	__vmsetvec(_K_VM_event_vector);
 
+	printk(KERN_INFO "PHYS_OFFSET=0x%08x\n", PHYS_OFFSET);
+
 	/*
 	 * Simulator has a few differences from the hardware.
 	 * For now, check uninitialized-but-mapped memory
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 14e793f6abbf..fafb886511e3 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -18,8 +18,6 @@
  * 02110-1301, USA.
  */
 
-#define LOAD_OFFSET PAGE_OFFSET
-
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/asm-offsets.h>	/* Most of the kernel defines are here */
 #include <asm/mem-layout.h>	/* except for page_offset */
@@ -36,13 +34,9 @@ See asm-generic/sections.h for seemingly required labels.
 
 #define PAGE_SIZE _PAGE_SIZE
 
-/* This LOAD_OFFSET is temporary for debugging on the simulator; it may change
-   for hypervisor pseudo-physical memory. */
-
-
 SECTIONS
 {
-	. = PAGE_OFFSET + LOAD_ADDRESS;
+	. = PAGE_OFFSET;
 
 	__init_begin = .;
 	HEAD_TEXT_SECTION
@@ -52,7 +46,7 @@ SECTIONS
 
 	. = ALIGN(_PAGE_SIZE);
 	_stext = .;
-	.text : AT(ADDR(.text) - LOAD_OFFSET) {
+	.text : AT(ADDR(.text)) {
 		_text = .;
 		TEXT_TEXT
 		SCHED_TEXT
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index 69ffcfd28794..8e803a6e3402 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -31,9 +31,10 @@
  * Define a startpg just past the end of the kernel image and a lastpg
  * that corresponds to the end of real or simulated platform memory.
  */
-#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET))
+#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))
 
 unsigned long bootmem_lastpg;	/* Should be set by platform code */
+unsigned long __phys_offset;	/* physical kernel offset >> 12 */
 
 /* Set as variable to limit PMD copies */
 int max_kernel_seg = 0x303;
@@ -44,7 +45,6 @@ unsigned long zero_page_mask;
 /* indicate pfn's of high memory */
 unsigned long highstart_pfn, highend_pfn;
 
-/* struct mmu_gather defined in asm-generic.h; */
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 /* Default cache attribute for newly created page tables */
@@ -71,7 +71,7 @@ void __init mem_init(void)
 {
 	/* No idea where this is actually declared. Seems to evade LXR. */
 	totalram_pages += free_all_bootmem();
-	num_physpages = bootmem_lastpg;	/* seriously, what? */
+	num_physpages = bootmem_lastpg-ARCH_PFN_OFFSET;
 
 	printk(KERN_INFO "totalram_pages = %ld\n", totalram_pages);
 
@@ -193,6 +193,9 @@ void __init setup_arch_memory(void)
 	 * This needs to change for highmem setups.
 	 */
 
+	/* Prior to this, bootmem_lastpg is actually mem size */
+	bootmem_lastpg += ARCH_PFN_OFFSET;
+
 	/* Memory size needs to be a multiple of 16M */
 	bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
 		~((BIG_KERNEL_PAGE_SIZE) - 1));
@@ -201,12 +204,15 @@ void __init setup_arch_memory(void)
 	 * Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached)
 	 * memory allocation
 	 */
-	bootmap_size = init_bootmem(bootmem_startpg, bootmem_lastpg -
-				    PFN_DOWN(DMA_RESERVED_BYTES));
+
+	max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES);
+	min_low_pfn = ARCH_PFN_OFFSET;
+	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmem_startpg, min_low_pfn, max_low_pfn);
 
 	printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg);
 	printk(KERN_INFO "bootmem_lastpg: 0x%08lx\n", bootmem_lastpg);
 	printk(KERN_INFO "bootmap_size: %d\n", bootmap_size);
+	printk(KERN_INFO "min_low_pfn: 0x%08lx\n", min_low_pfn);
 	printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn);
 
 	/*
@@ -221,14 +227,17 @@ void __init setup_arch_memory(void)
 	/* this actually only goes to the end of the first gig */
 	segtable_end = segtable + (1<<(30-22));
 
-	/* Move forward to the start of empty pages */
-	segtable += bootmem_lastpg >> (22-PAGE_SHIFT);
+	/*
+	 * Move forward to the start of empty pages; take into account
+	 * phys_offset shift.
+	 */
 
+	segtable += (bootmem_lastpg-ARCH_PFN_OFFSET)>>(22-PAGE_SHIFT);
 	{
 		int i;
 
 		for (i = 1 ; i <= DMA_RESERVE ; i++)
 			segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
 				| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
 				| __HEXAGON_C_UNC << 6
 				| __HVM_PDE_S_4MB);
@@ -256,7 +265,7 @@ void __init setup_arch_memory(void)
 	 * Free all the memory that wasn't taken up by the bootmap, the DMA
 	 * reserve, or kernel itself.
 	 */
-	free_bootmem(PFN_PHYS(bootmem_startpg)+bootmap_size,
+	free_bootmem(PFN_PHYS(bootmem_startpg) + bootmap_size,
 		     PFN_PHYS(bootmem_lastpg - bootmem_startpg) - bootmap_size -
 		     DMA_RESERVED_BYTES);
 
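
In setup_arch_memory(), bootmem_lastpg arrives from platform code as a size in pages, is rebased by ARCH_PFN_OFFSET into an absolute end PFN, and min_low_pfn/max_low_pfn then bracket the range handed to init_bootmem_node(). A standalone sketch of that bookkeeping (not part of the patch; PAGE_SHIFT, DMA_RESERVED_BYTES, and the memory size are assumed values):

#include <stdio.h>

#define PAGE_SHIFT		14			/* assumed: 16K pages */
#define PFN_DOWN(x)		((x) >> PAGE_SHIFT)
#define DMA_RESERVED_BYTES	0x4000000UL		/* assumed: 64MB DMA reserve */

int main(void)
{
	unsigned long arch_pfn_offset = PFN_DOWN(0x40000000UL);	/* RAM at 1GB */
	unsigned long bootmem_lastpg = PFN_DOWN(0x10000000UL);	/* 256MB of RAM */

	/* Prior to this, bootmem_lastpg is actually mem size */
	bootmem_lastpg += arch_pfn_offset;

	unsigned long min_low_pfn = arch_pfn_offset;
	unsigned long max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES);

	printf("min_low_pfn = 0x%08lx\n", min_low_pfn);	/* 0x00010000 */
	printf("max_low_pfn = 0x%08lx\n", max_low_pfn);	/* 0x00013000 */
	return 0;
}
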