Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/arch_timer.h |  5
-rw-r--r--  arch/arm64/kernel/process.c         | 50
-rw-r--r--  arch/arm64/kernel/smp.c             |  2
-rw-r--r--  arch/arm64/kernel/sys32.S           |  7
-rw-r--r--  arch/arm64/kernel/time.c            |  6
-rw-r--r--  arch/arm64/kernel/traps.c           |  7
-rw-r--r--  arch/arm64/mm/init.c                | 26
-rw-r--r--  arch/arm64/mm/mmu.c                 | 15
8 files changed, 24 insertions(+), 94 deletions(-)
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 91e2a6a6fcd4..bf6ab242f047 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -130,4 +130,9 @@ static inline u64 arch_counter_get_cntvct(void)
 	return cval;
 }
 
+static inline int arch_timer_arch_init(void)
+{
+	return 0;
+}
+
 #endif
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index bbefb6fdfee2..46f02c3b5015 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -84,11 +84,15 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 void (*arm_pm_restart)(char str, const char *cmd);
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
+void arch_cpu_idle_prepare(void)
+{
+	local_fiq_enable();
+}
+
 /*
  * This is our default idle handler.
  */
-static void default_idle(void)
+void arch_cpu_idle(void)
 {
 	/*
 	 * This should do all the clock switching and wait for interrupt
@@ -98,43 +102,6 @@ static void default_idle(void)
 	local_irq_enable();
 }
 
-/*
- * The idle thread.
- * We always respect 'hlt_counter' to prevent low power idle.
- */
-void cpu_idle(void)
-{
-	local_fiq_enable();
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		while (!need_resched()) {
-			/*
-			 * We need to disable interrupts here to ensure
-			 * we don't miss a wakeup call.
-			 */
-			local_irq_disable();
-			if (!need_resched()) {
-				stop_critical_timings();
-				default_idle();
-				start_critical_timings();
-				/*
-				 * default_idle functions should always return
-				 * with IRQs enabled.
-				 */
-				WARN_ON(irqs_disabled());
-			} else {
-				local_irq_enable();
-			}
-		}
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
-
 void machine_shutdown(void)
 {
 #ifdef CONFIG_SMP
@@ -178,11 +145,7 @@ void __show_regs(struct pt_regs *regs)
 {
 	int i;
 
-	printk("CPU: %d %s (%s %.*s)\n",
-	       raw_smp_processor_id(), print_tainted(),
-	       init_utsname()->release,
-	       (int)strcspn(init_utsname()->version, " "),
-	       init_utsname()->version);
+	show_regs_print_info(KERN_DEFAULT);
 	print_symbol("PC is at %s\n", instruction_pointer(regs));
 	print_symbol("LR is at %s\n", regs->regs[30]);
 	printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
@@ -199,7 +162,6 @@ void __show_regs(struct pt_regs *regs)
 void show_regs(struct pt_regs * regs)
 {
 	printk("\n");
-	printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
 	__show_regs(regs);
 }
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 1e22ff9ae153..5d54e3717bf8 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -217,7 +217,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index 9416d045a687..db01aa978c41 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -84,13 +84,6 @@ compat_sys_readahead_wrapper:
 	b	sys_readahead
 ENDPROC(compat_sys_readahead_wrapper)
 
-compat_sys_lookup_dcookie:
-	orr	x0, x0, x1, lsl #32
-	mov	w1, w2
-	mov	w2, w3
-	b	sys_lookup_dcookie
-ENDPROC(compat_sys_lookup_dcookie)
-
 compat_sys_fadvise64_64_wrapper:
 	mov	w6, w1
 	orr	x1, x2, x3, lsl #32
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index b0ef18d14c3b..a551f88ae2c1 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -32,6 +32,7 @@
 #include <linux/timer.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
+#include <linux/clocksource.h>
 
 #include <clocksource/arm_arch_timer.h>
 
@@ -77,10 +78,11 @@ void __init time_init(void)
 {
 	u32 arch_timer_rate;
 
-	if (arch_timer_init())
-		panic("Unable to initialise architected timer.\n");
+	clocksource_of_init();
 
 	arch_timer_rate = arch_timer_get_rate();
+	if (!arch_timer_rate)
+		panic("Unable to initialise architected timer.\n");
 
 	/* Cache the sched_clock multiplier to save a divide in the hot path. */
 	sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index b3c5f628bdb4..61d7dd29f756 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -167,13 +167,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	}
 }
 
-void dump_stack(void)
-{
-	dump_backtrace(NULL, NULL);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
 void show_stack(struct task_struct *tsk, unsigned long *sp)
 {
 	dump_backtrace(NULL, tsk);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 800aac306a08..f497ca77925a 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -197,24 +197,6 @@ void __init bootmem_init(void)
 	max_pfn = max_low_pfn = max;
 }
 
-static inline int free_area(unsigned long pfn, unsigned long end, char *s)
-{
-	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
-
-	for (; pfn < end; pfn++) {
-		struct page *page = pfn_to_page(pfn);
-		ClearPageReserved(page);
-		init_page_count(page);
-		__free_page(page);
-		pages++;
-	}
-
-	if (size && s)
-		pr_info("Freeing %s memory: %dK\n", s, size);
-
-	return pages;
-}
-
 /*
  * Poison init memory with an undefined instruction (0x0).
  */
@@ -405,9 +387,7 @@ void __init mem_init(void)
 void free_initmem(void)
 {
 	poison_init_mem(__init_begin, __init_end - __init_begin);
-	totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
-				    __phys_to_pfn(__pa(__init_end)),
-				    "init");
+	free_initmem_default(0);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -418,9 +398,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 {
 	if (!keep_initrd) {
 		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
-		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
-					    __phys_to_pfn(__pa(end)),
-					    "initrd");
+		free_reserved_area(start, end, 0, "initrd");
 	}
 }
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 224b44ab534e..eeecc9c8ed68 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
 {
 	unsigned long size, mask;
-	bool page64k = IS_ENABLED(ARM64_64K_PAGES);
+	bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -391,17 +391,14 @@ int kern_addr_valid(unsigned long addr)
 }
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #ifdef CONFIG_ARM64_64K_PAGES
-int __meminit vmemmap_populate(struct page *start_page,
-			       unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-	return vmemmap_populate_basepages(start_page, size, node);
+	return vmemmap_populate_basepages(start, end, node);
 }
 #else /* !CONFIG_ARM64_64K_PAGES */
-int __meminit vmemmap_populate(struct page *start_page,
-			       unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-	unsigned long addr = (unsigned long)start_page;
-	unsigned long end = (unsigned long)(start_page + size);
+	unsigned long addr = start;
 	unsigned long next;
 	pgd_t *pgd;
 	pud_t *pud;
@@ -434,7 +431,7 @@ int __meminit vmemmap_populate(struct page *start_page,
 	return 0;
 }
 #endif /* CONFIG_ARM64_64K_PAGES */
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */