path: root/arch/arm/kernel
author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-07 08:20:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-07 08:20:57 -0400
commit		0e51793e162ca432fc5f04178cf82b80a92c2659 (patch)
tree		cf7ffdb5064e2f7b6647a63e7323d1c4e99b7739 /arch/arm/kernel
parent		5cad3598ea0cdb817681f74518d3213583a04f7a (diff)
parent		b4874a3d298606c20118d1ead73235439bbc2823 (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
 "This is the first chunk of ARM updates for this merge window.
  Conflicts are expected in two files - asm/timex.h and
  mach-integrator/integrator_cp.c. Nothing particularly stands out
  more than anything else. Most of the growth is down to the opcodes
  stuff from Dave Martin, which is countered by Rob's patches to use
  more of the asm-generic headers on ARM."

(A few more conflicts grew since then, but it all looked fairly trivial)

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (44 commits)
  ARM: 7548/1: include linux/sched.h in syscall.h
  ARM: 7541/1: Add ARM ERRATA 775420 workaround
  ARM: ensure vm_struct has its phys_addr member filled in
  ARM: 7540/1: kexec: Check segment memory addresses
  ARM: 7539/1: kexec: scan for dtb magic in segments
  ARM: 7538/1: delay: add registration mechanism for delay timer sources
  ARM: 7536/1: smp: Formalize an IPI for wakeup
  ARM: 7525/1: ptrace: use updated syscall number for syscall auditing
  ARM: 7524/1: support syscall tracing
  ARM: 7519/1: integrator: convert platform devices to Device Tree
  ARM: 7518/1: integrator: convert AMBA devices to device tree
  ARM: 7517/1: integrator: initial device tree support
  ARM: 7516/1: plat-versatile: add DT support to FPGA IRQ
  ARM: 7515/1: integrator: check PL010 base address from resource
  ARM: 7514/1: integrator: call common init function from machine
  ARM: 7522/1: arch_timers: register a time/cycle counter
  ARM: 7523/1: arch_timers: enable the use of the virtual timer
  ARM: 7531/1: mark kernelmode mem{cpy,set} non-experimental
  ARM: 7520/1: Build dtb files in all target
  ARM: Fix build warning in arch/arm/mm/alignment.c
  ...
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/Makefile	|   5
-rw-r--r--	arch/arm/kernel/arch_timer.c	| 383
-rw-r--r--	arch/arm/kernel/asm-offsets.c	|   2
-rw-r--r--	arch/arm/kernel/atags.h	|  14
-rw-r--r--	arch/arm/kernel/atags_compat.c (renamed from arch/arm/kernel/compat.c)	|   4
-rw-r--r--	arch/arm/kernel/atags_parse.c	| 238
-rw-r--r--	arch/arm/kernel/atags_proc.c (renamed from arch/arm/kernel/atags.c)	|   0
-rw-r--r--	arch/arm/kernel/compat.h	|  11
-rw-r--r--	arch/arm/kernel/entry-common.S	|   9
-rw-r--r--	arch/arm/kernel/machine_kexec.c	|  29
-rw-r--r--	arch/arm/kernel/ptrace.c	|  19
-rw-r--r--	arch/arm/kernel/sched_clock.c	|   8
-rw-r--r--	arch/arm/kernel/setup.c	| 236
-rw-r--r--	arch/arm/kernel/smp.c	|  13
14 files changed, 600 insertions, 371 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index d81f3a6d9ad8..5dfef9d97ed9 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -19,7 +19,9 @@ obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
 	process.o ptrace.o return_address.o sched_clock.o \
 	setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
 
-obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o
+obj-$(CONFIG_ATAGS) += atags_parse.o
+obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
+obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
 
 obj-$(CONFIG_OC_ETM) += etm.o
 obj-$(CONFIG_CPU_IDLE) += cpuidle.o
@@ -51,7 +53,6 @@ test-kprobes-objs += kprobes-test-thumb.o
 else
 test-kprobes-objs += kprobes-test-arm.o
 endif
-obj-$(CONFIG_ATAGS_PROC) += atags.o
 obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
 obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
 obj-$(CONFIG_KGDB) += kgdb.o
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index cf258807160d..c8ef20747ee7 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -21,18 +21,28 @@
21#include <linux/io.h> 21#include <linux/io.h>
22 22
23#include <asm/cputype.h> 23#include <asm/cputype.h>
24#include <asm/delay.h>
24#include <asm/localtimer.h> 25#include <asm/localtimer.h>
25#include <asm/arch_timer.h> 26#include <asm/arch_timer.h>
26#include <asm/system_info.h> 27#include <asm/system_info.h>
27#include <asm/sched_clock.h> 28#include <asm/sched_clock.h>
28 29
29static unsigned long arch_timer_rate; 30static unsigned long arch_timer_rate;
30static int arch_timer_ppi; 31
31static int arch_timer_ppi2; 32enum ppi_nr {
33 PHYS_SECURE_PPI,
34 PHYS_NONSECURE_PPI,
35 VIRT_PPI,
36 HYP_PPI,
37 MAX_TIMER_PPI
38};
39
40static int arch_timer_ppi[MAX_TIMER_PPI];
32 41
33static struct clock_event_device __percpu **arch_timer_evt; 42static struct clock_event_device __percpu **arch_timer_evt;
43static struct delay_timer arch_delay_timer;
34 44
35extern void init_current_timer_delay(unsigned long freq); 45static bool arch_timer_use_virtual = true;
36 46
37/* 47/*
38 * Architected system timer support. 48 * Architected system timer support.
@@ -46,50 +56,104 @@ extern void init_current_timer_delay(unsigned long freq);
46#define ARCH_TIMER_REG_FREQ 1 56#define ARCH_TIMER_REG_FREQ 1
47#define ARCH_TIMER_REG_TVAL 2 57#define ARCH_TIMER_REG_TVAL 2
48 58
49static void arch_timer_reg_write(int reg, u32 val) 59#define ARCH_TIMER_PHYS_ACCESS 0
60#define ARCH_TIMER_VIRT_ACCESS 1
61
62/*
63 * These register accessors are marked inline so the compiler can
64 * nicely work out which register we want, and chuck away the rest of
65 * the code. At least it does so with a recent GCC (4.6.3).
66 */
67static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
50{ 68{
51 switch (reg) { 69 if (access == ARCH_TIMER_PHYS_ACCESS) {
52 case ARCH_TIMER_REG_CTRL: 70 switch (reg) {
53 asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); 71 case ARCH_TIMER_REG_CTRL:
54 break; 72 asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
55 case ARCH_TIMER_REG_TVAL: 73 break;
56 asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); 74 case ARCH_TIMER_REG_TVAL:
57 break; 75 asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
76 break;
77 }
78 }
79
80 if (access == ARCH_TIMER_VIRT_ACCESS) {
81 switch (reg) {
82 case ARCH_TIMER_REG_CTRL:
83 asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
84 break;
85 case ARCH_TIMER_REG_TVAL:
86 asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
87 break;
88 }
58 } 89 }
59 90
60 isb(); 91 isb();
61} 92}
62 93
63static u32 arch_timer_reg_read(int reg) 94static inline u32 arch_timer_reg_read(const int access, const int reg)
64{ 95{
65 u32 val; 96 u32 val = 0;
97
98 if (access == ARCH_TIMER_PHYS_ACCESS) {
99 switch (reg) {
100 case ARCH_TIMER_REG_CTRL:
101 asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
102 break;
103 case ARCH_TIMER_REG_TVAL:
104 asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
105 break;
106 case ARCH_TIMER_REG_FREQ:
107 asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
108 break;
109 }
110 }
66 111
67 switch (reg) { 112 if (access == ARCH_TIMER_VIRT_ACCESS) {
68 case ARCH_TIMER_REG_CTRL: 113 switch (reg) {
69 asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); 114 case ARCH_TIMER_REG_CTRL:
70 break; 115 asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
71 case ARCH_TIMER_REG_FREQ: 116 break;
72 asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); 117 case ARCH_TIMER_REG_TVAL:
73 break; 118 asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
74 case ARCH_TIMER_REG_TVAL: 119 break;
75 asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); 120 }
76 break;
77 default:
78 BUG();
79 } 121 }
80 122
81 return val; 123 return val;
82} 124}
83 125
84static irqreturn_t arch_timer_handler(int irq, void *dev_id) 126static inline cycle_t arch_timer_counter_read(const int access)
85{ 127{
86 struct clock_event_device *evt = *(struct clock_event_device **)dev_id; 128 cycle_t cval = 0;
87 unsigned long ctrl; 129
130 if (access == ARCH_TIMER_PHYS_ACCESS)
131 asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
132
133 if (access == ARCH_TIMER_VIRT_ACCESS)
134 asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
135
136 return cval;
137}
138
139static inline cycle_t arch_counter_get_cntpct(void)
140{
141 return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS);
142}
88 143
89 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); 144static inline cycle_t arch_counter_get_cntvct(void)
145{
146 return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS);
147}
148
149static irqreturn_t inline timer_handler(const int access,
150 struct clock_event_device *evt)
151{
152 unsigned long ctrl;
153 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
90 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { 154 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
91 ctrl |= ARCH_TIMER_CTRL_IT_MASK; 155 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
92 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl); 156 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
93 evt->event_handler(evt); 157 evt->event_handler(evt);
94 return IRQ_HANDLED; 158 return IRQ_HANDLED;
95 } 159 }
@@ -97,63 +161,100 @@ static irqreturn_t arch_timer_handler(int irq, void *dev_id)
97 return IRQ_NONE; 161 return IRQ_NONE;
98} 162}
99 163
100static void arch_timer_disable(void) 164static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
101{ 165{
102 unsigned long ctrl; 166 struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
103 167
104 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); 168 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
105 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
106 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
107} 169}
108 170
109static void arch_timer_set_mode(enum clock_event_mode mode, 171static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
110 struct clock_event_device *clk)
111{ 172{
173 struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
174
175 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
176}
177
178static inline void timer_set_mode(const int access, int mode)
179{
180 unsigned long ctrl;
112 switch (mode) { 181 switch (mode) {
113 case CLOCK_EVT_MODE_UNUSED: 182 case CLOCK_EVT_MODE_UNUSED:
114 case CLOCK_EVT_MODE_SHUTDOWN: 183 case CLOCK_EVT_MODE_SHUTDOWN:
115 arch_timer_disable(); 184 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
185 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
186 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
116 break; 187 break;
117 default: 188 default:
118 break; 189 break;
119 } 190 }
120} 191}
121 192
122static int arch_timer_set_next_event(unsigned long evt, 193static void arch_timer_set_mode_virt(enum clock_event_mode mode,
123 struct clock_event_device *unused) 194 struct clock_event_device *clk)
124{ 195{
125 unsigned long ctrl; 196 timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
197}
126 198
127 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); 199static void arch_timer_set_mode_phys(enum clock_event_mode mode,
200 struct clock_event_device *clk)
201{
202 timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
203}
204
205static inline void set_next_event(const int access, unsigned long evt)
206{
207 unsigned long ctrl;
208 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
128 ctrl |= ARCH_TIMER_CTRL_ENABLE; 209 ctrl |= ARCH_TIMER_CTRL_ENABLE;
129 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 210 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
211 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
212 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
213}
130 214
131 arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt); 215static int arch_timer_set_next_event_virt(unsigned long evt,
132 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl); 216 struct clock_event_device *unused)
217{
218 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
219 return 0;
220}
133 221
222static int arch_timer_set_next_event_phys(unsigned long evt,
223 struct clock_event_device *unused)
224{
225 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
134 return 0; 226 return 0;
135} 227}
136 228
137static int __cpuinit arch_timer_setup(struct clock_event_device *clk) 229static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
138{ 230{
139 /* Be safe... */
140 arch_timer_disable();
141
142 clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; 231 clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
143 clk->name = "arch_sys_timer"; 232 clk->name = "arch_sys_timer";
144 clk->rating = 450; 233 clk->rating = 450;
145 clk->set_mode = arch_timer_set_mode; 234 if (arch_timer_use_virtual) {
146 clk->set_next_event = arch_timer_set_next_event; 235 clk->irq = arch_timer_ppi[VIRT_PPI];
147 clk->irq = arch_timer_ppi; 236 clk->set_mode = arch_timer_set_mode_virt;
237 clk->set_next_event = arch_timer_set_next_event_virt;
238 } else {
239 clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
240 clk->set_mode = arch_timer_set_mode_phys;
241 clk->set_next_event = arch_timer_set_next_event_phys;
242 }
243
244 clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
148 245
149 clockevents_config_and_register(clk, arch_timer_rate, 246 clockevents_config_and_register(clk, arch_timer_rate,
150 0xf, 0x7fffffff); 247 0xf, 0x7fffffff);
151 248
152 *__this_cpu_ptr(arch_timer_evt) = clk; 249 *__this_cpu_ptr(arch_timer_evt) = clk;
153 250
154 enable_percpu_irq(clk->irq, 0); 251 if (arch_timer_use_virtual)
155 if (arch_timer_ppi2) 252 enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
156 enable_percpu_irq(arch_timer_ppi2, 0); 253 else {
254 enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
255 if (arch_timer_ppi[PHYS_NONSECURE_PPI])
256 enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
257 }
157 258
158 return 0; 259 return 0;
159} 260}
@@ -173,8 +274,8 @@ static int arch_timer_available(void)
173 return -ENXIO; 274 return -ENXIO;
174 275
175 if (arch_timer_rate == 0) { 276 if (arch_timer_rate == 0) {
176 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0); 277 freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS,
177 freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ); 278 ARCH_TIMER_REG_FREQ);
178 279
179 /* Check the timer frequency. */ 280 /* Check the timer frequency. */
180 if (freq == 0) { 281 if (freq == 0) {
@@ -185,52 +286,57 @@ static int arch_timer_available(void)
185 arch_timer_rate = freq; 286 arch_timer_rate = freq;
186 } 287 }
187 288
188 pr_info_once("Architected local timer running at %lu.%02luMHz.\n", 289 pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
189 arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100); 290 arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100,
291 arch_timer_use_virtual ? "virt" : "phys");
190 return 0; 292 return 0;
191} 293}
192 294
193static inline cycle_t arch_counter_get_cntpct(void) 295static u32 notrace arch_counter_get_cntpct32(void)
194{ 296{
195 u32 cvall, cvalh; 297 cycle_t cnt = arch_counter_get_cntpct();
196
197 asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
198 298
199 return ((cycle_t) cvalh << 32) | cvall; 299 /*
200} 300 * The sched_clock infrastructure only knows about counters
201 301 * with at most 32bits. Forget about the upper 24 bits for the
202static inline cycle_t arch_counter_get_cntvct(void) 302 * time being...
203{ 303 */
204 u32 cvall, cvalh; 304 return (u32)cnt;
205
206 asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
207
208 return ((cycle_t) cvalh << 32) | cvall;
209} 305}
210 306
211static u32 notrace arch_counter_get_cntvct32(void) 307static u32 notrace arch_counter_get_cntvct32(void)
212{ 308{
213 cycle_t cntvct = arch_counter_get_cntvct(); 309 cycle_t cnt = arch_counter_get_cntvct();
214 310
215 /* 311 /*
216 * The sched_clock infrastructure only knows about counters 312 * The sched_clock infrastructure only knows about counters
217 * with at most 32bits. Forget about the upper 24 bits for the 313 * with at most 32bits. Forget about the upper 24 bits for the
218 * time being... 314 * time being...
219 */ 315 */
220 return (u32)(cntvct & (u32)~0); 316 return (u32)cnt;
221} 317}
222 318
223static cycle_t arch_counter_read(struct clocksource *cs) 319static cycle_t arch_counter_read(struct clocksource *cs)
224{ 320{
321 /*
322 * Always use the physical counter for the clocksource.
323 * CNTHCTL.PL1PCTEN must be set to 1.
324 */
225 return arch_counter_get_cntpct(); 325 return arch_counter_get_cntpct();
226} 326}
227 327
228int read_current_timer(unsigned long *timer_val) 328static unsigned long arch_timer_read_current_timer(void)
229{ 329{
230 if (!arch_timer_rate) 330 return arch_counter_get_cntpct();
231 return -ENXIO; 331}
232 *timer_val = arch_counter_get_cntpct(); 332
233 return 0; 333static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
334{
335 /*
336 * Always use the physical counter for the clocksource.
337 * CNTHCTL.PL1PCTEN must be set to 1.
338 */
339 return arch_counter_get_cntpct();
234} 340}
235 341
236static struct clocksource clocksource_counter = { 342static struct clocksource clocksource_counter = {
@@ -241,14 +347,32 @@ static struct clocksource clocksource_counter = {
241 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 347 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
242}; 348};
243 349
350static struct cyclecounter cyclecounter = {
351 .read = arch_counter_read_cc,
352 .mask = CLOCKSOURCE_MASK(56),
353};
354
355static struct timecounter timecounter;
356
357struct timecounter *arch_timer_get_timecounter(void)
358{
359 return &timecounter;
360}
361
244static void __cpuinit arch_timer_stop(struct clock_event_device *clk) 362static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
245{ 363{
246 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", 364 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
247 clk->irq, smp_processor_id()); 365 clk->irq, smp_processor_id());
248 disable_percpu_irq(clk->irq); 366
249 if (arch_timer_ppi2) 367 if (arch_timer_use_virtual)
250 disable_percpu_irq(arch_timer_ppi2); 368 disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
251 arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk); 369 else {
370 disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
371 if (arch_timer_ppi[PHYS_NONSECURE_PPI])
372 disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
373 }
374
375 clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
252} 376}
253 377
254static struct local_timer_ops arch_timer_ops __cpuinitdata = { 378static struct local_timer_ops arch_timer_ops __cpuinitdata = {
@@ -261,36 +385,48 @@ static struct clock_event_device arch_timer_global_evt;
261static int __init arch_timer_register(void) 385static int __init arch_timer_register(void)
262{ 386{
263 int err; 387 int err;
388 int ppi;
264 389
265 err = arch_timer_available(); 390 err = arch_timer_available();
266 if (err) 391 if (err)
267 return err; 392 goto out;
268 393
269 arch_timer_evt = alloc_percpu(struct clock_event_device *); 394 arch_timer_evt = alloc_percpu(struct clock_event_device *);
270 if (!arch_timer_evt) 395 if (!arch_timer_evt) {
271 return -ENOMEM; 396 err = -ENOMEM;
397 goto out;
398 }
272 399
273 clocksource_register_hz(&clocksource_counter, arch_timer_rate); 400 clocksource_register_hz(&clocksource_counter, arch_timer_rate);
401 cyclecounter.mult = clocksource_counter.mult;
402 cyclecounter.shift = clocksource_counter.shift;
403 timecounter_init(&timecounter, &cyclecounter,
404 arch_counter_get_cntpct());
405
406 if (arch_timer_use_virtual) {
407 ppi = arch_timer_ppi[VIRT_PPI];
408 err = request_percpu_irq(ppi, arch_timer_handler_virt,
409 "arch_timer", arch_timer_evt);
410 } else {
411 ppi = arch_timer_ppi[PHYS_SECURE_PPI];
412 err = request_percpu_irq(ppi, arch_timer_handler_phys,
413 "arch_timer", arch_timer_evt);
414 if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
415 ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
416 err = request_percpu_irq(ppi, arch_timer_handler_phys,
417 "arch_timer", arch_timer_evt);
418 if (err)
419 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
420 arch_timer_evt);
421 }
422 }
274 423
275 err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
276 "arch_timer", arch_timer_evt);
277 if (err) { 424 if (err) {
278 pr_err("arch_timer: can't register interrupt %d (%d)\n", 425 pr_err("arch_timer: can't register interrupt %d (%d)\n",
279 arch_timer_ppi, err); 426 ppi, err);
280 goto out_free; 427 goto out_free;
281 } 428 }
282 429
283 if (arch_timer_ppi2) {
284 err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
285 "arch_timer", arch_timer_evt);
286 if (err) {
287 pr_err("arch_timer: can't register interrupt %d (%d)\n",
288 arch_timer_ppi2, err);
289 arch_timer_ppi2 = 0;
290 goto out_free_irq;
291 }
292 }
293
294 err = local_timer_register(&arch_timer_ops); 430 err = local_timer_register(&arch_timer_ops);
295 if (err) { 431 if (err) {
296 /* 432 /*
@@ -302,21 +438,29 @@ static int __init arch_timer_register(void)
302 arch_timer_global_evt.cpumask = cpumask_of(0); 438 arch_timer_global_evt.cpumask = cpumask_of(0);
303 err = arch_timer_setup(&arch_timer_global_evt); 439 err = arch_timer_setup(&arch_timer_global_evt);
304 } 440 }
305
306 if (err) 441 if (err)
307 goto out_free_irq; 442 goto out_free_irq;
308 443
309 init_current_timer_delay(arch_timer_rate); 444 /* Use the architected timer for the delay loop. */
445 arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
446 arch_delay_timer.freq = arch_timer_rate;
447 register_current_timer_delay(&arch_delay_timer);
310 return 0; 448 return 0;
311 449
312out_free_irq: 450out_free_irq:
313 free_percpu_irq(arch_timer_ppi, arch_timer_evt); 451 if (arch_timer_use_virtual)
314 if (arch_timer_ppi2) 452 free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
315 free_percpu_irq(arch_timer_ppi2, arch_timer_evt); 453 else {
454 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
455 arch_timer_evt);
456 if (arch_timer_ppi[PHYS_NONSECURE_PPI])
457 free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
458 arch_timer_evt);
459 }
316 460
317out_free: 461out_free:
318 free_percpu(arch_timer_evt); 462 free_percpu(arch_timer_evt);
319 463out:
320 return err; 464 return err;
321} 465}
322 466
@@ -329,6 +473,7 @@ int __init arch_timer_of_register(void)
329{ 473{
330 struct device_node *np; 474 struct device_node *np;
331 u32 freq; 475 u32 freq;
476 int i;
332 477
333 np = of_find_matching_node(NULL, arch_timer_of_match); 478 np = of_find_matching_node(NULL, arch_timer_of_match);
334 if (!np) { 479 if (!np) {
@@ -340,22 +485,40 @@ int __init arch_timer_of_register(void)
340 if (!of_property_read_u32(np, "clock-frequency", &freq)) 485 if (!of_property_read_u32(np, "clock-frequency", &freq))
341 arch_timer_rate = freq; 486 arch_timer_rate = freq;
342 487
343 arch_timer_ppi = irq_of_parse_and_map(np, 0); 488 for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
344 arch_timer_ppi2 = irq_of_parse_and_map(np, 1); 489 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
345 pr_info("arch_timer: found %s irqs %d %d\n", 490
346 np->name, arch_timer_ppi, arch_timer_ppi2); 491 /*
492 * If no interrupt provided for virtual timer, we'll have to
493 * stick to the physical timer. It'd better be accessible...
494 */
495 if (!arch_timer_ppi[VIRT_PPI]) {
496 arch_timer_use_virtual = false;
497
498 if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
499 !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
500 pr_warn("arch_timer: No interrupt available, giving up\n");
501 return -EINVAL;
502 }
503 }
347 504
348 return arch_timer_register(); 505 return arch_timer_register();
349} 506}
350 507
351int __init arch_timer_sched_clock_init(void) 508int __init arch_timer_sched_clock_init(void)
352{ 509{
510 u32 (*cnt32)(void);
353 int err; 511 int err;
354 512
355 err = arch_timer_available(); 513 err = arch_timer_available();
356 if (err) 514 if (err)
357 return err; 515 return err;
358 516
359 setup_sched_clock(arch_counter_get_cntvct32, 32, arch_timer_rate); 517 if (arch_timer_use_virtual)
518 cnt32 = arch_counter_get_cntvct32;
519 else
520 cnt32 = arch_counter_get_cntpct32;
521
522 setup_sched_clock(cnt32, 32, arch_timer_rate);
360 return 0; 523 return 0;
361} 524}
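
The arch_timer.c changes above drop the old init_current_timer_delay() hook in favour of the registration mechanism from "ARM: 7538/1: delay: add registration mechanism for delay timer sources". Below is a minimal sketch of how another timer driver might feed the same API: struct delay_timer, its read_current_timer and freq fields, and register_current_timer_delay() come from this series, while the my_timer_* names and the MMIO counter layout are hypothetical.

    #include <linux/init.h>
    #include <linux/io.h>
    #include <asm/delay.h>

    static void __iomem *my_timer_base;	/* hypothetical, mapped elsewhere (not shown) */
    static unsigned long my_timer_rate;	/* hypothetical counter rate, in ticks per second */

    static unsigned long my_timer_read(void)
    {
    	/* free-running upcounter; reading it at offset 0x0 is an assumption */
    	return readl_relaxed(my_timer_base);
    }

    static struct delay_timer my_delay_timer;

    /* called from the platform's timer init once the counter is running */
    static int __init my_timer_delay_init(void)
    {
    	my_delay_timer.read_current_timer = my_timer_read;
    	my_delay_timer.freq = my_timer_rate;
    	register_current_timer_delay(&my_delay_timer);
    	return 0;
    }

Once registered, udelay() and friends are calibrated against this counter instead of a bogomips loop, which is exactly what arch_timer_register() now does with the architected counter.
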
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 1429d8989fb9..c985b481192c 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -59,10 +59,12 @@ int main(void)
   DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp));
   DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
+#ifdef CONFIG_VFP
   DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate));
 #ifdef CONFIG_SMP
   DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu));
 #endif
+#endif
 #ifdef CONFIG_ARM_THUMBEE
   DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state));
 #endif
diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h
index e5f028d214a1..9edc9692332d 100644
--- a/arch/arm/kernel/atags.h
+++ b/arch/arm/kernel/atags.h
@@ -3,3 +3,17 @@ extern void save_atags(struct tag *tags);
 #else
 static inline void save_atags(struct tag *tags) { }
 #endif
+
+void convert_to_tag_list(struct tag *tags);
+
+#ifdef CONFIG_ATAGS
+struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr);
+#else
+static inline struct machine_desc *
+setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
+{
+	early_print("no ATAGS support: can't continue\n");
+	while (true);
+	unreachable();
+}
+#endif
diff --git a/arch/arm/kernel/compat.c b/arch/arm/kernel/atags_compat.c
index 925652318b8b..5236ad38f417 100644
--- a/arch/arm/kernel/compat.c
+++ b/arch/arm/kernel/atags_compat.c
@@ -1,5 +1,5 @@
 /*
- * linux/arch/arm/kernel/compat.c
+ * linux/arch/arm/kernel/atags_compat.c
  *
  * Copyright (C) 2001 Russell King
  *
@@ -26,7 +26,7 @@
 
 #include <asm/mach/arch.h>
 
-#include "compat.h"
+#include "atags.h"
 
 /*
  * Usage:
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
new file mode 100644
index 000000000000..14512e6931d8
--- /dev/null
+++ b/arch/arm/kernel/atags_parse.c
@@ -0,0 +1,238 @@
1/*
2 * Tag parsing.
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * This is the traditional way of passing data to the kernel at boot time. Rather
13 * than passing a fixed inflexible structure to the kernel, we pass a list
14 * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE
15 * tag for the list to be recognised (to distinguish the tagged list from
16 * a param_struct). The list is terminated with a zero-length tag (this tag
17 * is not parsed in any way).
18 */
19
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/fs.h>
23#include <linux/root_dev.h>
24#include <linux/screen_info.h>
25
26#include <asm/setup.h>
27#include <asm/system_info.h>
28#include <asm/page.h>
29#include <asm/mach/arch.h>
30
31#include "atags.h"
32
33static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
34
35#ifndef MEM_SIZE
36#define MEM_SIZE (16*1024*1024)
37#endif
38
39static struct {
40 struct tag_header hdr1;
41 struct tag_core core;
42 struct tag_header hdr2;
43 struct tag_mem32 mem;
44 struct tag_header hdr3;
45} default_tags __initdata = {
46 { tag_size(tag_core), ATAG_CORE },
47 { 1, PAGE_SIZE, 0xff },
48 { tag_size(tag_mem32), ATAG_MEM },
49 { MEM_SIZE },
50 { 0, ATAG_NONE }
51};
52
53static int __init parse_tag_core(const struct tag *tag)
54{
55 if (tag->hdr.size > 2) {
56 if ((tag->u.core.flags & 1) == 0)
57 root_mountflags &= ~MS_RDONLY;
58 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
59 }
60 return 0;
61}
62
63__tagtable(ATAG_CORE, parse_tag_core);
64
65static int __init parse_tag_mem32(const struct tag *tag)
66{
67 return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
68}
69
70__tagtable(ATAG_MEM, parse_tag_mem32);
71
72#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
73static int __init parse_tag_videotext(const struct tag *tag)
74{
75 screen_info.orig_x = tag->u.videotext.x;
76 screen_info.orig_y = tag->u.videotext.y;
77 screen_info.orig_video_page = tag->u.videotext.video_page;
78 screen_info.orig_video_mode = tag->u.videotext.video_mode;
79 screen_info.orig_video_cols = tag->u.videotext.video_cols;
80 screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
81 screen_info.orig_video_lines = tag->u.videotext.video_lines;
82 screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
83 screen_info.orig_video_points = tag->u.videotext.video_points;
84 return 0;
85}
86
87__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
88#endif
89
90#ifdef CONFIG_BLK_DEV_RAM
91static int __init parse_tag_ramdisk(const struct tag *tag)
92{
93 extern int rd_size, rd_image_start, rd_prompt, rd_doload;
94
95 rd_image_start = tag->u.ramdisk.start;
96 rd_doload = (tag->u.ramdisk.flags & 1) == 0;
97 rd_prompt = (tag->u.ramdisk.flags & 2) == 0;
98
99 if (tag->u.ramdisk.size)
100 rd_size = tag->u.ramdisk.size;
101
102 return 0;
103}
104
105__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
106#endif
107
108static int __init parse_tag_serialnr(const struct tag *tag)
109{
110 system_serial_low = tag->u.serialnr.low;
111 system_serial_high = tag->u.serialnr.high;
112 return 0;
113}
114
115__tagtable(ATAG_SERIAL, parse_tag_serialnr);
116
117static int __init parse_tag_revision(const struct tag *tag)
118{
119 system_rev = tag->u.revision.rev;
120 return 0;
121}
122
123__tagtable(ATAG_REVISION, parse_tag_revision);
124
125static int __init parse_tag_cmdline(const struct tag *tag)
126{
127#if defined(CONFIG_CMDLINE_EXTEND)
128 strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
129 strlcat(default_command_line, tag->u.cmdline.cmdline,
130 COMMAND_LINE_SIZE);
131#elif defined(CONFIG_CMDLINE_FORCE)
132 pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
133#else
134 strlcpy(default_command_line, tag->u.cmdline.cmdline,
135 COMMAND_LINE_SIZE);
136#endif
137 return 0;
138}
139
140__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
141
142/*
143 * Scan the tag table for this tag, and call its parse function.
144 * The tag table is built by the linker from all the __tagtable
145 * declarations.
146 */
147static int __init parse_tag(const struct tag *tag)
148{
149 extern struct tagtable __tagtable_begin, __tagtable_end;
150 struct tagtable *t;
151
152 for (t = &__tagtable_begin; t < &__tagtable_end; t++)
153 if (tag->hdr.tag == t->tag) {
154 t->parse(tag);
155 break;
156 }
157
158 return t < &__tagtable_end;
159}
160
161/*
162 * Parse all tags in the list, checking both the global and architecture
163 * specific tag tables.
164 */
165static void __init parse_tags(const struct tag *t)
166{
167 for (; t->hdr.size; t = tag_next(t))
168 if (!parse_tag(t))
169 printk(KERN_WARNING
170 "Ignoring unrecognised tag 0x%08x\n",
171 t->hdr.tag);
172}
173
174static void __init squash_mem_tags(struct tag *tag)
175{
176 for (; tag->hdr.size; tag = tag_next(tag))
177 if (tag->hdr.tag == ATAG_MEM)
178 tag->hdr.tag = ATAG_NONE;
179}
180
181struct machine_desc * __init setup_machine_tags(phys_addr_t __atags_pointer,
182 unsigned int machine_nr)
183{
184 struct tag *tags = (struct tag *)&default_tags;
185 struct machine_desc *mdesc = NULL, *p;
186 char *from = default_command_line;
187
188 default_tags.mem.start = PHYS_OFFSET;
189
190 /*
191 * locate machine in the list of supported machines.
192 */
193 for_each_machine_desc(p)
194 if (machine_nr == p->nr) {
195 printk("Machine: %s\n", p->name);
196 mdesc = p;
197 break;
198 }
199
200 if (!mdesc) {
201 early_print("\nError: unrecognized/unsupported machine ID"
202 " (r1 = 0x%08x).\n\n", machine_nr);
203 dump_machine_table(); /* does not return */
204 }
205
206 if (__atags_pointer)
207 tags = phys_to_virt(__atags_pointer);
208 else if (mdesc->atag_offset)
209 tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
210
211#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
212 /*
213 * If we have the old style parameters, convert them to
214 * a tag list.
215 */
216 if (tags->hdr.tag != ATAG_CORE)
217 convert_to_tag_list(tags);
218#endif
219 if (tags->hdr.tag != ATAG_CORE) {
220 early_print("Warning: Neither atags nor dtb found\n");
221 tags = (struct tag *)&default_tags;
222 }
223
224 if (mdesc->fixup)
225 mdesc->fixup(tags, &from, &meminfo);
226
227 if (tags->hdr.tag == ATAG_CORE) {
228 if (meminfo.nr_banks != 0)
229 squash_mem_tags(tags);
230 save_atags(tags);
231 parse_tags(tags);
232 }
233
234 /* parse_early_param needs a boot_command_line */
235 strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
236
237 return mdesc;
238}
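
The new atags_parse.c keeps the long-standing linker-built __tagtable mechanism: parse_tag() walks every entry between __tagtable_begin and __tagtable_end and dispatches on hdr.tag. As an illustration only, an extra parser could be hooked into the same table as sketched below; the ATAG_MY_BOARD value and the handler are hypothetical and merely mirror the __tagtable() usage seen in this file.

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <asm/setup.h>

    #define ATAG_MY_BOARD	0x41000099	/* hypothetical vendor tag value */

    static int __init parse_tag_my_board(const struct tag *tag)
    {
    	/* a real tag would define its own member in the tag union */
    	pr_info("my_board tag seen, %u words\n", tag->hdr.size);
    	return 0;
    }

    __tagtable(ATAG_MY_BOARD, parse_tag_my_board);
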
diff --git a/arch/arm/kernel/atags.c b/arch/arm/kernel/atags_proc.c
index 42a1a1415fa6..42a1a1415fa6 100644
--- a/arch/arm/kernel/atags.c
+++ b/arch/arm/kernel/atags_proc.c
diff --git a/arch/arm/kernel/compat.h b/arch/arm/kernel/compat.h
deleted file mode 100644
index 39264ab1b9c6..000000000000
--- a/arch/arm/kernel/compat.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * linux/arch/arm/kernel/compat.h
- *
- * Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-extern void convert_to_tag_list(struct tag *tags);
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 978eac57e04a..f45987037bf1 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -94,6 +94,15 @@ ENDPROC(ret_from_fork)
 	.equ NR_syscalls,0
 #define CALL(x) .equ NR_syscalls,NR_syscalls+1
 #include "calls.S"
+
+/*
+ * Ensure that the system call table is equal to __NR_syscalls,
+ * which is the value the rest of the system sees
+ */
+.ifne NR_syscalls - __NR_syscalls
+.error "__NR_syscalls is not equal to the size of the syscall table"
+.endif
+
 #undef CALL
 #define CALL(x) .long x
 
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index dfcdb9f7c126..e29c3337ca81 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -8,7 +8,9 @@
 #include <linux/reboot.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/memblock.h>
 #include <asm/pgtable.h>
+#include <linux/of_fdt.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
@@ -32,6 +34,29 @@ static atomic_t waiting_for_crash_ipi;
 
 int machine_kexec_prepare(struct kimage *image)
 {
+	struct kexec_segment *current_segment;
+	__be32 header;
+	int i, err;
+
+	/*
+	 * No segment at default ATAGs address. try to locate
+	 * a dtb using magic.
+	 */
+	for (i = 0; i < image->nr_segments; i++) {
+		current_segment = &image->segment[i];
+
+		err = memblock_is_region_memory(current_segment->mem,
+						current_segment->memsz);
+		if (err)
+			return - EINVAL;
+
+		err = get_user(header, (__be32*)current_segment->buf);
+		if (err)
+			return err;
+
+		if (be32_to_cpu(header) == OF_DT_HEADER)
+			kexec_boot_atags = current_segment->mem;
+	}
 	return 0;
 }
 
@@ -122,7 +147,9 @@ void machine_kexec(struct kimage *image)
 	kexec_start_address = image->start;
 	kexec_indirection_page = page_list;
 	kexec_mach_type = machine_arch_type;
-	kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
+	if (!kexec_boot_atags)
+		kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
+
 
 	/* copy our kernel relocation code to the control code page */
 	memcpy(reboot_code_buffer,
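
machine_kexec_prepare() above now scans every kexec segment and, when the first word matches the flattened-device-tree magic (OF_DT_HEADER), treats that segment as the boot ATAGs/DTB address. The following is a self-contained sketch of the same magic test applied to an arbitrary buffer outside the kexec path; the helper name is made up, and the 0xd00dfeed constant simply mirrors OF_DT_HEADER.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>	/* ntohl(): the FDT header is stored big-endian */

    #define MY_OF_DT_HEADER	0xd00dfeedU	/* flattened device tree magic */

    /* hypothetical helper: does this buffer start with a device tree blob? */
    static bool buffer_is_dtb(const void *buf)
    {
    	uint32_t magic;

    	memcpy(&magic, buf, sizeof(magic));	/* avoid an unaligned load */
    	return ntohl(magic) == MY_OF_DT_HEADER;
    }
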
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 3e0fc5f7ed4b..739db3a1b2d2 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -30,6 +30,9 @@
 #include <asm/pgtable.h>
 #include <asm/traps.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
 #define REG_PC 15
 #define REG_PSR 16
 /*
@@ -918,11 +921,11 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
 {
 	unsigned long ip;
 
+	current_thread_info()->syscall = scno;
+
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		return scno;
 
-	current_thread_info()->syscall = scno;
-
 	/*
 	 * IP is used to denote syscall entry/exit:
 	 * IP = 0 -> entry, =1 -> exit
@@ -941,15 +944,19 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
 
 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
 {
-	int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER);
+	scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER);
+	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+		trace_sys_enter(regs, scno);
 	audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1,
 			    regs->ARM_r2, regs->ARM_r3);
-	return ret;
+	return scno;
 }
 
 asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno)
 {
-	int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT);
+	scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT);
+	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+		trace_sys_exit(regs, scno);
 	audit_syscall_exit(regs);
-	return ret;
+	return scno;
 }
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index f4515393248d..e21bac20d90d 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/syscore_ops.h>
 #include <linux/timer.h>
@@ -27,6 +28,9 @@ struct clock_data {
 
 static void sched_clock_poll(unsigned long wrap_ticks);
 static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
+static int irqtime = -1;
+
+core_param(irqtime, irqtime, int, 0400);
 
 static struct clock_data cd = {
 	.mult = NSEC_PER_SEC / HZ,
@@ -157,6 +161,10 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	 */
 	cd.epoch_ns = 0;
 
+	/* Enable IRQ time accounting if we have a fast enough sched_clock */
+	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
+		enable_sched_clock_irqtime();
+
 	pr_debug("Registered %pF as sched_clock source\n", read);
 }
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 725f9f2a9541..febafa0f552d 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -21,11 +21,9 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kexec.h> 22#include <linux/kexec.h>
23#include <linux/of_fdt.h> 23#include <linux/of_fdt.h>
24#include <linux/root_dev.h>
25#include <linux/cpu.h> 24#include <linux/cpu.h>
26#include <linux/interrupt.h> 25#include <linux/interrupt.h>
27#include <linux/smp.h> 26#include <linux/smp.h>
28#include <linux/fs.h>
29#include <linux/proc_fs.h> 27#include <linux/proc_fs.h>
30#include <linux/memblock.h> 28#include <linux/memblock.h>
31#include <linux/bug.h> 29#include <linux/bug.h>
@@ -56,15 +54,9 @@
56#include <asm/unwind.h> 54#include <asm/unwind.h>
57#include <asm/memblock.h> 55#include <asm/memblock.h>
58 56
59#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
60#include "compat.h"
61#endif
62#include "atags.h" 57#include "atags.h"
63#include "tcm.h" 58#include "tcm.h"
64 59
65#ifndef MEM_SIZE
66#define MEM_SIZE (16*1024*1024)
67#endif
68 60
69#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE) 61#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
70char fpe_type[8]; 62char fpe_type[8];
@@ -145,7 +137,6 @@ static const char *machine_name;
145static char __initdata cmd_line[COMMAND_LINE_SIZE]; 137static char __initdata cmd_line[COMMAND_LINE_SIZE];
146struct machine_desc *machine_desc __initdata; 138struct machine_desc *machine_desc __initdata;
147 139
148static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
149static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; 140static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
150#define ENDIANNESS ((char)endian_test.l) 141#define ENDIANNESS ((char)endian_test.l)
151 142
@@ -583,21 +574,6 @@ static int __init early_mem(char *p)
583} 574}
584early_param("mem", early_mem); 575early_param("mem", early_mem);
585 576
586static void __init
587setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
588{
589#ifdef CONFIG_BLK_DEV_RAM
590 extern int rd_size, rd_image_start, rd_prompt, rd_doload;
591
592 rd_image_start = image_start;
593 rd_prompt = prompt;
594 rd_doload = doload;
595
596 if (rd_sz)
597 rd_size = rd_sz;
598#endif
599}
600
601static void __init request_standard_resources(struct machine_desc *mdesc) 577static void __init request_standard_resources(struct machine_desc *mdesc)
602{ 578{
603 struct memblock_region *region; 579 struct memblock_region *region;
@@ -643,35 +619,6 @@ static void __init request_standard_resources(struct machine_desc *mdesc)
643 request_resource(&ioport_resource, &lp2); 619 request_resource(&ioport_resource, &lp2);
644} 620}
645 621
646/*
647 * Tag parsing.
648 *
649 * This is the new way of passing data to the kernel at boot time. Rather
650 * than passing a fixed inflexible structure to the kernel, we pass a list
651 * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE
652 * tag for the list to be recognised (to distinguish the tagged list from
653 * a param_struct). The list is terminated with a zero-length tag (this tag
654 * is not parsed in any way).
655 */
656static int __init parse_tag_core(const struct tag *tag)
657{
658 if (tag->hdr.size > 2) {
659 if ((tag->u.core.flags & 1) == 0)
660 root_mountflags &= ~MS_RDONLY;
661 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
662 }
663 return 0;
664}
665
666__tagtable(ATAG_CORE, parse_tag_core);
667
668static int __init parse_tag_mem32(const struct tag *tag)
669{
670 return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
671}
672
673__tagtable(ATAG_MEM, parse_tag_mem32);
674
675#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) 622#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
676struct screen_info screen_info = { 623struct screen_info screen_info = {
677 .orig_video_lines = 30, 624 .orig_video_lines = 30,
@@ -681,117 +628,8 @@ struct screen_info screen_info = {
681 .orig_video_isVGA = 1, 628 .orig_video_isVGA = 1,
682 .orig_video_points = 8 629 .orig_video_points = 8
683}; 630};
684
685static int __init parse_tag_videotext(const struct tag *tag)
686{
687 screen_info.orig_x = tag->u.videotext.x;
688 screen_info.orig_y = tag->u.videotext.y;
689 screen_info.orig_video_page = tag->u.videotext.video_page;
690 screen_info.orig_video_mode = tag->u.videotext.video_mode;
691 screen_info.orig_video_cols = tag->u.videotext.video_cols;
692 screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
693 screen_info.orig_video_lines = tag->u.videotext.video_lines;
694 screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
695 screen_info.orig_video_points = tag->u.videotext.video_points;
696 return 0;
697}
698
699__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
700#endif 631#endif
701 632
702static int __init parse_tag_ramdisk(const struct tag *tag)
703{
704 setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
705 (tag->u.ramdisk.flags & 2) == 0,
706 tag->u.ramdisk.start, tag->u.ramdisk.size);
707 return 0;
708}
709
710__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
711
712static int __init parse_tag_serialnr(const struct tag *tag)
713{
714 system_serial_low = tag->u.serialnr.low;
715 system_serial_high = tag->u.serialnr.high;
716 return 0;
717}
718
719__tagtable(ATAG_SERIAL, parse_tag_serialnr);
720
721static int __init parse_tag_revision(const struct tag *tag)
722{
723 system_rev = tag->u.revision.rev;
724 return 0;
725}
726
727__tagtable(ATAG_REVISION, parse_tag_revision);
728
729static int __init parse_tag_cmdline(const struct tag *tag)
730{
731#if defined(CONFIG_CMDLINE_EXTEND)
732 strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
733 strlcat(default_command_line, tag->u.cmdline.cmdline,
734 COMMAND_LINE_SIZE);
735#elif defined(CONFIG_CMDLINE_FORCE)
736 pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
737#else
738 strlcpy(default_command_line, tag->u.cmdline.cmdline,
739 COMMAND_LINE_SIZE);
740#endif
741 return 0;
742}
743
744__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
745
746/*
747 * Scan the tag table for this tag, and call its parse function.
748 * The tag table is built by the linker from all the __tagtable
749 * declarations.
750 */
751static int __init parse_tag(const struct tag *tag)
752{
753 extern struct tagtable __tagtable_begin, __tagtable_end;
754 struct tagtable *t;
755
756 for (t = &__tagtable_begin; t < &__tagtable_end; t++)
757 if (tag->hdr.tag == t->tag) {
758 t->parse(tag);
759 break;
760 }
761
762 return t < &__tagtable_end;
763}
764
765/*
766 * Parse all tags in the list, checking both the global and architecture
767 * specific tag tables.
768 */
769static void __init parse_tags(const struct tag *t)
770{
771 for (; t->hdr.size; t = tag_next(t))
772 if (!parse_tag(t))
773 printk(KERN_WARNING
774 "Ignoring unrecognised tag 0x%08x\n",
775 t->hdr.tag);
776}
777
778/*
779 * This holds our defaults.
780 */
781static struct init_tags {
782 struct tag_header hdr1;
783 struct tag_core core;
784 struct tag_header hdr2;
785 struct tag_mem32 mem;
786 struct tag_header hdr3;
787} init_tags __initdata = {
788 { tag_size(tag_core), ATAG_CORE },
789 { 1, PAGE_SIZE, 0xff },
790 { tag_size(tag_mem32), ATAG_MEM },
791 { MEM_SIZE },
792 { 0, ATAG_NONE }
793};
794
795static int __init customize_machine(void) 633static int __init customize_machine(void)
796{ 634{
797 /* customizes platform devices, or adds new ones */ 635 /* customizes platform devices, or adds new ones */
@@ -858,78 +696,6 @@ static void __init reserve_crashkernel(void)
858static inline void reserve_crashkernel(void) {} 696static inline void reserve_crashkernel(void) {}
859#endif /* CONFIG_KEXEC */ 697#endif /* CONFIG_KEXEC */
860 698
861static void __init squash_mem_tags(struct tag *tag)
862{
863 for (; tag->hdr.size; tag = tag_next(tag))
864 if (tag->hdr.tag == ATAG_MEM)
865 tag->hdr.tag = ATAG_NONE;
866}
867
868static struct machine_desc * __init setup_machine_tags(unsigned int nr)
869{
870 struct tag *tags = (struct tag *)&init_tags;
871 struct machine_desc *mdesc = NULL, *p;
872 char *from = default_command_line;
873
874 init_tags.mem.start = PHYS_OFFSET;
875
876 /*
877 * locate machine in the list of supported machines.
878 */
879 for_each_machine_desc(p)
880 if (nr == p->nr) {
881 printk("Machine: %s\n", p->name);
882 mdesc = p;
883 break;
884 }
885
886 if (!mdesc) {
887 early_print("\nError: unrecognized/unsupported machine ID"
888 " (r1 = 0x%08x).\n\n", nr);
889 dump_machine_table(); /* does not return */
890 }
891
892 if (__atags_pointer)
893 tags = phys_to_virt(__atags_pointer);
894 else if (mdesc->atag_offset)
895 tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
896
897#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
898 /*
899 * If we have the old style parameters, convert them to
900 * a tag list.
901 */
902 if (tags->hdr.tag != ATAG_CORE)
903 convert_to_tag_list(tags);
904#endif
905
906 if (tags->hdr.tag != ATAG_CORE) {
907#if defined(CONFIG_OF)
908 /*
909 * If CONFIG_OF is set, then assume this is a reasonably
910 * modern system that should pass boot parameters
911 */
912 early_print("Warning: Neither atags nor dtb found\n");
913#endif
914 tags = (struct tag *)&init_tags;
915 }
916
917 if (mdesc->fixup)
918 mdesc->fixup(tags, &from, &meminfo);
919
920 if (tags->hdr.tag == ATAG_CORE) {
921 if (meminfo.nr_banks != 0)
922 squash_mem_tags(tags);
923 save_atags(tags);
924 parse_tags(tags);
925 }
926
927 /* parse_early_param needs a boot_command_line */
928 strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
929
930 return mdesc;
931}
932
933static int __init meminfo_cmp(const void *_a, const void *_b) 699static int __init meminfo_cmp(const void *_a, const void *_b)
934{ 700{
935 const struct membank *a = _a, *b = _b; 701 const struct membank *a = _a, *b = _b;
@@ -944,7 +710,7 @@ void __init setup_arch(char **cmdline_p)
944 setup_processor(); 710 setup_processor();
945 mdesc = setup_machine_fdt(__atags_pointer); 711 mdesc = setup_machine_fdt(__atags_pointer);
946 if (!mdesc) 712 if (!mdesc)
947 mdesc = setup_machine_tags(machine_arch_type); 713 mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);
948 machine_desc = mdesc; 714 machine_desc = mdesc;
949 machine_name = mdesc->name; 715 machine_name = mdesc->name;
950 716
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index dea7a925c7e2..d100eacdb798 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -59,7 +59,8 @@ struct secondary_data secondary_data;
 volatile int __cpuinitdata pen_release = -1;
 
 enum ipi_msg_type {
-	IPI_TIMER = 2,
+	IPI_WAKEUP,
+	IPI_TIMER,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
@@ -414,7 +415,8 @@ void arch_send_call_function_single_ipi(int cpu)
 }
 
 static const char *ipi_types[NR_IPI] = {
-#define S(x,s) [x - IPI_TIMER] = s
+#define S(x,s) [x] = s
+	S(IPI_WAKEUP, "CPU wakeup interrupts"),
 	S(IPI_TIMER, "Timer broadcast interrupts"),
 	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
 	S(IPI_CALL_FUNC, "Function call interrupts"),
@@ -567,10 +569,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	unsigned int cpu = smp_processor_id();
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
-		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
+	if (ipinr < NR_IPI)
+		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
 
 	switch (ipinr) {
+	case IPI_WAKEUP:
+		break;
+
 	case IPI_TIMER:
 		irq_enter();
 		ipi_timer();