 arch/arm/kernel/entry-header.S | 13 +++++++------
 arch/arm/kernel/perf_event.c   | 10 ++++++++--
 arch/arm/kernel/setup.c        |  7 +++++--
 3 files changed, 20 insertions(+), 10 deletions(-)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 4176df721bf0..1a0045abead7 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -253,21 +253,22 @@
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
-	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	mov	r2, sp
+	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	strex	r1, r2, [r2]			@ clear the exclusive monitor
 #endif
 	.if	\fast
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
 	.else
-	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
 	.endif
 	mov	r0, r0				@ ARMv5T and earlier require a nop
 						@ after ldm {}^
-	add	sp, sp, #S_FRAME_SIZE - S_PC
+	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
 
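This hunk restores the user registers through r2 instead of sp: sp is no longer written back mid-restore by the ldr, and the strex that works around Cortex-A15 erratum #830321 now scribbles on [r2] inside the frame rather than on [sp]. The final stack adjustment is equivalent in both versions. A minimal sketch (plain userspace C, not kernel code) checking that equivalence, with S_PC and S_FRAME_SIZE assumed from the 18-word ARM pt_regs layout:

/* Hypothetical check, not kernel code: both the old and the new
 * sequence must leave sp exactly one exception frame above where
 * it started, for any \offset. */
#include <assert.h>

#define S_PC		60	/* assumed: offsetof(struct pt_regs, ARM_pc) */
#define S_FRAME_SIZE	72	/* assumed: sizeof(struct pt_regs) */

/* old: ldr lr, [sp, #offset + S_PC]! ... add sp, sp, #S_FRAME_SIZE - S_PC */
static unsigned long old_final_sp(unsigned long sp, unsigned long offset)
{
	sp += offset + S_PC;		/* writeback from the ldr */
	return sp + S_FRAME_SIZE - S_PC;
}

/* new: sp untouched until add sp, sp, #offset + S_FRAME_SIZE */
static unsigned long new_final_sp(unsigned long sp, unsigned long offset)
{
	return sp + offset + S_FRAME_SIZE;
}

int main(void)
{
	for (unsigned long off = 0; off <= 16; off += 4)
		assert(old_final_sp(0x1000, off) == new_final_sp(0x1000, off));
	return 0;
}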
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f7c65adaa428..557e128e4df0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -116,8 +116,14 @@ int armpmu_event_set_period(struct perf_event *event)
 		ret = 1;
 	}
 
-	if (left > (s64)armpmu->max_period)
-		left = armpmu->max_period;
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (armpmu->max_period >> 1))
+		left = armpmu->max_period >> 1;
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
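The new comment explains the halved cap: the counter is programmed to -left and overflows after left events, and a late overflow interrupt lets it keep counting past zero. Capping left at max_period >> 1 leaves at least half the counter range of headroom before the running value could overtake the one about to be programmed. A rough userspace sketch of the arithmetic, assuming a 32-bit PMU counter:

/* Illustrative arithmetic only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_period = UINT32_MAX;	/* assumed 32-bit counter */
	uint64_t left = max_period;		/* worst-case requested period */

	if (left > (max_period >> 1))
		left = max_period >> 1;		/* the cap from this hunk */

	uint32_t start = (uint32_t)-left;	/* programmed value, as in (u64)-left */
	uint64_t headroom = max_period - left;	/* events tolerated before wraparound */

	printf("start=%#x period=%llu headroom=%llu\n",
	       (unsigned)start, (unsigned long long)left,
	       (unsigned long long)headroom);
	return 0;
}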
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 715ae19bc7c8..e55408e96559 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -657,10 +657,13 @@ int __init arm_add_memory(u64 start, u64 size)
 
 	/*
 	 * Ensure that start/size are aligned to a page boundary.
-	 * Size is appropriately rounded down, start is rounded up.
+	 * Size is rounded down, start is rounded up.
 	 */
-	size -= start & ~PAGE_MASK;
 	aligned_start = PAGE_ALIGN(start);
+	if (aligned_start > start + size)
+		size = 0;
+	else
+		size -= aligned_start - start;
 
 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
 	if (aligned_start > ULONG_MAX) {
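This hunk fixes the page rounding in arm_add_memory(): the old code subtracted start's misalignment (start & ~PAGE_MASK) rather than the distance to the rounded-up start, and with an unsigned size the subtraction could wrap a region smaller than the rounding distance into a huge one. The new code subtracts the rounded-off head and clamps to zero when rounding swallows the whole region. A small standalone sketch of the two cases (PAGE_ALIGN re-derived here from a 4 KiB page size for illustration):

/* Standalone illustration, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static uint64_t trim(uint64_t start, uint64_t size)
{
	uint64_t aligned_start = PAGE_ALIGN(start);

	if (aligned_start > start + size)
		size = 0;			/* region vanishes after rounding */
	else
		size -= aligned_start - start;	/* shrink by the rounded-off head */
	return size;
}

int main(void)
{
	/* entirely inside one page: must clamp to 0, not wrap */
	printf("size=%#llx\n", (unsigned long long)trim(0x1234, 0x100));
	/* spans several pages: loses only the unaligned head */
	printf("size=%#llx\n", (unsigned long long)trim(0x1234, 0x3000));
	return 0;
}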