From abf015f04614a3a7da43d35b55edc90189af4e6a Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 7 Nov 2011 19:04:36 +0000 Subject: Revert "ARM: 7098/1: kdump: copy kernel relocation code at the kexec prepare stage" This reverts commit 2b034922af2caa19df86f0c502e76cb6f6e910f9. Will Deacon reports: This is causing kexec to fail. The symptoms are that the .init.text section is not loaded as part of the new kernel image, so when we try to do the SMP/UP fixups we hit a whole sea of poison left there by the previous kernel. So my guess is that machine_kexec_prepare *is* too early for preparing the reboot_code_buffer and, unless anybody has a good reason not to, I'd like to revert the patch causing these problems. Reported-by: Will Deacon --- arch/arm/kernel/machine_kexec.c | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index c1b4463dcc83..e59bbd496c39 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -32,24 +32,6 @@ static atomic_t waiting_for_crash_ipi; int machine_kexec_prepare(struct kimage *image) { - unsigned long page_list; - void *reboot_code_buffer; - page_list = image->head & PAGE_MASK; - - reboot_code_buffer = page_address(image->control_code_page); - - /* Prepare parameters for reboot_code_buffer*/ - kexec_start_address = image->start; - kexec_indirection_page = page_list; - kexec_mach_type = machine_arch_type; - kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; - - /* copy our kernel relocation code to the control code page */ - memcpy(reboot_code_buffer, - relocate_new_kernel, relocate_new_kernel_size); - - flush_icache_range((unsigned long) reboot_code_buffer, - (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); return 0; } @@ -100,14 +82,31 @@ void (*kexec_reinit)(void); void machine_kexec(struct kimage *image) { + unsigned long page_list; unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; + + page_list = image->head & PAGE_MASK; + /* we need both effective and real address here */ reboot_code_buffer_phys = page_to_pfn(image->control_code_page) << PAGE_SHIFT; reboot_code_buffer = page_address(image->control_code_page); + /* Prepare parameters for reboot_code_buffer*/ + kexec_start_address = image->start; + kexec_indirection_page = page_list; + kexec_mach_type = machine_arch_type; + kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; + + /* copy our kernel relocation code to the control code page */ + memcpy(reboot_code_buffer, + relocate_new_kernel, relocate_new_kernel_size); + + + flush_icache_range((unsigned long) reboot_code_buffer, + (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); if (kexec_reinit) -- cgit v1.2.2 From 8428e84d42179c2a00f5f6450866e70d802d1d05 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 7 Nov 2011 18:05:53 +0100 Subject: ARM: 7150/1: Allow kernel unaligned accesses on ARMv6+ processors Recent gcc versions generate unaligned accesses by default on ARMv6 and later processors. This patch ensures that the SCTLR.A bit is always cleared on such processors to avoid kernel trapping before alignment_init() is called. 
Signed-off-by: Catalin Marinas Tested-by: John Linn Acked-by: Nicolas Pitre Cc: stable@vger.kernel.org Signed-off-by: Russell King --- arch/arm/kernel/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 566c54c2a1fe..08c82fd844a8 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -360,7 +360,7 @@ __secondary_data: * r13 = *virtual* address to jump to upon completion */ __enable_mmu: -#ifdef CONFIG_ALIGNMENT_TRAP +#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6 orr r0, r0, #CR_A #else bic r0, r0, #CR_A -- cgit v1.2.2 From a34dbfb03f472ad3e063d3b1d1e88f7ef8f88bf1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 11 Nov 2011 11:35:58 +0100 Subject: ARM: 7160/1: setup: avoid overflowing {elf,arch}_name from proc_info_list setup_processor copies the arch_name and elf_name fields out of the selected proc_info_list into two fixed size buffers. Since the proc_info_list structure is defined in a proc_*.S assembly file, this can lead to subtle errors if the strings defined there are too long (for example, corrupting the machine ID). This patch uses snprintf instead of sprintf to ensure that these buffers are not overrun. Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 7e7977ab994f..3448a3f9cc8c 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -461,8 +461,10 @@ static void __init setup_processor(void) cpu_name, read_cpuid_id(), read_cpuid_id() & 15, proc_arch[cpu_architecture()], cr_alignment); - sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); - sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS); + snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", + list->arch_name, ENDIANNESS); + snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", + list->elf_name, ENDIANNESS); elf_hwcap = list->elf_hwcap; #ifndef CONFIG_ARM_THUMB elf_hwcap &= ~HWCAP_THUMB; -- cgit v1.2.2 From aa2bc1ade59003a379ffc485d6da2d92ea3370a6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 9 Nov 2011 17:56:37 +0100 Subject: perf: Don't use -ENOSPC for out of PMU resources People (Linus) objected to using -ENOSPC to signal not having enough resources on the PMU to satisfy the request. Use -EINVAL. 
Requested-by: Linus Torvalds Cc: Stephane Eranian Cc: Will Deacon Cc: Deng-Cheng Zhu Cc: David Daney Cc: Ralf Baechle Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-xv8geaz2zpbjhlx0svmpp28n@git.kernel.org [ merged to newer kernel, fixed up MIPS impact ] Signed-off-by: Ingo Molnar --- arch/arm/kernel/perf_event.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 24e2347be6b1..ff17b17b668e 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -347,15 +347,15 @@ validate_group(struct perf_event *event) memset(&fake_pmu, 0, sizeof(fake_pmu)); if (!validate_event(&fake_pmu, leader)) - return -ENOSPC; + return -EINVAL; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { if (!validate_event(&fake_pmu, sibling)) - return -ENOSPC; + return -EINVAL; } if (!validate_event(&fake_pmu, event)) - return -ENOSPC; + return -EINVAL; return 0; } -- cgit v1.2.2 From ec84d529b689f4ae31813cf74673f3271c36c83a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 14 Nov 2011 16:12:52 +0000 Subject: ARM: PMU: re-export release_pmu symbol to modules Commit b0e89590 ("ARM: PMU: move CPU PMU platform device handling and init into perf") inadvertently removed the EXPORT_SYMBOL_GPL on release_pmu, so out-of-tree modules can no longer play nice with perf, even if they tried in the first place. This patch re-exports the symbol. Reported-by: Jon Medhurst (Tixy) Signed-off-by: Will Deacon --- arch/arm/kernel/pmu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index 2c3407ee8576..2334bf8a650a 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c @@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type) { clear_bit_unlock(type, pmu_lock); } +EXPORT_SYMBOL_GPL(release_pmu); -- cgit v1.2.2 From bce34d14428d35d9a06ddc10cd46ecef311764c9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 17 Nov 2011 15:05:14 +0000 Subject: ARM: perf: initialise used_mask for fake PMU during validation When validating an event group, we call pmu->get_event_idx for each group member in order to check that the group can be scheduled as a unit on an empty PMU. As a result of 3fc2c830 ("ARM: perf: remove event limit from pmu_hw_events"), the used_mask member of struct cpu_hw_events must be set up explicitly, something which we don't do for the fake cpu_hw_events used for validation. This patch sets up an empty used_mask for the fake validation cpu_hw_events, preventing NULL dereferences when trying to get the event index. Reported-by: Pawel Moll Signed-off-by: Will Deacon --- arch/arm/kernel/perf_event.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 24e2347be6b1..e508066d3d64 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -343,8 +343,14 @@ validate_group(struct perf_event *event) { struct perf_event *sibling, *leader = event->group_leader; struct pmu_hw_events fake_pmu; + DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); - memset(&fake_pmu, 0, sizeof(fake_pmu)); + /* + * Initialise the fake PMU. We only need to populate the + * used_mask for the purposes of validation. 
+ */ + memset(fake_used_mask, 0, sizeof(fake_used_mask)); + fake_pmu.used_mask = fake_used_mask; if (!validate_event(&fake_pmu, leader)) return -ENOSPC; -- cgit v1.2.2 From e5489847d6fc0ff176048b6e1cf5034507bf703a Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 17 Nov 2011 16:58:00 +0000 Subject: ARM: wire up process_vm_writev and process_vm_readv syscalls These two syscalls were introduced during the last merge window. Add the entries into the ARM call tables for them. Signed-off-by: Russell King --- arch/arm/kernel/calls.S | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 9943e9e74a1b..463ff4a0ec8a 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -385,6 +385,8 @@ CALL(sys_syncfs) CALL(sys_sendmmsg) /* 375 */ CALL(sys_setns) + CALL(sys_process_vm_readv) + CALL(sys_process_vm_writev) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted -- cgit v1.2.2 From 11ed0ba1754841316d4095478944300acf19acc3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 14 Nov 2011 17:24:58 +0100 Subject: ARM: 7161/1: errata: no automatic store buffer drain This patch implements a workaround for PL310 erratum 769419. On revisions of the PL310 prior to r3p2, the Store Buffer does not automatically drain. This can cause normal, non-cacheable writes to be retained when the memory system is idle, leading to suboptimal I/O performance for drivers using coherent DMA. This patch adds an optional wmb() call to the cpu_idle loop. On systems with an outer cache, this causes an explicit flush of the store buffer. Cc: stable@vger.kernel.org Acked-by: Catalin Marinas Tested-by: Marc Zyngier Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/process.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 75316f0dd02a..3d0c6fb74ae4 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -192,6 +192,9 @@ void cpu_idle(void) #endif local_irq_disable(); +#ifdef CONFIG_PL310_ERRATA_769419 + wmb(); +#endif if (hlt_counter) { local_irq_enable(); cpu_relax(); -- cgit v1.2.2 From e5a21327644adba32816f74a415114d11c57f2e9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 22 Nov 2011 18:01:46 +0000 Subject: ARM: perf: check that we have a platform device when reserving PMU Attempting to use a hardware counter on a platform with a supported PMU but where the platform_device (defining the interrupts) has not been registered results in a NULL pointer dereference. This patch fixes the problem by checking that we actually have a platform device registered before attempting to grab the interrupts. 
Reported-by: Pawel Moll Signed-off-by: Will Deacon --- arch/arm/kernel/perf_event.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index e508066d3d64..c475379199b1 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -402,6 +402,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) int i, err, irq, irqs; struct platform_device *pmu_device = armpmu->plat_device; + if (!pmu_device) + return -ENODEV; + err = reserve_pmu(armpmu->type); if (err) { pr_warning("unable to reserve pmu\n"); -- cgit v1.2.2 From c89cefed35c8c5f53f36757f8a6768e390394ee3 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 22 Nov 2011 23:42:12 +0100 Subject: ARM: 7170/2: fix compilation breakage in entry-armv.S Fix compilation failure, when Thumb support is not enabled: arch/arm/kernel/entry-armv.S: Assembler messages: arch/arm/kernel/entry-armv.S:501: Error: backward ref to unknown label "2:" arch/arm/kernel/entry-armv.S:502: Error: backward ref to unknown label "3:" make[2]: *** [arch/arm/kernel/entry-armv.o] Error 1 Signed-off-by: Guennadi Liakhovetski Reviewed-by: Dave Martin Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 9ad50c4208ae..b145f16c91bc 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -497,7 +497,7 @@ ENDPROC(__und_usr) .popsection .pushsection __ex_table,"a" .long 1b, 4b -#if __LINUX_ARM_ARCH__ >= 7 +#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 .long 2b, 4b .long 3b, 4b #endif -- cgit v1.2.2 From 46589e2922509f9134ce79fa75180886c9765c58 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Thu, 24 Nov 2011 13:01:08 +0100 Subject: ARM: 7174/1: Fix build error in kprobes test code on Thumb2 kernels When compiling kprobes-test-thumb.c an error like below may occur: /tmp/ccKcuJcG.s:19179: Error: offset out of range This is caused by the compiler underestimating the size of the inline assembler instructions containing ".space 0x1000" and failing to spill the literal pool in time to prevent the generation of PC relative load instruction with invalid offsets. The fix implemented by this patch is to replace a single large .space directive by a number of 4 byte .space's. This requires splitting the macros which generate test cases for branch instructions into two forms: one with, and one without support for inserting extra code between branch and target. 
Acked-by: Nicolas Pitre Signed-off-by: Jon Medhurst Signed-off-by: Russell King --- arch/arm/kernel/kprobes-test-thumb.c | 16 +++--- arch/arm/kernel/kprobes-test.h | 100 ++++++++++++++++++++++++----------- 2 files changed, 78 insertions(+), 38 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/kernel/kprobes-test-thumb.c index 5e726c31c45a..5d8b85792222 100644 --- a/arch/arm/kernel/kprobes-test-thumb.c +++ b/arch/arm/kernel/kprobes-test-thumb.c @@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void) DONT_TEST_IN_ITBLOCK( TEST_BF_R( "cbnz r",0,0, ", 2f") TEST_BF_R( "cbz r",2,-1,", 2f") - TEST_BF_RX( "cbnz r",4,1, ", 2f",0x20) - TEST_BF_RX( "cbz r",7,0, ", 2f",0x40) + TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20) + TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40) ) TEST_R("sxth r0, r",7, HH1,"") TEST_R("sxth r7, r",0, HH2,"") @@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK( TESTCASE_START(code) \ TEST_ARG_PTR(13, offset) \ TEST_ARG_END("") \ - TEST_BRANCH_F(code,0) \ + TEST_BRANCH_F(code) \ TESTCASE_END TEST("push {r0}") @@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8, TEST_BF( "b 2f") TEST_BB( "b 2b") - TEST_BF_X("b 2f", 0x400) - TEST_BB_X("b 2b", 0x400) + TEST_BF_X("b 2f", SPACE_0x400) + TEST_BB_X("b 2b", SPACE_0x400) TEST_GROUP("Testing instructions in IT blocks") @@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22, TEST_BB("bne.w 2b") TEST_BF("bgt.w 2f") TEST_BB("blt.w 2b") - TEST_BF_X("bpl.w 2f",0x1000) + TEST_BF_X("bpl.w 2f", SPACE_0x1000) ) TEST_UNSUPPORTED("msr cpsr, r0") @@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22, TEST_BF( "b.w 2f") TEST_BB( "b.w 2b") - TEST_BF_X("b.w 2f", 0x1000) + TEST_BF_X("b.w 2f", SPACE_0x1000) TEST_BF( "bl.w 2f") TEST_BB( "bl.w 2b") - TEST_BB_X("bl.w 2b", 0x1000) + TEST_BB_X("bl.w 2b", SPACE_0x1000) TEST_X( "blx __dummy_arm_subroutine", ".arm \n\t" diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h index 0dc5d77b9356..e28a869b1ae4 100644 --- a/arch/arm/kernel/kprobes-test.h +++ b/arch/arm/kernel/kprobes-test.h @@ -149,23 +149,31 @@ struct test_arg_end { "1: "instruction" \n\t" \ " nop \n\t" -#define TEST_BRANCH_F(instruction, xtra_dist) \ +#define TEST_BRANCH_F(instruction) \ TEST_INSTRUCTION(instruction) \ - ".if "#xtra_dist" \n\t" \ " b 99f \n\t" \ - ".space "#xtra_dist" \n\t" \ - ".endif \n\t" \ + "2: nop \n\t" + +#define TEST_BRANCH_B(instruction) \ + " b 50f \n\t" \ + " b 99f \n\t" \ + "2: nop \n\t" \ + " b 99f \n\t" \ + TEST_INSTRUCTION(instruction) + +#define TEST_BRANCH_FX(instruction, codex) \ + TEST_INSTRUCTION(instruction) \ + " b 99f \n\t" \ + codex" \n\t" \ " b 99f \n\t" \ "2: nop \n\t" -#define TEST_BRANCH_B(instruction, xtra_dist) \ +#define TEST_BRANCH_BX(instruction, codex) \ " b 50f \n\t" \ " b 99f \n\t" \ "2: nop \n\t" \ " b 99f \n\t" \ - ".if "#xtra_dist" \n\t" \ - ".space "#xtra_dist" \n\t" \ - ".endif \n\t" \ + codex" \n\t" \ TEST_INSTRUCTION(instruction) #define TESTCASE_END \ @@ -301,47 +309,60 @@ struct test_arg_end { TESTCASE_START(code1 #reg1 code2) \ TEST_ARG_PTR(reg1, val1) \ TEST_ARG_END("") \ - TEST_BRANCH_F(code1 #reg1 code2, 0) \ + TEST_BRANCH_F(code1 #reg1 code2) \ TESTCASE_END -#define TEST_BF_X(code, xtra_dist) \ +#define TEST_BF(code) \ TESTCASE_START(code) \ TEST_ARG_END("") \ - TEST_BRANCH_F(code, xtra_dist) \ + TEST_BRANCH_F(code) \ TESTCASE_END -#define TEST_BB_X(code, xtra_dist) \ +#define TEST_BB(code) \ TESTCASE_START(code) \ TEST_ARG_END("") \ - TEST_BRANCH_B(code, xtra_dist) \ + TEST_BRANCH_B(code) \ TESTCASE_END -#define TEST_BF_RX(code1, reg, val, 
code2, xtra_dist) \ - TESTCASE_START(code1 #reg code2) \ - TEST_ARG_REG(reg, val) \ - TEST_ARG_END("") \ - TEST_BRANCH_F(code1 #reg code2, xtra_dist) \ +#define TEST_BF_R(code1, reg, val, code2) \ + TESTCASE_START(code1 #reg code2) \ + TEST_ARG_REG(reg, val) \ + TEST_ARG_END("") \ + TEST_BRANCH_F(code1 #reg code2) \ TESTCASE_END -#define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \ - TESTCASE_START(code1 #reg code2) \ - TEST_ARG_REG(reg, val) \ - TEST_ARG_END("") \ - TEST_BRANCH_B(code1 #reg code2, xtra_dist) \ +#define TEST_BB_R(code1, reg, val, code2) \ + TESTCASE_START(code1 #reg code2) \ + TEST_ARG_REG(reg, val) \ + TEST_ARG_END("") \ + TEST_BRANCH_B(code1 #reg code2) \ TESTCASE_END -#define TEST_BF(code) TEST_BF_X(code, 0) -#define TEST_BB(code) TEST_BB_X(code, 0) - -#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0) -#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0) - #define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \ TESTCASE_START(code1 #reg1 code2 #reg2 code3) \ TEST_ARG_REG(reg1, val1) \ TEST_ARG_REG(reg2, val2) \ TEST_ARG_END("") \ - TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \ + TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3) \ + TESTCASE_END + +#define TEST_BF_X(code, codex) \ + TESTCASE_START(code) \ + TEST_ARG_END("") \ + TEST_BRANCH_FX(code, codex) \ + TESTCASE_END + +#define TEST_BB_X(code, codex) \ + TESTCASE_START(code) \ + TEST_ARG_END("") \ + TEST_BRANCH_BX(code, codex) \ + TESTCASE_END + +#define TEST_BF_RX(code1, reg, val, code2, codex) \ + TESTCASE_START(code1 #reg code2) \ + TEST_ARG_REG(reg, val) \ + TEST_ARG_END("") \ + TEST_BRANCH_FX(code1 #reg code2, codex) \ TESTCASE_END #define TEST_X(code, codex) \ @@ -372,6 +393,25 @@ struct test_arg_end { TESTCASE_END +/* + * Macros for defining space directives spread over multiple lines. + * These are required so the compiler guesses better the length of inline asm + * code and will spill the literal pool early enough to avoid generating PC + * relative loads with out of range offsets. + */ +#define TWICE(x) x x +#define SPACE_0x8 TWICE(".space 4\n\t") +#define SPACE_0x10 TWICE(SPACE_0x8) +#define SPACE_0x20 TWICE(SPACE_0x10) +#define SPACE_0x40 TWICE(SPACE_0x20) +#define SPACE_0x80 TWICE(SPACE_0x40) +#define SPACE_0x100 TWICE(SPACE_0x80) +#define SPACE_0x200 TWICE(SPACE_0x100) +#define SPACE_0x400 TWICE(SPACE_0x200) +#define SPACE_0x800 TWICE(SPACE_0x400) +#define SPACE_0x1000 TWICE(SPACE_0x800) + + /* Various values used in test cases... */ #define N(val) (val ^ 0xffffffff) #define VAL1 0x12345678 -- cgit v1.2.2 From 14383c295ab48178c449336f5d74e9e615e36723 Mon Sep 17 00:00:00 2001 From: "Jon Medhurst (Tixy)" Date: Tue, 29 Nov 2011 08:14:35 +0100 Subject: ARM: 7180/1: Change kprobes testcase with unpredictable STRD instruction There is a kprobes testcase for the instruction "strd r2, [r3], r4". This has unpredictable behaviour as it uses r3 for register writeback addressing and also stores it to memory. On a cortex A9, this testcase would fail because the instruction writes the updated value of r3 to memory, whereas the kprobes emulation code writes the original value. Fix this by changing testcase to used r5 instead of r3. 
Reported-by: Leif Lindholm Tested-by: Leif Lindholm Acked-by: Nicolas Pitre Signed-off-by: Jon Medhurst Signed-off-by: Russell King --- arch/arm/kernel/kprobes-test-arm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c index fc82de8bdcce..edf9ad8d3c6b 100644 --- a/arch/arm/kernel/kprobes-test-arm.c +++ b/arch/arm/kernel/kprobes-test-arm.c @@ -550,7 +550,7 @@ void kprobe_arm_test_cases(void) TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]") TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!") - TEST_RPR( "strd r",2, VAL1,", [r",3, 24,"], r",4,48,"") + TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"") TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"") TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!") -- cgit v1.2.2 From b5bed7fe801d1460424b7aeb6b06464e23d2a1e6 Mon Sep 17 00:00:00 2001 From: "Jon Medhurst (Tixy)" Date: Tue, 29 Nov 2011 08:16:02 +0100 Subject: ARM: 7181/1: Restrict kprobes probing SWP instructions to ARMv5 and below The SWP instruction is deprecated on ARMv6 and with ARMv7 it will be UNDEFINED when CONFIG_SWP_EMULATE is selected. In this case, probing a SWP instruction will cause an oops when the kprobes emulation code executes an undefined instruction. As the SWP instruction should be rare or non-existent in kernels for ARMv6 and later, we can simply avoid these problems by not allowing probing of these. Reported-by: Leif Lindholm Tested-by: Leif Lindholm Acked-by: Nicolas Pitre Signed-off-by: Jon Medhurst Signed-off-by: Russell King --- arch/arm/kernel/kprobes-arm.c | 4 +++- arch/arm/kernel/kprobes-test-arm.c | 25 ++++++++++++++++--------- 2 files changed, 19 insertions(+), 10 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c index 9fe8910308af..8a30c89da70e 100644 --- a/arch/arm/kernel/kprobes-arm.c +++ b/arch/arm/kernel/kprobes-arm.c @@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = { static const union decode_item arm_cccc_0001_____1001_table[] = { /* Synchronization primitives */ +#if __LINUX_ARM_ARCH__ < 6 + /* Deprecated on ARMv6 and may be UNDEFINED on v7 */ /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */ DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc, REGS(NOPC, NOPC, 0, 0, NOPC)), - +#endif /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */ /* And unallocated instructions... */ DECODE_END diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c index edf9ad8d3c6b..ba32b393b3f0 100644 --- a/arch/arm/kernel/kprobes-test-arm.c +++ b/arch/arm/kernel/kprobes-test-arm.c @@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Synchronization primitives") - /* - * Use hard coded constants for SWP instructions to avoid warnings - * about deprecated instructions. 
- */ - TEST_RP( ".word 0xe108e097 @ swp lr, r",7,VAL2,", [r",8,0,"]") - TEST_R( ".word 0x610d0091 @ swpvs r0, r",1,VAL1,", [sp]") - TEST_RP( ".word 0xe10cd09e @ swp sp, r",14,VAL2,", [r",12,13*4,"]") +#if __LINUX_ARM_ARCH__ < 6 + TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]") + TEST_R( "swpvs r0, r",1,VAL1,", [sp]") + TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]") +#else + TEST_UNSUPPORTED(".word 0xe108e097 @ swp lr, r7, [r8]") + TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs r0, r1, [sp]") + TEST_UNSUPPORTED(".word 0xe10cd09e @ swp sp, r14 [r12]") +#endif TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]") TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]") TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]") - TEST_RP( ".word 0xe148e097 @ swpb lr, r",7,VAL2,", [r",8,0,"]") - TEST_R( ".word 0x614d0091 @ swpvsb r0, r",1,VAL1,", [sp]") +#if __LINUX_ARM_ARCH__ < 6 + TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]") + TEST_R( "swpvsb r0, r",1,VAL1,", [sp]") +#else + TEST_UNSUPPORTED(".word 0xe148e097 @ swpb lr, r7, [r8]") + TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb r0, r1, [sp]") +#endif TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]") TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */ -- cgit v1.2.2 From 4cbd6b167f9ed756ced970e0a95538f60ae3b9ab Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Tue, 29 Nov 2011 15:50:20 +0100 Subject: ARM: 7182/1: ARM cpu topology: fix warning kernel/sched.c:7354:2: warning: initialization from incompatible pointer type Align cpu_coregroup_mask prototype interface with sched_domain_mask_f typedef use int cpu instead of unsigned int cpu Cc: Signed-off-by: Vincent Guittot Signed-off-by: Russell King --- arch/arm/kernel/topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 1040c00405d0..8200deaa14f6 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -43,7 +43,7 @@ struct cputopo_arm cpu_topology[NR_CPUS]; -const struct cpumask *cpu_coregroup_mask(unsigned int cpu) +const struct cpumask *cpu_coregroup_mask(int cpu) { return &cpu_topology[cpu].core_sibling; } -- cgit v1.2.2