From a16ede35a2659170c855c5d267776666c0630f1f Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 16 Jan 2011 17:59:44 +0000
Subject: ARM: bitops: ensure set/clear/change bitops take a word-aligned pointer

Add additional instructions to our assembly bitops functions to ensure
that they only operate on word-aligned pointers.  This will be necessary
when we switch these operations to use the word-based exclusive
operations.

Signed-off-by: Russell King
---
 arch/arm/lib/bitops.h | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'arch/arm/lib/bitops.h')

diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index d42252918bf..bd00551fb79 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,6 +1,8 @@
 
 #if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
 	.macro	bitop, instr
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
 	and	r3, r0, #7		@ Get bit offset
 	add	r1, r1, r0, lsr #3	@ Get byte offset
@@ -14,6 +16,8 @@
 	.endm
 
 	.macro	testop, instr, store
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	and	r3, r0, #7		@ Get bit offset
 	mov	r2, #1
 	add	r1, r1, r0, lsr #3	@ Get byte offset
@@ -32,6 +36,8 @@
 	.endm
 #else
 	.macro	bitop, instr
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	and	r2, r0, #7
 	mov	r3, #1
 	mov	r3, r3, lsl r2
@@ -52,6 +58,8 @@
  * to avoid dirtying the data cache.
  */
 	.macro	testop, instr, store
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	add	r1, r1, r0, lsr #3
 	and	r3, r0, #7
 	mov	r0, #1
--
cgit v1.2.2
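A minimal C sketch of what the new "ands ip, r1, #3; strneb r1, [ip]" pair asserts, for readers following along: if the pointer's low two bits are set, a byte is stored to address 1..3, which is never a valid kernel mapping, so a misaligned caller faults immediately instead of silently corrupting memory later. The helper name check_word_aligned() and the standalone program are invented for illustration; this is not kernel code.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative C equivalent of the alignment assertion added in the patch
 * above ("ands ip, r1, #3; strneb r1, [ip]").  The real implementation is
 * the assembly in bitops.h; this only shows the intent.
 */
static void check_word_aligned(const void *p)
{
	uintptr_t low = (uintptr_t)p & 3;		/* ands ip, r1, #3 */

	if (low) {
		/*
		 * Store one byte of the pointer value to address 1..3,
		 * which is expected to fault, flagging the misaligned
		 * caller (mirrors "strneb r1, [ip]").
		 */
		*(volatile uint8_t *)low = (uint8_t)(uintptr_t)p;
	}
}

int main(void)
{
	uint32_t word = 0;

	check_word_aligned(&word);		/* aligned: no store, no fault */
	printf("aligned pointer passed the check\n");
	/* check_word_aligned((uint8_t *)&word + 1);  would fault by design */
	return 0;
}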
From 6323f0ccedf756dfe5f46549cec69a2d6d97937b Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 16 Jan 2011 18:02:17 +0000
Subject: ARM: bitops: switch set/clear/change bitops to use ldrex/strex

Switch the set/clear/change bitops to use the word-based exclusive
operations, which are available on a wider range of ARM architectures
than the byte-based exclusive operations.

Tested record:
 - Nicolas Pitre: ext3,rw,le
 - Sourav Poddar: nfs,le
 - Will Deacon: ext3,rw,le
 - Tony Lindgren: ext3+nfs,le

Reviewed-by: Nicolas Pitre
Tested-by: Sourav Poddar
Tested-by: Will Deacon
Tested-by: Tony Lindgren
Signed-off-by: Russell King
---
 arch/arm/lib/bitops.h | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)

(limited to 'arch/arm/lib/bitops.h')

diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index bd00551fb79..a9d9d152a75 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,15 +1,15 @@
-
-#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
+#if __LINUX_ARM_ARCH__ >= 6
 	.macro	bitop, instr
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
-	and	r3, r0, #7		@ Get bit offset
-	add	r1, r1, r0, lsr #3	@ Get byte offset
+	and	r3, r0, #31		@ Get bit offset
+	mov	r0, r0, lsr #5
+	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3
-1:	ldrexb	r2, [r1]
+1:	ldrex	r2, [r1]
 	\instr	r2, r2, r3
-	strexb	r0, r2, [r1]
+	strex	r0, r2, [r1]
 	cmp	r0, #0
 	bne	1b
 	mov	pc, lr
@@ -18,15 +18,16 @@
 	.macro	testop, instr, store
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
-	and	r3, r0, #7		@ Get bit offset
 	mov	r2, #1
-	add	r1, r1, r0, lsr #3	@ Get byte offset
+	and	r3, r0, #31		@ Get bit offset
+	mov	r0, r0, lsr #5
+	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3		@ create mask
 	smp_dmb
-1:	ldrexb	r2, [r1]
+1:	ldrex	r2, [r1]
 	ands	r0, r2, r3		@ save old value of bit
-	\instr	r2, r2, r3		@ toggle bit
-	strexb	ip, r2, [r1]
+	\instr	r2, r2, r3			@ toggle bit
+	strex	ip, r2, [r1]
 	cmp	ip, #0
 	bne	1b
 	smp_dmb
@@ -38,13 +39,14 @@
 	.macro	bitop, instr
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
-	and	r2, r0, #7
+	and	r2, r0, #31
+	mov	r0, r0, lsr #5
 	mov	r3, #1
 	mov	r3, r3, lsl r2
 	save_and_disable_irqs ip
-	ldrb	r2, [r1, r0, lsr #3]
+	ldr	r2, [r1, r0, lsl #2]
 	\instr	r2, r2, r3
-	strb	r2, [r1, r0, lsr #3]
+	str	r2, [r1, r0, lsl #2]
 	restore_irqs ip
 	mov	pc, lr
 	.endm
@@ -60,11 +62,11 @@
 	.macro	testop, instr, store
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
-	add	r1, r1, r0, lsr #3
-	and	r3, r0, #7
-	mov	r0, #1
+	and	r3, r0, #31
+	mov	r0, r0, lsr #5
 	save_and_disable_irqs ip
-	ldrb	r2, [r1]
+	ldr	r2, [r1, r0, lsl #2]!
+	mov	r0, #1
 	tst	r2, r0, lsl r3
 	\instr	r2, r2, r0, lsl r3
 	\store	r2, [r1]
--
cgit v1.2.2

From 3ba6e69ad887f8a814267ed36fd4bfbddf8855a9 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Tue, 8 Feb 2011 12:09:52 +0100
Subject: ARM: 6653/1: bitops: Use BX instead of MOV PC,LR

The kernel doesn't officially need to interwork, but using BX wherever
appropriate will help educate people into good assembler coding habits.

BX is appropriate here because this code is predicated on
__LINUX_ARM_ARCH__ >= 6.

Signed-off-by: Dave Martin
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/lib/bitops.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/lib/bitops.h')

diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index a9d9d152a75..10d868a5a48 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -12,7 +12,7 @@
 	strex	r0, r2, [r1]
 	cmp	r0, #0
 	bne	1b
-	mov	pc, lr
+	bx	lr
 	.endm
 
 	.macro	testop, instr, store
@@ -33,7 +33,7 @@
 	smp_dmb
 	cmp	r0, #0
 	movne	r0, #1
-2:	mov	pc, lr
+2:	bx	lr
 	.endm
 #else
 	.macro	bitop, instr
--
cgit v1.2.2
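For reference, a rough C rendering of the arithmetic the ldrex/strex patch above performs: the bit number is split into a bit offset (nr & 31) and a word offset (nr >> 5), and the word is updated atomically. The GCC/Clang __atomic_fetch_or builtin stands in for the explicit ldrex/strex retry loop; the helper name word_set_bit() is invented here and this is not the kernel's set_bit() implementation.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the word-offset/mask computation used by the word-based
 * bitops ("and r3, r0, #31", "mov r0, r0, lsr #5",
 * "add r1, r1, r0, lsl #2", "mov r3, r2, lsl r3").
 */
static void word_set_bit(unsigned int nr, uint32_t *addr)
{
	uint32_t mask = 1u << (nr & 31);	/* bit offset within the word */
	uint32_t *word = addr + (nr >> 5);	/* word offset into the bitmap */

	/* on ARMv6+, this compiles to an ldrex/strex loop like the asm above */
	__atomic_fetch_or(word, mask, __ATOMIC_RELAXED);
}

int main(void)
{
	uint32_t bitmap[2] = { 0, 0 };

	word_set_bit(3, bitmap);	/* sets bit 3 of word 0 */
	word_set_bit(37, bitmap);	/* sets bit 5 of word 1 */
	printf("%08x %08x\n", (unsigned)bitmap[0], (unsigned)bitmap[1]);
	/* prints: 00000008 00000020 */
	return 0;
}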