path: root/arch/mips/kernel/kspd.c
author	Julia Lawall <julia@diku.dk>	2010-08-05 16:17:22 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2010-10-04 13:33:54 -0400
commit	26deda5ceedbe28df4beb3b98e3fbce281b53a07 (patch)
tree	663d766d5d848fb56b08b5ea625f2722ba50543e /arch/mips/kernel/kspd.c
parent	eefc3f329d93404bfe1285d5b2f4380fede42e89 (diff)
MIPS: kspd: Adjust confusing if indentation
Indent the branch of an if.

The semantic match that finds this problem is as follows:
(http://coccinelle.lip6.fr/)

// <smpl>
@r disable braces4@
position p1,p2;
statement S1,S2;
@@

(
if (...) { ... }
|
if (...) S1@p1
S2@p2
)

@script:python@
p1 << r.p1;
p2 << r.p2;
@@

if (p1[0].column == p2[0].column):
  cocci.print_main("branch",p1)
  cocci.print_secs("after",p2)
// </smpl>

Signed-off-by: Julia Lawall <julia@diku.dk>
To: linux-mips@linux-mips.org
To: linux-kernel@vger.kernel.org
To: kernel-janitors@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/1539/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
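For readers unfamiliar with the pattern, the rule above flags an if whose un-braced branch sits at the same column as the statement that follows it, so the layout no longer reflects what is actually conditional. The sketch below is a minimal, self-contained illustration of the "confusing" and "fixed" shapes; the variable and messages are made up for illustration and are not taken from kspd.c.

#include <stdio.h>

int main(void)
{
	int ret = -1;

	/* Confusing layout (what the semantic match reports): the branch
	 * "ret = 0;" is at the same column as the printf() after it, so a
	 * reader cannot tell at a glance that only the assignment is
	 * conditional. */
	if (ret < 0)
	ret = 0;
	printf("after confusing form: ret = %d\n", ret);

	/* Fixed layout (what this commit does for kspd.c): the branch is
	 * indented one level deeper than the statement that follows it. */
	ret = -1;
	if (ret < 0)
		ret = 0;
	printf("after fixed form: ret = %d\n", ret);

	return 0;
}

A semantic patch like the one in the commit message can be applied over a tree with Coccinelle's spatch, for example: spatch --sp-file if-indent.cocci --dir arch/mips/kernel (the .cocci file name here is hypothetical, and option spelling varies slightly between Coccinelle versions).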
Diffstat (limited to 'arch/mips/kernel/kspd.c')
-rw-r--r--	arch/mips/kernel/kspd.c	2
1 file changed, 1 insertion, 1 deletion
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index 80e2ba694bab..29811f043399 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -251,7 +251,7 @@ void sp_work_handle_request(void)
 		memset(&tz, 0, sizeof(tz));
 		if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
 			(int)&tz, 0, 0)) == 0)
-		ret.retval = tv.tv_sec;
+			ret.retval = tv.tv_sec;
 		break;
 
 	case MTSP_SYSCALL_EXIT:
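The applied fix keeps the un-braced form and simply indents the branch. As a purely illustrative alternative (a sketch, not part of this patch), braces would make the scope equally unambiguous; the fragment below reuses the identifiers from the hunk above and is not a standalone program.

		if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
				(int)&tz, 0, 0)) == 0) {
			/* Only this assignment is conditional; the braces make
			 * that explicit, and break always executes. */
			ret.retval = tv.tv_sec;
		}
		break;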