path: root/arch/powerpc/include/asm/bitops.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-18 12:58:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-18 12:58:09 -0500
commit	16e024f30ce96ef5fa651e2914e19d175a924cab (patch)
tree	d68106151a0b36e22625d7af7b23081a48c92e87 /arch/powerpc/include/asm/bitops.h
parent	c36e0501ee91d7616a188efbf9714b1fce150032 (diff)
parent	376bddd34433065aeb9b9a140870537feecf90ef (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc update from Benjamin Herrenschmidt:
 "The main highlight is probably some base POWER8 support. There's more
  to come such as transactional memory support but that will wait for
  the next one. Overall it's pretty quiet, or rather I've been pretty
  poor at picking things up from patchwork and reviewing them this time
  around and Kumar no better on the FSL side it seems..."

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (73 commits)
  powerpc+of: Rename and fix OF reconfig notifier error inject module
  powerpc: mpc5200: Add a3m071 board support
  powerpc/512x: don't compile any platform DIU code if the DIU is not enabled
  powerpc/mpc52xx: use module_platform_driver macro
  powerpc+of: Export of_reconfig_notifier_[register,unregister]
  powerpc/dma/raidengine: add raidengine device
  powerpc/iommu/fsl: Add PAMU bypass enable register to ccsr_guts struct
  powerpc/mpc85xx: Change spin table to cached memory
  powerpc/fsl-pci: Add PCI controller ATMU PM support
  powerpc/86xx: fsl_pcibios_fixup_bus requires CONFIG_PCI
  drivers/virt: the Freescale hypervisor driver doesn't need to check MSR[GS]
  powerpc/85xx: p1022ds: Use NULL instead of 0 for pointers
  powerpc: Disable relocation on exceptions when kexecing
  powerpc: Enable relocation on during exceptions at boot
  powerpc: Move get_longbusy_msecs into hvcall.h and remove duplicate function
  powerpc: Add wrappers to enable/disable relocation on exceptions
  powerpc: Add set_mode hcall
  powerpc: Setup relocation on exceptions for bare metal systems
  powerpc: Move initial mfspr LPCR out of __init_LPCR
  powerpc: Add relocation on exception vector handlers
  ...
Diffstat (limited to 'arch/powerpc/include/asm/bitops.h')
-rw-r--r--	arch/powerpc/include/asm/bitops.h	75
1 file changed, 10 insertions, 65 deletions
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index dc2cf9c6d9e6..ef918a2328bb 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -52,8 +52,6 @@
 #define smp_mb__before_clear_bit()	smp_mb()
 #define smp_mb__after_clear_bit()	smp_mb()
 
-#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
-#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
 #define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
 
 /* Macro for generating the ***_bits() functions */
@@ -83,22 +81,22 @@ DEFINE_BITOP(change_bits, xor, "", "")
 
 static __inline__ void set_bit(int nr, volatile unsigned long *addr)
 {
-	set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+	set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
 
 static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
 {
-	clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+	clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
 
 static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
 {
-	clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+	clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
 
 static __inline__ void change_bit(int nr, volatile unsigned long *addr)
 {
-	change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+	change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
 }
 
 /* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
@@ -136,26 +134,26 @@ DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
 static __inline__ int test_and_set_bit(unsigned long nr,
 					volatile unsigned long *addr)
 {
-	return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+	return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
 static __inline__ int test_and_set_bit_lock(unsigned long nr,
 					volatile unsigned long *addr)
 {
-	return test_and_set_bits_lock(BITOP_MASK(nr),
-				addr + BITOP_WORD(nr)) != 0;
+	return test_and_set_bits_lock(BIT_MASK(nr),
+				addr + BIT_WORD(nr)) != 0;
 }
 
 static __inline__ int test_and_clear_bit(unsigned long nr,
 					volatile unsigned long *addr)
 {
-	return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+	return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
 static __inline__ int test_and_change_bit(unsigned long nr,
 					volatile unsigned long *addr)
 {
-	return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+	return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
 }
 
 #include <asm-generic/bitops/non-atomic.h>
@@ -280,61 +278,8 @@ unsigned long __arch_hweight64(__u64 w);
 #include <asm-generic/bitops/find.h>
 
 /* Little-endian versions */
+#include <asm-generic/bitops/le.h>
 
-static __inline__ int test_bit_le(unsigned long nr,
-			__const__ void *addr)
-{
-	__const__ unsigned char *tmp = (__const__ unsigned char *) addr;
-	return (tmp[nr >> 3] >> (nr & 7)) & 1;
-}
-
-static inline void set_bit_le(int nr, void *addr)
-{
-	set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline void clear_bit_le(int nr, void *addr)
-{
-	clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline void __set_bit_le(int nr, void *addr)
-{
-	__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
-	__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int test_and_set_bit_le(int nr, void *addr)
-{
-	return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int test_and_clear_bit_le(int nr, void *addr)
-{
-	return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, void *addr)
-{
-	return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int __test_and_clear_bit_le(int nr, void *addr)
-{
-	return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-#define find_first_zero_bit_le(addr, size) \
-	find_next_zero_bit_le((addr), (size), 0)
-unsigned long find_next_zero_bit_le(const void *addr,
-				    unsigned long size, unsigned long offset);
-
-unsigned long find_next_bit_le(const void *addr,
-				    unsigned long size, unsigned long offset);
 /* Bitmap functions for the ext2 filesystem */
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
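
Note on the change: the patch is mechanical because the powerpc-private BITOP_MASK()/BITOP_WORD() macros it deletes compute exactly the same values as the generic BIT_MASK()/BIT_WORD() helpers from <linux/bitops.h>, and the open-coded *_le() accessors it removes are the same byte-swizzled operations now pulled in from <asm-generic/bitops/le.h>. The user-space sketch below is illustrative only (it is not part of the patch and simply restates the macros on both sides of the change, assuming BITS_PER_LONG is 64 as on ppc64):

/*
 * Standalone user-space sketch, not kernel code: shows that the removed
 * powerpc macros and the generic replacements are textually identical,
 * and what the retained BITOP_LE_SWIZZLE does on a 64-bit big-endian long.
 */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG		64

/* Removed powerpc-private helpers (from the '-' lines above) */
#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)

/* Generic helpers from include/linux/bitops.h used by the '+' lines */
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)

/* Kept by the patch: maps a little-endian bit number onto a big-endian
 * long by flipping the byte index within the word. */
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG - 1) & ~0x7)

int main(void)
{
	unsigned long nr;

	for (nr = 0; nr < 4 * BITS_PER_LONG; nr++) {
		/* Every set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr))
		 * call therefore maps one-to-one onto
		 * set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)). */
		assert(BITOP_MASK(nr) == BIT_MASK(nr));
		assert(BITOP_WORD(nr) == BIT_WORD(nr));
	}

	/* LE bit 0 sits in the most significant byte of a BE 64-bit long */
	printf("LE bit 0 -> native bit %d\n", 0 ^ BITOP_LE_SWIZZLE);	/* 56 */
	printf("LE bit 8 -> native bit %d\n", 8 ^ BITOP_LE_SWIZZLE);	/* 48 */
	return 0;
}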