 include/asm-x86/sync_bitops.h | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index cbce08a2d135..6b775c905666 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -23,10 +23,6 @@
  * This function is atomic and may not be reordered. See __set_bit()
  * if you do not require the atomic guarantees.
  *
- * Note: there are no guarantees that this function will not be reordered
- * on non-x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
@@ -61,8 +57,7 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
  * @nr: Bit to change
  * @addr: Address to start counting from
  *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
+ * sync_change_bit() is atomic and may not be reordered.
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
@@ -80,7 +75,6 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
  * It also implies a memory barrier.
  */
 static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
@@ -99,7 +93,6 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
  * @addr: Address to count from
  *
  * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
  * It also implies a memory barrier.
  */
 static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
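
For context, a minimal usage sketch of one of the operations documented above (not part of this patch): sync_test_and_set_bit() atomically sets bit @nr in the bitmap at @addr, returns the bit's previous value, and implies a memory barrier. The sync_ variants are x86-specific and keep the LOCK prefix even on uniprocessor builds, which is the usual reason to reach for them when a flag word is shared with another agent such as a hypervisor. The names shared_flags and claim_slot below are hypothetical, invented only for illustration.

    /*
     * Hedged sketch, assuming a caller built against this header.
     * 'shared_flags' and 'claim_slot' are hypothetical names, not kernel API.
     */
    #include <asm/sync_bitops.h>

    /* Hypothetical bitmap shared with another agent (e.g. a hypervisor). */
    static unsigned long shared_flags;

    static int claim_slot(int slot)
    {
            /*
             * Atomically set the bit and return its old value:
             * 0 means we claimed the slot, non-zero means it was already taken.
             */
            return sync_test_and_set_bit(slot, &shared_flags);
    }
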