Diffstat (limited to 'include/asm-generic')
 include/asm-generic/atomic.h        | 39
 include/asm-generic/bitops/find.h   | 39
 include/asm-generic/cmpxchg-local.h |  1
 include/asm-generic/fcntl.h         |  2
 include/asm-generic/hardirq.h       |  2
 include/asm-generic/io.h            | 26
 include/asm-generic/irqflags.h      | 52
 include/asm-generic/kdebug.h        |  2
 include/asm-generic/percpu.h        | 14
 include/asm-generic/pgtable.h       |  4
 include/asm-generic/system.h        | 20
 include/asm-generic/vmlinux.lds.h   | 14
 12 files changed, 138 insertions(+), 77 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index e53347fbf1da..e994197f84b7 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -43,6 +43,7 @@
  */
 #define atomic_set(v, i) (((v)->counter) = (i))
 
+#include <linux/irqflags.h>
 #include <asm/system.h>
 
 /**
@@ -57,7 +58,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
         unsigned long flags;
         int temp;
 
-        raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
+        raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
         temp = v->counter;
         temp += i;
         v->counter = temp;
@@ -78,7 +79,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
         unsigned long flags;
         int temp;
 
-        raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
+        raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
         temp = v->counter;
         temp -= i;
         v->counter = temp;
@@ -119,14 +120,23 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+
+#define cmpxchg_local(ptr, o, n) \
+        ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+                        (unsigned long)(n), sizeof(*(ptr))))
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+                c = old;
+        return c != u;
+}
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
@@ -140,15 +150,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
         raw_local_irq_restore(flags);
 }
 
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-
-#define cmpxchg_local(ptr, o, n) \
-        ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-                        (unsigned long)(n), sizeof(*(ptr))))
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
 /* Assume that atomic operations are already serializing */
 #define smp_mb__before_atomic_dec() barrier()
 #define smp_mb__after_atomic_dec() barrier()
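
The atomic_add_unless() conversion above keeps the old macro's semantics; the
explicit return only makes the result type-checkable. A minimal usage sketch
(hypothetical caller, not part of the patch) of the atomic_inc_not_zero()
idiom it supports:

        /* Take a reference only if the count has not already hit zero. */
        static int foo_get(atomic_t *refcount)
        {
                /* Non-zero iff the counter was actually incremented. */
                return atomic_inc_not_zero(refcount);
        }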
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 1914e9742512..110fa700f853 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -1,15 +1,50 @@
 #ifndef _ASM_GENERIC_BITOPS_FIND_H_
 #define _ASM_GENERIC_BITOPS_FIND_H_
 
-#ifndef CONFIG_GENERIC_FIND_NEXT_BIT
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
 extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
                 size, unsigned long offset);
 
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
 extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
                 long size, unsigned long offset);
-#endif
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit.
+ */
+extern unsigned long find_first_bit(const unsigned long *addr,
+                                    unsigned long size);
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first cleared bit.
+ */
+extern unsigned long find_first_zero_bit(const unsigned long *addr,
+                                         unsigned long size);
+#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
 #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
 
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
 #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
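
The kerneldoc above implies the usual contract: each function returns the
number of the bit it found, or a value >= size when nothing (further) matches.
A sketch of a caller under that assumption (handle_bit() is hypothetical):

        /* Visit every set bit in bitmap[0..size). */
        static void visit_set_bits(const unsigned long *bitmap,
                                   unsigned long size)
        {
                unsigned long bit;

                for (bit = find_first_bit(bitmap, size);
                     bit < size;
                     bit = find_next_bit(bitmap, size, bit + 1))
                        handle_bit(bit);
        }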
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index b2ba2fc8829a..2533fddd34a6 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -2,6 +2,7 @@
 #define __ASM_GENERIC_CMPXCHG_LOCAL_H
 
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
 
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index a70b2d2bfc14..0fc16e3f0bfc 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -122,7 +122,7 @@
 
 struct f_owner_ex {
         int type;
-        pid_t pid;
+        __kernel_pid_t pid;
 };
 
 /* for F_[GET|SET]FL */
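
__kernel_pid_t matters here because this header is exported to userspace,
where plain pid_t may not be defined at the point of inclusion. For context,
a sketch of how struct f_owner_ex is normally filled in from userspace
(assuming the standard F_SETOWN_EX command and F_OWNER_TID type):

        /* Route SIGIO for fd to a single thread, not the whole process. */
        static int own_fd_by_tid(int fd, pid_t tid)
        {
                struct f_owner_ex owner = {
                        .type = F_OWNER_TID,
                        .pid  = tid,
                };

                return fcntl(fd, F_SETOWN_EX, &owner);
        }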
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index 62f59080e5cc..04d0a977cd43 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -3,13 +3,13 @@
 
 #include <linux/cache.h>
 #include <linux/threads.h>
-#include <linux/irq.h>
 
 typedef struct {
         unsigned int __softirq_pending;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#include <linux/irq.h>
 
 #ifndef ack_bad_irq
 static inline void ack_bad_irq(unsigned int irq)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 118601fce92d..3577ca11a0be 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -19,7 +19,9 @@
 #include <asm-generic/iomap.h>
 #endif
 
+#ifndef mmiowb
 #define mmiowb() do {} while (0)
+#endif
 
 /*****************************************************************************/
 /*
@@ -28,39 +30,51 @@
  * differently. On the simple architectures, we just read/write the
  * memory location directly.
  */
+#ifndef __raw_readb
 static inline u8 __raw_readb(const volatile void __iomem *addr)
 {
         return *(const volatile u8 __force *) addr;
 }
+#endif
 
+#ifndef __raw_readw
 static inline u16 __raw_readw(const volatile void __iomem *addr)
 {
         return *(const volatile u16 __force *) addr;
 }
+#endif
 
+#ifndef __raw_readl
 static inline u32 __raw_readl(const volatile void __iomem *addr)
 {
         return *(const volatile u32 __force *) addr;
 }
+#endif
 
 #define readb __raw_readb
 #define readw(addr) __le16_to_cpu(__raw_readw(addr))
 #define readl(addr) __le32_to_cpu(__raw_readl(addr))
 
+#ifndef __raw_writeb
 static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
 {
         *(volatile u8 __force *) addr = b;
 }
+#endif
 
+#ifndef __raw_writew
 static inline void __raw_writew(u16 b, volatile void __iomem *addr)
 {
         *(volatile u16 __force *) addr = b;
 }
+#endif
 
+#ifndef __raw_writel
 static inline void __raw_writel(u32 b, volatile void __iomem *addr)
 {
         *(volatile u32 __force *) addr = b;
 }
+#endif
 
 #define writeb __raw_writeb
 #define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
@@ -122,6 +136,7 @@ static inline void outl(u32 b, unsigned long addr)
 #define outw_p(x, addr) outw((x), (addr))
 #define outl_p(x, addr) outl((x), (addr))
 
+#ifndef insb
 static inline void insb(unsigned long addr, void *buffer, int count)
 {
         if (count) {
@@ -132,7 +147,9 @@ static inline void insb(unsigned long addr, void *buffer, int count)
                 } while (--count);
         }
 }
+#endif
 
+#ifndef insw
 static inline void insw(unsigned long addr, void *buffer, int count)
 {
         if (count) {
@@ -143,7 +160,9 @@ static inline void insw(unsigned long addr, void *buffer, int count)
                 } while (--count);
         }
 }
+#endif
 
+#ifndef insl
 static inline void insl(unsigned long addr, void *buffer, int count)
 {
         if (count) {
@@ -154,7 +173,9 @@ static inline void insl(unsigned long addr, void *buffer, int count)
                 } while (--count);
         }
 }
+#endif
 
+#ifndef outsb
 static inline void outsb(unsigned long addr, const void *buffer, int count)
 {
         if (count) {
@@ -164,7 +185,9 @@ static inline void outsb(unsigned long addr, const void *buffer, int count)
                 } while (--count);
         }
 }
+#endif
 
+#ifndef outsw
 static inline void outsw(unsigned long addr, const void *buffer, int count)
 {
         if (count) {
@@ -174,7 +197,9 @@ static inline void outsw(unsigned long addr, const void *buffer, int count)
                 } while (--count);
         }
 }
+#endif
 
+#ifndef outsl
 static inline void outsl(unsigned long addr, const void *buffer, int count)
 {
         if (count) {
@@ -184,6 +209,7 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
                 } while (--count);
         }
 }
+#endif
 
 #ifndef CONFIG_GENERIC_IOMAP
 #define ioread8(addr) readb(addr)
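
Each new #ifndef guard lets an architecture replace one accessor while still
reusing the generic remainder. The usual override pattern, sketched for a
hypothetical arch that needs extra read ordering:

        /* arch/foo/include/asm/io.h -- hypothetical */
        static inline u32 __raw_readl(const volatile void __iomem *addr)
        {
                u32 val = *(const volatile u32 __force *)addr;

                rmb();  /* arch-specific ordering, for illustration */
                return val;
        }
        #define __raw_readl __raw_readl /* hide the generic fallback */

        #include <asm-generic/io.h>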
diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h
index 9aebf618275a..1f40d0024cf3 100644
--- a/include/asm-generic/irqflags.h
+++ b/include/asm-generic/irqflags.h
@@ -5,68 +5,62 @@
  * All architectures should implement at least the first two functions,
  * usually inline assembly will be the best way.
  */
-#ifndef RAW_IRQ_DISABLED
-#define RAW_IRQ_DISABLED 0
-#define RAW_IRQ_ENABLED 1
+#ifndef ARCH_IRQ_DISABLED
+#define ARCH_IRQ_DISABLED 0
+#define ARCH_IRQ_ENABLED 1
 #endif
 
 /* read interrupt enabled status */
-#ifndef __raw_local_save_flags
-unsigned long __raw_local_save_flags(void);
+#ifndef arch_local_save_flags
+unsigned long arch_local_save_flags(void);
 #endif
 
 /* set interrupt enabled status */
-#ifndef raw_local_irq_restore
-void raw_local_irq_restore(unsigned long flags);
+#ifndef arch_local_irq_restore
+void arch_local_irq_restore(unsigned long flags);
 #endif
 
 /* get status and disable interrupts */
-#ifndef __raw_local_irq_save
-static inline unsigned long __raw_local_irq_save(void)
+#ifndef arch_local_irq_save
+static inline unsigned long arch_local_irq_save(void)
 {
         unsigned long flags;
-        flags = __raw_local_save_flags();
-        raw_local_irq_restore(RAW_IRQ_DISABLED);
+        flags = arch_local_save_flags();
+        arch_local_irq_restore(ARCH_IRQ_DISABLED);
         return flags;
 }
 #endif
 
 /* test flags */
-#ifndef raw_irqs_disabled_flags
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+#ifndef arch_irqs_disabled_flags
+static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-        return flags == RAW_IRQ_DISABLED;
+        return flags == ARCH_IRQ_DISABLED;
 }
 #endif
 
 /* unconditionally enable interrupts */
-#ifndef raw_local_irq_enable
-static inline void raw_local_irq_enable(void)
+#ifndef arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
 {
-        raw_local_irq_restore(RAW_IRQ_ENABLED);
+        arch_local_irq_restore(ARCH_IRQ_ENABLED);
 }
 #endif
 
 /* unconditionally disable interrupts */
-#ifndef raw_local_irq_disable
-static inline void raw_local_irq_disable(void)
+#ifndef arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
 {
-        raw_local_irq_restore(RAW_IRQ_DISABLED);
+        arch_local_irq_restore(ARCH_IRQ_DISABLED);
 }
 #endif
 
 /* test hardware interrupt enable bit */
-#ifndef raw_irqs_disabled
-static inline int raw_irqs_disabled(void)
+#ifndef arch_irqs_disabled
+static inline int arch_irqs_disabled(void)
 {
-        return raw_irqs_disabled_flags(__raw_local_save_flags());
+        return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 #endif
 
-#define raw_local_save_flags(flags) \
-        do { (flags) = __raw_local_save_flags(); } while (0)
-
-#define raw_local_irq_save(flags) \
-        do { (flags) = __raw_local_irq_save(); } while (0)
-
 #endif /* __ASM_GENERIC_IRQFLAGS_H */
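
With the arch_* naming, a port only has to supply the two primitives named at
the top of the file; every other helper above falls back to them. A minimal
sketch of such a port (the foo_* functions are hypothetical):

        /* arch/foo/include/asm/irqflags.h -- hypothetical */
        static inline unsigned long arch_local_save_flags(void)
        {
                return foo_irqs_enabled() ? ARCH_IRQ_ENABLED
                                          : ARCH_IRQ_DISABLED;
        }
        #define arch_local_save_flags arch_local_save_flags

        static inline void arch_local_irq_restore(unsigned long flags)
        {
                foo_set_irq_enable(flags == ARCH_IRQ_ENABLED);
        }
        #define arch_local_irq_restore arch_local_irq_restore

        #include <asm-generic/irqflags.h>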
diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h
index 11e57b6a85fc..d1814497bcdb 100644
--- a/include/asm-generic/kdebug.h
+++ b/include/asm-generic/kdebug.h
@@ -3,7 +3,7 @@
 
 enum die_val {
         DIE_UNUSED,
-        DIE_OOPS=1
+        DIE_OOPS = 1,
 };
 
 #endif /* _ASM_GENERIC_KDEBUG_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 08923b684768..d17784ea37ff 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -55,14 +55,18 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
  */
 #define per_cpu(var, cpu) \
         (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
-        (*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
-#define __raw_get_cpu_var(var) \
-        (*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
 
-#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#ifndef __this_cpu_ptr
 #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#else
+#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr)
+#endif
 
+#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
+#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
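
After this change __get_cpu_var() is defined in terms of this_cpu_ptr(), so a
CONFIG_DEBUG_PREEMPT kernel can route both through the checked my_cpu_offset.
A usage sketch (the per-CPU variable and helper are hypothetical):

        DEFINE_PER_CPU(unsigned int, foo_events);

        static void foo_count_event(void)
        {
                preempt_disable();      /* keep the CPU from changing */
                (*this_cpu_ptr(&foo_events))++;
                preempt_enable();
        }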
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e2bd73e8f9c0..f4d4120e5128 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address)
 #define move_pte(pte, prot, old_addr, new_addr) (pte)
 #endif
 
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
 #ifndef pgprot_noncached
 #define pgprot_noncached(prot) (prot)
 #endif
diff --git a/include/asm-generic/system.h b/include/asm-generic/system.h
index efa403b5e121..4b0b9cbbfae5 100644
--- a/include/asm-generic/system.h
+++ b/include/asm-generic/system.h
@@ -21,6 +21,7 @@
 #include <linux/irqflags.h>
 
 #include <asm/cmpxchg-local.h>
+#include <asm/cmpxchg.h>
 
 struct task_struct;
 
@@ -136,25 +137,6 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 #define xchg(ptr, x) \
         ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
 
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
-                                      unsigned long old, unsigned long new)
-{
-        unsigned long retval;
-        unsigned long flags;
-
-        local_irq_save(flags);
-        retval = *m;
-        if (retval == old)
-                *m = new;
-        local_irq_restore(flags);
-        return retval;
-}
-
-#define cmpxchg(ptr, o, n) \
-        ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
-                                        (unsigned long)(o), \
-                                        (unsigned long)(n)))
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
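
Moving __cmpxchg() out to <asm/cmpxchg.h> does not change the caller-visible
contract: cmpxchg() still returns the prior value of *ptr, so success is
detected by comparing against the expected old value. The canonical retry
loop, as a sketch (the helper is hypothetical):

        /* Increment *p, but never past max. */
        static unsigned long inc_saturating(unsigned long *p, unsigned long max)
        {
                unsigned long old, new;

                do {
                        old = *p;
                        if (old == max)
                                return old;     /* already saturated */
                        new = old + 1;
                } while (cmpxchg(p, old, new) != old);

                return new;
        }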
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8a92a170fb7d..f4229fb315e1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -220,6 +220,8 @@
                                                                 \
         BUG_TABLE                                               \
                                                                 \
+        JUMP_TABLE                                              \
+                                                                \
         /* PCI quirks */                                        \
         .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {       \
                 VMLINUX_SYMBOL(__start_pci_fixups_early) = .;   \
@@ -563,6 +565,14 @@
 #define BUG_TABLE
 #endif
 
+#define JUMP_TABLE                                              \
+        . = ALIGN(8);                                           \
+        __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) {   \
+                VMLINUX_SYMBOL(__start___jump_table) = .;       \
+                *(__jump_table)                                 \
+                VMLINUX_SYMBOL(__stop___jump_table) = .;        \
+        }
+
 #ifdef CONFIG_PM_TRACE
 #define TRACEDATA \
         . = ALIGN(4);                                           \
@@ -677,7 +687,9 @@
                                 - LOAD_OFFSET) {                \
                 VMLINUX_SYMBOL(__per_cpu_start) = .;            \
                 *(.data..percpu..first)                         \
+                . = ALIGN(PAGE_SIZE);                           \
                 *(.data..percpu..page_aligned)                  \
+                *(.data..percpu..readmostly)                    \
                 *(.data..percpu)                                \
                 *(.data..percpu..shared_aligned)                \
                 VMLINUX_SYMBOL(__per_cpu_end) = .;              \
@@ -703,7 +715,9 @@
         VMLINUX_SYMBOL(__per_cpu_load) = .;                     \
         VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
         *(.data..percpu..first)                                 \
+        . = ALIGN(PAGE_SIZE);                                   \
         *(.data..percpu..page_aligned)                          \
+        *(.data..percpu..readmostly)                            \
         *(.data..percpu)                                        \
         *(.data..percpu..shared_aligned)                        \
         VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
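
The JUMP_TABLE macro gives C code the usual pair of section-bounds symbols.
A sketch of how such bounds are typically walked (the entry layout shown is
illustrative only; the real one lives in <linux/jump_label.h>):

        struct jump_entry {             /* assumed layout, for illustration */
                unsigned long code, target, key;
        };

        extern struct jump_entry __start___jump_table[];
        extern struct jump_entry __stop___jump_table[];

        static void for_each_jump_entry(void (*fn)(struct jump_entry *))
        {
                struct jump_entry *e;

                for (e = __start___jump_table; e < __stop___jump_table; e++)
                        fn(e);
        }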