author    Tony Luck <tony.luck@intel.com>    2005-07-25 18:46:44 -0400
committer Tony Luck <tony.luck@intel.com>    2005-07-25 18:46:44 -0400
commit    05cb784c81a0fd1f97732156ea464bd392ce875a (patch)
tree      24122979b411dcec6ff390fc9ae84ad9413128e9 /include
parent    3190186362466658f01b2e354e639378ce07e1a9 (diff)
parent    6b6a93c6876ea1c530d5d3f68e3678093a27fab0 (diff)
Auto merge with /home/aegl/GIT/linus
Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/arch-imx/imxfb.h                    |   1
-rw-r--r--  include/asm-arm/locks.h                             |   4
-rw-r--r--  include/asm-arm/spinlock.h                          |  35
-rw-r--r--  include/asm-i386/i387.h                             |  26
-rw-r--r--  include/asm-sparc64/bitops.h                        |  56
-rw-r--r--  include/asm-sparc64/ptrace.h                        |   5
-rw-r--r--  include/asm-sparc64/rwsem.h                         |  48
-rw-r--r--  include/asm-sparc64/spitfire.h                      | 130
-rw-r--r--  include/asm-sparc64/system.h                        |  11
-rw-r--r--  include/asm-sparc64/thread_info.h                   |  12
-rw-r--r--  include/asm-sparc64/timer.h                         |  41
-rw-r--r--  include/linux/netfilter_ipv4/ip_conntrack.h         |   3
-rw-r--r--  include/linux/netfilter_ipv4/ip_conntrack_helper.h  |   7
-rw-r--r--  include/linux/netlink.h                             |   2
-rw-r--r--  include/linux/pci_ids.h                             |   2
-rw-r--r--  include/linux/skbuff.h                              |   3
-rw-r--r--  include/linux/tc_ematch/tc_em_meta.h                |   5
-rw-r--r--  include/net/sctp/sctp.h                             |   7
-rw-r--r--  include/net/xfrm.h                                  |   2
19 files changed, 102 insertions(+), 298 deletions(-)
diff --git a/include/asm-arm/arch-imx/imxfb.h b/include/asm-arm/arch-imx/imxfb.h
index 2346d454ab..7dbc7bbba6 100644
--- a/include/asm-arm/arch-imx/imxfb.h
+++ b/include/asm-arm/arch-imx/imxfb.h
@@ -25,6 +25,7 @@ struct imxfb_mach_info {
 	u_int	pcr;
 	u_int	pwmr;
 	u_int	lscr1;
+	u_int	dmacr;
 
 	u_char * fixed_screen_cpu;
 	dma_addr_t fixed_screen_dma;
diff --git a/include/asm-arm/locks.h b/include/asm-arm/locks.h
index c26298f389..9cb33fcc06 100644
--- a/include/asm-arm/locks.h
+++ b/include/asm-arm/locks.h
@@ -61,7 +61,7 @@
61" strex ip, lr, [%0]\n" \ 61" strex ip, lr, [%0]\n" \
62" teq ip, #0\n" \ 62" teq ip, #0\n" \
63" bne 1b\n" \ 63" bne 1b\n" \
64" teq lr, #0\n" \ 64" cmp lr, #0\n" \
65" movle ip, %0\n" \ 65" movle ip, %0\n" \
66" blle " #wake \ 66" blle " #wake \
67 : \ 67 : \
@@ -100,7 +100,7 @@
 	__asm__ __volatile__(	\
 	"@ up_op_read\n"	\
 "1:	ldrex	lr, [%0]\n"	\
-"	add	lr, lr, %1\n"	\
+"	adds	lr, lr, %1\n"	\
 "	strex	ip, lr, [%0]\n"	\
 "	teq	ip, #0\n"	\
 "	bne	1b\n"	\
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 182323619c..9705d5eec9 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -79,7 +79,8 @@ typedef struct {
 } rwlock_t;
 
 #define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
-#define rwlock_init(x)		do { *(x) + RW_LOCK_UNLOCKED; } while (0)
+#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)
+#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
 
 /*
  * Write locks are easy - we just set bit 31.  When unlocking, we can
@@ -100,6 +101,21 @@ static inline void _raw_write_lock(rwlock_t *rw)
100 : "cc", "memory"); 101 : "cc", "memory");
101} 102}
102 103
104static inline int _raw_write_trylock(rwlock_t *rw)
105{
106 unsigned long tmp;
107
108 __asm__ __volatile__(
109"1: ldrex %0, [%1]\n"
110" teq %0, #0\n"
111" strexeq %0, %2, [%1]"
112 : "=&r" (tmp)
113 : "r" (&rw->lock), "r" (0x80000000)
114 : "cc", "memory");
115
116 return tmp == 0;
117}
118
103static inline void _raw_write_unlock(rwlock_t *rw) 119static inline void _raw_write_unlock(rwlock_t *rw)
104{ 120{
105 __asm__ __volatile__( 121 __asm__ __volatile__(
@@ -138,6 +154,8 @@ static inline void _raw_read_lock(rwlock_t *rw)
 
 static inline void _raw_read_unlock(rwlock_t *rw)
 {
+	unsigned long tmp, tmp2;
+
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, #1\n"
@@ -151,19 +169,4 @@ static inline void _raw_read_unlock(rwlock_t *rw)
 
 #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
 
-static inline int _raw_write_trylock(rwlock_t *rw)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc", "memory");
-
-	return tmp == 0;
-}
-
 #endif /* __ASM_SPINLOCK_H__ */
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index f6feb98a93..6747006743 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -19,10 +19,21 @@
 
 extern void mxcsr_feature_mask_init(void);
 extern void init_fpu(struct task_struct *);
+
 /*
  * FPU lazy state save handling...
  */
-extern void restore_fpu( struct task_struct *tsk );
+
+/*
+ * The "nop" is needed to make the instructions the same
+ * length.
+ */
+#define restore_fpu(tsk)			\
+	alternative_input(			\
+		"nop ; frstor %1",		\
+		"fxrstor %1",			\
+		X86_FEATURE_FXSR,		\
+		"m" ((tsk)->thread.i387.fxsave))
 
 extern void kernel_fpu_begin(void);
 #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
@@ -32,13 +43,12 @@ extern void kernel_fpu_begin(void);
  */
 static inline void __save_init_fpu( struct task_struct *tsk )
 {
-	if ( cpu_has_fxsr ) {
-		asm volatile( "fxsave %0 ; fnclex"
-			      : "=m" (tsk->thread.i387.fxsave) );
-	} else {
-		asm volatile( "fnsave %0 ; fwait"
-			      : "=m" (tsk->thread.i387.fsave) );
-	}
+	alternative_input(
+		"fnsave %1 ; fwait ;" GENERIC_NOP2,
+		"fxsave %1 ; fnclex",
+		X86_FEATURE_FXSR,
+		"m" (tsk->thread.i387.fxsave)
+		:"memory");
 	tsk->thread_info->status &= ~TS_USEDFPU;
 }
 
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 9d722dc8cc..9c5e719702 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -20,52 +20,52 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 /* "non-atomic" versions... */
 
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *m = addr + (nr >> 6);
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
 
 	*m |= (1UL << (nr & 63));
 }
 
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *m = addr + (nr >> 6);
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
 
 	*m &= ~(1UL << (nr & 63));
 }
 
-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *m = addr + (nr >> 6);
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
 
 	*m ^= (1UL << (nr & 63));
 }
 
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *m = addr + (nr >> 6);
-	long old = *m;
-	long mask = (1UL << (nr & 63));
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long old = *m;
+	unsigned long mask = (1UL << (nr & 63));
 
 	*m = (old | mask);
 	return ((old & mask) != 0);
 }
 
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *m = addr + (nr >> 6);
-	long old = *m;
-	long mask = (1UL << (nr & 63));
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long old = *m;
+	unsigned long mask = (1UL << (nr & 63));
 
 	*m = (old & ~mask);
 	return ((old & mask) != 0);
 }
 
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *m = addr + (nr >> 6);
-	long old = *m;
-	long mask = (1UL << (nr & 63));
+	unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long old = *m;
+	unsigned long mask = (1UL << (nr & 63));
 
 	*m = (old ^ mask);
 	return ((old & mask) != 0);
@@ -79,13 +79,13 @@ static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr
 #define smp_mb__after_clear_bit()	barrier()
 #endif
 
-static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
+static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
 {
-	return (1UL & ((addr)[nr >> 6] >> (nr & 63))) != 0UL;
+	return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL;
 }
 
 /* The easy/cheese version for now. */
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
 {
 	unsigned long result;
 
@@ -103,7 +103,7 @@ static __inline__ unsigned long ffz(unsigned long word)
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static __inline__ unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
 	unsigned long result = 0;
 
@@ -144,7 +144,7 @@ static inline int sched_find_first_bit(unsigned long *b)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static __inline__ int ffs(int x)
+static inline int ffs(int x)
 {
 	if (!x)
 		return 0;
@@ -158,7 +158,7 @@ static __inline__ int ffs(int x)
 
 #ifdef ULTRA_HAS_POPULATION_COUNT
 
-static __inline__ unsigned int hweight64(unsigned long w)
+static inline unsigned int hweight64(unsigned long w)
 {
 	unsigned int res;
 
@@ -166,7 +166,7 @@ static __inline__ unsigned int hweight64(unsigned long w)
 	return res;
 }
 
-static __inline__ unsigned int hweight32(unsigned int w)
+static inline unsigned int hweight32(unsigned int w)
 {
 	unsigned int res;
 
@@ -174,7 +174,7 @@ static __inline__ unsigned int hweight32(unsigned int w)
 	return res;
 }
 
-static __inline__ unsigned int hweight16(unsigned int w)
+static inline unsigned int hweight16(unsigned int w)
 {
 	unsigned int res;
 
@@ -182,7 +182,7 @@ static __inline__ unsigned int hweight16(unsigned int w)
 	return res;
 }
 
-static __inline__ unsigned int hweight8(unsigned int w)
+static inline unsigned int hweight8(unsigned int w)
 {
 	unsigned int res;
 
@@ -236,7 +236,7 @@ extern unsigned long find_next_zero_bit(const unsigned long *,
 #define test_and_clear_le_bit(nr,addr)	\
 	test_and_clear_bit((nr) ^ 0x38, (addr))
 
-static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
+static inline int test_le_bit(int nr, __const__ unsigned long * addr)
 {
 	int mask;
 	__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h
index 2d2b5a113d..6194f771e9 100644
--- a/include/asm-sparc64/ptrace.h
+++ b/include/asm-sparc64/ptrace.h
@@ -94,8 +94,9 @@ struct sparc_trapf {
 #define STACKFRAME32_SZ	sizeof(struct sparc_stackf32)
 
 #ifdef __KERNEL__
 #define force_successful_syscall_return()	\
-	set_thread_flag(TIF_SYSCALL_SUCCESS)
+do {	current_thread_info()->syscall_noerror = 1; \
+} while (0)
 #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
 #define instruction_pointer(regs) ((regs)->tpc)
 #ifdef CONFIG_SMP
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index a1cc94f959..4568ee4022 100644
--- a/include/asm-sparc64/rwsem.h
+++ b/include/asm-sparc64/rwsem.h
@@ -46,54 +46,14 @@ extern void __up_read(struct rw_semaphore *sem);
 extern void __up_write(struct rw_semaphore *sem);
 extern void __downgrade_write(struct rw_semaphore *sem);
 
-static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
-	int tmp = delta;
-
-	__asm__ __volatile__(
-	"1:\tlduw	[%2], %%g1\n\t"
-	"add	%%g1, %1, %%g7\n\t"
-	"cas	[%2], %%g1, %%g7\n\t"
-	"cmp	%%g1, %%g7\n\t"
-	"membar #StoreLoad | #StoreStore\n\t"
-	"bne,pn	%%icc, 1b\n\t"
-	" nop\n\t"
-	"mov	%%g7, %0\n\t"
-	: "=&r" (tmp)
-	: "0" (tmp), "r" (sem)
-	: "g1", "g7", "memory", "cc");
-
-	return tmp + delta;
-}
-
-#define rwsem_atomic_add rwsem_atomic_update
-
-static __inline__ __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 __old, __u16 __new)
-{
-	u32 old = (sem->count & 0xffff0000) | (u32) __old;
-	u32 new = (old & 0xffff0000) | (u32) __new;
-	u32 prev;
-
-again:
-	__asm__ __volatile__("cas	[%2], %3, %0\n\t"
-			     "membar #StoreLoad | #StoreStore"
-			     : "=&r" (prev)
-			     : "0" (new), "r" (sem), "r" (old)
-			     : "memory");
-
-	/* To give the same semantics as x86 cmpxchgw, keep trying
-	 * if only the upper 16-bits changed.
-	 */
-	if (prev != old &&
-	    ((prev & 0xffff) == (old & 0xffff)))
-		goto again;
-
-	return prev & 0xffff;
+	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-static __inline__ signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new)
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 {
-	return cmpxchg(&sem->count,old,new);
+	atomic_add(delta, (atomic_t *)(&sem->count));
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h
index 1aa932773a..962638c9d1 100644
--- a/include/asm-sparc64/spitfire.h
+++ b/include/asm-sparc64/spitfire.h
@@ -56,52 +56,6 @@ extern void cheetah_enable_pcache(void);
 	SPITFIRE_HIGHEST_LOCKED_TLBENT : \
 	CHEETAH_HIGHEST_LOCKED_TLBENT)
 
-static __inline__ unsigned long spitfire_get_isfsr(void)
-{
-	unsigned long ret;
-
-	__asm__ __volatile__("ldxa [%1] %2, %0"
-			     : "=r" (ret)
-			     : "r" (TLB_SFSR), "i" (ASI_IMMU));
-	return ret;
-}
-
-static __inline__ unsigned long spitfire_get_dsfsr(void)
-{
-	unsigned long ret;
-
-	__asm__ __volatile__("ldxa [%1] %2, %0"
-			     : "=r" (ret)
-			     : "r" (TLB_SFSR), "i" (ASI_DMMU));
-	return ret;
-}
-
-static __inline__ unsigned long spitfire_get_sfar(void)
-{
-	unsigned long ret;
-
-	__asm__ __volatile__("ldxa [%1] %2, %0"
-			     : "=r" (ret)
-			     : "r" (DMMU_SFAR), "i" (ASI_DMMU));
-	return ret;
-}
-
-static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
-{
-	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
-			     "membar #Sync"
-			     : /* no outputs */
-			     : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
-}
-
-static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
-{
-	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
-			     "membar #Sync"
-			     : /* no outputs */
-			     : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
-}
-
 /* The data cache is write through, so this just invalidates the
  * specified line.
  */
@@ -193,90 +147,6 @@ static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
193 "i" (ASI_ITLB_DATA_ACCESS)); 147 "i" (ASI_ITLB_DATA_ACCESS));
194} 148}
195 149
196/* Spitfire hardware assisted TLB flushes. */
197
198/* Context level flushes. */
199static __inline__ void spitfire_flush_dtlb_primary_context(void)
200{
201 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
202 "membar #Sync"
203 : /* No outputs */
204 : "r" (0x40), "i" (ASI_DMMU_DEMAP));
205}
206
207static __inline__ void spitfire_flush_itlb_primary_context(void)
208{
209 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
210 "membar #Sync"
211 : /* No outputs */
212 : "r" (0x40), "i" (ASI_IMMU_DEMAP));
213}
214
215static __inline__ void spitfire_flush_dtlb_secondary_context(void)
216{
217 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
218 "membar #Sync"
219 : /* No outputs */
220 : "r" (0x50), "i" (ASI_DMMU_DEMAP));
221}
222
223static __inline__ void spitfire_flush_itlb_secondary_context(void)
224{
225 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
226 "membar #Sync"
227 : /* No outputs */
228 : "r" (0x50), "i" (ASI_IMMU_DEMAP));
229}
230
231static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
232{
233 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
234 "membar #Sync"
235 : /* No outputs */
236 : "r" (0x60), "i" (ASI_DMMU_DEMAP));
237}
238
239static __inline__ void spitfire_flush_itlb_nucleus_context(void)
240{
241 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
242 "membar #Sync"
243 : /* No outputs */
244 : "r" (0x60), "i" (ASI_IMMU_DEMAP));
245}
246
247/* Page level flushes. */
248static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
249{
250 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
251 "membar #Sync"
252 : /* No outputs */
253 : "r" (page), "i" (ASI_DMMU_DEMAP));
254}
255
256static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
257{
258 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
259 "membar #Sync"
260 : /* No outputs */
261 : "r" (page), "i" (ASI_IMMU_DEMAP));
262}
263
264static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
265{
266 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
267 "membar #Sync"
268 : /* No outputs */
269 : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
270}
271
272static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
273{
274 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
275 "membar #Sync"
276 : /* No outputs */
277 : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
278}
279
280static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page) 150static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
281{ 151{
282 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" 152 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index f9be2c5b4d..ee4bdfc6b8 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -190,24 +190,23 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
190 "wrpr %%g1, %%cwp\n\t" \ 190 "wrpr %%g1, %%cwp\n\t" \
191 "ldx [%%g6 + %3], %%o6\n\t" \ 191 "ldx [%%g6 + %3], %%o6\n\t" \
192 "ldub [%%g6 + %2], %%o5\n\t" \ 192 "ldub [%%g6 + %2], %%o5\n\t" \
193 "ldx [%%g6 + %4], %%o7\n\t" \ 193 "ldub [%%g6 + %4], %%o7\n\t" \
194 "mov %%g6, %%l2\n\t" \ 194 "mov %%g6, %%l2\n\t" \
195 "wrpr %%o5, 0x0, %%wstate\n\t" \ 195 "wrpr %%o5, 0x0, %%wstate\n\t" \
196 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ 196 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
197 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ 197 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
198 "wrpr %%g0, 0x94, %%pstate\n\t" \ 198 "wrpr %%g0, 0x94, %%pstate\n\t" \
199 "mov %%l2, %%g6\n\t" \ 199 "mov %%l2, %%g6\n\t" \
200 "ldx [%%g6 + %7], %%g4\n\t" \ 200 "ldx [%%g6 + %6], %%g4\n\t" \
201 "wrpr %%g0, 0x96, %%pstate\n\t" \ 201 "wrpr %%g0, 0x96, %%pstate\n\t" \
202 "andcc %%o7, %6, %%g0\n\t" \ 202 "brz,pt %%o7, 1f\n\t" \
203 "beq,pt %%icc, 1f\n\t" \
204 " mov %%g7, %0\n\t" \ 203 " mov %%g7, %0\n\t" \
205 "b,a ret_from_syscall\n\t" \ 204 "b,a ret_from_syscall\n\t" \
206 "1:\n\t" \ 205 "1:\n\t" \
207 : "=&r" (last) \ 206 : "=&r" (last) \
208 : "0" (next->thread_info), \ 207 : "0" (next->thread_info), \
209 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \ 208 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
210 "i" (_TIF_NEWCHILD), "i" (TI_TASK) \ 209 "i" (TI_CWP), "i" (TI_TASK) \
211 : "cc", \ 210 : "cc", \
212 "g1", "g2", "g3", "g7", \ 211 "g1", "g2", "g3", "g7", \
213 "l2", "l3", "l4", "l5", "l6", "l7", \ 212 "l2", "l3", "l4", "l5", "l6", "l7", \
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index a1d25c06f9..352d994366 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -47,7 +47,9 @@ struct thread_info {
 	struct pt_regs *kregs;
 	struct exec_domain *exec_domain;
 	int preempt_count;	/* 0 => preemptable, <0 => BUG */
-	int __pad;
+	__u8 new_child;
+	__u8 syscall_noerror;
+	__u16 __pad;
 
 	unsigned long *utraps;
 
@@ -87,6 +89,8 @@ struct thread_info {
 #define TI_KREGS	0x00000028
 #define TI_EXEC_DOMAIN	0x00000030
 #define TI_PRE_COUNT	0x00000038
+#define TI_NEW_CHILD	0x0000003c
+#define TI_SYS_NOERROR	0x0000003d
 #define TI_UTRAPS	0x00000040
 #define TI_REG_WINDOW	0x00000048
 #define TI_RWIN_SPTRS	0x000003c8
@@ -219,10 +223,10 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define TIF_UNALIGNED		5	/* allowed to do unaligned accesses */
 #define TIF_NEWSIGNALS		6	/* wants new-style signals */
 #define TIF_32BIT		7	/* 32-bit binary */
-#define TIF_NEWCHILD		8	/* just-spawned child process */
+/* flag bit 8 is available */
 #define TIF_SECCOMP		9	/* secure computing */
 #define TIF_SYSCALL_AUDIT	10	/* syscall auditing active */
-#define TIF_SYSCALL_SUCCESS	11
+/* flag bit 11 is available */
 /* NOTE: Thread flags >= 12 should be ones we have no interest
  * in using in assembly, else we can't use the mask as
  * an immediate value in instructions such as andcc.
@@ -239,10 +243,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_UNALIGNED		(1<<TIF_UNALIGNED)
 #define _TIF_NEWSIGNALS		(1<<TIF_NEWSIGNALS)
 #define _TIF_32BIT		(1<<TIF_32BIT)
-#define _TIF_NEWCHILD		(1<<TIF_NEWCHILD)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
-#define _TIF_SYSCALL_SUCCESS	(1<<TIF_SYSCALL_SUCCESS)
 #define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
diff --git a/include/asm-sparc64/timer.h b/include/asm-sparc64/timer.h
index ba33a2b6b7..edc8e08c3a 100644
--- a/include/asm-sparc64/timer.h
+++ b/include/asm-sparc64/timer.h
@@ -9,49 +9,8 @@
 
 #include <linux/types.h>
 
-/* How timers work:
- *
- * On uniprocessors we just use counter zero for the system wide
- * ticker, this performs thread scheduling, clock book keeping,
- * and runs timer based events.  Previously we used the Ultra
- * %tick interrupt for this purpose.
- *
- * On multiprocessors we pick one cpu as the master level 10 tick
- * processor.  Here this counter zero tick handles clock book
- * keeping and timer events only.  Each Ultra has it's level
- * 14 %tick interrupt set to fire off as well, even the master
- * tick cpu runs this locally.  This ticker performs thread
- * scheduling, system/user tick counting for the current thread,
- * and also profiling if enabled.
- */
-
 #include <linux/config.h>
 
-/* Two timers, traditionally steered to PIL's 10 and 14 respectively.
- * But since INO packets are used on sun5, we could use any PIL level
- * we like, however for now we use the normal ones.
- *
- * The 'reg' and 'interrupts' properties for these live in nodes named
- * 'counter-timer'.  The first of three 'reg' properties describe where
- * the sun5_timer registers are.  The other two I have no idea. (XXX)
- */
-struct sun5_timer {
-	u64	count0;
-	u64	limit0;
-	u64	count1;
-	u64	limit1;
-};
-
-#define SUN5_LIMIT_ENABLE	0x80000000
-#define SUN5_LIMIT_TOZERO	0x40000000
-#define SUN5_LIMIT_ZRESTART	0x20000000
-#define SUN5_LIMIT_CMASK	0x1fffffff
-
-/* Given a HZ value, set the limit register to so that the timer IRQ
- * gets delivered that often.
- */
-#define SUN5_HZ_TO_LIMIT(__hz)	(1000000/(__hz))
-
 struct sparc64_tick_ops {
 	void (*init_tick)(unsigned long);
 	unsigned long (*get_tick)(void);
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index 3781192ce1..f8da7ddeff 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -197,6 +197,9 @@ struct ip_conntrack_expect
 	/* Timer function; deletes the expectation. */
 	struct timer_list timeout;
 
+	/* Usage count. */
+	atomic_t use;
+
 #ifdef CONFIG_IP_NF_NAT_NEEDED
 	/* This is the original per-proto part, used to map the
 	 * expected connection the way the recipient expects. */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_helper.h b/include/linux/netfilter_ipv4/ip_conntrack_helper.h
index b1bbba0a12..3692daa93d 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_helper.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_helper.h
@@ -30,9 +30,10 @@ extern int ip_conntrack_helper_register(struct ip_conntrack_helper *);
 extern void ip_conntrack_helper_unregister(struct ip_conntrack_helper *);
 
 /* Allocate space for an expectation: this is mandatory before calling
-   ip_conntrack_expect_related. */
-extern struct ip_conntrack_expect *ip_conntrack_expect_alloc(void);
-extern void ip_conntrack_expect_free(struct ip_conntrack_expect *exp);
+   ip_conntrack_expect_related.  You will have to call put afterwards. */
+extern struct ip_conntrack_expect *
+ip_conntrack_expect_alloc(struct ip_conntrack *master);
+extern void ip_conntrack_expect_put(struct ip_conntrack_expect *exp);
 
 /* Add an expected connection: can have more than one per connection */
 extern int ip_conntrack_expect_related(struct ip_conntrack_expect *exp);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 2f0c085f2c..70c2a9dc4b 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -5,7 +5,7 @@
 #include <linux/types.h>
 
 #define NETLINK_ROUTE		0	/* Routing/device hook */
-#define NETLINK_SKIP		1	/* Reserved for ENskip */
+#define NETLINK_W1		1	/* 1-wire subsystem */
 #define NETLINK_USERSOCK	2	/* Reserved for user mode socket protocols */
 #define NETLINK_FIREWALL	3	/* Firewalling hook */
 #define NETLINK_TCPDIAG		4	/* TCP socket monitoring */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 27348c22da..9a28b312ee 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1872,6 +1872,7 @@
 #define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001
 
 #define PCI_VENDOR_ID_SIIG		0x131f
+#define PCI_SUBVENDOR_ID_SIIG		0x131f
 #define PCI_DEVICE_ID_SIIG_1S_10x_550	0x1000
 #define PCI_DEVICE_ID_SIIG_1S_10x_650	0x1001
 #define PCI_DEVICE_ID_SIIG_1S_10x_850	0x1002
@@ -1909,6 +1910,7 @@
 #define PCI_DEVICE_ID_SIIG_2S1P_20x_550	0x2060
 #define PCI_DEVICE_ID_SIIG_2S1P_20x_650	0x2061
 #define PCI_DEVICE_ID_SIIG_2S1P_20x_850	0x2062
+#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL	0x2050
 
 #define PCI_VENDOR_ID_RADISYS		0x1331
 #define PCI_DEVICE_ID_RADISYS_ENP2611	0x0030
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5d4a990d55..0061c94704 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -502,7 +502,8 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
  *
  *	%NULL is returned on a memory allocation failure.
  */
-static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
+static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
+					  unsigned int __nocast pri)
 {
 	might_sleep_if(pri & __GFP_WAIT);
 	if (skb_cloned(skb)) {
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h
index bcb762d931..081b1ee851 100644
--- a/include/linux/tc_ematch/tc_em_meta.h
+++ b/include/linux/tc_ematch/tc_em_meta.h
@@ -41,19 +41,14 @@ enum
 	TCF_META_ID_LOADAVG_1,
 	TCF_META_ID_LOADAVG_2,
 	TCF_META_ID_DEV,
-	TCF_META_ID_INDEV,
-	TCF_META_ID_REALDEV,
 	TCF_META_ID_PRIORITY,
 	TCF_META_ID_PROTOCOL,
-	TCF_META_ID_SECURITY, /* obsolete */
 	TCF_META_ID_PKTTYPE,
 	TCF_META_ID_PKTLEN,
 	TCF_META_ID_DATALEN,
 	TCF_META_ID_MACLEN,
 	TCF_META_ID_NFMARK,
 	TCF_META_ID_TCINDEX,
-	TCF_META_ID_TCVERDICT,
-	TCF_META_ID_TCCLASSID,
 	TCF_META_ID_RTCLASSID,
 	TCF_META_ID_RTIIF,
 	TCF_META_ID_SK_FAMILY,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 4a26adfaed..e1d5ec1c23 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -167,15 +167,12 @@ void sctp_unhash_established(struct sctp_association *);
 void sctp_hash_endpoint(struct sctp_endpoint *);
 void sctp_unhash_endpoint(struct sctp_endpoint *);
 struct sock *sctp_err_lookup(int family, struct sk_buff *,
-			     struct sctphdr *, struct sctp_endpoint **,
-			     struct sctp_association **,
+			     struct sctphdr *, struct sctp_association **,
 			     struct sctp_transport **);
-void sctp_err_finish(struct sock *, struct sctp_endpoint *,
-		     struct sctp_association *);
+void sctp_err_finish(struct sock *, struct sctp_association *);
 void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
 			   struct sctp_transport *t, __u32 pmtu);
 void sctp_icmp_proto_unreachable(struct sock *sk,
-				 struct sctp_endpoint *ep,
 				 struct sctp_association *asoc,
 				 struct sctp_transport *t);
 
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 029522a4ce..868ef88ef9 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -803,7 +803,7 @@ struct xfrm_algo_desc {
 /* XFRM tunnel handlers. */
 struct xfrm_tunnel {
 	int (*handler)(struct sk_buff *skb);
-	void (*err_handler)(struct sk_buff *skb, void *info);
+	void (*err_handler)(struct sk_buff *skb, __u32 info);
 };
 
 struct xfrm6_tunnel {