Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/vmlinux.lds.h    22
-rw-r--r--  include/asm-i386/unwind.h             91
-rw-r--r--  include/asm-s390/qdio.h                1
-rw-r--r--  include/asm-s390/reset.h               1
-rw-r--r--  include/asm-sparc/bitops.h           100
-rw-r--r--  include/asm-sparc64/hw_irq.h           2
-rw-r--r--  include/asm-sparc64/percpu.h          10
-rw-r--r--  include/asm-x86_64/unwind.h           96
-rw-r--r--  include/linux/connector.h              2
-rw-r--r--  include/linux/unwind.h                63
-rw-r--r--  include/linux/workqueue.h             36
-rw-r--r--  include/net/ax25.h                    26
-rw-r--r--  include/net/rose.h                     6
-rw-r--r--  include/rdma/ib_verbs.h                9
14 files changed, 86 insertions, 379 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7437ccaada77..1587121730c5 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -119,8 +119,6 @@
 		*(__ksymtab_strings)				\
 	}							\
 								\
-	EH_FRAME						\
-								\
 	/* Built-in module parameters. */			\
 	__param : AT(ADDR(__param) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___param) = .;		\
@@ -160,26 +158,6 @@
 		*(.kprobes.text)					\
 		VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
-#ifdef CONFIG_STACK_UNWIND
-#define EH_FRAME							\
-		/* Unwind data binary search table */			\
-		. = ALIGN(8);						\
-		.eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) {	\
-			VMLINUX_SYMBOL(__start_unwind_hdr) = .;		\
-			*(.eh_frame_hdr)				\
-			VMLINUX_SYMBOL(__end_unwind_hdr) = .;		\
-		}							\
-		/* Unwind data */					\
-		. = ALIGN(8);						\
-		.eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {		\
-			VMLINUX_SYMBOL(__start_unwind) = .;		\
-			*(.eh_frame)					\
-			VMLINUX_SYMBOL(__end_unwind) = .;		\
-		}
-#else
-#define EH_FRAME
-#endif
-
 	/* DWARF debug sections.
 	   Symbols in the DWARF debugging sections are relative to
 	   the beginning of the section so we begin them at 0.  */
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
index aa2c931e30db..43c70c3de2f9 100644
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -1,95 +1,6 @@
 #ifndef _ASM_I386_UNWIND_H
 #define _ASM_I386_UNWIND_H
 
-/*
- * Copyright (C) 2002-2006 Novell, Inc.
- *	Jan Beulich <jbeulich@novell.com>
- * This code is released under version 2 of the GNU GPL.
- */
-
-#ifdef CONFIG_STACK_UNWIND
-
-#include <linux/sched.h>
-#include <asm/fixmap.h>
-#include <asm/ptrace.h>
-#include <asm/uaccess.h>
-
-struct unwind_frame_info
-{
-	struct pt_regs regs;
-	struct task_struct *task;
-	unsigned call_frame:1;
-};
-
-#define UNW_PC(frame)        (frame)->regs.eip
-#define UNW_SP(frame)        (frame)->regs.esp
-#ifdef CONFIG_FRAME_POINTER
-#define UNW_FP(frame)        (frame)->regs.ebp
-#define FRAME_RETADDR_OFFSET 4
-#define FRAME_LINK_OFFSET    0
-#define STACK_BOTTOM(tsk)    STACK_LIMIT((tsk)->thread.esp0)
-#define STACK_TOP(tsk)       ((tsk)->thread.esp0)
-#else
-#define UNW_FP(frame) ((void)(frame), 0)
-#endif
-#define STACK_LIMIT(ptr)     (((ptr) - 1) & ~(THREAD_SIZE - 1))
-
-#define UNW_REGISTER_INFO \
-	PTREGS_INFO(eax), \
-	PTREGS_INFO(ecx), \
-	PTREGS_INFO(edx), \
-	PTREGS_INFO(ebx), \
-	PTREGS_INFO(esp), \
-	PTREGS_INFO(ebp), \
-	PTREGS_INFO(esi), \
-	PTREGS_INFO(edi), \
-	PTREGS_INFO(eip)
-
-#define UNW_DEFAULT_RA(raItem, dataAlign) \
-	((raItem).where == Memory && \
-	 !((raItem).value * (dataAlign) + 4))
-
-static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
-					    /*const*/ struct pt_regs *regs)
-{
-	if (user_mode_vm(regs))
-		info->regs = *regs;
-	else {
-		memcpy(&info->regs, regs, offsetof(struct pt_regs, esp));
-		info->regs.esp = (unsigned long)&regs->esp;
-		info->regs.xss = __KERNEL_DS;
-	}
-}
-
-static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
-{
-	memset(&info->regs, 0, sizeof(info->regs));
-	info->regs.eip = info->task->thread.eip;
-	info->regs.xcs = __KERNEL_CS;
-	__get_user(info->regs.ebp, (long *)info->task->thread.esp);
-	info->regs.esp = info->task->thread.esp;
-	info->regs.xss = __KERNEL_DS;
-	info->regs.xds = __USER_DS;
-	info->regs.xes = __USER_DS;
-	info->regs.xgs = __KERNEL_PDA;
-}
-
-extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
-					       asmlinkage int (*callback)(struct unwind_frame_info *,
-									  void *arg),
-					       void *arg);
-
-static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info)
-{
-	return user_mode_vm(&info->regs)
-	       || info->regs.eip < PAGE_OFFSET
-	       || (info->regs.eip >= __fix_to_virt(FIX_VDSO)
-	           && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
-	       || info->regs.esp < PAGE_OFFSET;
-}
-
-#else
-
 #define UNW_PC(frame) ((void)(frame), 0)
 #define UNW_SP(frame) ((void)(frame), 0)
 #define UNW_FP(frame) ((void)(frame), 0)
@@ -99,6 +10,4 @@ static inline int arch_unw_user_mode(const void *info)
 	return 0;
 }
 
-#endif
-
 #endif /* _ASM_I386_UNWIND_H */
diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h
index 7189c79bc673..127f72e77419 100644
--- a/include/asm-s390/qdio.h
+++ b/include/asm-s390/qdio.h
@@ -34,6 +34,7 @@
 #define QDIO_QETH_QFMT 0
 #define QDIO_ZFCP_QFMT 1
 #define QDIO_IQDIO_QFMT 2
+#define QDIO_IQDIO_QFMT_ASYNCH 3
 
 struct qdio_buffer_element{
 	unsigned int flags;
diff --git a/include/asm-s390/reset.h b/include/asm-s390/reset.h
index 9b439cf67800..532e65a2aafc 100644
--- a/include/asm-s390/reset.h
+++ b/include/asm-s390/reset.h
@@ -19,5 +19,6 @@ extern void register_reset_call(struct reset_call *reset);
 extern void unregister_reset_call(struct reset_call *reset);
 extern void s390_reset_system(void);
 extern void (*s390_reset_mcck_handler)(void);
+extern void (*s390_reset_pgm_handler)(void);
 
 #endif /* _ASM_S390_RESET_H */
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index 04aa3318f76a..329e696e7751 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -14,6 +14,10 @@
 
 #ifdef __KERNEL__
 
+extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
+extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
+extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+
 /*
  * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
  * is in the highest of the four bytes and bit '31' is the high bit
@@ -22,134 +26,62 @@
  */
 static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___set_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
-
-	return mask != 0;
+	return ___set_bit(ADDR, mask) != 0;
 }
 
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___set_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
+	(void) ___set_bit(ADDR, mask);
 }
 
 static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___clear_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
-
-	return mask != 0;
+	return ___clear_bit(ADDR, mask) != 0;
 }
 
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___clear_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
+	(void) ___clear_bit(ADDR, mask);
 }
 
 static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___change_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
-
-	return mask != 0;
+	return ___change_bit(ADDR, mask) != 0;
 }
 
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___change_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
+	(void) ___change_bit(ADDR, mask);
 }
 
 #include <asm-generic/bitops/non-atomic.h>
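
Note: the sparc bitops change above drops the hand-scheduled inline assembly (operands pinned to %g1..%g7 and a manual %o7 adjustment around the call) in favour of plain C calls to the out-of-line helpers ___set_bit(), ___clear_bit() and ___change_bit(). The word/mask arithmetic is unchanged: bit 'nr' lives in 32-bit word nr >> 5 under mask 1 << (nr & 31). A minimal sketch of that addressing, as hypothetical stand-alone user-space code rather than kernel source:

#include <stdio.h>

/* Pick the 32-bit word and in-word mask for bit 'nr', mirroring the
 * ADDR/mask computation done by the patched helpers. */
static void locate_bit(unsigned long nr, unsigned long *word_index,
                       unsigned long *mask)
{
    *word_index = nr >> 5;        /* which 32-bit word */
    *mask = 1UL << (nr & 31);     /* which bit inside that word */
}

int main(void)
{
    unsigned long idx, mask;

    locate_bit(71, &idx, &mask);
    printf("bit 71 -> word %lu, mask 0x%08lx\n", idx, mask); /* word 2, 0x00000080 */
    return 0;
}
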
diff --git a/include/asm-sparc64/hw_irq.h b/include/asm-sparc64/hw_irq.h
index 599b3b073450..8e44a8360829 100644
--- a/include/asm-sparc64/hw_irq.h
+++ b/include/asm-sparc64/hw_irq.h
@@ -1,6 +1,4 @@
 #ifndef __ASM_SPARC64_HW_IRQ_H
 #define __ASM_SPARC64_HW_IRQ_H
 
-extern void hw_resend_irq(struct hw_interrupt_type *handler, unsigned int virt_irq);
-
 #endif
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index ced8cbde046d..0d3df76aa47f 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -5,6 +5,16 @@
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_MODULES
+# define PERCPU_MODULE_RESERVE 8192
+#else
+# define PERCPU_MODULE_RESERVE 0
+#endif
+
+#define PERCPU_ENOUGH_ROOM \
+	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+	 PERCPU_MODULE_RESERVE)
+
 extern void setup_per_cpu_areas(void);
 
 extern unsigned long __per_cpu_base;
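
Note: the new PERCPU_ENOUGH_ROOM definition sizes each CPU's per-cpu area as the cache-line-aligned static .data.percpu section plus an 8 KiB reserve for modules when CONFIG_MODULES is set. A small sketch of the arithmetic; the byte counts below are invented for illustration and do not come from a real kernel image:

#include <stdio.h>

#define ALIGN(x, a)             (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define SMP_CACHE_BYTES         64
#define PERCPU_MODULE_RESERVE   8192

int main(void)
{
    unsigned long static_percpu = 0x1844;   /* stand-in for __per_cpu_end - __per_cpu_start */
    unsigned long room = ALIGN(static_percpu, SMP_CACHE_BYTES) +
                         PERCPU_MODULE_RESERVE;

    printf("per-cpu area per CPU: %lu bytes\n", room);   /* 6272 + 8192 = 14464 */
    return 0;
}
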
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
index 2f6349e48717..02710f6a4560 100644
--- a/include/asm-x86_64/unwind.h
+++ b/include/asm-x86_64/unwind.h
@@ -1,100 +1,6 @@
 #ifndef _ASM_X86_64_UNWIND_H
 #define _ASM_X86_64_UNWIND_H
 
-/*
- * Copyright (C) 2002-2006 Novell, Inc.
- *	Jan Beulich <jbeulich@novell.com>
- * This code is released under version 2 of the GNU GPL.
- */
-
-#ifdef CONFIG_STACK_UNWIND
-
-#include <linux/sched.h>
-#include <asm/ptrace.h>
-#include <asm/uaccess.h>
-#include <asm/vsyscall.h>
-
-struct unwind_frame_info
-{
-	struct pt_regs regs;
-	struct task_struct *task;
-	unsigned call_frame:1;
-};
-
-#define UNW_PC(frame)        (frame)->regs.rip
-#define UNW_SP(frame)        (frame)->regs.rsp
-#ifdef CONFIG_FRAME_POINTER
-#define UNW_FP(frame)        (frame)->regs.rbp
-#define FRAME_RETADDR_OFFSET 8
-#define FRAME_LINK_OFFSET    0
-#define STACK_BOTTOM(tsk)    (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1))
-#define STACK_TOP(tsk)       ((tsk)->thread.rsp0)
-#endif
-/* Might need to account for the special exception and interrupt handling
-   stacks here, since normally
-	EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER,
-   but the construct is needed only for getting across the stack switch to
-   the interrupt stack - thus considering the IRQ stack itself is unnecessary,
-   and the overhead of comparing against all exception handling stacks seems
-   not desirable. */
-#define STACK_LIMIT(ptr)     (((ptr) - 1) & ~(THREAD_SIZE - 1))
-
-#define UNW_REGISTER_INFO \
-	PTREGS_INFO(rax), \
-	PTREGS_INFO(rdx), \
-	PTREGS_INFO(rcx), \
-	PTREGS_INFO(rbx), \
-	PTREGS_INFO(rsi), \
-	PTREGS_INFO(rdi), \
-	PTREGS_INFO(rbp), \
-	PTREGS_INFO(rsp), \
-	PTREGS_INFO(r8), \
-	PTREGS_INFO(r9), \
-	PTREGS_INFO(r10), \
-	PTREGS_INFO(r11), \
-	PTREGS_INFO(r12), \
-	PTREGS_INFO(r13), \
-	PTREGS_INFO(r14), \
-	PTREGS_INFO(r15), \
-	PTREGS_INFO(rip)
-
-#define UNW_DEFAULT_RA(raItem, dataAlign) \
-	((raItem).where == Memory && \
-	 !((raItem).value * (dataAlign) + 8))
-
-static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
-					    /*const*/ struct pt_regs *regs)
-{
-	info->regs = *regs;
-}
-
-static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
-{
-	extern const char thread_return[];
-
-	memset(&info->regs, 0, sizeof(info->regs));
-	info->regs.rip = (unsigned long)thread_return;
-	info->regs.cs = __KERNEL_CS;
-	__get_user(info->regs.rbp, (unsigned long *)info->task->thread.rsp);
-	info->regs.rsp = info->task->thread.rsp;
-	info->regs.ss = __KERNEL_DS;
-}
-
-extern int arch_unwind_init_running(struct unwind_frame_info *,
-				    int (*callback)(struct unwind_frame_info *,
-						    void *arg),
-				    void *arg);
-
-static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
-{
-	return user_mode(&info->regs)
-	       || (long)info->regs.rip >= 0
-	       || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END)
-	       || (long)info->regs.rsp >= 0;
-}
-
-#else
-
 #define UNW_PC(frame) ((void)(frame), 0UL)
 #define UNW_SP(frame) ((void)(frame), 0UL)
 
@@ -103,6 +9,4 @@ static inline int arch_unw_user_mode(const void *info)
 	return 0;
 }
 
-#endif
-
 #endif /* _ASM_X86_64_UNWIND_H */
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 3ea1cd58de97..10eb56b2940a 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -133,7 +133,7 @@ struct cn_callback_data {
 struct cn_callback_entry {
 	struct list_head callback_entry;
 	struct cn_callback *cb;
-	struct delayed_work work;
+	struct work_struct work;
 	struct cn_queue_dev *pdev;
 
 	struct cn_callback_id id;
diff --git a/include/linux/unwind.h b/include/linux/unwind.h
index 749928c161fb..7760860fa170 100644
--- a/include/linux/unwind.h
+++ b/include/linux/unwind.h
@@ -14,63 +14,6 @@
 
 struct module;
 
-#ifdef CONFIG_STACK_UNWIND
-
-#include <asm/unwind.h>
-
-#ifndef ARCH_UNWIND_SECTION_NAME
-#define ARCH_UNWIND_SECTION_NAME ".eh_frame"
-#endif
-
-/*
- * Initialize unwind support.
- */
-extern void unwind_init(void);
-extern void unwind_setup(void);
-
-#ifdef CONFIG_MODULES
-
-extern void *unwind_add_table(struct module *,
-			      const void *table_start,
-			      unsigned long table_size);
-
-extern void unwind_remove_table(void *handle, int init_only);
-
-#endif
-
-extern int unwind_init_frame_info(struct unwind_frame_info *,
-				  struct task_struct *,
-				  /*const*/ struct pt_regs *);
-
-/*
- * Prepare to unwind a blocked task.
- */
-extern int unwind_init_blocked(struct unwind_frame_info *,
-			       struct task_struct *);
-
-/*
- * Prepare to unwind the currently running thread.
- */
-extern int unwind_init_running(struct unwind_frame_info *,
-			       asmlinkage int (*callback)(struct unwind_frame_info *,
-							  void *arg),
-			       void *arg);
-
-/*
- * Unwind to previous to frame.  Returns 0 if successful, negative
- * number in case of an error.
- */
-extern int unwind(struct unwind_frame_info *);
-
-/*
- * Unwind until the return pointer is in user-land (or until an error
- * occurs).  Returns 0 if successful, negative number in case of
- * error.
- */
-extern int unwind_to_user(struct unwind_frame_info *);
-
-#else
-
 struct unwind_frame_info {};
 
 static inline void unwind_init(void) {}
@@ -85,12 +28,12 @@ static inline void *unwind_add_table(struct module *mod,
 	return NULL;
 }
 
-#endif
-
 static inline void unwind_remove_table(void *handle, int init_only)
 {
 }
 
+#endif
+
 static inline int unwind_init_frame_info(struct unwind_frame_info *info,
 					 struct task_struct *tsk,
 					 const struct pt_regs *regs)
@@ -122,6 +65,4 @@ static inline int unwind_to_user(struct unwind_frame_info *info)
 	return -ENOSYS;
 }
 
-#endif
-
 #endif /* _LINUX_UNWIND_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index edef8d50b26b..2a7b38d87018 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -8,16 +8,21 @@
 #include <linux/timer.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <asm/atomic.h>
 
 struct workqueue_struct;
 
 struct work_struct;
 typedef void (*work_func_t)(struct work_struct *work);
 
+/*
+ * The first word is the work queue pointer and the flags rolled into
+ * one
+ */
+#define work_data_bits(work) ((unsigned long *)(&(work)->data))
+
 struct work_struct {
-	/* the first word is the work queue pointer and the flags rolled into
-	 * one */
-	unsigned long management;
+	atomic_long_t data;
 #define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
 #define WORK_STRUCT_NOAUTOREL 1		/* F if work item automatically released on exec */
 #define WORK_STRUCT_FLAG_MASK (3UL)
@@ -26,6 +31,9 @@ struct work_struct {
 	work_func_t func;
 };
 
+#define WORK_DATA_INIT(autorelease) \
+	ATOMIC_LONG_INIT((autorelease) << WORK_STRUCT_NOAUTOREL)
+
 struct delayed_work {
 	struct work_struct work;
 	struct timer_list timer;
@@ -36,13 +44,13 @@ struct execute_work {
 };
 
 #define __WORK_INITIALIZER(n, f) {				\
-	.management = 0,					\
+	.data = WORK_DATA_INIT(0),				\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	}
 
 #define __WORK_INITIALIZER_NAR(n, f) {				\
-	.management = (1 << WORK_STRUCT_NOAUTOREL),		\
+	.data = WORK_DATA_INIT(1),				\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	}
@@ -82,17 +90,21 @@ struct execute_work {
 
 /*
  * initialize all of a work item in one go
+ *
+ * NOTE! No point in using "atomic_long_set()": using a direct
+ * assignment of the work data initializer allows the compiler
+ * to generate better code.
  */
 #define INIT_WORK(_work, _func)					\
 	do {							\
-		(_work)->management = 0;			\
+		(_work)->data = (atomic_long_t) WORK_DATA_INIT(0);	\
 		INIT_LIST_HEAD(&(_work)->entry);		\
 		PREPARE_WORK((_work), (_func));			\
 	} while (0)
 
 #define INIT_WORK_NAR(_work, _func)				\
 	do {							\
-		(_work)->management = (1 << WORK_STRUCT_NOAUTOREL);	\
+		(_work)->data = (atomic_long_t) WORK_DATA_INIT(1);	\
 		INIT_LIST_HEAD(&(_work)->entry);		\
 		PREPARE_WORK((_work), (_func));			\
 	} while (0)
@@ -114,15 +126,15 @@ struct execute_work {
  * @work: The work item in question
  */
 #define work_pending(work) \
-	test_bit(WORK_STRUCT_PENDING, &(work)->management)
+	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))
 
 /**
  * delayed_work_pending - Find out whether a delayable work item is currently
  * pending
  * @work: The work item in question
  */
-#define delayed_work_pending(work) \
-	test_bit(WORK_STRUCT_PENDING, &(work)->work.management)
+#define delayed_work_pending(w) \
+	work_pending(&(w)->work)
 
 /**
  * work_release - Release a work item under execution
@@ -143,7 +155,7 @@ struct execute_work {
  * This should also be used to release a delayed work item.
  */
 #define work_release(work) \
-	clear_bit(WORK_STRUCT_PENDING, &(work)->management)
+	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
 
 
 extern struct workqueue_struct *__create_workqueue(const char *name,
@@ -188,7 +200,7 @@ static inline int cancel_delayed_work(struct delayed_work *work)
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
-		clear_bit(WORK_STRUCT_PENDING, &work->work.management);
+		work_release(&work->work);
 	return ret;
 }
 
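Note: with this change the pending/no-autorelease flags share a single word with the rest of the work data: work_struct.management becomes an atomic_long_t named data, and all the flag helpers go through work_data_bits(), which reinterprets that atomic word as an unsigned long bitmap for test_bit()/clear_bit(). A minimal user-space sketch of the same packing trick; the fake_* names are illustrative stand-ins, not the kernel API:

#include <stdio.h>

/* Stand-in for atomic_long_t; the kernel uses real atomic ops here. */
typedef struct { long counter; } fake_atomic_long_t;

#define WORK_STRUCT_PENDING   0
#define WORK_STRUCT_NOAUTOREL 1

struct fake_work {
    fake_atomic_long_t data;    /* flags live in the low bits of this word */
};

/* Same idea as work_data_bits(): view the atomic word as a bitmap. */
#define fake_work_data_bits(w)  ((unsigned long *)&(w)->data)

static int fake_test_bit(int nr, const unsigned long *addr)
{
    return (int)((*addr >> nr) & 1UL);
}

int main(void)
{
    struct fake_work w = { { 1L << WORK_STRUCT_NOAUTOREL } };

    printf("pending=%d noautorel=%d\n",
           fake_test_bit(WORK_STRUCT_PENDING, fake_work_data_bits(&w)),
           fake_test_bit(WORK_STRUCT_NOAUTOREL, fake_work_data_bits(&w)));
    return 0;
}
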
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 14b72d868f03..5ae10dd2e32e 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -277,7 +277,7 @@ struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
 extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
 extern void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
 extern void ax25_destroy_socket(ax25_cb *);
-extern ax25_cb *ax25_create_cb(void);
+extern ax25_cb * __must_check ax25_create_cb(void);
 extern void ax25_fillin_cb(ax25_cb *, ax25_dev *);
 extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
 
@@ -333,11 +333,25 @@ extern void ax25_ds_t3timer_expiry(ax25_cb *);
 extern void ax25_ds_idletimer_expiry(ax25_cb *);
 
 /* ax25_iface.c */
-extern int ax25_protocol_register(unsigned int, int (*)(struct sk_buff *, ax25_cb *));
+
+struct ax25_protocol {
+	struct ax25_protocol *next;
+	unsigned int pid;
+	int (*func)(struct sk_buff *, ax25_cb *);
+};
+
+extern void ax25_register_pid(struct ax25_protocol *ap);
 extern void ax25_protocol_release(unsigned int);
-extern int ax25_linkfail_register(void (*)(ax25_cb *, int));
-extern void ax25_linkfail_release(void (*)(ax25_cb *, int));
-extern int ax25_listen_register(ax25_address *, struct net_device *);
+
+struct ax25_linkfail {
+	struct hlist_node lf_node;
+	void (*func)(ax25_cb *, int);
+};
+
+extern void ax25_linkfail_register(struct ax25_linkfail *lf);
+extern void ax25_linkfail_release(struct ax25_linkfail *lf);
+extern int __must_check ax25_listen_register(ax25_address *,
+	struct net_device *);
 extern void ax25_listen_release(ax25_address *, struct net_device *);
 extern int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
 extern int ax25_listen_mine(ax25_address *, struct net_device *);
@@ -415,7 +429,7 @@ extern unsigned long ax25_display_timer(struct timer_list *);
 /* ax25_uid.c */
 extern int ax25_uid_policy;
 extern ax25_uid_assoc *ax25_findbyuid(uid_t);
-extern int ax25_uid_ioctl(int, struct sockaddr_ax25 *);
+extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
 extern struct file_operations ax25_uid_fops;
 extern void ax25_uid_free(void);
 
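Note: the ax25_iface.c interface moves from registering bare pid/handler pairs to registering caller-owned descriptor structures: ax25_protocol_register() is replaced by ax25_register_pid() taking a struct ax25_protocol, and the link-failure hooks now take a struct ax25_linkfail. A hedged sketch of how a protocol layered on top of AX.25 might adapt; the handler stubs and the AX25_P_NETROM pid are only illustrative, not taken from this diff:

#include <linux/init.h>
#include <linux/skbuff.h>
#include <net/ax25.h>

/* Stub frame handler and link-failure callback for the sketch. */
static int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	return 0;		/* stub: frame not consumed */
}

static void nr_link_failed(ax25_cb *ax25, int reason)
{
}

static struct ax25_protocol nr_pid = {
	.pid	= AX25_P_NETROM,
	.func	= nr_route_frame,
};

static struct ax25_linkfail nr_linkfail_notifier = {
	.func	= nr_link_failed,
};

static int __init nr_proto_init(void)
{
	ax25_register_pid(&nr_pid);	/* was: ax25_protocol_register(pid, func) */
	ax25_linkfail_register(&nr_linkfail_notifier);
	return 0;
}

static void __exit nr_proto_exit(void)
{
	ax25_protocol_release(AX25_P_NETROM);
	ax25_linkfail_release(&nr_linkfail_notifier);
}
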
diff --git a/include/net/rose.h b/include/net/rose.h
index 012b09ed2401..4c05a88b921b 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -188,13 +188,13 @@ extern void rose_kick(struct sock *);
 extern void rose_enquiry_response(struct sock *);
 
 /* rose_route.c */
-extern struct rose_neigh *rose_loopback_neigh;
+extern struct rose_neigh rose_loopback_neigh;
 extern struct file_operations rose_neigh_fops;
 extern struct file_operations rose_nodes_fops;
 extern struct file_operations rose_routes_fops;
 
-extern int rose_add_loopback_neigh(void);
-extern int rose_add_loopback_node(rose_address *);
+extern void rose_add_loopback_neigh(void);
+extern int __must_check rose_add_loopback_node(rose_address *);
 extern void rose_del_loopback_node(rose_address *);
 extern void rose_rt_device_down(struct net_device *);
 extern void rose_link_device_down(struct net_device *);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 3c2e10574b23..0bfa3328d686 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1639,7 +1639,14 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+	else {
+		dma_addr_t handle;
+		void *ret;
+
+		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
+		*dma_handle = handle;
+		return ret;
+	}
 }
 
 /**
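
Note: the ib_dma_alloc_coherent() change stops passing the caller's u64 *dma_handle straight into dma_alloc_coherent(), which expects a dma_addr_t * and may write only 32 bits on platforms where dma_addr_t is narrower than u64; the fallback path now fills a correctly typed local and widens it on assignment. A small user-space sketch of the width mismatch it guards against; the fake_* types and function are hypothetical:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t fake_dma_addr_t;    /* pretend dma_addr_t is 32-bit here */

/* Stand-in for dma_alloc_coherent(): writes a 32-bit bus address. */
static void *fake_alloc(fake_dma_addr_t *handle)
{
    *handle = 0x12345678u;
    return (void *)0;
}

int main(void)
{
    uint64_t dma_handle = 0xdeadbeefcafef00dULL;   /* caller's 64-bit slot */
    fake_dma_addr_t tmp;

    /* Wrong: casting &dma_handle to (fake_dma_addr_t *) would update only
     * half of the 64-bit variable and leave stale bits in the other half.
     * Right (what the patch does): use a correctly sized local, then assign
     * so the compiler performs the zero-extension. */
    fake_alloc(&tmp);
    dma_handle = tmp;

    printf("handle = 0x%llx\n", (unsigned long long)dma_handle);
    return 0;
}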