aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/Kbuild2
-rw-r--r--include/acpi/acmacros.h2
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/asm-alpha/Kbuild5
-rw-r--r--include/asm-alpha/rwsem.h14
-rw-r--r--include/asm-arm/Kbuild1
-rw-r--r--include/asm-arm26/Kbuild1
-rw-r--r--include/asm-cris/Kbuild1
-rw-r--r--include/asm-frv/Kbuild1
-rw-r--r--include/asm-generic/Kbuild3
-rw-r--r--include/asm-generic/Kbuild.asm11
-rw-r--r--include/asm-generic/cputime.h2
-rw-r--r--include/asm-generic/mutex-null.h15
-rw-r--r--include/asm-generic/percpu.h2
-rw-r--r--include/asm-h8300/Kbuild1
-rw-r--r--include/asm-i386/Kbuild5
-rw-r--r--include/asm-i386/irqflags.h127
-rw-r--r--include/asm-i386/rwsem.h42
-rw-r--r--include/asm-i386/spinlock.h12
-rw-r--r--include/asm-i386/system.h20
-rw-r--r--include/asm-ia64/Kbuild7
-rw-r--r--include/asm-ia64/irq.h2
-rw-r--r--include/asm-ia64/percpu.h1
-rw-r--r--include/asm-ia64/rwsem.h18
-rw-r--r--include/asm-ia64/thread_info.h2
-rw-r--r--include/asm-m32r/Kbuild1
-rw-r--r--include/asm-m32r/system.h2
-rw-r--r--include/asm-m68k/Kbuild1
-rw-r--r--include/asm-m68knommu/Kbuild1
-rw-r--r--include/asm-mips/Kbuild1
-rw-r--r--include/asm-parisc/Kbuild1
-rw-r--r--include/asm-powerpc/Kbuild10
-rw-r--r--include/asm-powerpc/i8259.h8
-rw-r--r--include/asm-powerpc/irq.h358
-rw-r--r--include/asm-powerpc/irqflags.h31
-rw-r--r--include/asm-powerpc/machdep.h2
-rw-r--r--include/asm-powerpc/mpic.h67
-rw-r--r--include/asm-powerpc/percpu.h1
-rw-r--r--include/asm-powerpc/prom.h98
-rw-r--r--include/asm-powerpc/rwsem.h18
-rw-r--r--include/asm-powerpc/spu.h1
-rw-r--r--include/asm-s390/Kbuild4
-rw-r--r--include/asm-s390/irqflags.h50
-rw-r--r--include/asm-s390/percpu.h1
-rw-r--r--include/asm-s390/rwsem.h31
-rw-r--r--include/asm-s390/semaphore.h3
-rw-r--r--include/asm-s390/system.h32
-rw-r--r--include/asm-sh/Kbuild1
-rw-r--r--include/asm-sh/rwsem.h18
-rw-r--r--include/asm-sh/system.h2
-rw-r--r--include/asm-sh64/Kbuild1
-rw-r--r--include/asm-sparc/Kbuild6
-rw-r--r--include/asm-sparc64/Kbuild10
-rw-r--r--include/asm-sparc64/percpu.h1
-rw-r--r--include/asm-um/Kbuild1
-rw-r--r--include/asm-v850/Kbuild1
-rw-r--r--include/asm-x86_64/Kbuild11
-rw-r--r--include/asm-x86_64/irqflags.h141
-rw-r--r--include/asm-x86_64/kdebug.h2
-rw-r--r--include/asm-x86_64/percpu.h2
-rw-r--r--include/asm-x86_64/system.h38
-rw-r--r--include/asm-xtensa/Kbuild1
-rw-r--r--include/asm-xtensa/rwsem.h18
-rw-r--r--include/linux/Kbuild63
-rw-r--r--include/linux/byteorder/Kbuild2
-rw-r--r--include/linux/completion.h12
-rw-r--r--include/linux/dcache.h12
-rw-r--r--include/linux/debug_locks.h69
-rw-r--r--include/linux/dmaengine.h43
-rw-r--r--include/linux/dvb/Kbuild2
-rw-r--r--include/linux/fs.h38
-rw-r--r--include/linux/hardirq.h27
-rw-r--r--include/linux/hdlc/Kbuild1
-rw-r--r--include/linux/hrtimer.h1
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/idr.h2
-rw-r--r--include/linux/init_task.h15
-rw-r--r--include/linux/interrupt.h77
-rw-r--r--include/linux/ioport.h1
-rw-r--r--include/linux/irqflags.h96
-rw-r--r--include/linux/isdn/Kbuild1
-rw-r--r--include/linux/kallsyms.h23
-rw-r--r--include/linux/lockdep.h353
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/mmzone.h6
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/mtd/bbm.h35
-rw-r--r--include/linux/mtd/mtd.h4
-rw-r--r--include/linux/mtd/nand.h16
-rw-r--r--include/linux/mtd/onenand.h77
-rw-r--r--include/linux/mutex-debug.h18
-rw-r--r--include/linux/mutex.h37
-rw-r--r--include/linux/netfilter/Kbuild11
-rw-r--r--include/linux/netfilter_arp/Kbuild2
-rw-r--r--include/linux/netfilter_bridge/Kbuild4
-rw-r--r--include/linux/netfilter_ipv4/Kbuild21
-rw-r--r--include/linux/netfilter_ipv6/Kbuild6
-rw-r--r--include/linux/nfsd/Kbuild2
-rw-r--r--include/linux/notifier.h2
-rw-r--r--include/linux/poison.h5
-rw-r--r--include/linux/raid/Kbuild1
-rw-r--r--include/linux/rtmutex.h10
-rw-r--r--include/linux/rwsem-spinlock.h27
-rw-r--r--include/linux/rwsem.h83
-rw-r--r--include/linux/sched.h86
-rw-r--r--include/linux/seqlock.h12
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/skbuff.h3
-rw-r--r--include/linux/spinlock.h63
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/spinlock_api_up.h1
-rw-r--r--include/linux/spinlock_types.h47
-rw-r--r--include/linux/spinlock_types_up.h9
-rw-r--r--include/linux/spinlock_up.h1
-rw-r--r--include/linux/stacktrace.h20
-rw-r--r--include/linux/sunrpc/Kbuild1
-rw-r--r--include/linux/swap.h1
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/tc_act/Kbuild1
-rw-r--r--include/linux/tc_ematch/Kbuild1
-rw-r--r--include/linux/vermagic.h2
-rw-r--r--include/linux/wait.h8
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/mtd/Kbuild2
-rw-r--r--include/mtd/mtd-abi.h2
-rw-r--r--include/net/af_unix.h3
-rw-r--r--include/net/ax25.h24
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/hci.h73
-rw-r--r--include/net/bluetooth/hci_core.h59
-rw-r--r--include/net/sock.h19
-rw-r--r--include/rdma/Kbuild1
-rw-r--r--include/scsi/Kbuild2
-rw-r--r--include/scsi/iscsi_if.h24
-rw-r--r--include/scsi/libiscsi.h15
-rw-r--r--include/scsi/scsi_cmnd.h2
-rw-r--r--include/scsi/scsi_host.h6
-rw-r--r--include/scsi/scsi_transport_iscsi.h48
-rw-r--r--include/scsi/scsi_transport_sas.h37
-rw-r--r--include/sound/Kbuild2
-rw-r--r--include/video/Kbuild1
141 files changed, 2418 insertions, 665 deletions
diff --git a/include/Kbuild b/include/Kbuild
new file mode 100644
index 000000000000..cb2534800b19
--- /dev/null
+++ b/include/Kbuild
@@ -0,0 +1,2 @@
1header-y += asm-generic/ linux/ scsi/ sound/ mtd/ rdma/ video/
2header-y += asm-$(ARCH)/
diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h
index 4bb38068f40d..f1ac6109556e 100644
--- a/include/acpi/acmacros.h
+++ b/include/acpi/acmacros.h
@@ -726,7 +726,7 @@
726 726
727#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__) 727#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
728#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__) 728#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
729#define ACPI_FREE(a) acpi_os_free(a) 729#define ACPI_FREE(a) kfree(a)
730#define ACPI_MEM_TRACKING(a) 730#define ACPI_MEM_TRACKING(a)
731 731
732#else 732#else
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 89bc4a16c2e8..0cd63bce0ae4 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -143,8 +143,6 @@ void acpi_os_release_mutex(acpi_mutex handle);
143 */ 143 */
144void *acpi_os_allocate(acpi_size size); 144void *acpi_os_allocate(acpi_size size);
145 145
146void acpi_os_free(void *memory);
147
148acpi_status 146acpi_status
149acpi_os_map_memory(acpi_physical_address physical_address, 147acpi_os_map_memory(acpi_physical_address physical_address,
150 acpi_size size, void __iomem ** logical_address); 148 acpi_size size, void __iomem ** logical_address);
diff --git a/include/asm-alpha/Kbuild b/include/asm-alpha/Kbuild
new file mode 100644
index 000000000000..e57fd57538b8
--- /dev/null
+++ b/include/asm-alpha/Kbuild
@@ -0,0 +1,5 @@
1include include/asm-generic/Kbuild.asm
2
3unifdef-y += console.h fpu.h sysinfo.h
4
5header-y += gentrap.h regdef.h pal.h reg.h
diff --git a/include/asm-alpha/rwsem.h b/include/asm-alpha/rwsem.h
index fafdd4f7010a..1570c0b54336 100644
--- a/include/asm-alpha/rwsem.h
+++ b/include/asm-alpha/rwsem.h
@@ -36,20 +36,11 @@ struct rw_semaphore {
36#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 36#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
37 spinlock_t wait_lock; 37 spinlock_t wait_lock;
38 struct list_head wait_list; 38 struct list_head wait_list;
39#if RWSEM_DEBUG
40 int debug;
41#endif
42}; 39};
43 40
44#if RWSEM_DEBUG
45#define __RWSEM_DEBUG_INIT , 0
46#else
47#define __RWSEM_DEBUG_INIT /* */
48#endif
49
50#define __RWSEM_INITIALIZER(name) \ 41#define __RWSEM_INITIALIZER(name) \
51 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 42 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
52 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT } 43 LIST_HEAD_INIT((name).wait_list) }
53 44
54#define DECLARE_RWSEM(name) \ 45#define DECLARE_RWSEM(name) \
55 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 46 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -59,9 +50,6 @@ static inline void init_rwsem(struct rw_semaphore *sem)
59 sem->count = RWSEM_UNLOCKED_VALUE; 50 sem->count = RWSEM_UNLOCKED_VALUE;
60 spin_lock_init(&sem->wait_lock); 51 spin_lock_init(&sem->wait_lock);
61 INIT_LIST_HEAD(&sem->wait_list); 52 INIT_LIST_HEAD(&sem->wait_list);
62#if RWSEM_DEBUG
63 sem->debug = 0;
64#endif
65} 53}
66 54
67static inline void __down_read(struct rw_semaphore *sem) 55static inline void __down_read(struct rw_semaphore *sem)
diff --git a/include/asm-arm/Kbuild b/include/asm-arm/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-arm/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-arm26/Kbuild b/include/asm-arm26/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-arm26/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-cris/Kbuild b/include/asm-cris/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-cris/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-frv/Kbuild b/include/asm-frv/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-frv/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
new file mode 100644
index 000000000000..70594b275a6e
--- /dev/null
+++ b/include/asm-generic/Kbuild
@@ -0,0 +1,3 @@
1header-y += atomic.h errno-base.h errno.h fcntl.h ioctl.h ipc.h mman.h \
2 signal.h statfs.h
3unifdef-y := resource.h siginfo.h
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
new file mode 100644
index 000000000000..d8d0bcecd23f
--- /dev/null
+++ b/include/asm-generic/Kbuild.asm
@@ -0,0 +1,11 @@
1unifdef-y += a.out.h auxvec.h byteorder.h errno.h fcntl.h ioctl.h \
2 ioctls.h ipcbuf.h irq.h mman.h msgbuf.h param.h poll.h \
3 posix_types.h ptrace.h resource.h sembuf.h shmbuf.h shmparam.h \
4 sigcontext.h siginfo.h signal.h socket.h sockios.h stat.h \
5 statfs.h termbits.h termios.h timex.h types.h unistd.h user.h
6
7# These really shouldn't be exported
8unifdef-y += atomic.h io.h
9
10# These probably shouldn't be exported
11unifdef-y += elf.h page.h
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 6f178563e336..09204e40d663 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -24,7 +24,9 @@ typedef u64 cputime64_t;
24 24
25#define cputime64_zero (0ULL) 25#define cputime64_zero (0ULL)
26#define cputime64_add(__a, __b) ((__a) + (__b)) 26#define cputime64_add(__a, __b) ((__a) + (__b))
27#define cputime64_sub(__a, __b) ((__a) - (__b))
27#define cputime64_to_jiffies64(__ct) (__ct) 28#define cputime64_to_jiffies64(__ct) (__ct)
29#define jiffies64_to_cputime64(__jif) (__jif)
28#define cputime_to_cputime64(__ct) ((u64) __ct) 30#define cputime_to_cputime64(__ct) ((u64) __ct)
29 31
30 32
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
index 5cf8b7ce0c45..254a126ede5c 100644
--- a/include/asm-generic/mutex-null.h
+++ b/include/asm-generic/mutex-null.h
@@ -10,15 +10,10 @@
10#ifndef _ASM_GENERIC_MUTEX_NULL_H 10#ifndef _ASM_GENERIC_MUTEX_NULL_H
11#define _ASM_GENERIC_MUTEX_NULL_H 11#define _ASM_GENERIC_MUTEX_NULL_H
12 12
13/* extra parameter only needed for mutex debugging: */ 13#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
14#ifndef __IP__ 14#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count)
15# define __IP__ 15#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
16#endif 16#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
17 17#define __mutex_slowpath_needs_to_unlock() 1
18#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count __RET_IP__)
19#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count __RET_IP__)
20#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count __RET_IP__)
21#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
22#define __mutex_slowpath_needs_to_unlock() 1
23 18
24#endif 19#endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index c74521157461..e160e04290fb 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -7,6 +7,8 @@
7 7
8extern unsigned long __per_cpu_offset[NR_CPUS]; 8extern unsigned long __per_cpu_offset[NR_CPUS];
9 9
10#define per_cpu_offset(x) (__per_cpu_offset[x])
11
10/* Separate out the type, so (int[3], foo) works. */ 12/* Separate out the type, so (int[3], foo) works. */
11#define DEFINE_PER_CPU(type, name) \ 13#define DEFINE_PER_CPU(type, name) \
12 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name 14 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
diff --git a/include/asm-h8300/Kbuild b/include/asm-h8300/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-h8300/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
new file mode 100644
index 000000000000..c064a8e9170f
--- /dev/null
+++ b/include/asm-i386/Kbuild
@@ -0,0 +1,5 @@
1include include/asm-generic/Kbuild.asm
2
3header-y += boot.h cpufeature.h debugreg.h ldt.h setup.h ucontext.h
4
5unifdef-y += mtrr.h vm86.h
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
new file mode 100644
index 000000000000..e1bdb97c07fa
--- /dev/null
+++ b/include/asm-i386/irqflags.h
@@ -0,0 +1,127 @@
1/*
2 * include/asm-i386/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14
15static inline unsigned long __raw_local_save_flags(void)
16{
17 unsigned long flags;
18
19 __asm__ __volatile__(
20 "pushfl ; popl %0"
21 : "=g" (flags)
22 : /* no input */
23 );
24
25 return flags;
26}
27
28#define raw_local_save_flags(flags) \
29 do { (flags) = __raw_local_save_flags(); } while (0)
30
31static inline void raw_local_irq_restore(unsigned long flags)
32{
33 __asm__ __volatile__(
34 "pushl %0 ; popfl"
35 : /* no output */
36 :"g" (flags)
37 :"memory", "cc"
38 );
39}
40
41static inline void raw_local_irq_disable(void)
42{
43 __asm__ __volatile__("cli" : : : "memory");
44}
45
46static inline void raw_local_irq_enable(void)
47{
48 __asm__ __volatile__("sti" : : : "memory");
49}
50
51/*
52 * Used in the idle loop; sti takes one instruction cycle
53 * to complete:
54 */
55static inline void raw_safe_halt(void)
56{
57 __asm__ __volatile__("sti; hlt" : : : "memory");
58}
59
60/*
61 * Used when interrupts are already enabled or to
62 * shutdown the processor:
63 */
64static inline void halt(void)
65{
66 __asm__ __volatile__("hlt": : :"memory");
67}
68
69static inline int raw_irqs_disabled_flags(unsigned long flags)
70{
71 return !(flags & (1 << 9));
72}
73
74static inline int raw_irqs_disabled(void)
75{
76 unsigned long flags = __raw_local_save_flags();
77
78 return raw_irqs_disabled_flags(flags);
79}
80
81/*
82 * For spinlocks, etc:
83 */
84static inline unsigned long __raw_local_irq_save(void)
85{
86 unsigned long flags = __raw_local_save_flags();
87
88 raw_local_irq_disable();
89
90 return flags;
91}
92
93#define raw_local_irq_save(flags) \
94 do { (flags) = __raw_local_irq_save(); } while (0)
95
96#endif /* __ASSEMBLY__ */
97
98/*
99 * Do the CPU's IRQ-state tracing from assembly code. We call a
100 * C function, so save all the C-clobbered registers:
101 */
102#ifdef CONFIG_TRACE_IRQFLAGS
103
104# define TRACE_IRQS_ON \
105 pushl %eax; \
106 pushl %ecx; \
107 pushl %edx; \
108 call trace_hardirqs_on; \
109 popl %edx; \
110 popl %ecx; \
111 popl %eax;
112
113# define TRACE_IRQS_OFF \
114 pushl %eax; \
115 pushl %ecx; \
116 pushl %edx; \
117 call trace_hardirqs_off; \
118 popl %edx; \
119 popl %ecx; \
120 popl %eax;
121
122#else
123# define TRACE_IRQS_ON
124# define TRACE_IRQS_OFF
125#endif
126
127#endif
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index be4ab859238e..2f07601562e7 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -40,6 +40,7 @@
40 40
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/spinlock.h> 42#include <linux/spinlock.h>
43#include <linux/lockdep.h>
43 44
44struct rwsem_waiter; 45struct rwsem_waiter;
45 46
@@ -61,36 +62,34 @@ struct rw_semaphore {
61#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 62#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
62 spinlock_t wait_lock; 63 spinlock_t wait_lock;
63 struct list_head wait_list; 64 struct list_head wait_list;
64#if RWSEM_DEBUG 65#ifdef CONFIG_DEBUG_LOCK_ALLOC
65 int debug; 66 struct lockdep_map dep_map;
66#endif 67#endif
67}; 68};
68 69
69/* 70#ifdef CONFIG_DEBUG_LOCK_ALLOC
70 * initialisation 71# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
71 */
72#if RWSEM_DEBUG
73#define __RWSEM_DEBUG_INIT , 0
74#else 72#else
75#define __RWSEM_DEBUG_INIT /* */ 73# define __RWSEM_DEP_MAP_INIT(lockname)
76#endif 74#endif
77 75
76
78#define __RWSEM_INITIALIZER(name) \ 77#define __RWSEM_INITIALIZER(name) \
79{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ 78{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
80 __RWSEM_DEBUG_INIT } 79 __RWSEM_DEP_MAP_INIT(name) }
81 80
82#define DECLARE_RWSEM(name) \ 81#define DECLARE_RWSEM(name) \
83 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 82 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
84 83
85static inline void init_rwsem(struct rw_semaphore *sem) 84extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
86{ 85 struct lock_class_key *key);
87 sem->count = RWSEM_UNLOCKED_VALUE; 86
88 spin_lock_init(&sem->wait_lock); 87#define init_rwsem(sem) \
89 INIT_LIST_HEAD(&sem->wait_list); 88do { \
90#if RWSEM_DEBUG 89 static struct lock_class_key __key; \
91 sem->debug = 0; 90 \
92#endif 91 __init_rwsem((sem), #sem, &__key); \
93} 92} while (0)
94 93
95/* 94/*
96 * lock for reading 95 * lock for reading
@@ -143,7 +142,7 @@ LOCK_PREFIX " cmpxchgl %2,%0\n\t"
143/* 142/*
144 * lock for writing 143 * lock for writing
145 */ 144 */
146static inline void __down_write(struct rw_semaphore *sem) 145static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
147{ 146{
148 int tmp; 147 int tmp;
149 148
@@ -167,6 +166,11 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
167 : "memory", "cc"); 166 : "memory", "cc");
168} 167}
169 168
169static inline void __down_write(struct rw_semaphore *sem)
170{
171 __down_write_nested(sem, 0);
172}
173
170/* 174/*
171 * trylock for writing -- returns 1 if successful, 0 if contention 175 * trylock for writing -- returns 1 if successful, 0 if contention
172 */ 176 */
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 04ba30234c48..87c40f830653 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -31,6 +31,11 @@
31 "jmp 1b\n" \ 31 "jmp 1b\n" \
32 "3:\n\t" 32 "3:\n\t"
33 33
34/*
35 * NOTE: there's an irqs-on section here, which normally would have to be
36 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
37 * __raw_spin_lock_string_flags().
38 */
34#define __raw_spin_lock_string_flags \ 39#define __raw_spin_lock_string_flags \
35 "\n1:\t" \ 40 "\n1:\t" \
36 "lock ; decb %0\n\t" \ 41 "lock ; decb %0\n\t" \
@@ -63,6 +68,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
63 "=m" (lock->slock) : : "memory"); 68 "=m" (lock->slock) : : "memory");
64} 69}
65 70
71/*
72 * It is easier for the lock validator if interrupts are not re-enabled
73 * in the middle of a lock-acquire. This is a performance feature anyway
74 * so we turn it off:
75 */
76#ifndef CONFIG_PROVE_LOCKING
66static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 77static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
67{ 78{
68 alternative_smp( 79 alternative_smp(
@@ -70,6 +81,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
70 __raw_spin_lock_string_up, 81 __raw_spin_lock_string_up,
71 "=m" (lock->slock) : "r" (flags) : "memory"); 82 "=m" (lock->slock) : "r" (flags) : "memory");
72} 83}
84#endif
73 85
74static inline int __raw_spin_trylock(raw_spinlock_t *lock) 86static inline int __raw_spin_trylock(raw_spinlock_t *lock)
75{ 87{
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index cab0180567f9..db398d88b1d9 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -456,25 +456,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
456 456
457#define set_wmb(var, value) do { var = value; wmb(); } while (0) 457#define set_wmb(var, value) do { var = value; wmb(); } while (0)
458 458
459/* interrupt control.. */ 459#include <linux/irqflags.h>
460#define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
461#define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
462#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
463#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
464/* used in the idle loop; sti takes one instruction cycle to complete */
465#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
466/* used when interrupts are already enabled or to shutdown the processor */
467#define halt() __asm__ __volatile__("hlt": : :"memory")
468
469#define irqs_disabled() \
470({ \
471 unsigned long flags; \
472 local_save_flags(flags); \
473 !(flags & (1<<9)); \
474})
475
476/* For spinlocks etc */
477#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
478 460
479/* 461/*
480 * disable hlt during certain critical i/o operations 462 * disable hlt during certain critical i/o operations
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
new file mode 100644
index 000000000000..85d6f8005eb4
--- /dev/null
+++ b/include/asm-ia64/Kbuild
@@ -0,0 +1,7 @@
1include include/asm-generic/Kbuild.asm
2
3header-y += break.h fpu.h fpswa.h gcc_intrin.h ia64regs.h \
4 intel_intrin.h intrinsics.h perfmon_default_smpl.h \
5 ptrace_offsets.h rse.h setup.h ucontext.h
6
7unifdef-y += perfmon.h
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index 8acb00190d5a..79479e2c6966 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -14,8 +14,6 @@
14#define NR_IRQS 256 14#define NR_IRQS 256
15#define NR_IRQ_VECTORS NR_IRQS 15#define NR_IRQ_VECTORS NR_IRQS
16 16
17#define IRQF_PERCPU 0x02000000
18
19static __inline__ int 17static __inline__ int
20irq_canonicalize (int irq) 18irq_canonicalize (int irq)
21{ 19{
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index 24d898b650c5..fbe5cf3ab8dc 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -36,6 +36,7 @@
36#ifdef CONFIG_SMP 36#ifdef CONFIG_SMP
37 37
38extern unsigned long __per_cpu_offset[NR_CPUS]; 38extern unsigned long __per_cpu_offset[NR_CPUS];
39#define per_cpu_offset(x) (__per_cpu_offset(x))
39 40
40/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */ 41/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
41DECLARE_PER_CPU(unsigned long, local_per_cpu_offset); 42DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
index 1327c91ea39c..2d1640cc240a 100644
--- a/include/asm-ia64/rwsem.h
+++ b/include/asm-ia64/rwsem.h
@@ -33,9 +33,6 @@ struct rw_semaphore {
33 signed long count; 33 signed long count;
34 spinlock_t wait_lock; 34 spinlock_t wait_lock;
35 struct list_head wait_list; 35 struct list_head wait_list;
36#if RWSEM_DEBUG
37 int debug;
38#endif
39}; 36};
40 37
41#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000) 38#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000)
@@ -45,19 +42,9 @@ struct rw_semaphore {
45#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS 42#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
46#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 43#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
47 44
48/*
49 * initialization
50 */
51#if RWSEM_DEBUG
52#define __RWSEM_DEBUG_INIT , 0
53#else
54#define __RWSEM_DEBUG_INIT /* */
55#endif
56
57#define __RWSEM_INITIALIZER(name) \ 45#define __RWSEM_INITIALIZER(name) \
58 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 46 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
59 LIST_HEAD_INIT((name).wait_list) \ 47 LIST_HEAD_INIT((name).wait_list) }
60 __RWSEM_DEBUG_INIT }
61 48
62#define DECLARE_RWSEM(name) \ 49#define DECLARE_RWSEM(name) \
63 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 50 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -73,9 +60,6 @@ init_rwsem (struct rw_semaphore *sem)
73 sem->count = RWSEM_UNLOCKED_VALUE; 60 sem->count = RWSEM_UNLOCKED_VALUE;
74 spin_lock_init(&sem->wait_lock); 61 spin_lock_init(&sem->wait_lock);
75 INIT_LIST_HEAD(&sem->wait_list); 62 INIT_LIST_HEAD(&sem->wait_list);
76#if RWSEM_DEBUG
77 sem->debug = 0;
78#endif
79} 63}
80 64
81/* 65/*
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 8bc9869e5765..8adcde0934ca 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -68,7 +68,7 @@ struct thread_info {
68#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) 68#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
69 69
70#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 70#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
71#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) 71#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
72#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) 72#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
73 73
74#endif /* !__ASSEMBLY */ 74#endif /* !__ASSEMBLY */
diff --git a/include/asm-m32r/Kbuild b/include/asm-m32r/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-m32r/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 66c4742f09e7..311cebf44eff 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -18,7 +18,7 @@
18 * switch_to(prev, next) should switch from task `prev' to `next' 18 * switch_to(prev, next) should switch from task `prev' to `next'
19 * `prev' will never be the same as `next'. 19 * `prev' will never be the same as `next'.
20 * 20 *
21 * `next' and `prev' should be task_t, but it isn't always defined 21 * `next' and `prev' should be struct task_struct, but it isn't always defined
22 */ 22 */
23 23
24#define switch_to(prev, next, last) do { \ 24#define switch_to(prev, next, last) do { \
diff --git a/include/asm-m68k/Kbuild b/include/asm-m68k/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-m68k/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-m68knommu/Kbuild b/include/asm-m68knommu/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-m68knommu/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-mips/Kbuild b/include/asm-mips/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-mips/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-parisc/Kbuild b/include/asm-parisc/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-parisc/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-powerpc/Kbuild b/include/asm-powerpc/Kbuild
new file mode 100644
index 000000000000..ac61d7eb6021
--- /dev/null
+++ b/include/asm-powerpc/Kbuild
@@ -0,0 +1,10 @@
1include include/asm-generic/Kbuild.asm
2
3unifdef-y += a.out.h asm-compat.h bootx.h byteorder.h cputable.h elf.h \
4 nvram.h param.h posix_types.h ptrace.h seccomp.h signal.h \
5 termios.h types.h unistd.h
6
7header-y += auxvec.h ioctls.h mman.h sembuf.h siginfo.h stat.h errno.h \
8 ipcbuf.h msgbuf.h shmbuf.h socket.h termbits.h fcntl.h ipc.h \
9 poll.h shmparam.h sockios.h ucontext.h ioctl.h linkage.h \
10 resource.h sigcontext.h statfs.h
diff --git a/include/asm-powerpc/i8259.h b/include/asm-powerpc/i8259.h
index 0392159e16e4..c80e113052cd 100644
--- a/include/asm-powerpc/i8259.h
+++ b/include/asm-powerpc/i8259.h
@@ -4,11 +4,13 @@
4 4
5#include <linux/irq.h> 5#include <linux/irq.h>
6 6
7extern struct hw_interrupt_type i8259_pic; 7#ifdef CONFIG_PPC_MERGE
8 8extern void i8259_init(struct device_node *node, unsigned long intack_addr);
9extern unsigned int i8259_irq(struct pt_regs *regs);
10#else
9extern void i8259_init(unsigned long intack_addr, int offset); 11extern void i8259_init(unsigned long intack_addr, int offset);
10extern int i8259_irq(struct pt_regs *regs); 12extern int i8259_irq(struct pt_regs *regs);
11extern int i8259_irq_cascade(struct pt_regs *regs, void *unused); 13#endif
12 14
13#endif /* __KERNEL__ */ 15#endif /* __KERNEL__ */
14#endif /* _ASM_POWERPC_I8259_H */ 16#endif /* _ASM_POWERPC_I8259_H */
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index eb5f33e1977a..e05754752028 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -9,26 +9,14 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#include <linux/config.h>
12#include <linux/threads.h> 13#include <linux/threads.h>
14#include <linux/list.h>
15#include <linux/radix-tree.h>
13 16
14#include <asm/types.h> 17#include <asm/types.h>
15#include <asm/atomic.h> 18#include <asm/atomic.h>
16 19
17/* this number is used when no interrupt has been assigned */
18#define NO_IRQ (-1)
19
20/*
21 * These constants are used for passing information about interrupt
22 * signal polarity and level/edge sensing to the low-level PIC chip
23 * drivers.
24 */
25#define IRQ_SENSE_MASK 0x1
26#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */
27#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */
28
29#define IRQ_POLARITY_MASK 0x2
30#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
31#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
32 20
33#define get_irq_desc(irq) (&irq_desc[(irq)]) 21#define get_irq_desc(irq) (&irq_desc[(irq)])
34 22
@@ -36,50 +24,325 @@
36#define for_each_irq(i) \ 24#define for_each_irq(i) \
37 for ((i) = 0; (i) < NR_IRQS; ++(i)) 25 for ((i) = 0; (i) < NR_IRQS; ++(i))
38 26
39#ifdef CONFIG_PPC64 27extern atomic_t ppc_n_lost_interrupts;
40 28
41/* 29#ifdef CONFIG_PPC_MERGE
42 * Maximum number of interrupt sources that we can handle. 30
31/* This number is used when no interrupt has been assigned */
32#define NO_IRQ (0)
33
34/* This is a special irq number to return from get_irq() to tell that
35 * no interrupt happened _and_ ignore it (don't count it as bad). Some
36 * platforms like iSeries rely on that.
43 */ 37 */
38#define NO_IRQ_IGNORE ((unsigned int)-1)
39
40/* Total number of virq in the platform (make it a CONFIG_* option ? */
44#define NR_IRQS 512 41#define NR_IRQS 512
45 42
46/* Interrupt numbers are virtual in case they are sparsely 43/* Number of irqs reserved for the legacy controller */
47 * distributed by the hardware. 44#define NUM_ISA_INTERRUPTS 16
45
46/* This type is the placeholder for a hardware interrupt number. It has to
47 * be big enough to enclose whatever representation is used by a given
48 * platform.
49 */
50typedef unsigned long irq_hw_number_t;
51
52/* Interrupt controller "host" data structure. This could be defined as a
53 * irq domain controller. That is, it handles the mapping between hardware
54 * and virtual interrupt numbers for a given interrupt domain. The host
55 * structure is generally created by the PIC code for a given PIC instance
56 * (though a host can cover more than one PIC if they have a flat number
57 * model). It's the host callbacks that are responsible for setting the
58 * irq_chip on a given irq_desc after it's been mapped.
59 *
60 * The host code and data structures are fairly agnostic to the fact that
61 * we use an open firmware device-tree. We do have references to struct
62 * device_node in two places: in irq_find_host() to find the host matching
63 * a given interrupt controller node, and of course as an argument to its
64 * counterpart host->ops->match() callback. However, those are treated as
65 * generic pointers by the core and the fact that it's actually a device-node
66 * pointer is purely a convention between callers and implementation. This
67 * code could thus be used on other architectures by replacing those two
68 * by some sort of arch-specific void * "token" used to identify interrupt
69 * controllers.
48 */ 70 */
49extern unsigned int virt_irq_to_real_map[NR_IRQS]; 71struct irq_host;
72struct radix_tree_root;
50 73
51/* The maximum virtual IRQ number that we support. This 74/* Functions below are provided by the host and called whenever a new mapping
52 * can be set by the platform and will be reduced by the 75 * is created or an old mapping is disposed. The host can then proceed to
53 * value of __irq_offset_value. It defaults to and is 76 * whatever internal data structures management is required. It also needs
54 * capped by (NR_IRQS - 1). 77 * to setup the irq_desc when returning from map().
55 */ 78 */
56extern unsigned int virt_irq_max; 79struct irq_host_ops {
80 /* Match an interrupt controller device node to a host, returns
81 * 1 on a match
82 */
83 int (*match)(struct irq_host *h, struct device_node *node);
84
85 /* Create or update a mapping between a virtual irq number and a hw
86 * irq number. This can be called several times for the same mapping
87 * but with different flags, though unmap shall always be called
88 * before the virq->hw mapping is changed.
89 */
90 int (*map)(struct irq_host *h, unsigned int virq,
91 irq_hw_number_t hw, unsigned int flags);
92
93 /* Dispose of such a mapping */
94 void (*unmap)(struct irq_host *h, unsigned int virq);
95
96 /* Translate device-tree interrupt specifier from raw format coming
97 * from the firmware to a irq_hw_number_t (interrupt line number) and
98 * trigger flags that can be passed to irq_create_mapping().
99 * If no translation is provided, raw format is assumed to be one cell
100 * for interrupt line and default sense.
101 */
102 int (*xlate)(struct irq_host *h, struct device_node *ctrler,
103 u32 *intspec, unsigned int intsize,
104 irq_hw_number_t *out_hwirq, unsigned int *out_flags);
105};
106
107struct irq_host {
108 struct list_head link;
109
110 /* type of reverse mapping technique */
111 unsigned int revmap_type;
112#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
113#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
114#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
115#define IRQ_HOST_MAP_TREE 3 /* radix tree */
116 union {
117 struct {
118 unsigned int size;
119 unsigned int *revmap;
120 } linear;
121 struct radix_tree_root tree;
122 } revmap_data;
123 struct irq_host_ops *ops;
124 void *host_data;
125 irq_hw_number_t inval_irq;
126};
127
128/* The main irq map itself is an array of NR_IRQ entries containing the
129 * associate host and irq number. An entry with a host of NULL is free.
130 * An entry can be allocated if it's free, the allocator always then sets
131 * hwirq first to the host's invalid irq number and then fills ops.
132 */
133struct irq_map_entry {
134 irq_hw_number_t hwirq;
135 struct irq_host *host;
136};
137
138extern struct irq_map_entry irq_map[NR_IRQS];
139
57 140
58/* Create a mapping for a real_irq if it doesn't already exist. 141/***
59 * Return the virtual irq as a convenience. 142 * irq_alloc_host - Allocate a new irq_host data structure
143 * @node: device-tree node of the interrupt controller
144 * @revmap_type: type of reverse mapping to use
145 * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map
146 * @ops: map/unmap host callbacks
147 * @inval_irq: provide a hw number in that host space that is always invalid
148 *
149 * Allocates and initialize and irq_host structure. Note that in the case of
150 * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
151 * for all legacy interrupts except 0 (which is always the invalid irq for
152 * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by
153 * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated
154 * later during boot automatically (the reverse mapping will use the slow path
155 * until that happens).
156 */
157extern struct irq_host *irq_alloc_host(unsigned int revmap_type,
158 unsigned int revmap_arg,
159 struct irq_host_ops *ops,
160 irq_hw_number_t inval_irq);
161
162
163/***
164 * irq_find_host - Locates a host for a given device node
165 * @node: device-tree node of the interrupt controller
166 */
167extern struct irq_host *irq_find_host(struct device_node *node);
168
169
170/***
171 * irq_set_default_host - Set a "default" host
172 * @host: default host pointer
173 *
174 * For convenience, it's possible to set a "default" host that will be used
175 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
176 * platforms that want to manipulate a few hard coded interrupt numbers that
177 * aren't properly represented in the device-tree.
178 */
179extern void irq_set_default_host(struct irq_host *host);
180
181
182/***
183 * irq_set_virq_count - Set the maximum number of virt irqs
184 * @count: number of linux virtual irqs, capped with NR_IRQS
185 *
186 * This is mainly for use by platforms like iSeries who want to program
187 * the virtual irq number in the controller to avoid the reverse mapping
188 */
189extern void irq_set_virq_count(unsigned int count);
190
191
192/***
193 * irq_create_mapping - Map a hardware interrupt into linux virq space
194 * @host: host owning this hardware interrupt or NULL for default host
195 * @hwirq: hardware irq number in that host space
196 * @flags: flags passed to the controller. contains the trigger type among
197 * others. Use IRQ_TYPE_* defined in include/linux/irq.h
198 *
199 * Only one mapping per hardware interrupt is permitted. Returns a linux
200 * virq number. The flags can be used to provide sense information to the
201 * controller (typically extracted from the device-tree). If no information
202 * is passed, the controller defaults will apply (for example, xics can only
203 * do edge so flags are irrelevant for some pseries specific irqs).
204 *
205 * The device-tree generally contains the trigger info in an encoding that is
206 * specific to a given type of controller. In that case, you can directly use
207 * host->ops->trigger_xlate() to translate that.
208 *
209 * It is recommended that new PICs that don't have existing OF bindings chose
210 * to use a representation of triggers identical to linux.
211 */
212extern unsigned int irq_create_mapping(struct irq_host *host,
213 irq_hw_number_t hwirq,
214 unsigned int flags);
215
216
217/***
218 * irq_dispose_mapping - Unmap an interrupt
219 * @virq: linux virq number of the interrupt to unmap
220 */
221extern void irq_dispose_mapping(unsigned int virq);
222
223/***
224 * irq_find_mapping - Find a linux virq from an hw irq number.
225 * @host: host owning this hardware interrupt
226 * @hwirq: hardware irq number in that host space
227 *
228 * This is a slow path, for use by generic code. It's expected that an
229 * irq controller implementation directly calls the appropriate low level
230 * mapping function.
60 */ 231 */
61int virt_irq_create_mapping(unsigned int real_irq); 232extern unsigned int irq_find_mapping(struct irq_host *host,
62void virt_irq_init(void); 233 irq_hw_number_t hwirq);
63 234
64static inline unsigned int virt_irq_to_real(unsigned int virt_irq) 235
236/***
237 * irq_radix_revmap - Find a linux virq from a hw irq number.
238 * @host: host owning this hardware interrupt
239 * @hwirq: hardware irq number in that host space
240 *
241 * This is a fast path, for use by irq controller code that uses radix tree
242 * revmaps
243 */
244extern unsigned int irq_radix_revmap(struct irq_host *host,
245 irq_hw_number_t hwirq);
246
247/***
248 * irq_linear_revmap - Find a linux virq from a hw irq number.
249 * @host: host owning this hardware interrupt
250 * @hwirq: hardware irq number in that host space
251 *
252 * This is a fast path, for use by irq controller code that uses linear
253 * revmaps. It does fallback to the slow path if the revmap doesn't exist
254 * yet and will create the revmap entry with appropriate locking
255 */
256
257extern unsigned int irq_linear_revmap(struct irq_host *host,
258 irq_hw_number_t hwirq);
259
260
261
262/***
263 * irq_alloc_virt - Allocate virtual irq numbers
264 * @host: host owning these new virtual irqs
265 * @count: number of consecutive numbers to allocate
266 * @hint: pass a hint number, the allocator will try to use a 1:1 mapping
267 *
268 * This is a low level function that is used internally by irq_create_mapping()
269 * and that can be used by some irq controllers implementations for things
270 * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
271 */
272extern unsigned int irq_alloc_virt(struct irq_host *host,
273 unsigned int count,
274 unsigned int hint);
275
276/***
277 * irq_free_virt - Free virtual irq numbers
278 * @virq: virtual irq number of the first interrupt to free
279 * @count: number of interrupts to free
280 *
281 * This function is the opposite of irq_alloc_virt. It will not clear reverse
282 * maps, this should be done previously by unmap'ing the interrupt. In fact,
283 * all interrupts covered by the range being freed should have been unmapped
284 * prior to calling this.
285 */
286extern void irq_free_virt(unsigned int virq, unsigned int count);
287
288
289/* -- OF helpers -- */
290
291/* irq_create_of_mapping - Map a hardware interrupt into linux virq space
292 * @controller: Device node of the interrupt controller
293 * @inspec: Interrupt specifier from the device-tree
294 * @intsize: Size of the interrupt specifier from the device-tree
295 *
296 * This function is identical to irq_create_mapping except that it takes
297 * as input informations straight from the device-tree (typically the results
298 * of the of_irq_map_*() functions
299 */
300extern unsigned int irq_create_of_mapping(struct device_node *controller,
301 u32 *intspec, unsigned int intsize);
302
303
304/* irq_of_parse_and_map - Parse nad Map an interrupt into linux virq space
305 * @device: Device node of the device whose interrupt is to be mapped
306 * @index: Index of the interrupt to map
307 *
308 * This function is a wrapper that chains of_irq_map_one() and
309 * irq_create_of_mapping() to make things easier to callers
310 */
311extern unsigned int irq_of_parse_and_map(struct device_node *dev, int index);
312
313/* -- End OF helpers -- */
314
315/***
316 * irq_early_init - Init irq remapping subsystem
317 */
318extern void irq_early_init(void);
319
320static __inline__ int irq_canonicalize(int irq)
65{ 321{
66 return virt_irq_to_real_map[virt_irq]; 322 return irq;
67} 323}
68 324
69extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq); 325
326#else /* CONFIG_PPC_MERGE */
327
328/* This number is used when no interrupt has been assigned */
329#define NO_IRQ (-1)
330#define NO_IRQ_IGNORE (-2)
331
70 332
71/* 333/*
72 * List of interrupt controllers. 334 * These constants are used for passing information about interrupt
335 * signal polarity and level/edge sensing to the low-level PIC chip
336 * drivers.
73 */ 337 */
74#define IC_INVALID 0 338#define IRQ_SENSE_MASK 0x1
75#define IC_OPEN_PIC 1 339#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */
76#define IC_PPC_XIC 2 340#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */
77#define IC_CELL_PIC 3
78#define IC_ISERIES 4
79 341
80extern u64 ppc64_interrupt_controller; 342#define IRQ_POLARITY_MASK 0x2
343#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
344#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
81 345
82#else /* 32-bit */
83 346
84#if defined(CONFIG_40x) 347#if defined(CONFIG_40x)
85#include <asm/ibm4xx.h> 348#include <asm/ibm4xx.h>
@@ -512,16 +775,11 @@ extern u64 ppc64_interrupt_controller;
512 775
513#endif /* CONFIG_8260 */ 776#endif /* CONFIG_8260 */
514 777
515#endif 778#endif /* Whatever way too big #ifdef */
516 779
517#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 780#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
518/* pedantic: these are long because they are used with set_bit --RR */ 781/* pedantic: these are long because they are used with set_bit --RR */
519extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 782extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
520extern atomic_t ppc_n_lost_interrupts;
521
522#define virt_irq_create_mapping(x) (x)
523
524#endif
525 783
526/* 784/*
527 * Because many systems have two overlapping names spaces for 785 * Because many systems have two overlapping names spaces for
@@ -560,6 +818,7 @@ static __inline__ int irq_canonicalize(int irq)
560 irq = 9; 818 irq = 9;
561 return irq; 819 return irq;
562} 820}
821#endif /* CONFIG_PPC_MERGE */
563 822
564extern int distribute_irqs; 823extern int distribute_irqs;
565 824
@@ -579,9 +838,8 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
579 838
580extern void irq_ctx_init(void); 839extern void irq_ctx_init(void);
581extern void call_do_softirq(struct thread_info *tp); 840extern void call_do_softirq(struct thread_info *tp);
582extern int call___do_IRQ(int irq, struct pt_regs *regs, 841extern int call_handle_irq(int irq, void *p1, void *p2,
583 struct thread_info *tp); 842 struct thread_info *tp, void *func);
584
585#else 843#else
586#define irq_ctx_init() 844#define irq_ctx_init()
587 845
diff --git a/include/asm-powerpc/irqflags.h b/include/asm-powerpc/irqflags.h
new file mode 100644
index 000000000000..7970cbaeaa54
--- /dev/null
+++ b/include/asm-powerpc/irqflags.h
@@ -0,0 +1,31 @@
1/*
2 * include/asm-powerpc/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() macros from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13/*
14 * Get definitions for raw_local_save_flags(x), etc.
15 */
16#include <asm-powerpc/hw_irq.h>
17
18/*
19 * Do the CPU's IRQ-state tracing from assembly code. We call a
20 * C function, so save all the C-clobbered registers:
21 */
22#ifdef CONFIG_TRACE_IRQFLAGS
23
24#error No support on PowerPC yet for CONFIG_TRACE_IRQFLAGS
25
26#else
27# define TRACE_IRQS_ON
28# define TRACE_IRQS_OFF
29#endif
30
31#endif
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index eba133d149a7..c17c13742401 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -97,7 +97,7 @@ struct machdep_calls {
97 void (*show_percpuinfo)(struct seq_file *m, int i); 97 void (*show_percpuinfo)(struct seq_file *m, int i);
98 98
99 void (*init_IRQ)(void); 99 void (*init_IRQ)(void);
100 int (*get_irq)(struct pt_regs *); 100 unsigned int (*get_irq)(struct pt_regs *);
101#ifdef CONFIG_KEXEC 101#ifdef CONFIG_KEXEC
102 void (*kexec_cpu_down)(int crash_shutdown, int secondary); 102 void (*kexec_cpu_down)(int crash_shutdown, int secondary);
103#endif 103#endif
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
index f0d22ac34b96..eb241c99c457 100644
--- a/include/asm-powerpc/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -114,9 +114,6 @@
114#define MPIC_VEC_TIMER_1 248 114#define MPIC_VEC_TIMER_1 248
115#define MPIC_VEC_TIMER_0 247 115#define MPIC_VEC_TIMER_0 247
116 116
117/* Type definition of the cascade handler */
118typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data);
119
120#ifdef CONFIG_MPIC_BROKEN_U3 117#ifdef CONFIG_MPIC_BROKEN_U3
121/* Fixup table entry */ 118/* Fixup table entry */
122struct mpic_irq_fixup 119struct mpic_irq_fixup
@@ -132,10 +129,19 @@ struct mpic_irq_fixup
132/* The instance data of a given MPIC */ 129/* The instance data of a given MPIC */
133struct mpic 130struct mpic
134{ 131{
132 /* The device node of the interrupt controller */
133 struct device_node *of_node;
134
135 /* The remapper for this MPIC */
136 struct irq_host *irqhost;
137
135 /* The "linux" controller struct */ 138 /* The "linux" controller struct */
136 hw_irq_controller hc_irq; 139 struct irq_chip hc_irq;
140#ifdef CONFIG_MPIC_BROKEN_U3
141 struct irq_chip hc_ht_irq;
142#endif
137#ifdef CONFIG_SMP 143#ifdef CONFIG_SMP
138 hw_irq_controller hc_ipi; 144 struct irq_chip hc_ipi;
139#endif 145#endif
140 const char *name; 146 const char *name;
141 /* Flags */ 147 /* Flags */
@@ -144,20 +150,12 @@ struct mpic
144 unsigned int isu_size; 150 unsigned int isu_size;
145 unsigned int isu_shift; 151 unsigned int isu_shift;
146 unsigned int isu_mask; 152 unsigned int isu_mask;
147 /* Offset of irq vector numbers */
148 unsigned int irq_offset;
149 unsigned int irq_count; 153 unsigned int irq_count;
150 /* Offset of ipi vector numbers */
151 unsigned int ipi_offset;
152 /* Number of sources */ 154 /* Number of sources */
153 unsigned int num_sources; 155 unsigned int num_sources;
154 /* Number of CPUs */ 156 /* Number of CPUs */
155 unsigned int num_cpus; 157 unsigned int num_cpus;
156 /* cascade handler */ 158 /* default senses array */
157 mpic_cascade_t cascade;
158 void *cascade_data;
159 unsigned int cascade_vec;
160 /* senses array */
161 unsigned char *senses; 159 unsigned char *senses;
162 unsigned int senses_count; 160 unsigned int senses_count;
163 161
@@ -213,14 +211,11 @@ struct mpic
213 * The values in the array start at the first source of the MPIC, 211 * The values in the array start at the first source of the MPIC,
214 * that is senses[0] correspond to linux irq "irq_offset". 212 * that is senses[0] correspond to linux irq "irq_offset".
215 */ 213 */
216extern struct mpic *mpic_alloc(unsigned long phys_addr, 214extern struct mpic *mpic_alloc(struct device_node *node,
215 unsigned long phys_addr,
217 unsigned int flags, 216 unsigned int flags,
218 unsigned int isu_size, 217 unsigned int isu_size,
219 unsigned int irq_offset,
220 unsigned int irq_count, 218 unsigned int irq_count,
221 unsigned int ipi_offset,
222 unsigned char *senses,
223 unsigned int senses_num,
224 const char *name); 219 const char *name);
225 220
226/* Assign ISUs, to call before mpic_init() 221/* Assign ISUs, to call before mpic_init()
@@ -232,22 +227,27 @@ extern struct mpic *mpic_alloc(unsigned long phys_addr,
232extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, 227extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
233 unsigned long phys_addr); 228 unsigned long phys_addr);
234 229
230/* Set default sense codes
231 *
232 * @mpic: controller
233 * @senses: array of sense codes
234 * @count: size of above array
235 *
236 * Optionally provide an array (indexed on hardware interrupt numbers
237 * for this MPIC) of default sense codes for the chip. Those are linux
238 * sense codes IRQ_TYPE_*
239 *
240 * The driver gets ownership of the pointer, don't dispose of it or
241 * anything like that. __init only.
242 */
243extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count);
244
245
235/* Initialize the controller. After this has been called, none of the above 246/* Initialize the controller. After this has been called, none of the above
236 * should be called again for this mpic 247 * should be called again for this mpic
237 */ 248 */
238extern void mpic_init(struct mpic *mpic); 249extern void mpic_init(struct mpic *mpic);
239 250
240/* Setup a cascade. Currently, only one cascade is supported this
241 * way, though you can always do a normal request_irq() and add
242 * other cascades this way. You should call this _after_ having
243 * added all the ISUs
244 *
245 * @irq_no: "linux" irq number of the cascade (that is offset'ed vector)
246 * @handler: cascade handler function
247 */
248extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t hanlder,
249 void *data);
250
251/* 251/*
252 * All of the following functions must only be used after the 252 * All of the following functions must only be used after the
253 * ISUs have been assigned and the controller fully initialized 253 * ISUs have been assigned and the controller fully initialized
@@ -284,9 +284,9 @@ extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
284void smp_mpic_message_pass(int target, int msg); 284void smp_mpic_message_pass(int target, int msg);
285 285
286/* Fetch interrupt from a given mpic */ 286/* Fetch interrupt from a given mpic */
287extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); 287extern unsigned int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
288/* This one gets to the primary mpic */ 288/* This one gets to the primary mpic */
289extern int mpic_get_irq(struct pt_regs *regs); 289extern unsigned int mpic_get_irq(struct pt_regs *regs);
290 290
291/* Set the EPIC clock ratio */ 291/* Set the EPIC clock ratio */
292void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio); 292void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
@@ -294,8 +294,5 @@ void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
294/* Enable/Disable EPIC serial interrupt mode */ 294/* Enable/Disable EPIC serial interrupt mode */
295void mpic_set_serial_int(struct mpic *mpic, int enable); 295void mpic_set_serial_int(struct mpic *mpic, int enable);
296 296
297/* global mpic for pSeries */
298extern struct mpic *pSeries_mpic;
299
300#endif /* __KERNEL__ */ 297#endif /* __KERNEL__ */
301#endif /* _ASM_POWERPC_MPIC_H */ 298#endif /* _ASM_POWERPC_MPIC_H */
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index faa1fc703053..2f2e3024fa61 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -14,6 +14,7 @@
14 14
15#define __per_cpu_offset(cpu) (paca[cpu].data_offset) 15#define __per_cpu_offset(cpu) (paca[cpu].data_offset)
16#define __my_cpu_offset() get_paca()->data_offset 16#define __my_cpu_offset() get_paca()->data_offset
17#define per_cpu_offset(x) (__per_cpu_offset(x))
17 18
18/* Separate out the type, so (int[3], foo) works. */ 19/* Separate out the type, so (int[3], foo) works. */
19#define DEFINE_PER_CPU(type, name) \ 20#define DEFINE_PER_CPU(type, name) \
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index 010d186d095b..b095a285c84b 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -64,11 +64,6 @@ struct boot_param_header
64typedef u32 phandle; 64typedef u32 phandle;
65typedef u32 ihandle; 65typedef u32 ihandle;
66 66
67struct interrupt_info {
68 int line;
69 int sense; /* +ve/-ve logic, edge or level, etc. */
70};
71
72struct property { 67struct property {
73 char *name; 68 char *name;
74 int length; 69 int length;
@@ -81,8 +76,6 @@ struct device_node {
81 char *type; 76 char *type;
82 phandle node; 77 phandle node;
83 phandle linux_phandle; 78 phandle linux_phandle;
84 int n_intrs;
85 struct interrupt_info *intrs;
86 char *full_name; 79 char *full_name;
87 80
88 struct property *properties; 81 struct property *properties;
@@ -167,8 +160,8 @@ extern void unflatten_device_tree(void);
167extern void early_init_devtree(void *); 160extern void early_init_devtree(void *);
168extern int device_is_compatible(struct device_node *device, const char *); 161extern int device_is_compatible(struct device_node *device, const char *);
169extern int machine_is_compatible(const char *compat); 162extern int machine_is_compatible(const char *compat);
170extern unsigned char *get_property(struct device_node *node, const char *name, 163extern void *get_property(struct device_node *node, const char *name,
171 int *lenp); 164 int *lenp);
172extern void print_properties(struct device_node *node); 165extern void print_properties(struct device_node *node);
173extern int prom_n_addr_cells(struct device_node* np); 166extern int prom_n_addr_cells(struct device_node* np);
174extern int prom_n_size_cells(struct device_node* np); 167extern int prom_n_size_cells(struct device_node* np);
@@ -204,6 +197,15 @@ extern int release_OF_resource(struct device_node* node, int index);
204 */ 197 */
205 198
206 199
200/* Helper to read a big number */
201static inline u64 of_read_number(u32 *cell, int size)
202{
203 u64 r = 0;
204 while (size--)
205 r = (r << 32) | *(cell++);
206 return r;
207}
208
207/* Translate an OF address block into a CPU physical address 209/* Translate an OF address block into a CPU physical address
208 */ 210 */
209#define OF_BAD_ADDR ((u64)-1) 211#define OF_BAD_ADDR ((u64)-1)
@@ -240,5 +242,83 @@ extern void kdump_move_device_tree(void);
240/* CPU OF node matching */ 242/* CPU OF node matching */
241struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); 243struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
242 244
245
246/*
247 * OF interrupt mapping
248 */
249
250/* This structure is returned when an interrupt is mapped. The controller
251 * field needs to be put() after use
252 */
253
254#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */
255
256struct of_irq {
257 struct device_node *controller; /* Interrupt controller node */
258 u32 size; /* Specifier size */
259 u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */
260};
261
262/***
263 * of_irq_map_init - Initialize the irq remapper
264 * @flags: flags defining workarounds to enable
265 *
266 * Some machines have bugs in the device-tree which require certain workarounds
267 * to be applied. Call this before any interrupt mapping attempts to enable
268 * those workarounds.
269 */
270#define OF_IMAP_OLDWORLD_MAC 0x00000001
271#define OF_IMAP_NO_PHANDLE 0x00000002
272
273extern void of_irq_map_init(unsigned int flags);
274
275/***
276 * of_irq_map_raw - Low level interrupt tree parsing
277 * @parent: the device interrupt parent
278 * @intspec: interrupt specifier ("interrupts" property of the device)
279 * @addr: address specifier (start of "reg" property of the device)
280 * @out_irq: structure of_irq filled by this function
281 *
282 * Returns 0 on success and a negative number on error
283 *
284 * This function is a low-level interrupt tree walking function. It
285 * can be used to do a partial walk with synthetized reg and interrupts
286 * properties, for example when resolving PCI interrupts when no device
287 * node exist for the parent.
288 *
289 */
290
291extern int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 *addr,
292 struct of_irq *out_irq);
293
294
295/***
296 * of_irq_map_one - Resolve an interrupt for a device
297 * @device: the device whose interrupt is to be resolved
298 * @index: index of the interrupt to resolve
299 * @out_irq: structure of_irq filled by this function
300 *
301 * This function resolves an interrupt, walking the tree, for a given
302 * device-tree node. It's the high level pendant to of_irq_map_raw().
303 * It also implements the workarounds for OldWolrd Macs.
304 */
305extern int of_irq_map_one(struct device_node *device, int index,
306 struct of_irq *out_irq);
307
308/***
309 * of_irq_map_pci - Resolve the interrupt for a PCI device
310 * @pdev: the device whose interrupt is to be resolved
311 * @out_irq: structure of_irq filled by this function
312 *
313 * This function resolves the PCI interrupt for a given PCI device. If a
314 * device-node exists for a given pci_dev, it will use normal OF tree
315 * walking. If not, it will implement standard swizzling and walk up the
316 * PCI tree until an device-node is found, at which point it will finish
317 * resolving using the OF tree walking.
318 */
319struct pci_dev;
320extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
321
322
243#endif /* __KERNEL__ */ 323#endif /* __KERNEL__ */
244#endif /* _POWERPC_PROM_H */ 324#endif /* _POWERPC_PROM_H */
diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h
index 2c2fe9647595..e929145e1e46 100644
--- a/include/asm-powerpc/rwsem.h
+++ b/include/asm-powerpc/rwsem.h
@@ -28,24 +28,11 @@ struct rw_semaphore {
28#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 28#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
29 spinlock_t wait_lock; 29 spinlock_t wait_lock;
30 struct list_head wait_list; 30 struct list_head wait_list;
31#if RWSEM_DEBUG
32 int debug;
33#endif
34}; 31};
35 32
36/*
37 * initialisation
38 */
39#if RWSEM_DEBUG
40#define __RWSEM_DEBUG_INIT , 0
41#else
42#define __RWSEM_DEBUG_INIT /* */
43#endif
44
45#define __RWSEM_INITIALIZER(name) \ 33#define __RWSEM_INITIALIZER(name) \
46 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 34 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
47 LIST_HEAD_INIT((name).wait_list) \ 35 LIST_HEAD_INIT((name).wait_list) }
48 __RWSEM_DEBUG_INIT }
49 36
50#define DECLARE_RWSEM(name) \ 37#define DECLARE_RWSEM(name) \
51 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 38 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -60,9 +47,6 @@ static inline void init_rwsem(struct rw_semaphore *sem)
60 sem->count = RWSEM_UNLOCKED_VALUE; 47 sem->count = RWSEM_UNLOCKED_VALUE;
61 spin_lock_init(&sem->wait_lock); 48 spin_lock_init(&sem->wait_lock);
62 INIT_LIST_HEAD(&sem->wait_list); 49 INIT_LIST_HEAD(&sem->wait_list);
63#if RWSEM_DEBUG
64 sem->debug = 0;
65#endif
66} 50}
67 51
68/* 52/*
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 9609d3ee8798..c02d105d8294 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -117,6 +117,7 @@ struct spu {
117 struct list_head sched_list; 117 struct list_head sched_list;
118 int number; 118 int number;
119 int nid; 119 int nid;
120 unsigned int irqs[3];
120 u32 isrc; 121 u32 isrc;
121 u32 node; 122 u32 node;
122 u64 flags; 123 u64 flags;
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild
new file mode 100644
index 000000000000..ed8955f49e47
--- /dev/null
+++ b/include/asm-s390/Kbuild
@@ -0,0 +1,4 @@
1include include/asm-generic/Kbuild.asm
2
3unifdef-y += cmb.h debug.h
4header-y += dasd.h qeth.h tape390.h ucontext.h vtoc.h z90crypt.h
diff --git a/include/asm-s390/irqflags.h b/include/asm-s390/irqflags.h
new file mode 100644
index 000000000000..65f4db627e7a
--- /dev/null
+++ b/include/asm-s390/irqflags.h
@@ -0,0 +1,50 @@
1/*
2 * include/asm-s390/irqflags.h
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#ifndef __ASM_IRQFLAGS_H
9#define __ASM_IRQFLAGS_H
10
11#ifdef __KERNEL__
12
13/* interrupt control.. */
14#define raw_local_irq_enable() ({ \
15 unsigned long __dummy; \
16 __asm__ __volatile__ ( \
17 "stosm 0(%1),0x03" \
18 : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
19 })
20
21#define raw_local_irq_disable() ({ \
22 unsigned long __flags; \
23 __asm__ __volatile__ ( \
24 "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
25 __flags; \
26 })
27
28#define raw_local_save_flags(x) \
29 __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )
30
31#define raw_local_irq_restore(x) \
32 __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory")
33
34#define raw_irqs_disabled() \
35({ \
36 unsigned long flags; \
37 local_save_flags(flags); \
38 !((flags >> __FLAG_SHIFT) & 3); \
39})
40
41static inline int raw_irqs_disabled_flags(unsigned long flags)
42{
43 return !((flags >> __FLAG_SHIFT) & 3);
44}
45
46/* For spinlocks etc */
47#define raw_local_irq_save(x) ((x) = raw_local_irq_disable())
48
49#endif /* __KERNEL__ */
50#endif /* __ASM_IRQFLAGS_H */
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index d9a8cca9b653..28b3517e787c 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -42,6 +42,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
42#define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) 42#define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
43#define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) 43#define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
44#define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu]) 44#define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
45#define per_cpu_offset(x) (__per_cpu_offset[x])
45 46
46/* A macro to avoid #include hell... */ 47/* A macro to avoid #include hell... */
47#define percpu_modcopy(pcpudst, src, size) \ 48#define percpu_modcopy(pcpudst, src, size) \
diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h
index 0422a085dd56..13ec16965150 100644
--- a/include/asm-s390/rwsem.h
+++ b/include/asm-s390/rwsem.h
@@ -61,6 +61,9 @@ struct rw_semaphore {
61 signed long count; 61 signed long count;
62 spinlock_t wait_lock; 62 spinlock_t wait_lock;
63 struct list_head wait_list; 63 struct list_head wait_list;
64#ifdef CONFIG_DEBUG_LOCK_ALLOC
65 struct lockdep_map dep_map;
66#endif
64}; 67};
65 68
66#ifndef __s390x__ 69#ifndef __s390x__
@@ -80,8 +83,16 @@ struct rw_semaphore {
80/* 83/*
81 * initialisation 84 * initialisation
82 */ 85 */
86
87#ifdef CONFIG_DEBUG_LOCK_ALLOC
88# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
89#else
90# define __RWSEM_DEP_MAP_INIT(lockname)
91#endif
92
83#define __RWSEM_INITIALIZER(name) \ 93#define __RWSEM_INITIALIZER(name) \
84{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) } 94{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
95 __RWSEM_DEP_MAP_INIT(name) }
85 96
86#define DECLARE_RWSEM(name) \ 97#define DECLARE_RWSEM(name) \
87 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 98 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -93,6 +104,17 @@ static inline void init_rwsem(struct rw_semaphore *sem)
93 INIT_LIST_HEAD(&sem->wait_list); 104 INIT_LIST_HEAD(&sem->wait_list);
94} 105}
95 106
107extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
108 struct lock_class_key *key);
109
110#define init_rwsem(sem) \
111do { \
112 static struct lock_class_key __key; \
113 \
114 __init_rwsem((sem), #sem, &__key); \
115} while (0)
116
117
96/* 118/*
97 * lock for reading 119 * lock for reading
98 */ 120 */
@@ -155,7 +177,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
155/* 177/*
156 * lock for writing 178 * lock for writing
157 */ 179 */
158static inline void __down_write(struct rw_semaphore *sem) 180static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
159{ 181{
160 signed long old, new, tmp; 182 signed long old, new, tmp;
161 183
@@ -181,6 +203,11 @@ static inline void __down_write(struct rw_semaphore *sem)
181 rwsem_down_write_failed(sem); 203 rwsem_down_write_failed(sem);
182} 204}
183 205
206static inline void __down_write(struct rw_semaphore *sem)
207{
208 __down_write_nested(sem, 0);
209}
210
184/* 211/*
185 * trylock for writing -- returns 1 if successful, 0 if contention 212 * trylock for writing -- returns 1 if successful, 0 if contention
186 */ 213 */
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 702cf436698c..32cdc69f39f4 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -37,7 +37,8 @@ struct semaphore {
37 37
38static inline void sema_init (struct semaphore *sem, int val) 38static inline void sema_init (struct semaphore *sem, int val)
39{ 39{
40 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER((*sem),val); 40 atomic_set(&sem->count, val);
41 init_waitqueue_head(&sem->wait);
41} 42}
42 43
43static inline void init_MUTEX (struct semaphore *sem) 44static inline void init_MUTEX (struct semaphore *sem)
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 71a0732cd518..9ab186ffde23 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -301,34 +301,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
301#define set_mb(var, value) do { var = value; mb(); } while (0) 301#define set_mb(var, value) do { var = value; mb(); } while (0)
302#define set_wmb(var, value) do { var = value; wmb(); } while (0) 302#define set_wmb(var, value) do { var = value; wmb(); } while (0)
303 303
304/* interrupt control.. */
305#define local_irq_enable() ({ \
306 unsigned long __dummy; \
307 __asm__ __volatile__ ( \
308 "stosm 0(%1),0x03" \
309 : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
310 })
311
312#define local_irq_disable() ({ \
313 unsigned long __flags; \
314 __asm__ __volatile__ ( \
315 "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
316 __flags; \
317 })
318
319#define local_save_flags(x) \
320 __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) )
321
322#define local_irq_restore(x) \
323 __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory")
324
325#define irqs_disabled() \
326({ \
327 unsigned long flags; \
328 local_save_flags(flags); \
329 !((flags >> __FLAG_SHIFT) & 3); \
330})
331
332#ifdef __s390x__ 304#ifdef __s390x__
333 305
334#define __ctl_load(array, low, high) ({ \ 306#define __ctl_load(array, low, high) ({ \
@@ -442,8 +414,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
442 }) 414 })
443#endif /* __s390x__ */ 415#endif /* __s390x__ */
444 416
445/* For spinlocks etc */ 417#include <linux/irqflags.h>
446#define local_irq_save(x) ((x) = local_irq_disable())
447 418
448/* 419/*
449 * Use to set psw mask except for the first byte which 420 * Use to set psw mask except for the first byte which
@@ -482,4 +453,3 @@ extern void (*_machine_power_off)(void);
482#endif /* __KERNEL__ */ 453#endif /* __KERNEL__ */
483 454
484#endif 455#endif
485
diff --git a/include/asm-sh/Kbuild b/include/asm-sh/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-sh/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-sh/rwsem.h b/include/asm-sh/rwsem.h
index 0262d3d1e5e0..9d2aea5e8488 100644
--- a/include/asm-sh/rwsem.h
+++ b/include/asm-sh/rwsem.h
@@ -25,24 +25,11 @@ struct rw_semaphore {
25#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 25#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
26 spinlock_t wait_lock; 26 spinlock_t wait_lock;
27 struct list_head wait_list; 27 struct list_head wait_list;
28#if RWSEM_DEBUG
29 int debug;
30#endif
31}; 28};
32 29
33/*
34 * initialisation
35 */
36#if RWSEM_DEBUG
37#define __RWSEM_DEBUG_INIT , 0
38#else
39#define __RWSEM_DEBUG_INIT /* */
40#endif
41
42#define __RWSEM_INITIALIZER(name) \ 30#define __RWSEM_INITIALIZER(name) \
43 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 31 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
44 LIST_HEAD_INIT((name).wait_list) \ 32 LIST_HEAD_INIT((name).wait_list) }
45 __RWSEM_DEBUG_INIT }
46 33
47#define DECLARE_RWSEM(name) \ 34#define DECLARE_RWSEM(name) \
48 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 35 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -57,9 +44,6 @@ static inline void init_rwsem(struct rw_semaphore *sem)
57 sem->count = RWSEM_UNLOCKED_VALUE; 44 sem->count = RWSEM_UNLOCKED_VALUE;
58 spin_lock_init(&sem->wait_lock); 45 spin_lock_init(&sem->wait_lock);
59 INIT_LIST_HEAD(&sem->wait_list); 46 INIT_LIST_HEAD(&sem->wait_list);
60#if RWSEM_DEBUG
61 sem->debug = 0;
62#endif
63} 47}
64 48
65/* 49/*
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index b752e5cbb830..ce2e60664a86 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -12,7 +12,7 @@
12 */ 12 */
13 13
14#define switch_to(prev, next, last) do { \ 14#define switch_to(prev, next, last) do { \
15 task_t *__last; \ 15 struct task_struct *__last; \
16 register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \ 16 register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
17 register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \ 17 register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
18 register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \ 18 register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
diff --git a/include/asm-sh64/Kbuild b/include/asm-sh64/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-sh64/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-sparc/Kbuild b/include/asm-sparc/Kbuild
new file mode 100644
index 000000000000..e2a57fd7abfa
--- /dev/null
+++ b/include/asm-sparc/Kbuild
@@ -0,0 +1,6 @@
1include include/asm-generic/Kbuild.asm
2
3unifdef-y += fbio.h perfctr.h psr.h
4header-y += apc.h asi.h auxio.h bpp.h head.h ipc.h jsflash.h \
5 openpromio.h pbm.h pconf.h pgtsun4.h reg.h traps.h \
6 turbosparc.h vfc_ioctls.h winmacro.h
diff --git a/include/asm-sparc64/Kbuild b/include/asm-sparc64/Kbuild
new file mode 100644
index 000000000000..c78d44bb195f
--- /dev/null
+++ b/include/asm-sparc64/Kbuild
@@ -0,0 +1,10 @@
1include include/asm-generic/Kbuild.asm
2
3ALTARCH := sparc
4ARCHDEF := defined __sparc__ && defined __arch64__
5ALTARCHDEF := defined __sparc__ && !defined __arch64__
6
7unifdef-y := fbio.h perfctr.h
8header-y += apb.h asi.h bbc.h bpp.h display7seg.h envctrl.h floppy.h \
9 ipc.h kdebug.h mostek.h openprom.h openpromio.h parport.h \
10 pconf.h psrcompat.h pstate.h reg.h uctx.h utrap.h watchdog.h
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index a6ece06b83db..ced8cbde046d 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -11,6 +11,7 @@ extern unsigned long __per_cpu_base;
11extern unsigned long __per_cpu_shift; 11extern unsigned long __per_cpu_shift;
12#define __per_cpu_offset(__cpu) \ 12#define __per_cpu_offset(__cpu) \
13 (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift)) 13 (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
14#define per_cpu_offset(x) (__per_cpu_offset(x))
14 15
15/* Separate out the type, so (int[3], foo) works. */ 16/* Separate out the type, so (int[3], foo) works. */
16#define DEFINE_PER_CPU(type, name) \ 17#define DEFINE_PER_CPU(type, name) \
diff --git a/include/asm-um/Kbuild b/include/asm-um/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-um/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-v850/Kbuild b/include/asm-v850/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-v850/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
new file mode 100644
index 000000000000..dc4d101e8a16
--- /dev/null
+++ b/include/asm-x86_64/Kbuild
@@ -0,0 +1,11 @@
1include include/asm-generic/Kbuild.asm
2
3ALTARCH := i386
4ARCHDEF := defined __x86_64__
5ALTARCHDEF := defined __i386__
6
7header-y += boot.h bootsetup.h cpufeature.h debugreg.h ldt.h \
8 msr.h prctl.h setup.h sigcontext32.h ucontext.h \
9 vsyscall32.h
10
11unifdef-y += mce.h mtrr.h vsyscall.h
diff --git a/include/asm-x86_64/irqflags.h b/include/asm-x86_64/irqflags.h
new file mode 100644
index 000000000000..cce6937e87c0
--- /dev/null
+++ b/include/asm-x86_64/irqflags.h
@@ -0,0 +1,141 @@
1/*
2 * include/asm-x86_64/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14/*
15 * Interrupt control:
16 */
17
18static inline unsigned long __raw_local_save_flags(void)
19{
20 unsigned long flags;
21
22 __asm__ __volatile__(
23 "# __raw_save_flags\n\t"
24 "pushfq ; popq %q0"
25 : "=g" (flags)
26 : /* no input */
27 : "memory"
28 );
29
30 return flags;
31}
32
33#define raw_local_save_flags(flags) \
34 do { (flags) = __raw_local_save_flags(); } while (0)
35
36static inline void raw_local_irq_restore(unsigned long flags)
37{
38 __asm__ __volatile__(
39 "pushq %0 ; popfq"
40 : /* no output */
41 :"g" (flags)
42 :"memory", "cc"
43 );
44}
45
46#ifdef CONFIG_X86_VSMP
47
48/*
49 * Interrupt control for the VSMP architecture:
50 */
51
52static inline void raw_local_irq_disable(void)
53{
54 unsigned long flags = __raw_local_save_flags();
55
56 raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
57}
58
59static inline void raw_local_irq_enable(void)
60{
61 unsigned long flags = __raw_local_save_flags();
62
63 raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
64}
65
66static inline int raw_irqs_disabled_flags(unsigned long flags)
67{
68 return !(flags & (1<<9)) || (flags & (1 << 18));
69}
70
71#else /* CONFIG_X86_VSMP */
72
73static inline void raw_local_irq_disable(void)
74{
75 __asm__ __volatile__("cli" : : : "memory");
76}
77
78static inline void raw_local_irq_enable(void)
79{
80 __asm__ __volatile__("sti" : : : "memory");
81}
82
83static inline int raw_irqs_disabled_flags(unsigned long flags)
84{
85 return !(flags & (1 << 9));
86}
87
88#endif
89
90/*
91 * For spinlocks, etc.:
92 */
93
94static inline unsigned long __raw_local_irq_save(void)
95{
96 unsigned long flags = __raw_local_save_flags();
97
98 raw_local_irq_disable();
99
100 return flags;
101}
102
103#define raw_local_irq_save(flags) \
104 do { (flags) = __raw_local_irq_save(); } while (0)
105
106static inline int raw_irqs_disabled(void)
107{
108 unsigned long flags = __raw_local_save_flags();
109
110 return raw_irqs_disabled_flags(flags);
111}
112
113/*
114 * Used in the idle loop; sti takes one instruction cycle
115 * to complete:
116 */
117static inline void raw_safe_halt(void)
118{
119 __asm__ __volatile__("sti; hlt" : : : "memory");
120}
121
122/*
123 * Used when interrupts are already enabled or to
124 * shutdown the processor:
125 */
126static inline void halt(void)
127{
128 __asm__ __volatile__("hlt": : :"memory");
129}
130
131#else /* __ASSEMBLY__: */
132# ifdef CONFIG_TRACE_IRQFLAGS
133# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
134# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
135# else
136# define TRACE_IRQS_ON
137# define TRACE_IRQS_OFF
138# endif
139#endif
140
141#endif
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index cd52c7f33bca..2b0c088e2957 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -49,7 +49,7 @@ static inline int notify_die(enum die_val val, const char *str,
49 return atomic_notifier_call_chain(&die_chain, val, &args); 49 return atomic_notifier_call_chain(&die_chain, val, &args);
50} 50}
51 51
52extern int printk_address(unsigned long address); 52extern void printk_address(unsigned long address);
53extern void die(const char *,struct pt_regs *,long); 53extern void die(const char *,struct pt_regs *,long);
54extern void __die(const char *,struct pt_regs *,long); 54extern void __die(const char *,struct pt_regs *,long);
55extern void show_registers(struct pt_regs *regs); 55extern void show_registers(struct pt_regs *regs);
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 549eb929b2c0..08dd9f9dda81 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -14,6 +14,8 @@
14#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) 14#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
15#define __my_cpu_offset() read_pda(data_offset) 15#define __my_cpu_offset() read_pda(data_offset)
16 16
17#define per_cpu_offset(x) (__per_cpu_offset(x))
18
17/* Separate out the type, so (int[3], foo) works. */ 19/* Separate out the type, so (int[3], foo) works. */
18#define DEFINE_PER_CPU(type, name) \ 20#define DEFINE_PER_CPU(type, name) \
19 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name 21 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 68e559f3631c..f67f2873a922 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -244,43 +244,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
244 244
245#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) 245#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
246 246
247/* interrupt control.. */ 247#include <linux/irqflags.h>
248#define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
249#define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
250
251#ifdef CONFIG_X86_VSMP
252/* Interrupt control for VSMP architecture */
253#define local_irq_disable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
254#define local_irq_enable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
255
256#define irqs_disabled() \
257({ \
258 unsigned long flags; \
259 local_save_flags(flags); \
260 (flags & (1<<18)) || !(flags & (1<<9)); \
261})
262
263/* For spinlocks etc */
264#define local_irq_save(x) do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
265#else /* CONFIG_X86_VSMP */
266#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
267#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
268
269#define irqs_disabled() \
270({ \
271 unsigned long flags; \
272 local_save_flags(flags); \
273 !(flags & (1<<9)); \
274})
275
276/* For spinlocks etc */
277#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
278#endif
279
280/* used in the idle loop; sti takes one instruction cycle to complete */
281#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
282/* used when interrupts are already enabled or to shutdown the processor */
283#define halt() __asm__ __volatile__("hlt": : :"memory")
284 248
285void cpu_idle_wait(void); 249void cpu_idle_wait(void);
286 250
diff --git a/include/asm-xtensa/Kbuild b/include/asm-xtensa/Kbuild
new file mode 100644
index 000000000000..c68e1680da01
--- /dev/null
+++ b/include/asm-xtensa/Kbuild
@@ -0,0 +1 @@
include include/asm-generic/Kbuild.asm
diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h
index abcd86dc5ab9..0aad3a587551 100644
--- a/include/asm-xtensa/rwsem.h
+++ b/include/asm-xtensa/rwsem.h
@@ -31,24 +31,11 @@ struct rw_semaphore {
31#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 31#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
32 spinlock_t wait_lock; 32 spinlock_t wait_lock;
33 struct list_head wait_list; 33 struct list_head wait_list;
34#if RWSEM_DEBUG
35 int debug;
36#endif
37}; 34};
38 35
39/*
40 * initialisation
41 */
42#if RWSEM_DEBUG
43#define __RWSEM_DEBUG_INIT , 0
44#else
45#define __RWSEM_DEBUG_INIT /* */
46#endif
47
48#define __RWSEM_INITIALIZER(name) \ 36#define __RWSEM_INITIALIZER(name) \
49 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 37 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
50 LIST_HEAD_INIT((name).wait_list) \ 38 LIST_HEAD_INIT((name).wait_list) }
51 __RWSEM_DEBUG_INIT }
52 39
53#define DECLARE_RWSEM(name) \ 40#define DECLARE_RWSEM(name) \
54 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 41 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -63,9 +50,6 @@ static inline void init_rwsem(struct rw_semaphore *sem)
63 sem->count = RWSEM_UNLOCKED_VALUE; 50 sem->count = RWSEM_UNLOCKED_VALUE;
64 spin_lock_init(&sem->wait_lock); 51 spin_lock_init(&sem->wait_lock);
65 INIT_LIST_HEAD(&sem->wait_list); 52 INIT_LIST_HEAD(&sem->wait_list);
66#if RWSEM_DEBUG
67 sem->debug = 0;
68#endif
69} 53}
70 54
71/* 55/*
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
new file mode 100644
index 000000000000..2b8a7d68fae3
--- /dev/null
+++ b/include/linux/Kbuild
@@ -0,0 +1,63 @@
1header-y := byteorder/ dvb/ hdlc/ isdn/ nfsd/ raid/ sunrpc/ tc_act/ \
2 netfilter/ netfilter_arp/ netfilter_bridge/ netfilter_ipv4/ \
3 netfilter_ipv6/
4
5header-y += affs_fs.h affs_hardblocks.h aio_abi.h a.out.h arcfb.h \
6 atmapi.h atmbr2684.h atmclip.h atm_eni.h atm_he.h \
7 atm_idt77105.h atmioc.h atmlec.h atmmpc.h atm_nicstar.h \
8 atmppp.h atmsap.h atmsvc.h atm_zatm.h auto_fs4.h auxvec.h \
9 awe_voice.h ax25.h b1lli.h baycom.h bfs_fs.h blkpg.h \
10 bpqether.h cdk.h chio.h coda_psdev.h coff.h comstats.h \
11 consolemap.h cycx_cfm.h dm-ioctl.h dn.h dqblk_v1.h \
12 dqblk_v2.h dqblk_xfs.h efs_fs_sb.h elf-fdpic.h elf.h elf-em.h \
13 fadvise.h fd.h fdreg.h ftape-header-segment.h ftape-vendors.h \
14 fuse.h futex.h genetlink.h gen_stats.h gigaset_dev.h hdsmart.h \
15 hpfs_fs.h hysdn_if.h i2c-dev.h i8k.h icmp.h \
16 if_arcnet.h if_arp.h if_bonding.h if_cablemodem.h if_fc.h \
17 if_fddi.h if.h if_hippi.h if_infiniband.h if_packet.h \
18 if_plip.h if_ppp.h if_slip.h if_strip.h if_tunnel.h in6.h \
19 in_route.h ioctl.h ip.h ipmi_msgdefs.h ip_mp_alg.h ipsec.h \
20 ipx.h irda.h isdn_divertif.h iso_fs.h ite_gpio.h ixjuser.h \
21 jffs2.h keyctl.h limits.h major.h matroxfb.h meye.h minix_fs.h \
22 mmtimer.h mqueue.h mtio.h ncp_no.h netfilter_arp.h netrom.h \
23 nfs2.h nfs4_mount.h nfs_mount.h openprom_fs.h param.h \
24 pci_ids.h pci_regs.h personality.h pfkeyv2.h pg.h pkt_cls.h \
25 pkt_sched.h posix_types.h ppdev.h prctl.h ps2esdi.h qic117.h \
26 qnxtypes.h quotaio_v1.h quotaio_v2.h radeonfb.h raw.h \
27 resource.h rose.h sctp.h smbno.h snmp.h sockios.h som.h \
28 sound.h stddef.h synclink.h telephony.h termios.h ticable.h \
29 times.h tiocl.h tipc.h toshiba.h ultrasound.h un.h utime.h \
30 utsname.h video_decoder.h video_encoder.h videotext.h vt.h \
31 wavefront.h wireless.h xattr.h x25.h zorro_ids.h
32
33unifdef-y += acct.h adb.h adfs_fs.h agpgart.h apm_bios.h atalk.h \
34 atmarp.h atmdev.h atm.h atm_tcp.h audit.h auto_fs.h binfmts.h \
35 capability.h capi.h cciss_ioctl.h cdrom.h cm4000_cs.h \
36 cn_proc.h coda.h connector.h cramfs_fs.h cuda.h cyclades.h \
37 dccp.h dirent.h divert.h elfcore.h errno.h errqueue.h \
38 ethtool.h eventpoll.h ext2_fs.h ext3_fs.h fb.h fcntl.h \
39 filter.h flat.h fs.h ftape.h gameport.h generic_serial.h \
40 genhd.h hayesesp.h hdlcdrv.h hdlc.h hdreg.h hiddev.h hpet.h \
41 i2c.h i2o-dev.h icmpv6.h if_bridge.h if_ec.h \
42 if_eql.h if_ether.h if_frad.h if_ltalk.h if_pppox.h \
43 if_shaper.h if_tr.h if_tun.h if_vlan.h if_wanpipe.h igmp.h \
44 inet_diag.h in.h inotify.h input.h ipc.h ipmi.h ipv6.h \
45 ipv6_route.h isdn.h isdnif.h isdn_ppp.h isicom.h jbd.h \
46 joystick.h kdev_t.h kd.h kernelcapi.h kernel.h keyboard.h \
47 llc.h loop.h lp.h mempolicy.h mii.h mman.h mroute.h msdos_fs.h \
48 msg.h nbd.h ncp_fs.h ncp.h ncp_mount.h netdevice.h \
49 netfilter_bridge.h netfilter_decnet.h netfilter.h \
50 netfilter_ipv4.h netfilter_ipv6.h netfilter_logging.h net.h \
51 netlink.h nfs3.h nfs4.h nfsacl.h nfs_fs.h nfs.h nfs_idmap.h \
52 n_r3964.h nubus.h nvram.h parport.h patchkey.h pci.h pktcdvd.h \
53 pmu.h poll.h ppp_defs.h ppp-comp.h ptrace.h qnx4_fs.h quota.h \
54 random.h reboot.h reiserfs_fs.h reiserfs_xattr.h romfs_fs.h \
55 route.h rtc.h rtnetlink.h scc.h sched.h sdla.h \
56 selinux_netlink.h sem.h serial_core.h serial.h serio.h shm.h \
57 signal.h smb_fs.h smb.h smb_mount.h socket.h sonet.h sonypi.h \
58 soundcard.h stat.h sysctl.h tcp.h time.h timex.h tty.h types.h \
59 udf_fs_i.h udp.h uinput.h uio.h unistd.h usb_ch9.h \
60 usbdevice_fs.h user.h videodev2.h videodev.h wait.h \
61 wanrouter.h watchdog.h xfrm.h zftape.h
62
63objhdr-y := version.h
diff --git a/include/linux/byteorder/Kbuild b/include/linux/byteorder/Kbuild
new file mode 100644
index 000000000000..84a57d4fb212
--- /dev/null
+++ b/include/linux/byteorder/Kbuild
@@ -0,0 +1,2 @@
1unifdef-y += generic.h swabb.h swab.h
2header-y += big_endian.h little_endian.h pdp_endian.h
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 90663ad217f9..251c41e3ddd5 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -21,6 +21,18 @@ struct completion {
21#define DECLARE_COMPLETION(work) \ 21#define DECLARE_COMPLETION(work) \
22 struct completion work = COMPLETION_INITIALIZER(work) 22 struct completion work = COMPLETION_INITIALIZER(work)
23 23
24/*
25 * Lockdep needs to run a non-constant initializer for on-stack
26 * completions - so we use the _ONSTACK() variant for those that
27 * are on the kernel stack:
28 */
29#ifdef CONFIG_LOCKDEP
30# define DECLARE_COMPLETION_ONSTACK(work) \
31 struct completion work = ({ init_completion(&work); work; })
32#else
33# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
34#endif
35
24static inline void init_completion(struct completion *x) 36static inline void init_completion(struct completion *x)
25{ 37{
26 x->done = 0; 38 x->done = 0;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 0dd1610a94a9..471781ffeab1 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -114,6 +114,18 @@ struct dentry {
114 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ 114 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
115}; 115};
116 116
117/*
118 * dentry->d_lock spinlock nesting subclasses:
119 *
120 * 0: normal
121 * 1: nested
122 */
123enum dentry_d_lock_class
124{
125 DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */
126 DENTRY_D_LOCK_NESTED
127};
128
117struct dentry_operations { 129struct dentry_operations {
118 int (*d_revalidate)(struct dentry *, struct nameidata *); 130 int (*d_revalidate)(struct dentry *, struct nameidata *);
119 int (*d_hash) (struct dentry *, struct qstr *); 131 int (*d_hash) (struct dentry *, struct qstr *);
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
new file mode 100644
index 000000000000..6a7047851e48
--- /dev/null
+++ b/include/linux/debug_locks.h
@@ -0,0 +1,69 @@
1#ifndef __LINUX_DEBUG_LOCKING_H
2#define __LINUX_DEBUG_LOCKING_H
3
4extern int debug_locks;
5extern int debug_locks_silent;
6
7/*
8 * Generic 'turn off all lock debugging' function:
9 */
10extern int debug_locks_off(void);
11
12/*
13 * In the debug case we carry the caller's instruction pointer into
14 * other functions, but we dont want the function argument overhead
15 * in the nondebug case - hence these macros:
16 */
17#define _RET_IP_ (unsigned long)__builtin_return_address(0)
18#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
19
20#define DEBUG_LOCKS_WARN_ON(c) \
21({ \
22 int __ret = 0; \
23 \
24 if (unlikely(c)) { \
25 if (debug_locks_off()) \
26 WARN_ON(1); \
27 __ret = 1; \
28 } \
29 __ret; \
30})
31
32#ifdef CONFIG_SMP
33# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c)
34#else
35# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0)
36#endif
37
38#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
39 extern void locking_selftest(void);
40#else
41# define locking_selftest() do { } while (0)
42#endif
43
44#ifdef CONFIG_LOCKDEP
45extern void debug_show_all_locks(void);
46extern void debug_show_held_locks(struct task_struct *task);
47extern void debug_check_no_locks_freed(const void *from, unsigned long len);
48extern void debug_check_no_locks_held(struct task_struct *task);
49#else
50static inline void debug_show_all_locks(void)
51{
52}
53
54static inline void debug_show_held_locks(struct task_struct *task)
55{
56}
57
58static inline void
59debug_check_no_locks_freed(const void *from, unsigned long len)
60{
61}
62
63static inline void
64debug_check_no_locks_held(struct task_struct *task)
65{
66}
67#endif
68
69#endif
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 272010a6078a..c94d8f1d62e5 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -44,7 +44,7 @@ enum dma_event {
44}; 44};
45 45
46/** 46/**
47 * typedef dma_cookie_t 47 * typedef dma_cookie_t - an opaque DMA cookie
48 * 48 *
49 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code 49 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
50 */ 50 */
@@ -80,14 +80,14 @@ struct dma_chan_percpu {
80 80
81/** 81/**
82 * struct dma_chan - devices supply DMA channels, clients use them 82 * struct dma_chan - devices supply DMA channels, clients use them
83 * @client: ptr to the client user of this chan, will be NULL when unused 83 * @client: ptr to the client user of this chan, will be %NULL when unused
84 * @device: ptr to the dma device who supplies this channel, always !NULL 84 * @device: ptr to the dma device who supplies this channel, always !%NULL
85 * @cookie: last cookie value returned to client 85 * @cookie: last cookie value returned to client
86 * @chan_id: 86 * @chan_id: channel ID for sysfs
87 * @class_dev: 87 * @class_dev: class device for sysfs
88 * @refcount: kref, used in "bigref" slow-mode 88 * @refcount: kref, used in "bigref" slow-mode
89 * @slow_ref: 89 * @slow_ref: indicates that the DMA channel is free
90 * @rcu: 90 * @rcu: the DMA channel's RCU head
91 * @client_node: used to add this to the client chan list 91 * @client_node: used to add this to the client chan list
92 * @device_node: used to add this to the device chan list 92 * @device_node: used to add this to the device chan list
93 * @local: per-cpu pointer to a struct dma_chan_percpu 93 * @local: per-cpu pointer to a struct dma_chan_percpu
@@ -162,10 +162,17 @@ struct dma_client {
162 * @chancnt: how many DMA channels are supported 162 * @chancnt: how many DMA channels are supported
163 * @channels: the list of struct dma_chan 163 * @channels: the list of struct dma_chan
164 * @global_node: list_head for global dma_device_list 164 * @global_node: list_head for global dma_device_list
165 * @refcount: 165 * @refcount: reference count
166 * @done: 166 * @done: IO completion struct
167 * @dev_id: 167 * @dev_id: unique device ID
168 * Other func ptrs: used to make use of this device's capabilities 168 * @device_alloc_chan_resources: allocate resources and return the
169 * number of allocated descriptors
170 * @device_free_chan_resources: release DMA channel's resources
171 * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer
172 * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page
173 * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset
174 * @device_memcpy_complete: poll the status of an IOAT DMA transaction
175 * @device_memcpy_issue_pending: push appended descriptors to hardware
169 */ 176 */
170struct dma_device { 177struct dma_device {
171 178
@@ -211,7 +218,7 @@ void dma_async_client_chan_request(struct dma_client *client,
211 * Both @dest and @src must be mappable to a bus address according to the 218 * Both @dest and @src must be mappable to a bus address according to the
212 * DMA mapping API rules for streaming mappings. 219 * DMA mapping API rules for streaming mappings.
213 * Both @dest and @src must stay memory resident (kernel memory or locked 220 * Both @dest and @src must stay memory resident (kernel memory or locked
214 * user space pages) 221 * user space pages).
215 */ 222 */
216static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, 223static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
217 void *dest, void *src, size_t len) 224 void *dest, void *src, size_t len)
@@ -225,7 +232,7 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
225} 232}
226 233
227/** 234/**
228 * dma_async_memcpy_buf_to_pg - offloaded copy 235 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
229 * @chan: DMA channel to offload copy to 236 * @chan: DMA channel to offload copy to
230 * @page: destination page 237 * @page: destination page
231 * @offset: offset in page to copy to 238 * @offset: offset in page to copy to
@@ -250,18 +257,18 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
250} 257}
251 258
252/** 259/**
253 * dma_async_memcpy_buf_to_pg - offloaded copy 260 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
254 * @chan: DMA channel to offload copy to 261 * @chan: DMA channel to offload copy to
255 * @dest_page: destination page 262 * @dest_pg: destination page
256 * @dest_off: offset in page to copy to 263 * @dest_off: offset in page to copy to
257 * @src_page: source page 264 * @src_pg: source page
258 * @src_off: offset in page to copy from 265 * @src_off: offset in page to copy from
259 * @len: length 266 * @len: length
260 * 267 *
261 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus 268 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
262 * address according to the DMA mapping API rules for streaming mappings. 269 * address according to the DMA mapping API rules for streaming mappings.
263 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident 270 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
264 * (kernel memory or locked user space pages) 271 * (kernel memory or locked user space pages).
265 */ 272 */
266static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, 273static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
267 struct page *dest_pg, unsigned int dest_off, struct page *src_pg, 274 struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
@@ -278,7 +285,7 @@ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
278 285
279/** 286/**
280 * dma_async_memcpy_issue_pending - flush pending copies to HW 287 * dma_async_memcpy_issue_pending - flush pending copies to HW
281 * @chan: 288 * @chan: target DMA channel
282 * 289 *
283 * This allows drivers to push copies to HW in batches, 290 * This allows drivers to push copies to HW in batches,
284 * reducing MMIO writes where possible. 291 * reducing MMIO writes where possible.
diff --git a/include/linux/dvb/Kbuild b/include/linux/dvb/Kbuild
new file mode 100644
index 000000000000..63973af72fd5
--- /dev/null
+++ b/include/linux/dvb/Kbuild
@@ -0,0 +1,2 @@
1header-y += ca.h frontend.h net.h osd.h version.h
2unifdef-y := audio.h dmx.h video.h
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0c5e1c5b03db..8f3ab56765a5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -436,6 +436,21 @@ struct block_device {
436}; 436};
437 437
438/* 438/*
439 * bdev->bd_mutex nesting subclasses for the lock validator:
440 *
441 * 0: normal
442 * 1: 'whole'
443 * 2: 'partition'
444 */
445enum bdev_bd_mutex_lock_class
446{
447 BD_MUTEX_NORMAL,
448 BD_MUTEX_WHOLE,
449 BD_MUTEX_PARTITION
450};
451
452
453/*
439 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache 454 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
440 * radix trees 455 * radix trees
441 */ 456 */
@@ -543,6 +558,25 @@ struct inode {
543}; 558};
544 559
545/* 560/*
561 * inode->i_mutex nesting subclasses for the lock validator:
562 *
563 * 0: the object of the current VFS operation
564 * 1: parent
565 * 2: child/target
566 * 3: quota file
567 *
568 * The locking order between these classes is
569 * parent -> child -> normal -> quota
570 */
571enum inode_i_mutex_lock_class
572{
573 I_MUTEX_NORMAL,
574 I_MUTEX_PARENT,
575 I_MUTEX_CHILD,
576 I_MUTEX_QUOTA
577};
578
579/*
546 * NOTE: in a 32bit arch with a preemptable kernel and 580 * NOTE: in a 32bit arch with a preemptable kernel and
547 * an UP compile the i_size_read/write must be atomic 581 * an UP compile the i_size_read/write must be atomic
548 * with respect to the local cpu (unlike with preempt disabled), 582 * with respect to the local cpu (unlike with preempt disabled),
@@ -1276,6 +1310,8 @@ struct file_system_type {
1276 struct module *owner; 1310 struct module *owner;
1277 struct file_system_type * next; 1311 struct file_system_type * next;
1278 struct list_head fs_supers; 1312 struct list_head fs_supers;
1313 struct lock_class_key s_lock_key;
1314 struct lock_class_key s_umount_key;
1279}; 1315};
1280 1316
1281extern int get_sb_bdev(struct file_system_type *fs_type, 1317extern int get_sb_bdev(struct file_system_type *fs_type,
@@ -1407,6 +1443,7 @@ extern void bd_set_size(struct block_device *, loff_t size);
1407extern void bd_forget(struct inode *inode); 1443extern void bd_forget(struct inode *inode);
1408extern void bdput(struct block_device *); 1444extern void bdput(struct block_device *);
1409extern struct block_device *open_by_devnum(dev_t, unsigned); 1445extern struct block_device *open_by_devnum(dev_t, unsigned);
1446extern struct block_device *open_partition_by_devnum(dev_t, unsigned);
1410extern const struct file_operations def_blk_fops; 1447extern const struct file_operations def_blk_fops;
1411extern const struct address_space_operations def_blk_aops; 1448extern const struct address_space_operations def_blk_aops;
1412extern const struct file_operations def_chr_fops; 1449extern const struct file_operations def_chr_fops;
@@ -1417,6 +1454,7 @@ extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
1417extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); 1454extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
1418extern int blkdev_get(struct block_device *, mode_t, unsigned); 1455extern int blkdev_get(struct block_device *, mode_t, unsigned);
1419extern int blkdev_put(struct block_device *); 1456extern int blkdev_put(struct block_device *);
1457extern int blkdev_put_partition(struct block_device *);
1420extern int bd_claim(struct block_device *, void *); 1458extern int bd_claim(struct block_device *, void *);
1421extern void bd_release(struct block_device *); 1459extern void bd_release(struct block_device *);
1422#ifdef CONFIG_SYSFS 1460#ifdef CONFIG_SYSFS
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 114ae583cca9..50d8b5744cf6 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/preempt.h> 4#include <linux/preempt.h>
5#include <linux/smp_lock.h> 5#include <linux/smp_lock.h>
6#include <linux/lockdep.h>
6#include <asm/hardirq.h> 7#include <asm/hardirq.h>
7#include <asm/system.h> 8#include <asm/system.h>
8 9
@@ -86,9 +87,6 @@ extern void synchronize_irq(unsigned int irq);
86# define synchronize_irq(irq) barrier() 87# define synchronize_irq(irq) barrier()
87#endif 88#endif
88 89
89#define nmi_enter() irq_enter()
90#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET)
91
92struct task_struct; 90struct task_struct;
93 91
94#ifndef CONFIG_VIRT_CPU_ACCOUNTING 92#ifndef CONFIG_VIRT_CPU_ACCOUNTING
@@ -97,12 +95,35 @@ static inline void account_system_vtime(struct task_struct *tsk)
97} 95}
98#endif 96#endif
99 97
98/*
99 * It is safe to do non-atomic ops on ->hardirq_context,
100 * because NMI handlers may not preempt and the ops are
101 * always balanced, so the interrupted value of ->hardirq_context
102 * will always be restored.
103 */
100#define irq_enter() \ 104#define irq_enter() \
101 do { \ 105 do { \
102 account_system_vtime(current); \ 106 account_system_vtime(current); \
103 add_preempt_count(HARDIRQ_OFFSET); \ 107 add_preempt_count(HARDIRQ_OFFSET); \
108 trace_hardirq_enter(); \
109 } while (0)
110
111/*
112 * Exit irq context without processing softirqs:
113 */
114#define __irq_exit() \
115 do { \
116 trace_hardirq_exit(); \
117 account_system_vtime(current); \
118 sub_preempt_count(HARDIRQ_OFFSET); \
104 } while (0) 119 } while (0)
105 120
121/*
122 * Exit irq context and process softirqs if needed:
123 */
106extern void irq_exit(void); 124extern void irq_exit(void);
107 125
126#define nmi_enter() do { lockdep_off(); irq_enter(); } while (0)
127#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0)
128
108#endif /* LINUX_HARDIRQ_H */ 129#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/hdlc/Kbuild b/include/linux/hdlc/Kbuild
new file mode 100644
index 000000000000..1fb26448faa9
--- /dev/null
+++ b/include/linux/hdlc/Kbuild
@@ -0,0 +1 @@
header-y += ioctl.h
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 07d7305f131e..e4bccbcc2750 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -91,6 +91,7 @@ struct hrtimer_base {
91 ktime_t (*get_softirq_time)(void); 91 ktime_t (*get_softirq_time)(void);
92 struct hrtimer *curr_timer; 92 struct hrtimer *curr_timer;
93 ktime_t softirq_time; 93 ktime_t softirq_time;
94 struct lock_class_key lock_key;
94}; 95};
95 96
96/* 97/*
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 285316c836b5..dc7abef10965 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1359,7 +1359,7 @@ extern struct semaphore ide_cfg_sem;
1359 * ide_drive_t->hwif: constant, no locking 1359 * ide_drive_t->hwif: constant, no locking
1360 */ 1360 */
1361 1361
1362#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable(); } while (0) 1362#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0)
1363 1363
1364extern struct bus_type ide_bus_type; 1364extern struct bus_type ide_bus_type;
1365 1365
diff --git a/include/linux/idr.h b/include/linux/idr.h
index f559a719dbe8..826803449db7 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -66,7 +66,7 @@ struct idr {
66 .id_free = NULL, \ 66 .id_free = NULL, \
67 .layers = 0, \ 67 .layers = 0, \
68 .id_free_cnt = 0, \ 68 .id_free_cnt = 0, \
69 .lock = SPIN_LOCK_UNLOCKED, \ 69 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
70} 70}
71#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) 71#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
72 72
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 3a256957fb56..60aac2cea0cf 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -3,6 +3,8 @@
3 3
4#include <linux/file.h> 4#include <linux/file.h>
5#include <linux/rcupdate.h> 5#include <linux/rcupdate.h>
6#include <linux/irqflags.h>
7#include <linux/lockdep.h>
6 8
7#define INIT_FDTABLE \ 9#define INIT_FDTABLE \
8{ \ 10{ \
@@ -21,7 +23,7 @@
21 .count = ATOMIC_INIT(1), \ 23 .count = ATOMIC_INIT(1), \
22 .fdt = &init_files.fdtab, \ 24 .fdt = &init_files.fdtab, \
23 .fdtab = INIT_FDTABLE, \ 25 .fdtab = INIT_FDTABLE, \
24 .file_lock = SPIN_LOCK_UNLOCKED, \ 26 .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), \
25 .next_fd = 0, \ 27 .next_fd = 0, \
26 .close_on_exec_init = { { 0, } }, \ 28 .close_on_exec_init = { { 0, } }, \
27 .open_fds_init = { { 0, } }, \ 29 .open_fds_init = { { 0, } }, \
@@ -36,7 +38,7 @@
36 .user_id = 0, \ 38 .user_id = 0, \
37 .next = NULL, \ 39 .next = NULL, \
38 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \ 40 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \
39 .ctx_lock = SPIN_LOCK_UNLOCKED, \ 41 .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \
40 .reqs_active = 0U, \ 42 .reqs_active = 0U, \
41 .max_reqs = ~0U, \ 43 .max_reqs = ~0U, \
42} 44}
@@ -48,7 +50,7 @@
48 .mm_users = ATOMIC_INIT(2), \ 50 .mm_users = ATOMIC_INIT(2), \
49 .mm_count = ATOMIC_INIT(1), \ 51 .mm_count = ATOMIC_INIT(1), \
50 .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \ 52 .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \
51 .page_table_lock = SPIN_LOCK_UNLOCKED, \ 53 .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \
52 .mmlist = LIST_HEAD_INIT(name.mmlist), \ 54 .mmlist = LIST_HEAD_INIT(name.mmlist), \
53 .cpu_vm_mask = CPU_MASK_ALL, \ 55 .cpu_vm_mask = CPU_MASK_ALL, \
54} 56}
@@ -69,7 +71,7 @@
69#define INIT_SIGHAND(sighand) { \ 71#define INIT_SIGHAND(sighand) { \
70 .count = ATOMIC_INIT(1), \ 72 .count = ATOMIC_INIT(1), \
71 .action = { { { .sa_handler = NULL, } }, }, \ 73 .action = { { { .sa_handler = NULL, } }, }, \
72 .siglock = SPIN_LOCK_UNLOCKED, \ 74 .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
73} 75}
74 76
75extern struct group_info init_groups; 77extern struct group_info init_groups;
@@ -119,12 +121,13 @@ extern struct group_info init_groups;
119 .list = LIST_HEAD_INIT(tsk.pending.list), \ 121 .list = LIST_HEAD_INIT(tsk.pending.list), \
120 .signal = {{0}}}, \ 122 .signal = {{0}}}, \
121 .blocked = {{0}}, \ 123 .blocked = {{0}}, \
122 .alloc_lock = SPIN_LOCK_UNLOCKED, \ 124 .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
123 .journal_info = NULL, \ 125 .journal_info = NULL, \
124 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 126 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
125 .fs_excl = ATOMIC_INIT(0), \ 127 .fs_excl = ATOMIC_INIT(0), \
126 .pi_lock = SPIN_LOCK_UNLOCKED, \ 128 .pi_lock = SPIN_LOCK_UNLOCKED, \
127 INIT_RT_MUTEXES(tsk) \ 129 INIT_TRACE_IRQFLAGS \
130 INIT_LOCKDEP \
128} 131}
129 132
130 133
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index da3e0dbe61d4..d5afee95fd43 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -10,6 +10,7 @@
10#include <linux/irqreturn.h> 10#include <linux/irqreturn.h>
11#include <linux/hardirq.h> 11#include <linux/hardirq.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/irqflags.h>
13#include <asm/atomic.h> 14#include <asm/atomic.h>
14#include <asm/ptrace.h> 15#include <asm/ptrace.h>
15#include <asm/system.h> 16#include <asm/system.h>
@@ -80,12 +81,64 @@ extern int request_irq(unsigned int,
80 unsigned long, const char *, void *); 81 unsigned long, const char *, void *);
81extern void free_irq(unsigned int, void *); 82extern void free_irq(unsigned int, void *);
82 83
84/*
85 * On lockdep we dont want to enable hardirqs in hardirq
86 * context. Use local_irq_enable_in_hardirq() to annotate
87 * kernel code that has to do this nevertheless (pretty much
88 * the only valid case is for old/broken hardware that is
89 * insanely slow).
90 *
91 * NOTE: in theory this might break fragile code that relies
92 * on hardirq delivery - in practice we dont seem to have such
93 * places left. So the only effect should be slightly increased
94 * irqs-off latencies.
95 */
96#ifdef CONFIG_LOCKDEP
97# define local_irq_enable_in_hardirq() do { } while (0)
98#else
99# define local_irq_enable_in_hardirq() local_irq_enable()
100#endif
83 101
84#ifdef CONFIG_GENERIC_HARDIRQS 102#ifdef CONFIG_GENERIC_HARDIRQS
85extern void disable_irq_nosync(unsigned int irq); 103extern void disable_irq_nosync(unsigned int irq);
86extern void disable_irq(unsigned int irq); 104extern void disable_irq(unsigned int irq);
87extern void enable_irq(unsigned int irq); 105extern void enable_irq(unsigned int irq);
88 106
107/*
108 * Special lockdep variants of irq disabling/enabling.
109 * These should be used for locking constructs that
110 * know that a particular irq context which is disabled,
111 * and which is the only irq-context user of a lock,
112 * that it's safe to take the lock in the irq-disabled
113 * section without disabling hardirqs.
114 *
115 * On !CONFIG_LOCKDEP they are equivalent to the normal
116 * irq disable/enable methods.
117 */
118static inline void disable_irq_nosync_lockdep(unsigned int irq)
119{
120 disable_irq_nosync(irq);
121#ifdef CONFIG_LOCKDEP
122 local_irq_disable();
123#endif
124}
125
126static inline void disable_irq_lockdep(unsigned int irq)
127{
128 disable_irq(irq);
129#ifdef CONFIG_LOCKDEP
130 local_irq_disable();
131#endif
132}
133
134static inline void enable_irq_lockdep(unsigned int irq)
135{
136#ifdef CONFIG_LOCKDEP
137 local_irq_enable();
138#endif
139 enable_irq(irq);
140}
141
89/* IRQ wakeup (PM) control: */ 142/* IRQ wakeup (PM) control: */
90extern int set_irq_wake(unsigned int irq, unsigned int on); 143extern int set_irq_wake(unsigned int irq, unsigned int on);
91 144
@@ -99,7 +152,19 @@ static inline int disable_irq_wake(unsigned int irq)
99 return set_irq_wake(irq, 0); 152 return set_irq_wake(irq, 0);
100} 153}
101 154
102#endif 155#else /* !CONFIG_GENERIC_HARDIRQS */
156/*
157 * NOTE: non-genirq architectures, if they want to support the lock
158 * validator need to define the methods below in their asm/irq.h
159 * files, under an #ifdef CONFIG_LOCKDEP section.
160 */
161# ifndef CONFIG_LOCKDEP
162# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
163# define disable_irq_lockdep(irq) disable_irq(irq)
164# define enable_irq_lockdep(irq) enable_irq(irq)
165# endif
166
167#endif /* CONFIG_GENERIC_HARDIRQS */
103 168
104#ifndef __ARCH_SET_SOFTIRQ_PENDING 169#ifndef __ARCH_SET_SOFTIRQ_PENDING
105#define set_softirq_pending(x) (local_softirq_pending() = (x)) 170#define set_softirq_pending(x) (local_softirq_pending() = (x))
@@ -135,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x)
135#define save_and_cli(x) save_and_cli(&x) 200#define save_and_cli(x) save_and_cli(&x)
136#endif /* CONFIG_SMP */ 201#endif /* CONFIG_SMP */
137 202
138/* SoftIRQ primitives. */ 203extern void local_bh_disable(void);
139#define local_bh_disable() \ 204extern void __local_bh_enable(void);
140 do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0) 205extern void _local_bh_enable(void);
141#define __local_bh_enable() \
142 do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
143
144extern void local_bh_enable(void); 206extern void local_bh_enable(void);
207extern void local_bh_enable_ip(unsigned long ip);
145 208
146/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high 209/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
147 frequency threaded job scheduling. For almost all the purposes 210 frequency threaded job scheduling. For almost all the purposes
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 87a9fc039b47..5612dfeeae50 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -55,6 +55,7 @@ struct resource_list {
55#define IORESOURCE_IRQ_LOWEDGE (1<<1) 55#define IORESOURCE_IRQ_LOWEDGE (1<<1)
56#define IORESOURCE_IRQ_HIGHLEVEL (1<<2) 56#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
57#define IORESOURCE_IRQ_LOWLEVEL (1<<3) 57#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
58#define IORESOURCE_IRQ_SHAREABLE (1<<4)
58 59
59/* ISA PnP DMA specific bits (IORESOURCE_BITS) */ 60/* ISA PnP DMA specific bits (IORESOURCE_BITS) */
60#define IORESOURCE_DMA_TYPE_MASK (3<<0) 61#define IORESOURCE_DMA_TYPE_MASK (3<<0)
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
new file mode 100644
index 000000000000..412e025bc5c7
--- /dev/null
+++ b/include/linux/irqflags.h
@@ -0,0 +1,96 @@
1/*
2 * include/linux/irqflags.h
3 *
4 * IRQ flags tracing: follow the state of the hardirq and softirq flags and
5 * provide callbacks for transitions between ON and OFF states.
6 *
7 * This file gets included from lowlevel asm headers too, to provide
8 * wrapped versions of the local_irq_*() APIs, based on the
9 * raw_local_irq_*() macros from the lowlevel headers.
10 */
11#ifndef _LINUX_TRACE_IRQFLAGS_H
12#define _LINUX_TRACE_IRQFLAGS_H
13
14#ifdef CONFIG_TRACE_IRQFLAGS
15 extern void trace_hardirqs_on(void);
16 extern void trace_hardirqs_off(void);
17 extern void trace_softirqs_on(unsigned long ip);
18 extern void trace_softirqs_off(unsigned long ip);
19# define trace_hardirq_context(p) ((p)->hardirq_context)
20# define trace_softirq_context(p) ((p)->softirq_context)
21# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
22# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
23# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
24# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
25# define trace_softirq_enter() do { current->softirq_context++; } while (0)
26# define trace_softirq_exit() do { current->softirq_context--; } while (0)
27# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
28#else
29# define trace_hardirqs_on() do { } while (0)
30# define trace_hardirqs_off() do { } while (0)
31# define trace_softirqs_on(ip) do { } while (0)
32# define trace_softirqs_off(ip) do { } while (0)
33# define trace_hardirq_context(p) 0
34# define trace_softirq_context(p) 0
35# define trace_hardirqs_enabled(p) 0
36# define trace_softirqs_enabled(p) 0
37# define trace_hardirq_enter() do { } while (0)
38# define trace_hardirq_exit() do { } while (0)
39# define trace_softirq_enter() do { } while (0)
40# define trace_softirq_exit() do { } while (0)
41# define INIT_TRACE_IRQFLAGS
42#endif
43
44#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
45
46#include <asm/irqflags.h>
47
48#define local_irq_enable() \
49 do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
50#define local_irq_disable() \
51 do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
52#define local_irq_save(flags) \
53 do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
54
55#define local_irq_restore(flags) \
56 do { \
57 if (raw_irqs_disabled_flags(flags)) { \
58 raw_local_irq_restore(flags); \
59 trace_hardirqs_off(); \
60 } else { \
61 trace_hardirqs_on(); \
62 raw_local_irq_restore(flags); \
63 } \
64 } while (0)
65#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
66/*
67 * The local_irq_*() APIs are equal to the raw_local_irq*()
68 * if !TRACE_IRQFLAGS.
69 */
70# define raw_local_irq_disable() local_irq_disable()
71# define raw_local_irq_enable() local_irq_enable()
72# define raw_local_irq_save(flags) local_irq_save(flags)
73# define raw_local_irq_restore(flags) local_irq_restore(flags)
74#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
75
76#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
77#define safe_halt() \
78 do { \
79 trace_hardirqs_on(); \
80 raw_safe_halt(); \
81 } while (0)
82
83#define local_save_flags(flags) raw_local_save_flags(flags)
84
85#define irqs_disabled() \
86({ \
87 unsigned long flags; \
88 \
89 raw_local_save_flags(flags); \
90 raw_irqs_disabled_flags(flags); \
91})
92
93#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
94#endif /* CONFIG_X86 */
95
96#endif
diff --git a/include/linux/isdn/Kbuild b/include/linux/isdn/Kbuild
new file mode 100644
index 000000000000..991cdb29ab2e
--- /dev/null
+++ b/include/linux/isdn/Kbuild
@@ -0,0 +1 @@
header-y += capicmd.h
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 54e2549f96ba..849043ce4ed6 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -57,10 +57,25 @@ do { \
57#define print_fn_descriptor_symbol(fmt, addr) print_symbol(fmt, addr) 57#define print_fn_descriptor_symbol(fmt, addr) print_symbol(fmt, addr)
58#endif 58#endif
59 59
60#define print_symbol(fmt, addr) \ 60static inline void print_symbol(const char *fmt, unsigned long addr)
61do { \ 61{
62 __check_printsym_format(fmt, ""); \ 62 __check_printsym_format(fmt, "");
63 __print_symbol(fmt, addr); \ 63 __print_symbol(fmt, (unsigned long)
64 __builtin_extract_return_addr((void *)addr));
65}
66
67#ifndef CONFIG_64BIT
68#define print_ip_sym(ip) \
69do { \
70 printk("[<%08lx>]", ip); \
71 print_symbol(" %s\n", ip); \
64} while(0) 72} while(0)
73#else
74#define print_ip_sym(ip) \
75do { \
76 printk("[<%016lx>]", ip); \
77 print_symbol(" %s\n", ip); \
78} while(0)
79#endif
65 80
66#endif /*_LINUX_KALLSYMS_H*/ 81#endif /*_LINUX_KALLSYMS_H*/
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
new file mode 100644
index 000000000000..316e0fb8d7b1
--- /dev/null
+++ b/include/linux/lockdep.h
@@ -0,0 +1,353 @@
1/*
2 * Runtime locking correctness validator
3 *
4 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * see Documentation/lockdep-design.txt for more details.
7 */
8#ifndef __LINUX_LOCKDEP_H
9#define __LINUX_LOCKDEP_H
10
11#include <linux/linkage.h>
12#include <linux/list.h>
13#include <linux/debug_locks.h>
14#include <linux/stacktrace.h>
15
16#ifdef CONFIG_LOCKDEP
17
18/*
19 * Lock-class usage-state bits:
20 */
21enum lock_usage_bit
22{
23 LOCK_USED = 0,
24 LOCK_USED_IN_HARDIRQ,
25 LOCK_USED_IN_SOFTIRQ,
26 LOCK_ENABLED_SOFTIRQS,
27 LOCK_ENABLED_HARDIRQS,
28 LOCK_USED_IN_HARDIRQ_READ,
29 LOCK_USED_IN_SOFTIRQ_READ,
30 LOCK_ENABLED_SOFTIRQS_READ,
31 LOCK_ENABLED_HARDIRQS_READ,
32 LOCK_USAGE_STATES
33};
34
35/*
36 * Usage-state bitmasks:
37 */
38#define LOCKF_USED (1 << LOCK_USED)
39#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
40#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
41#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
42#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
43
44#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
45#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
46
47#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
48#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
49#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
50#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
51
52#define LOCKF_ENABLED_IRQS_READ \
53 (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
54#define LOCKF_USED_IN_IRQ_READ \
55 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
56
57#define MAX_LOCKDEP_SUBCLASSES 8UL
58
59/*
60 * Lock-classes are keyed via unique addresses, by embedding the
61 * lockclass-key into the kernel (or module) .data section. (For
62 * static locks we use the lock address itself as the key.)
63 */
64struct lockdep_subclass_key {
65 char __one_byte;
66} __attribute__ ((__packed__));
67
68struct lock_class_key {
69 struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
70};
71
72/*
73 * The lock-class itself:
74 */
75struct lock_class {
76 /*
77 * class-hash:
78 */
79 struct list_head hash_entry;
80
81 /*
82 * global list of all lock-classes:
83 */
84 struct list_head lock_entry;
85
86 struct lockdep_subclass_key *key;
87 unsigned int subclass;
88
89 /*
90 * IRQ/softirq usage tracking bits:
91 */
92 unsigned long usage_mask;
93 struct stack_trace usage_traces[LOCK_USAGE_STATES];
94
95 /*
96 * These fields represent a directed graph of lock dependencies,
97 * to every node we attach a list of "forward" and a list of
98 * "backward" graph nodes.
99 */
100 struct list_head locks_after, locks_before;
101
102 /*
103 * Generation counter, when doing certain classes of graph walking,
104 * to ensure that we check one node only once:
105 */
106 unsigned int version;
107
108 /*
109 * Statistics counter:
110 */
111 unsigned long ops;
112
113 const char *name;
114 int name_version;
115};
116
117/*
118 * Map the lock object (the lock instance) to the lock-class object.
119 * This is embedded into specific lock instances:
120 */
121struct lockdep_map {
122 struct lock_class_key *key;
123 struct lock_class *class[MAX_LOCKDEP_SUBCLASSES];
124 const char *name;
125};
126
127/*
128 * Every lock has a list of other locks that were taken after it.
129 * We only grow the list, never remove from it:
130 */
131struct lock_list {
132 struct list_head entry;
133 struct lock_class *class;
134 struct stack_trace trace;
135};
136
137/*
138 * We record lock dependency chains, so that we can cache them:
139 */
140struct lock_chain {
141 struct list_head entry;
142 u64 chain_key;
143};
144
145struct held_lock {
146 /*
147 * One-way hash of the dependency chain up to this point. We
148 * hash the hashes step by step as the dependency chain grows.
149 *
150 * We use it for dependency-caching and we skip detection
151 * passes and dependency-updates if there is a cache-hit, so
152 * it is absolutely critical for 100% coverage of the validator
153 * to have a unique key value for every unique dependency path
154 * that can occur in the system, to make a unique hash value
155 * as likely as possible - hence the 64-bit width.
156 *
157 * The task struct holds the current hash value (initialized
158 * with zero), here we store the previous hash value:
159 */
160 u64 prev_chain_key;
161 struct lock_class *class;
162 unsigned long acquire_ip;
163 struct lockdep_map *instance;
164
165 /*
166 * The lock-stack is unified in that the lock chains of interrupt
167 * contexts nest ontop of process context chains, but we 'separate'
168 * the hashes by starting with 0 if we cross into an interrupt
169 * context, and we also keep do not add cross-context lock
170 * dependencies - the lock usage graph walking covers that area
171 * anyway, and we'd just unnecessarily increase the number of
172 * dependencies otherwise. [Note: hardirq and softirq contexts
173 * are separated from each other too.]
174 *
175 * The following field is used to detect when we cross into an
176 * interrupt context:
177 */
178 int irq_context;
179 int trylock;
180 int read;
181 int check;
182 int hardirqs_off;
183};
184
185/*
186 * Initialization, self-test and debugging-output methods:
187 */
188extern void lockdep_init(void);
189extern void lockdep_info(void);
190extern void lockdep_reset(void);
191extern void lockdep_reset_lock(struct lockdep_map *lock);
192extern void lockdep_free_key_range(void *start, unsigned long size);
193
194extern void lockdep_off(void);
195extern void lockdep_on(void);
196extern int lockdep_internal(void);
197
198/*
199 * These methods are used by specific locking variants (spinlocks,
200 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
201 * to lockdep:
202 */
203
204extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
205 struct lock_class_key *key);
206
207/*
208 * Reinitialize a lock key - for cases where there is special locking or
209 * special initialization of locks so that the validator gets the scope
210 * of dependencies wrong: they are either too broad (they need a class-split)
211 * or they are too narrow (they suffer from a false class-split):
212 */
213#define lockdep_set_class(lock, key) \
214 lockdep_init_map(&(lock)->dep_map, #key, key)
215#define lockdep_set_class_and_name(lock, key, name) \
216 lockdep_init_map(&(lock)->dep_map, name, key)
217
218/*
219 * Acquire a lock.
220 *
221 * Values for "read":
222 *
223 * 0: exclusive (write) acquire
224 * 1: read-acquire (no recursion allowed)
225 * 2: read-acquire with same-instance recursion allowed
226 *
227 * Values for check:
228 *
229 * 0: disabled
230 * 1: simple checks (freeing, held-at-exit-time, etc.)
231 * 2: full validation
232 */
233extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
234 int trylock, int read, int check, unsigned long ip);
235
236extern void lock_release(struct lockdep_map *lock, int nested,
237 unsigned long ip);
238
239# define INIT_LOCKDEP .lockdep_recursion = 0,
240
241#else /* !LOCKDEP */
242
243static inline void lockdep_off(void)
244{
245}
246
247static inline void lockdep_on(void)
248{
249}
250
251static inline int lockdep_internal(void)
252{
253 return 0;
254}
255
256# define lock_acquire(l, s, t, r, c, i) do { } while (0)
257# define lock_release(l, n, i) do { } while (0)
258# define lockdep_init() do { } while (0)
259# define lockdep_info() do { } while (0)
260# define lockdep_init_map(lock, name, key) do { (void)(key); } while (0)
261# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
262# define lockdep_set_class_and_name(lock, key, name) \
263 do { (void)(key); } while (0)
264# define INIT_LOCKDEP
265# define lockdep_reset() do { debug_locks = 1; } while (0)
266# define lockdep_free_key_range(start, size) do { } while (0)
267/*
268 * The class key takes no space if lockdep is disabled:
269 */
270struct lock_class_key { };
271#endif /* !LOCKDEP */
272
273#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
274extern void early_init_irq_lock_class(void);
275#else
276# define early_init_irq_lock_class() do { } while (0)
277#endif
278
279#ifdef CONFIG_TRACE_IRQFLAGS
280extern void early_boot_irqs_off(void);
281extern void early_boot_irqs_on(void);
282#else
283# define early_boot_irqs_off() do { } while (0)
284# define early_boot_irqs_on() do { } while (0)
285#endif
286
287/*
288 * For trivial one-depth nesting of a lock-class, the following
289 * global define can be used. (Subsystems with multiple levels
290 * of nesting should define their own lock-nesting subclasses.)
291 */
292#define SINGLE_DEPTH_NESTING 1
293
294/*
295 * Map the dependency ops to NOP or to real lockdep ops, depending
296 * on the per lock-class debug mode:
297 */
298
299#ifdef CONFIG_DEBUG_LOCK_ALLOC
300# ifdef CONFIG_PROVE_LOCKING
301# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
302# else
303# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
304# endif
305# define spin_release(l, n, i) lock_release(l, n, i)
306#else
307# define spin_acquire(l, s, t, i) do { } while (0)
308# define spin_release(l, n, i) do { } while (0)
309#endif
310
311#ifdef CONFIG_DEBUG_LOCK_ALLOC
312# ifdef CONFIG_PROVE_LOCKING
313# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
314# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i)
315# else
316# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
317# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i)
318# endif
319# define rwlock_release(l, n, i) lock_release(l, n, i)
320#else
321# define rwlock_acquire(l, s, t, i) do { } while (0)
322# define rwlock_acquire_read(l, s, t, i) do { } while (0)
323# define rwlock_release(l, n, i) do { } while (0)
324#endif
325
326#ifdef CONFIG_DEBUG_LOCK_ALLOC
327# ifdef CONFIG_PROVE_LOCKING
328# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
329# else
330# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
331# endif
332# define mutex_release(l, n, i) lock_release(l, n, i)
333#else
334# define mutex_acquire(l, s, t, i) do { } while (0)
335# define mutex_release(l, n, i) do { } while (0)
336#endif
337
338#ifdef CONFIG_DEBUG_LOCK_ALLOC
339# ifdef CONFIG_PROVE_LOCKING
340# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
341# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i)
342# else
343# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
344# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i)
345# endif
346# define rwsem_release(l, n, i) lock_release(l, n, i)
347#else
348# define rwsem_acquire(l, s, t, i) do { } while (0)
349# define rwsem_acquire_read(l, s, t, i) do { } while (0)
350# define rwsem_release(l, n, i) do { } while (0)
351#endif
352
353#endif /* __LINUX_LOCKDEP_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 75179529e399..990957e0929f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -14,6 +14,7 @@
14#include <linux/prio_tree.h> 14#include <linux/prio_tree.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/debug_locks.h>
17 18
18struct mempolicy; 19struct mempolicy;
19struct anon_vma; 20struct anon_vma;
@@ -1034,13 +1035,6 @@ static inline void vm_stat_account(struct mm_struct *mm,
1034} 1035}
1035#endif /* CONFIG_PROC_FS */ 1036#endif /* CONFIG_PROC_FS */
1036 1037
1037static inline void
1038debug_check_no_locks_freed(const void *from, unsigned long len)
1039{
1040 mutex_debug_check_no_locks_freed(from, len);
1041 rt_mutex_debug_check_no_locks_freed(from, len);
1042}
1043
1044#ifndef CONFIG_DEBUG_PAGEALLOC 1038#ifndef CONFIG_DEBUG_PAGEALLOC
1045static inline void 1039static inline void
1046kernel_map_pages(struct page *page, int numpages, int enable) 1040kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 27e748eb72b0..656b588a9f96 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -150,6 +150,10 @@ struct zone {
150 unsigned long lowmem_reserve[MAX_NR_ZONES]; 150 unsigned long lowmem_reserve[MAX_NR_ZONES];
151 151
152#ifdef CONFIG_NUMA 152#ifdef CONFIG_NUMA
153 /*
154 * zone reclaim becomes active if more unmapped pages exist.
155 */
156 unsigned long min_unmapped_ratio;
153 struct per_cpu_pageset *pageset[NR_CPUS]; 157 struct per_cpu_pageset *pageset[NR_CPUS];
154#else 158#else
155 struct per_cpu_pageset pageset[NR_CPUS]; 159 struct per_cpu_pageset pageset[NR_CPUS];
@@ -414,6 +418,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
414 void __user *, size_t *, loff_t *); 418 void __user *, size_t *, loff_t *);
415int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *, 419int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
416 void __user *, size_t *, loff_t *); 420 void __user *, size_t *, loff_t *);
421int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
422 struct file *, void __user *, size_t *, loff_t *);
417 423
418#include <linux/topology.h> 424#include <linux/topology.h>
419/* Returns the number of the current Node. */ 425/* Returns the number of the current Node. */
diff --git a/include/linux/module.h b/include/linux/module.h
index 9e9dc7c24d95..d06c74fb8c26 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -358,6 +358,7 @@ static inline int module_is_live(struct module *mod)
358/* Is this address in a module? (second is with no locks, for oops) */ 358/* Is this address in a module? (second is with no locks, for oops) */
359struct module *module_text_address(unsigned long addr); 359struct module *module_text_address(unsigned long addr);
360struct module *__module_text_address(unsigned long addr); 360struct module *__module_text_address(unsigned long addr);
361int is_module_address(unsigned long addr);
361 362
362/* Returns module and fills in value, defined and namebuf, or NULL if 363/* Returns module and fills in value, defined and namebuf, or NULL if
363 symnum out of range. */ 364 symnum out of range. */
@@ -496,6 +497,11 @@ static inline struct module *__module_text_address(unsigned long addr)
496 return NULL; 497 return NULL;
497} 498}
498 499
500static inline int is_module_address(unsigned long addr)
501{
502 return 0;
503}
504
499/* Get/put a kernel symbol (calls should be symmetric) */ 505/* Get/put a kernel symbol (calls should be symmetric) */
500#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); }) 506#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
501#define symbol_put(x) do { } while(0) 507#define symbol_put(x) do { } while(0)
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 7a7fbe87fef0..1221b7c44158 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -19,21 +19,21 @@
19 19
20/** 20/**
21 * struct nand_bbt_descr - bad block table descriptor 21 * struct nand_bbt_descr - bad block table descriptor
22 * @param options options for this descriptor 22 * @options: options for this descriptor
23 * @param pages the page(s) where we find the bbt, used with 23 * @pages: the page(s) where we find the bbt, used with
24 * option BBT_ABSPAGE when bbt is searched, 24 * option BBT_ABSPAGE when bbt is searched,
25 * then we store the found bbts pages here. 25 * then we store the found bbts pages here.
26 * Its an array and supports up to 8 chips now 26 * Its an array and supports up to 8 chips now
27 * @param offs offset of the pattern in the oob area of the page 27 * @offs: offset of the pattern in the oob area of the page
28 * @param veroffs offset of the bbt version counter in the oob are of the page 28 * @veroffs: offset of the bbt version counter in the oob area of the page
29 * @param version version read from the bbt page during scan 29 * @version: version read from the bbt page during scan
30 * @param len length of the pattern, if 0 no pattern check is performed 30 * @len: length of the pattern, if 0 no pattern check is performed
31 * @param maxblocks maximum number of blocks to search for a bbt. This number of 31 * @maxblocks: maximum number of blocks to search for a bbt. This
32 * blocks is reserved at the end of the device 32 * number of blocks is reserved at the end of the device
33 * where the tables are written. 33 * where the tables are written.
34 * @param reserved_block_code if non-0, this pattern denotes a reserved 34 * @reserved_block_code: if non-0, this pattern denotes a reserved
35 * (rather than bad) block in the stored bbt 35 * (rather than bad) block in the stored bbt
36 * @param pattern pattern to identify bad block table or factory marked 36 * @pattern: pattern to identify bad block table or factory marked
37 * good / bad blocks, can be NULL, if len = 0 37 * good / bad blocks, can be NULL, if len = 0
38 * 38 *
39 * Descriptor for the bad block table marker and the descriptor for the 39 * Descriptor for the bad block table marker and the descriptor for the
@@ -93,12 +93,15 @@ struct nand_bbt_descr {
93#define ONENAND_BADBLOCK_POS 0 93#define ONENAND_BADBLOCK_POS 0
94 94
95/** 95/**
96 * struct bbt_info - [GENERIC] Bad Block Table data structure 96 * struct bbm_info - [GENERIC] Bad Block Table data structure
97 * @param bbt_erase_shift [INTERN] number of address bits in a bbt entry 97 * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
98 * @param badblockpos [INTERN] position of the bad block marker in the oob area 98 * @badblockpos: [INTERN] position of the bad block marker in the oob area
99 * @param bbt [INTERN] bad block table pointer 99 * @options: options for this descriptor
100 * @param badblock_pattern [REPLACEABLE] bad block scan pattern used for initial bad block scan 100 * @bbt: [INTERN] bad block table pointer
101 * @param priv [OPTIONAL] pointer to private bbm date 101 * @isbad_bbt: function to determine if a block is bad
102 * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for
103 * initial bad block scan
104 * @priv: [OPTIONAL] pointer to private bbm date
102 */ 105 */
103struct bbm_info { 106struct bbm_info {
104 int bbt_erase_shift; 107 int bbt_erase_shift;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9b7a2b525d63..94a443d45258 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -77,11 +77,11 @@ typedef enum {
77 * 77 *
78 * @len: number of bytes to write/read. When a data buffer is given 78 * @len: number of bytes to write/read. When a data buffer is given
79 * (datbuf != NULL) this is the number of data bytes. When 79 * (datbuf != NULL) this is the number of data bytes. When
80 + no data buffer is available this is the number of oob bytes. 80 * no data buffer is available this is the number of oob bytes.
81 * 81 *
82 * @retlen: number of bytes written/read. When a data buffer is given 82 * @retlen: number of bytes written/read. When a data buffer is given
83 * (datbuf != NULL) this is the number of data bytes. When 83 * (datbuf != NULL) this is the number of data bytes. When
84 + no data buffer is available this is the number of oob bytes. 84 * no data buffer is available this is the number of oob bytes.
85 * 85 *
86 * @ooblen: number of oob bytes per page 86 * @ooblen: number of oob bytes per page
87 * @ooboffs: offset of oob data in the oob area (only relevant when 87 * @ooboffs: offset of oob data in the oob area (only relevant when
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 66559272ebcb..0b4cd2fa64aa 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -202,7 +202,7 @@ typedef enum {
202struct nand_chip; 202struct nand_chip;
203 203
204/** 204/**
205 * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independend devices 205 * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
206 * @lock: protection lock 206 * @lock: protection lock
207 * @active: the mtd device which holds the controller currently 207 * @active: the mtd device which holds the controller currently
208 * @wq: wait queue to sleep on if a NAND operation is in progress 208 * @wq: wait queue to sleep on if a NAND operation is in progress
@@ -223,12 +223,15 @@ struct nand_hw_control {
223 * @total: total number of ecc bytes per page 223 * @total: total number of ecc bytes per page
224 * @prepad: padding information for syndrome based ecc generators 224 * @prepad: padding information for syndrome based ecc generators
225 * @postpad: padding information for syndrome based ecc generators 225 * @postpad: padding information for syndrome based ecc generators
226 * @layout: ECC layout control struct pointer
226 * @hwctl: function to control hardware ecc generator. Must only 227 * @hwctl: function to control hardware ecc generator. Must only
227 * be provided if an hardware ECC is available 228 * be provided if an hardware ECC is available
228 * @calculate: function for ecc calculation or readback from ecc hardware 229 * @calculate: function for ecc calculation or readback from ecc hardware
229 * @correct: function for ecc correction, matching to ecc generator (sw/hw) 230 * @correct: function for ecc correction, matching to ecc generator (sw/hw)
230 * @read_page: function to read a page according to the ecc generator requirements 231 * @read_page: function to read a page according to the ecc generator requirements
231 * @write_page: function to write a page according to the ecc generator requirements 232 * @write_page: function to write a page according to the ecc generator requirements
233 * @read_oob: function to read chip OOB data
234 * @write_oob: function to write chip OOB data
232 */ 235 */
233struct nand_ecc_ctrl { 236struct nand_ecc_ctrl {
234 nand_ecc_modes_t mode; 237 nand_ecc_modes_t mode;
@@ -300,11 +303,15 @@ struct nand_buffers {
300 * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing commands to the chip 303 * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing commands to the chip
301 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on ready 304 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on ready
302 * @ecc: [BOARDSPECIFIC] ecc control ctructure 305 * @ecc: [BOARDSPECIFIC] ecc control ctructure
306 * @buffers: buffer structure for read/write
307 * @hwcontrol: platform-specific hardware control structure
308 * @ops: oob operation operands
303 * @erase_cmd: [INTERN] erase command write function, selectable due to AND support 309 * @erase_cmd: [INTERN] erase command write function, selectable due to AND support
304 * @scan_bbt: [REPLACEABLE] function to scan bad block table 310 * @scan_bbt: [REPLACEABLE] function to scan bad block table
305 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR) 311 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR)
306 * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress 312 * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress
307 * @state: [INTERN] the current state of the NAND device 313 * @state: [INTERN] the current state of the NAND device
314 * @oob_poi: poison value buffer
308 * @page_shift: [INTERN] number of address bits in a page (column address bits) 315 * @page_shift: [INTERN] number of address bits in a page (column address bits)
309 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock 316 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
310 * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry 317 * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
@@ -400,7 +407,6 @@ struct nand_chip {
400 407
401/** 408/**
402 * struct nand_flash_dev - NAND Flash Device ID Structure 409 * struct nand_flash_dev - NAND Flash Device ID Structure
403 *
404 * @name: Identify the device type 410 * @name: Identify the device type
405 * @id: device ID code 411 * @id: device ID code
406 * @pagesize: Pagesize in bytes. Either 256 or 512 or 0 412 * @pagesize: Pagesize in bytes. Either 256 or 512 or 0
@@ -519,9 +525,8 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
519 525
520/** 526/**
521 * struct platform_nand_chip - chip level device structure 527 * struct platform_nand_chip - chip level device structure
522 *
523 * @nr_chips: max. number of chips to scan for 528 * @nr_chips: max. number of chips to scan for
524 * @chip_offs: chip number offset 529 * @chip_offset: chip number offset
525 * @nr_partitions: number of partitions pointed to by partitions (or zero) 530 * @nr_partitions: number of partitions pointed to by partitions (or zero)
526 * @partitions: mtd partition list 531 * @partitions: mtd partition list
527 * @chip_delay: R/B delay value in us 532 * @chip_delay: R/B delay value in us
@@ -542,11 +547,10 @@ struct platform_nand_chip {
542 547
543/** 548/**
544 * struct platform_nand_ctrl - controller level device structure 549 * struct platform_nand_ctrl - controller level device structure
545 *
546 * @hwcontrol: platform specific hardware control structure 550 * @hwcontrol: platform specific hardware control structure
547 * @dev_ready: platform specific function to read ready/busy pin 551 * @dev_ready: platform specific function to read ready/busy pin
548 * @select_chip: platform specific chip select function 552 * @select_chip: platform specific chip select function
549 * @priv_data: private data to transport driver specific settings 553 * @priv: private data to transport driver specific settings
550 * 554 *
551 * All fields are optional and depend on the hardware driver requirements 555 * All fields are optional and depend on the hardware driver requirements
552 */ 556 */
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 9ce9a48db444..1f4972155249 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -23,7 +23,7 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
23/* Free resources held by the OneNAND device */ 23/* Free resources held by the OneNAND device */
24extern void onenand_release(struct mtd_info *mtd); 24extern void onenand_release(struct mtd_info *mtd);
25 25
26/** 26/*
27 * onenand_state_t - chip states 27 * onenand_state_t - chip states
28 * Enumeration for OneNAND flash chip state 28 * Enumeration for OneNAND flash chip state
29 */ 29 */
@@ -42,9 +42,9 @@ typedef enum {
42 42
43/** 43/**
44 * struct onenand_bufferram - OneNAND BufferRAM Data 44 * struct onenand_bufferram - OneNAND BufferRAM Data
45 * @param block block address in BufferRAM 45 * @block: block address in BufferRAM
46 * @param page page address in BufferRAM 46 * @page: page address in BufferRAM
47 * @param valid valid flag 47 * @valid: valid flag
48 */ 48 */
49struct onenand_bufferram { 49struct onenand_bufferram {
50 int block; 50 int block;
@@ -54,32 +54,43 @@ struct onenand_bufferram {
54 54
55/** 55/**
56 * struct onenand_chip - OneNAND Private Flash Chip Data 56 * struct onenand_chip - OneNAND Private Flash Chip Data
57 * @param base [BOARDSPECIFIC] address to access OneNAND 57 * @base: [BOARDSPECIFIC] address to access OneNAND
58 * @param chipsize [INTERN] the size of one chip for multichip arrays 58 * @chipsize: [INTERN] the size of one chip for multichip arrays
59 * @param device_id [INTERN] device ID 59 * @device_id: [INTERN] device ID
60 * @param verstion_id [INTERN] version ID 60 * @density_mask: chip density, used for DDP devices
61 * @param options [BOARDSPECIFIC] various chip options. They can partly be set to inform onenand_scan about 61 * @verstion_id: [INTERN] version ID
62 * @param erase_shift [INTERN] number of address bits in a block 62 * @options: [BOARDSPECIFIC] various chip options. They can
63 * @param page_shift [INTERN] number of address bits in a page 63 * partly be set to inform onenand_scan about
64 * @param ppb_shift [INTERN] number of address bits in a pages per block 64 * @erase_shift: [INTERN] number of address bits in a block
65 * @param page_mask [INTERN] a page per block mask 65 * @page_shift: [INTERN] number of address bits in a page
66 * @param bufferam_index [INTERN] BufferRAM index 66 * @ppb_shift: [INTERN] number of address bits in a pages per block
67 * @param bufferam [INTERN] BufferRAM info 67 * @page_mask: [INTERN] a page per block mask
68 * @param readw [REPLACEABLE] hardware specific function for read short 68 * @bufferram_index: [INTERN] BufferRAM index
69 * @param writew [REPLACEABLE] hardware specific function for write short 69 * @bufferram: [INTERN] BufferRAM info
70 * @param command [REPLACEABLE] hardware specific function for writing commands to the chip 70 * @readw: [REPLACEABLE] hardware specific function for read short
71 * @param wait [REPLACEABLE] hardware specific function for wait on ready 71 * @writew: [REPLACEABLE] hardware specific function for write short
72 * @param read_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area 72 * @command: [REPLACEABLE] hardware specific function for writing
73 * @param write_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area 73 * commands to the chip
74 * @param read_word [REPLACEABLE] hardware specific function for read register of OneNAND 74 * @wait: [REPLACEABLE] hardware specific function for wait on ready
75 * @param write_word [REPLACEABLE] hardware specific function for write register of OneNAND 75 * @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
76 * @param scan_bbt [REPLACEALBE] hardware specific function for scaning Bad block Table 76 * @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
77 * @param chip_lock [INTERN] spinlock used to protect access to this structure and the chip 77 * @read_word: [REPLACEABLE] hardware specific function for read
78 * @param wq [INTERN] wait queue to sleep on if a OneNAND operation is in progress 78 * register of OneNAND
79 * @param state [INTERN] the current state of the OneNAND device 79 * @write_word: [REPLACEABLE] hardware specific function for write
80 * @param ecclayout [REPLACEABLE] the default ecc placement scheme 80 * register of OneNAND
81 * @param bbm [REPLACEABLE] pointer to Bad Block Management 81 * @mmcontrol: sync burst read function
82 * @param priv [OPTIONAL] pointer to private chip date 82 * @block_markbad: function to mark a block as bad
83 * @scan_bbt: [REPLACEALBE] hardware specific function for scanning
84 * Bad block Table
85 * @chip_lock: [INTERN] spinlock used to protect access to this
86 * structure and the chip
87 * @wq: [INTERN] wait queue to sleep on if a OneNAND
88 * operation is in progress
89 * @state: [INTERN] the current state of the OneNAND device
90 * @page_buf: data buffer
91 * @ecclayout: [REPLACEABLE] the default ecc placement scheme
92 * @bbm: [REPLACEABLE] pointer to Bad Block Management
93 * @priv: [OPTIONAL] pointer to private chip date
83 */ 94 */
84struct onenand_chip { 95struct onenand_chip {
85 void __iomem *base; 96 void __iomem *base;
@@ -147,9 +158,9 @@ struct onenand_chip {
147#define ONENAND_MFR_SAMSUNG 0xec 158#define ONENAND_MFR_SAMSUNG 0xec
148 159
149/** 160/**
150 * struct nand_manufacturers - NAND Flash Manufacturer ID Structure 161 * struct onenand_manufacturers - NAND Flash Manufacturer ID Structure
151 * @param name: Manufacturer name 162 * @name: Manufacturer name
152 * @param id: manufacturer ID code of device. 163 * @id: manufacturer ID code of device.
153*/ 164*/
154struct onenand_manufacturers { 165struct onenand_manufacturers {
155 int id; 166 int id;
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 8b5769f00467..2537285e1064 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -2,22 +2,22 @@
2#define __LINUX_MUTEX_DEBUG_H 2#define __LINUX_MUTEX_DEBUG_H
3 3
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5#include <linux/lockdep.h>
5 6
6/* 7/*
7 * Mutexes - debugging helpers: 8 * Mutexes - debugging helpers:
8 */ 9 */
9 10
10#define __DEBUG_MUTEX_INITIALIZER(lockname) \ 11#define __DEBUG_MUTEX_INITIALIZER(lockname) \
11 , .held_list = LIST_HEAD_INIT(lockname.held_list), \ 12 , .magic = &lockname
12 .name = #lockname , .magic = &lockname
13 13
14#define mutex_init(sem) __mutex_init(sem, __FUNCTION__) 14#define mutex_init(mutex) \
15do { \
16 static struct lock_class_key __key; \
17 \
18 __mutex_init((mutex), #mutex, &__key); \
19} while (0)
15 20
16extern void FASTCALL(mutex_destroy(struct mutex *lock)); 21extern void FASTCALL(mutex_destroy(struct mutex *lock));
17 22
18extern void mutex_debug_show_all_locks(void);
19extern void mutex_debug_show_held_locks(struct task_struct *filter);
20extern void mutex_debug_check_no_locks_held(struct task_struct *task);
21extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len);
22
23#endif 23#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f1ac507fa20d..27c48daa3183 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/spinlock_types.h> 14#include <linux/spinlock_types.h>
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/lockdep.h>
16 17
17#include <asm/atomic.h> 18#include <asm/atomic.h>
18 19
@@ -50,11 +51,12 @@ struct mutex {
50 struct list_head wait_list; 51 struct list_head wait_list;
51#ifdef CONFIG_DEBUG_MUTEXES 52#ifdef CONFIG_DEBUG_MUTEXES
52 struct thread_info *owner; 53 struct thread_info *owner;
53 struct list_head held_list;
54 unsigned long acquire_ip;
55 const char *name; 54 const char *name;
56 void *magic; 55 void *magic;
57#endif 56#endif
57#ifdef CONFIG_DEBUG_LOCK_ALLOC
58 struct lockdep_map dep_map;
59#endif
58}; 60};
59 61
60/* 62/*
@@ -74,24 +76,34 @@ struct mutex_waiter {
74# include <linux/mutex-debug.h> 76# include <linux/mutex-debug.h>
75#else 77#else
76# define __DEBUG_MUTEX_INITIALIZER(lockname) 78# define __DEBUG_MUTEX_INITIALIZER(lockname)
77# define mutex_init(mutex) __mutex_init(mutex, NULL) 79# define mutex_init(mutex) \
80do { \
81 static struct lock_class_key __key; \
82 \
83 __mutex_init((mutex), #mutex, &__key); \
84} while (0)
78# define mutex_destroy(mutex) do { } while (0) 85# define mutex_destroy(mutex) do { } while (0)
79# define mutex_debug_show_all_locks() do { } while (0) 86#endif
80# define mutex_debug_show_held_locks(p) do { } while (0) 87
81# define mutex_debug_check_no_locks_held(task) do { } while (0) 88#ifdef CONFIG_DEBUG_LOCK_ALLOC
82# define mutex_debug_check_no_locks_freed(from, len) do { } while (0) 89# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
90 , .dep_map = { .name = #lockname }
91#else
92# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
83#endif 93#endif
84 94
85#define __MUTEX_INITIALIZER(lockname) \ 95#define __MUTEX_INITIALIZER(lockname) \
86 { .count = ATOMIC_INIT(1) \ 96 { .count = ATOMIC_INIT(1) \
87 , .wait_lock = SPIN_LOCK_UNLOCKED \ 97 , .wait_lock = SPIN_LOCK_UNLOCKED \
88 , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ 98 , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
89 __DEBUG_MUTEX_INITIALIZER(lockname) } 99 __DEBUG_MUTEX_INITIALIZER(lockname) \
100 __DEP_MAP_MUTEX_INITIALIZER(lockname) }
90 101
91#define DEFINE_MUTEX(mutexname) \ 102#define DEFINE_MUTEX(mutexname) \
92 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) 103 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
93 104
94extern void fastcall __mutex_init(struct mutex *lock, const char *name); 105extern void __mutex_init(struct mutex *lock, const char *name,
106 struct lock_class_key *key);
95 107
96/*** 108/***
97 * mutex_is_locked - is the mutex locked 109 * mutex_is_locked - is the mutex locked
@@ -110,6 +122,13 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
110 */ 122 */
111extern void fastcall mutex_lock(struct mutex *lock); 123extern void fastcall mutex_lock(struct mutex *lock);
112extern int fastcall mutex_lock_interruptible(struct mutex *lock); 124extern int fastcall mutex_lock_interruptible(struct mutex *lock);
125
126#ifdef CONFIG_DEBUG_LOCK_ALLOC
127extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
128#else
129# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
130#endif
131
113/* 132/*
114 * NOTE: mutex_trylock() follows the spin_trylock() convention, 133 * NOTE: mutex_trylock() follows the spin_trylock() convention,
115 * not the down_trylock() convention! 134 * not the down_trylock() convention!
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
new file mode 100644
index 000000000000..d06311acd448
--- /dev/null
+++ b/include/linux/netfilter/Kbuild
@@ -0,0 +1,11 @@
1header-y := nf_conntrack_sctp.h nf_conntrack_tuple_common.h \
2 nfnetlink_conntrack.h nfnetlink_log.h nfnetlink_queue.h \
3 xt_CLASSIFY.h xt_comment.h xt_connbytes.h xt_connmark.h \
4 xt_CONNMARK.h xt_conntrack.h xt_dccp.h xt_esp.h \
5 xt_helper.h xt_length.h xt_limit.h xt_mac.h xt_mark.h \
6 xt_MARK.h xt_multiport.h xt_NFQUEUE.h xt_pkttype.h \
7 xt_policy.h xt_realm.h xt_sctp.h xt_state.h xt_string.h \
8 xt_tcpmss.h xt_tcpudp.h
9
10unifdef-y := nf_conntrack_common.h nf_conntrack_ftp.h \
11 nf_conntrack_tcp.h nfnetlink.h x_tables.h xt_physdev.h
diff --git a/include/linux/netfilter_arp/Kbuild b/include/linux/netfilter_arp/Kbuild
new file mode 100644
index 000000000000..198ec5e7b17d
--- /dev/null
+++ b/include/linux/netfilter_arp/Kbuild
@@ -0,0 +1,2 @@
1header-y := arpt_mangle.h
2unifdef-y := arp_tables.h
diff --git a/include/linux/netfilter_bridge/Kbuild b/include/linux/netfilter_bridge/Kbuild
new file mode 100644
index 000000000000..5b1aba6abbad
--- /dev/null
+++ b/include/linux/netfilter_bridge/Kbuild
@@ -0,0 +1,4 @@
1header-y += ebt_among.h ebt_arp.h ebt_arpreply.h ebt_ip.h ebt_limit.h \
2 ebt_log.h ebt_mark_m.h ebt_mark_t.h ebt_nat.h ebt_pkttype.h \
3 ebt_redirect.h ebt_stp.h ebt_ulog.h ebt_vlan.h
4unifdef-y := ebtables.h ebt_802_3.h
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
new file mode 100644
index 000000000000..04e4d2721689
--- /dev/null
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -0,0 +1,21 @@
1
2header-y := ip_conntrack_helper.h ip_conntrack_helper_h323_asn1.h \
3 ip_conntrack_helper_h323_types.h ip_conntrack_protocol.h \
4 ip_conntrack_sctp.h ip_conntrack_tcp.h ip_conntrack_tftp.h \
5 ip_nat_pptp.h ipt_addrtype.h ipt_ah.h \
6 ipt_CLASSIFY.h ipt_CLUSTERIP.h ipt_comment.h \
7 ipt_connbytes.h ipt_connmark.h ipt_CONNMARK.h \
8 ipt_conntrack.h ipt_dccp.h ipt_dscp.h ipt_DSCP.h ipt_ecn.h \
9 ipt_ECN.h ipt_esp.h ipt_hashlimit.h ipt_helper.h \
10 ipt_iprange.h ipt_length.h ipt_limit.h ipt_LOG.h ipt_mac.h \
11 ipt_mark.h ipt_MARK.h ipt_multiport.h ipt_NFQUEUE.h \
12 ipt_owner.h ipt_physdev.h ipt_pkttype.h ipt_policy.h \
13 ipt_realm.h ipt_recent.h ipt_REJECT.h ipt_SAME.h \
14 ipt_sctp.h ipt_state.h ipt_string.h ipt_tcpmss.h \
15 ipt_TCPMSS.h ipt_tos.h ipt_TOS.h ipt_ttl.h ipt_TTL.h \
16 ipt_ULOG.h
17
18unifdef-y := ip_conntrack.h ip_conntrack_h323.h ip_conntrack_irc.h \
19 ip_conntrack_pptp.h ip_conntrack_proto_gre.h \
20 ip_conntrack_tuple.h ip_nat.h ip_nat_rule.h ip_queue.h \
21 ip_tables.h
diff --git a/include/linux/netfilter_ipv6/Kbuild b/include/linux/netfilter_ipv6/Kbuild
new file mode 100644
index 000000000000..913ddbf55b4b
--- /dev/null
+++ b/include/linux/netfilter_ipv6/Kbuild
@@ -0,0 +1,6 @@
1header-y += ip6t_HL.h ip6t_LOG.h ip6t_MARK.h ip6t_REJECT.h ip6t_ah.h \
2 ip6t_esp.h ip6t_frag.h ip6t_hl.h ip6t_ipv6header.h \
3 ip6t_length.h ip6t_limit.h ip6t_mac.h ip6t_mark.h \
4 ip6t_multiport.h ip6t_opts.h ip6t_owner.h ip6t_policy.h \
5 ip6t_physdev.h ip6t_rt.h
6unifdef-y := ip6_tables.h
diff --git a/include/linux/nfsd/Kbuild b/include/linux/nfsd/Kbuild
new file mode 100644
index 000000000000..c8c545665885
--- /dev/null
+++ b/include/linux/nfsd/Kbuild
@@ -0,0 +1,2 @@
1unifdef-y := const.h export.h stats.h syscall.h nfsfh.h debug.h auth.h
2
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 51dbab9710c7..7ff386a6ae87 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -65,7 +65,7 @@ struct raw_notifier_head {
65 } while (0) 65 } while (0)
66 66
67#define ATOMIC_NOTIFIER_INIT(name) { \ 67#define ATOMIC_NOTIFIER_INIT(name) { \
68 .lock = SPIN_LOCK_UNLOCKED, \ 68 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
69 .head = NULL } 69 .head = NULL }
70#define BLOCKING_NOTIFIER_INIT(name) { \ 70#define BLOCKING_NOTIFIER_INIT(name) { \
71 .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ 71 .rwsem = __RWSEM_INITIALIZER((name).rwsem), \
diff --git a/include/linux/poison.h b/include/linux/poison.h
index a5347c02432e..3e628f990fdf 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -44,6 +44,11 @@
44 44
45/********** drivers/atm/ **********/ 45/********** drivers/atm/ **********/
46#define ATM_POISON_FREE 0x12 46#define ATM_POISON_FREE 0x12
47#define ATM_POISON 0xdeadbeef
48
49/********** net/ **********/
50#define NEIGHBOR_DEAD 0xdeadbeef
51#define NETFILTER_LINK_POISON 0xdead57ac
47 52
48/********** kernel/mutexes **********/ 53/********** kernel/mutexes **********/
49#define MUTEX_DEBUG_INIT 0x11 54#define MUTEX_DEBUG_INIT 0x11
diff --git a/include/linux/raid/Kbuild b/include/linux/raid/Kbuild
new file mode 100644
index 000000000000..73fa27a8d552
--- /dev/null
+++ b/include/linux/raid/Kbuild
@@ -0,0 +1 @@
header-y += md_p.h md_u.h
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index fa4a3b82ba70..5d41dee82f80 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -29,8 +29,6 @@ struct rt_mutex {
29 struct task_struct *owner; 29 struct task_struct *owner;
30#ifdef CONFIG_DEBUG_RT_MUTEXES 30#ifdef CONFIG_DEBUG_RT_MUTEXES
31 int save_state; 31 int save_state;
32 struct list_head held_list_entry;
33 unsigned long acquire_ip;
34 const char *name, *file; 32 const char *name, *file;
35 int line; 33 int line;
36 void *magic; 34 void *magic;
@@ -98,14 +96,6 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);
98 96
99extern void rt_mutex_unlock(struct rt_mutex *lock); 97extern void rt_mutex_unlock(struct rt_mutex *lock);
100 98
101#ifdef CONFIG_DEBUG_RT_MUTEXES
102# define INIT_RT_MUTEX_DEBUG(tsk) \
103 .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \
104 .held_list_lock = SPIN_LOCK_UNLOCKED
105#else
106# define INIT_RT_MUTEX_DEBUG(tsk)
107#endif
108
109#ifdef CONFIG_RT_MUTEXES 99#ifdef CONFIG_RT_MUTEXES
110# define INIT_RT_MUTEXES(tsk) \ 100# define INIT_RT_MUTEXES(tsk) \
111 .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \ 101 .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index f30f805080ae..ae1fcadd598e 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -32,30 +32,37 @@ struct rw_semaphore {
32 __s32 activity; 32 __s32 activity;
33 spinlock_t wait_lock; 33 spinlock_t wait_lock;
34 struct list_head wait_list; 34 struct list_head wait_list;
35#if RWSEM_DEBUG 35#ifdef CONFIG_DEBUG_LOCK_ALLOC
36 int debug; 36 struct lockdep_map dep_map;
37#endif 37#endif
38}; 38};
39 39
40/* 40#ifdef CONFIG_DEBUG_LOCK_ALLOC
41 * initialisation 41# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
42 */
43#if RWSEM_DEBUG
44#define __RWSEM_DEBUG_INIT , 0
45#else 42#else
46#define __RWSEM_DEBUG_INIT /* */ 43# define __RWSEM_DEP_MAP_INIT(lockname)
47#endif 44#endif
48 45
49#define __RWSEM_INITIALIZER(name) \ 46#define __RWSEM_INITIALIZER(name) \
50{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT } 47{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
51 48
52#define DECLARE_RWSEM(name) \ 49#define DECLARE_RWSEM(name) \
53 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 50 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
54 51
55extern void FASTCALL(init_rwsem(struct rw_semaphore *sem)); 52extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
53 struct lock_class_key *key);
54
55#define init_rwsem(sem) \
56do { \
57 static struct lock_class_key __key; \
58 \
59 __init_rwsem((sem), #sem, &__key); \
60} while (0)
61
56extern void FASTCALL(__down_read(struct rw_semaphore *sem)); 62extern void FASTCALL(__down_read(struct rw_semaphore *sem));
57extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem)); 63extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
58extern void FASTCALL(__down_write(struct rw_semaphore *sem)); 64extern void FASTCALL(__down_write(struct rw_semaphore *sem));
65extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass));
59extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem)); 66extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
60extern void FASTCALL(__up_read(struct rw_semaphore *sem)); 67extern void FASTCALL(__up_read(struct rw_semaphore *sem));
61extern void FASTCALL(__up_write(struct rw_semaphore *sem)); 68extern void FASTCALL(__up_write(struct rw_semaphore *sem));
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f99fe90732ab..658afb37c3f5 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -9,8 +9,6 @@
9 9
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11 11
12#define RWSEM_DEBUG 0
13
14#ifdef __KERNEL__ 12#ifdef __KERNEL__
15 13
16#include <linux/types.h> 14#include <linux/types.h>
@@ -26,89 +24,58 @@ struct rw_semaphore;
26#include <asm/rwsem.h> /* use an arch-specific implementation */ 24#include <asm/rwsem.h> /* use an arch-specific implementation */
27#endif 25#endif
28 26
29#ifndef rwsemtrace
30#if RWSEM_DEBUG
31extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str));
32#else
33#define rwsemtrace(SEM,FMT)
34#endif
35#endif
36
37/* 27/*
38 * lock for reading 28 * lock for reading
39 */ 29 */
40static inline void down_read(struct rw_semaphore *sem) 30extern void down_read(struct rw_semaphore *sem);
41{
42 might_sleep();
43 rwsemtrace(sem,"Entering down_read");
44 __down_read(sem);
45 rwsemtrace(sem,"Leaving down_read");
46}
47 31
48/* 32/*
49 * trylock for reading -- returns 1 if successful, 0 if contention 33 * trylock for reading -- returns 1 if successful, 0 if contention
50 */ 34 */
51static inline int down_read_trylock(struct rw_semaphore *sem) 35extern int down_read_trylock(struct rw_semaphore *sem);
52{
53 int ret;
54 rwsemtrace(sem,"Entering down_read_trylock");
55 ret = __down_read_trylock(sem);
56 rwsemtrace(sem,"Leaving down_read_trylock");
57 return ret;
58}
59 36
60/* 37/*
61 * lock for writing 38 * lock for writing
62 */ 39 */
63static inline void down_write(struct rw_semaphore *sem) 40extern void down_write(struct rw_semaphore *sem);
64{
65 might_sleep();
66 rwsemtrace(sem,"Entering down_write");
67 __down_write(sem);
68 rwsemtrace(sem,"Leaving down_write");
69}
70 41
71/* 42/*
72 * trylock for writing -- returns 1 if successful, 0 if contention 43 * trylock for writing -- returns 1 if successful, 0 if contention
73 */ 44 */
74static inline int down_write_trylock(struct rw_semaphore *sem) 45extern int down_write_trylock(struct rw_semaphore *sem);
75{
76 int ret;
77 rwsemtrace(sem,"Entering down_write_trylock");
78 ret = __down_write_trylock(sem);
79 rwsemtrace(sem,"Leaving down_write_trylock");
80 return ret;
81}
82 46
83/* 47/*
84 * release a read lock 48 * release a read lock
85 */ 49 */
86static inline void up_read(struct rw_semaphore *sem) 50extern void up_read(struct rw_semaphore *sem);
87{
88 rwsemtrace(sem,"Entering up_read");
89 __up_read(sem);
90 rwsemtrace(sem,"Leaving up_read");
91}
92 51
93/* 52/*
94 * release a write lock 53 * release a write lock
95 */ 54 */
96static inline void up_write(struct rw_semaphore *sem) 55extern void up_write(struct rw_semaphore *sem);
97{
98 rwsemtrace(sem,"Entering up_write");
99 __up_write(sem);
100 rwsemtrace(sem,"Leaving up_write");
101}
102 56
103/* 57/*
104 * downgrade write lock to read lock 58 * downgrade write lock to read lock
105 */ 59 */
106static inline void downgrade_write(struct rw_semaphore *sem) 60extern void downgrade_write(struct rw_semaphore *sem);
107{ 61
108 rwsemtrace(sem,"Entering downgrade_write"); 62#ifdef CONFIG_DEBUG_LOCK_ALLOC
109 __downgrade_write(sem); 63/*
110 rwsemtrace(sem,"Leaving downgrade_write"); 64 * nested locking:
111} 65 */
66extern void down_read_nested(struct rw_semaphore *sem, int subclass);
67extern void down_write_nested(struct rw_semaphore *sem, int subclass);
68/*
69 * Take/release a lock when not the owner will release it:
70 */
71extern void down_read_non_owner(struct rw_semaphore *sem);
72extern void up_read_non_owner(struct rw_semaphore *sem);
73#else
74# define down_read_nested(sem, subclass) down_read(sem)
75# define down_write_nested(sem, subclass) down_write(sem)
76# define down_read_non_owner(sem) down_read(sem)
77# define up_read_non_owner(sem) up_read(sem)
78#endif
112 79
113#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
114#endif /* _LINUX_RWSEM_H */ 81#endif /* _LINUX_RWSEM_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index aaf723308ed4..1c876e27ff93 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu);
184extern rwlock_t tasklist_lock; 184extern rwlock_t tasklist_lock;
185extern spinlock_t mmlist_lock; 185extern spinlock_t mmlist_lock;
186 186
187typedef struct task_struct task_t; 187struct task_struct;
188 188
189extern void sched_init(void); 189extern void sched_init(void);
190extern void sched_init_smp(void); 190extern void sched_init_smp(void);
191extern void init_idle(task_t *idle, int cpu); 191extern void init_idle(struct task_struct *idle, int cpu);
192 192
193extern cpumask_t nohz_cpu_mask; 193extern cpumask_t nohz_cpu_mask;
194 194
@@ -383,7 +383,7 @@ struct signal_struct {
383 wait_queue_head_t wait_chldexit; /* for wait4() */ 383 wait_queue_head_t wait_chldexit; /* for wait4() */
384 384
385 /* current thread group signal load-balancing target: */ 385 /* current thread group signal load-balancing target: */
386 task_t *curr_target; 386 struct task_struct *curr_target;
387 387
388 /* shared signal handling: */ 388 /* shared signal handling: */
389 struct sigpending shared_pending; 389 struct sigpending shared_pending;
@@ -534,7 +534,6 @@ extern struct user_struct *find_user(uid_t);
534extern struct user_struct root_user; 534extern struct user_struct root_user;
535#define INIT_USER (&root_user) 535#define INIT_USER (&root_user)
536 536
537typedef struct prio_array prio_array_t;
538struct backing_dev_info; 537struct backing_dev_info;
539struct reclaim_state; 538struct reclaim_state;
540 539
@@ -699,7 +698,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
699 ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) 698 ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
700 699
701#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK 700#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
702extern void prefetch_stack(struct task_struct*); 701extern void prefetch_stack(struct task_struct *t);
703#else 702#else
704static inline void prefetch_stack(struct task_struct *t) { } 703static inline void prefetch_stack(struct task_struct *t) { }
705#endif 704#endif
@@ -715,6 +714,8 @@ enum sleep_type {
715 SLEEP_INTERRUPTED, 714 SLEEP_INTERRUPTED,
716}; 715};
717 716
717struct prio_array;
718
718struct task_struct { 719struct task_struct {
719 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 720 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
720 struct thread_info *thread_info; 721 struct thread_info *thread_info;
@@ -732,7 +733,7 @@ struct task_struct {
732 int load_weight; /* for niceness load balancing purposes */ 733 int load_weight; /* for niceness load balancing purposes */
733 int prio, static_prio, normal_prio; 734 int prio, static_prio, normal_prio;
734 struct list_head run_list; 735 struct list_head run_list;
735 prio_array_t *array; 736 struct prio_array *array;
736 737
737 unsigned short ioprio; 738 unsigned short ioprio;
738 unsigned int btrace_seq; 739 unsigned int btrace_seq;
@@ -865,16 +866,34 @@ struct task_struct {
865 struct plist_head pi_waiters; 866 struct plist_head pi_waiters;
866 /* Deadlock detection and priority inheritance handling */ 867 /* Deadlock detection and priority inheritance handling */
867 struct rt_mutex_waiter *pi_blocked_on; 868 struct rt_mutex_waiter *pi_blocked_on;
868# ifdef CONFIG_DEBUG_RT_MUTEXES
869 spinlock_t held_list_lock;
870 struct list_head held_list_head;
871# endif
872#endif 869#endif
873 870
874#ifdef CONFIG_DEBUG_MUTEXES 871#ifdef CONFIG_DEBUG_MUTEXES
875 /* mutex deadlock detection */ 872 /* mutex deadlock detection */
876 struct mutex_waiter *blocked_on; 873 struct mutex_waiter *blocked_on;
877#endif 874#endif
875#ifdef CONFIG_TRACE_IRQFLAGS
876 unsigned int irq_events;
877 int hardirqs_enabled;
878 unsigned long hardirq_enable_ip;
879 unsigned int hardirq_enable_event;
880 unsigned long hardirq_disable_ip;
881 unsigned int hardirq_disable_event;
882 int softirqs_enabled;
883 unsigned long softirq_disable_ip;
884 unsigned int softirq_disable_event;
885 unsigned long softirq_enable_ip;
886 unsigned int softirq_enable_event;
887 int hardirq_context;
888 int softirq_context;
889#endif
890#ifdef CONFIG_LOCKDEP
891# define MAX_LOCK_DEPTH 30UL
892 u64 curr_chain_key;
893 int lockdep_depth;
894 struct held_lock held_locks[MAX_LOCK_DEPTH];
895 unsigned int lockdep_recursion;
896#endif
878 897
879/* journalling filesystem info */ 898/* journalling filesystem info */
880 void *journal_info; 899 void *journal_info;
@@ -1013,9 +1032,9 @@ static inline void put_task_struct(struct task_struct *t)
1013#define used_math() tsk_used_math(current) 1032#define used_math() tsk_used_math(current)
1014 1033
1015#ifdef CONFIG_SMP 1034#ifdef CONFIG_SMP
1016extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); 1035extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
1017#else 1036#else
1018static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) 1037static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1019{ 1038{
1020 if (!cpu_isset(0, new_mask)) 1039 if (!cpu_isset(0, new_mask))
1021 return -EINVAL; 1040 return -EINVAL;
@@ -1024,7 +1043,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
1024#endif 1043#endif
1025 1044
1026extern unsigned long long sched_clock(void); 1045extern unsigned long long sched_clock(void);
1027extern unsigned long long current_sched_time(const task_t *current_task); 1046extern unsigned long long
1047current_sched_time(const struct task_struct *current_task);
1028 1048
1029/* sched_exec is called by processes performing an exec */ 1049/* sched_exec is called by processes performing an exec */
1030#ifdef CONFIG_SMP 1050#ifdef CONFIG_SMP
@@ -1042,27 +1062,27 @@ static inline void idle_task_exit(void) {}
1042extern void sched_idle_next(void); 1062extern void sched_idle_next(void);
1043 1063
1044#ifdef CONFIG_RT_MUTEXES 1064#ifdef CONFIG_RT_MUTEXES
1045extern int rt_mutex_getprio(task_t *p); 1065extern int rt_mutex_getprio(struct task_struct *p);
1046extern void rt_mutex_setprio(task_t *p, int prio); 1066extern void rt_mutex_setprio(struct task_struct *p, int prio);
1047extern void rt_mutex_adjust_pi(task_t *p); 1067extern void rt_mutex_adjust_pi(struct task_struct *p);
1048#else 1068#else
1049static inline int rt_mutex_getprio(task_t *p) 1069static inline int rt_mutex_getprio(struct task_struct *p)
1050{ 1070{
1051 return p->normal_prio; 1071 return p->normal_prio;
1052} 1072}
1053# define rt_mutex_adjust_pi(p) do { } while (0) 1073# define rt_mutex_adjust_pi(p) do { } while (0)
1054#endif 1074#endif
1055 1075
1056extern void set_user_nice(task_t *p, long nice); 1076extern void set_user_nice(struct task_struct *p, long nice);
1057extern int task_prio(const task_t *p); 1077extern int task_prio(const struct task_struct *p);
1058extern int task_nice(const task_t *p); 1078extern int task_nice(const struct task_struct *p);
1059extern int can_nice(const task_t *p, const int nice); 1079extern int can_nice(const struct task_struct *p, const int nice);
1060extern int task_curr(const task_t *p); 1080extern int task_curr(const struct task_struct *p);
1061extern int idle_cpu(int cpu); 1081extern int idle_cpu(int cpu);
1062extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); 1082extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
1063extern task_t *idle_task(int cpu); 1083extern struct task_struct *idle_task(int cpu);
1064extern task_t *curr_task(int cpu); 1084extern struct task_struct *curr_task(int cpu);
1065extern void set_curr_task(int cpu, task_t *p); 1085extern void set_curr_task(int cpu, struct task_struct *p);
1066 1086
1067void yield(void); 1087void yield(void);
1068 1088
@@ -1119,8 +1139,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
1119#else 1139#else
1120 static inline void kick_process(struct task_struct *tsk) { } 1140 static inline void kick_process(struct task_struct *tsk) { }
1121#endif 1141#endif
1122extern void FASTCALL(sched_fork(task_t * p, int clone_flags)); 1142extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
1123extern void FASTCALL(sched_exit(task_t * p)); 1143extern void FASTCALL(sched_exit(struct task_struct * p));
1124 1144
1125extern int in_group_p(gid_t); 1145extern int in_group_p(gid_t);
1126extern int in_egroup_p(gid_t); 1146extern int in_egroup_p(gid_t);
@@ -1225,17 +1245,17 @@ extern NORET_TYPE void do_group_exit(int);
1225extern void daemonize(const char *, ...); 1245extern void daemonize(const char *, ...);
1226extern int allow_signal(int); 1246extern int allow_signal(int);
1227extern int disallow_signal(int); 1247extern int disallow_signal(int);
1228extern task_t *child_reaper; 1248extern struct task_struct *child_reaper;
1229 1249
1230extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); 1250extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
1231extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); 1251extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
1232task_t *fork_idle(int); 1252struct task_struct *fork_idle(int);
1233 1253
1234extern void set_task_comm(struct task_struct *tsk, char *from); 1254extern void set_task_comm(struct task_struct *tsk, char *from);
1235extern void get_task_comm(char *to, struct task_struct *tsk); 1255extern void get_task_comm(char *to, struct task_struct *tsk);
1236 1256
1237#ifdef CONFIG_SMP 1257#ifdef CONFIG_SMP
1238extern void wait_task_inactive(task_t * p); 1258extern void wait_task_inactive(struct task_struct * p);
1239#else 1259#else
1240#define wait_task_inactive(p) do { } while (0) 1260#define wait_task_inactive(p) do { } while (0)
1241#endif 1261#endif
@@ -1261,13 +1281,13 @@ extern void wait_task_inactive(task_t * p);
1261/* de_thread depends on thread_group_leader not being a pid based check */ 1281/* de_thread depends on thread_group_leader not being a pid based check */
1262#define thread_group_leader(p) (p == p->group_leader) 1282#define thread_group_leader(p) (p == p->group_leader)
1263 1283
1264static inline task_t *next_thread(const task_t *p) 1284static inline struct task_struct *next_thread(const struct task_struct *p)
1265{ 1285{
1266 return list_entry(rcu_dereference(p->thread_group.next), 1286 return list_entry(rcu_dereference(p->thread_group.next),
1267 task_t, thread_group); 1287 struct task_struct, thread_group);
1268} 1288}
1269 1289
1270static inline int thread_group_empty(task_t *p) 1290static inline int thread_group_empty(struct task_struct *p)
1271{ 1291{
1272 return list_empty(&p->thread_group); 1292 return list_empty(&p->thread_group);
1273} 1293}
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 7bc5c7c12b54..46000936f8f1 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -38,9 +38,17 @@ typedef struct {
38 * These macros triggered gcc-3.x compile-time problems. We think these are 38 * These macros triggered gcc-3.x compile-time problems. We think these are
39 * OK now. Be cautious. 39 * OK now. Be cautious.
40 */ 40 */
41#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED } 41#define __SEQLOCK_UNLOCKED(lockname) \
42#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0) 42 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
43 43
44#define SEQLOCK_UNLOCKED \
45 __SEQLOCK_UNLOCKED(old_style_seqlock_init)
46
47#define seqlock_init(x) \
48 do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0)
49
50#define DEFINE_SEQLOCK(x) \
51 seqlock_t x = __SEQLOCK_UNLOCKED(x)
44 52
45/* Lock out other writers and update the count. 53/* Lock out other writers and update the count.
46 * Acts like a normal spin_lock/unlock. 54 * Acts like a normal spin_lock/unlock.
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index fc1104a2cfa9..058cba70818a 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -216,10 +216,11 @@ struct uart_port {
216 unsigned char __iomem *membase; /* read/write[bwl] */ 216 unsigned char __iomem *membase; /* read/write[bwl] */
217 unsigned int irq; /* irq number */ 217 unsigned int irq; /* irq number */
218 unsigned int uartclk; /* base uart clock */ 218 unsigned int uartclk; /* base uart clock */
219 unsigned char fifosize; /* tx fifo size */ 219 unsigned int fifosize; /* tx fifo size */
220 unsigned char x_char; /* xon/xoff char */ 220 unsigned char x_char; /* xon/xoff char */
221 unsigned char regshift; /* reg offset shift */ 221 unsigned char regshift; /* reg offset shift */
222 unsigned char iotype; /* io access style */ 222 unsigned char iotype; /* io access style */
223 unsigned char unused1;
223 224
224#define UPIO_PORT (0) 225#define UPIO_PORT (0)
225#define UPIO_HUB6 (1) 226#define UPIO_HUB6 (1)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 57d7d4965f9a..3597b4f14389 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -604,9 +604,12 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
604 return list_->qlen; 604 return list_->qlen;
605} 605}
606 606
607extern struct lock_class_key skb_queue_lock_key;
608
607static inline void skb_queue_head_init(struct sk_buff_head *list) 609static inline void skb_queue_head_init(struct sk_buff_head *list)
608{ 610{
609 spin_lock_init(&list->lock); 611 spin_lock_init(&list->lock);
612 lockdep_set_class(&list->lock, &skb_queue_lock_key);
610 list->prev = list->next = (struct sk_buff *)list; 613 list->prev = list->next = (struct sk_buff *)list;
611 list->qlen = 0; 614 list->qlen = 0;
612} 615}
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index ae23beef9cc9..31473db92d3b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -82,14 +82,40 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
82/* 82/*
83 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): 83 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
84 */ 84 */
85#if defined(CONFIG_SMP) 85#ifdef CONFIG_SMP
86# include <asm/spinlock.h> 86# include <asm/spinlock.h>
87#else 87#else
88# include <linux/spinlock_up.h> 88# include <linux/spinlock_up.h>
89#endif 89#endif
90 90
91#define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) 91#ifdef CONFIG_DEBUG_SPINLOCK
92#define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0) 92 extern void __spin_lock_init(spinlock_t *lock, const char *name,
93 struct lock_class_key *key);
94# define spin_lock_init(lock) \
95do { \
96 static struct lock_class_key __key; \
97 \
98 __spin_lock_init((lock), #lock, &__key); \
99} while (0)
100
101#else
102# define spin_lock_init(lock) \
103 do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
104#endif
105
106#ifdef CONFIG_DEBUG_SPINLOCK
107 extern void __rwlock_init(rwlock_t *lock, const char *name,
108 struct lock_class_key *key);
109# define rwlock_init(lock) \
110do { \
111 static struct lock_class_key __key; \
112 \
113 __rwlock_init((lock), #lock, &__key); \
114} while (0)
115#else
116# define rwlock_init(lock) \
117 do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
118#endif
93 119
94#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) 120#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
95 121
@@ -113,7 +139,6 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
113#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) 139#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
114 extern int _raw_spin_trylock(spinlock_t *lock); 140 extern int _raw_spin_trylock(spinlock_t *lock);
115 extern void _raw_spin_unlock(spinlock_t *lock); 141 extern void _raw_spin_unlock(spinlock_t *lock);
116
117 extern void _raw_read_lock(rwlock_t *lock); 142 extern void _raw_read_lock(rwlock_t *lock);
118 extern int _raw_read_trylock(rwlock_t *lock); 143 extern int _raw_read_trylock(rwlock_t *lock);
119 extern void _raw_read_unlock(rwlock_t *lock); 144 extern void _raw_read_unlock(rwlock_t *lock);
@@ -121,17 +146,17 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
121 extern int _raw_write_trylock(rwlock_t *lock); 146 extern int _raw_write_trylock(rwlock_t *lock);
122 extern void _raw_write_unlock(rwlock_t *lock); 147 extern void _raw_write_unlock(rwlock_t *lock);
123#else 148#else
124# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
125# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
126# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) 149# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
127# define _raw_spin_lock_flags(lock, flags) \ 150# define _raw_spin_lock_flags(lock, flags) \
128 __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) 151 __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
152# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
153# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
129# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) 154# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
130# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
131# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
132# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
133# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) 155# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
156# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
157# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
134# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) 158# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
159# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
135#endif 160#endif
136 161
137#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) 162#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
@@ -147,6 +172,13 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
147#define write_trylock(lock) __cond_lock(_write_trylock(lock)) 172#define write_trylock(lock) __cond_lock(_write_trylock(lock))
148 173
149#define spin_lock(lock) _spin_lock(lock) 174#define spin_lock(lock) _spin_lock(lock)
175
176#ifdef CONFIG_DEBUG_LOCK_ALLOC
177# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
178#else
179# define spin_lock_nested(lock, subclass) _spin_lock(lock)
180#endif
181
150#define write_lock(lock) _write_lock(lock) 182#define write_lock(lock) _write_lock(lock)
151#define read_lock(lock) _read_lock(lock) 183#define read_lock(lock) _read_lock(lock)
152 184
@@ -172,21 +204,18 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
172/* 204/*
173 * We inline the unlock functions in the nondebug case: 205 * We inline the unlock functions in the nondebug case:
174 */ 206 */
175#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) 207#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
208 !defined(CONFIG_SMP)
176# define spin_unlock(lock) _spin_unlock(lock) 209# define spin_unlock(lock) _spin_unlock(lock)
177# define read_unlock(lock) _read_unlock(lock) 210# define read_unlock(lock) _read_unlock(lock)
178# define write_unlock(lock) _write_unlock(lock) 211# define write_unlock(lock) _write_unlock(lock)
179#else
180# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
181# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock)
182# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock)
183#endif
184
185#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
186# define spin_unlock_irq(lock) _spin_unlock_irq(lock) 212# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
187# define read_unlock_irq(lock) _read_unlock_irq(lock) 213# define read_unlock_irq(lock) _read_unlock_irq(lock)
188# define write_unlock_irq(lock) _write_unlock_irq(lock) 214# define write_unlock_irq(lock) _write_unlock_irq(lock)
189#else 215#else
216# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
217# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock)
218# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock)
190# define spin_unlock_irq(lock) \ 219# define spin_unlock_irq(lock) \
191 do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0) 220 do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
192# define read_unlock_irq(lock) \ 221# define read_unlock_irq(lock) \
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 78e6989ffb54..b2c4f8299464 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -20,6 +20,8 @@ int in_lock_functions(unsigned long addr);
20#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) 20#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
21 21
22void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); 22void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t);
23void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
24 __acquires(spinlock_t);
23void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); 25void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t);
24void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); 26void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t);
25void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); 27void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index cd81cee566f4..67faa044c5f5 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -49,6 +49,7 @@
49 do { local_irq_restore(flags); __UNLOCK(lock); } while (0) 49 do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
50 50
51#define _spin_lock(lock) __LOCK(lock) 51#define _spin_lock(lock) __LOCK(lock)
52#define _spin_lock_nested(lock, subclass) __LOCK(lock)
52#define _read_lock(lock) __LOCK(lock) 53#define _read_lock(lock) __LOCK(lock)
53#define _write_lock(lock) __LOCK(lock) 54#define _write_lock(lock) __LOCK(lock)
54#define _spin_lock_bh(lock) __LOCK_BH(lock) 55#define _spin_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 9cb51e070390..dc5fb69e4de9 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,6 +9,8 @@
9 * Released under the General Public License (GPL). 9 * Released under the General Public License (GPL).
10 */ 10 */
11 11
12#include <linux/lockdep.h>
13
12#if defined(CONFIG_SMP) 14#if defined(CONFIG_SMP)
13# include <asm/spinlock_types.h> 15# include <asm/spinlock_types.h>
14#else 16#else
@@ -24,6 +26,9 @@ typedef struct {
24 unsigned int magic, owner_cpu; 26 unsigned int magic, owner_cpu;
25 void *owner; 27 void *owner;
26#endif 28#endif
29#ifdef CONFIG_DEBUG_LOCK_ALLOC
30 struct lockdep_map dep_map;
31#endif
27} spinlock_t; 32} spinlock_t;
28 33
29#define SPINLOCK_MAGIC 0xdead4ead 34#define SPINLOCK_MAGIC 0xdead4ead
@@ -37,31 +42,53 @@ typedef struct {
37 unsigned int magic, owner_cpu; 42 unsigned int magic, owner_cpu;
38 void *owner; 43 void *owner;
39#endif 44#endif
45#ifdef CONFIG_DEBUG_LOCK_ALLOC
46 struct lockdep_map dep_map;
47#endif
40} rwlock_t; 48} rwlock_t;
41 49
42#define RWLOCK_MAGIC 0xdeaf1eed 50#define RWLOCK_MAGIC 0xdeaf1eed
43 51
44#define SPINLOCK_OWNER_INIT ((void *)-1L) 52#define SPINLOCK_OWNER_INIT ((void *)-1L)
45 53
54#ifdef CONFIG_DEBUG_LOCK_ALLOC
55# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
56#else
57# define SPIN_DEP_MAP_INIT(lockname)
58#endif
59
60#ifdef CONFIG_DEBUG_LOCK_ALLOC
61# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
62#else
63# define RW_DEP_MAP_INIT(lockname)
64#endif
65
46#ifdef CONFIG_DEBUG_SPINLOCK 66#ifdef CONFIG_DEBUG_SPINLOCK
47# define SPIN_LOCK_UNLOCKED \ 67# define __SPIN_LOCK_UNLOCKED(lockname) \
48 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ 68 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
49 .magic = SPINLOCK_MAGIC, \ 69 .magic = SPINLOCK_MAGIC, \
50 .owner = SPINLOCK_OWNER_INIT, \ 70 .owner = SPINLOCK_OWNER_INIT, \
51 .owner_cpu = -1 } 71 .owner_cpu = -1, \
52#define RW_LOCK_UNLOCKED \ 72 SPIN_DEP_MAP_INIT(lockname) }
73#define __RW_LOCK_UNLOCKED(lockname) \
53 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ 74 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
54 .magic = RWLOCK_MAGIC, \ 75 .magic = RWLOCK_MAGIC, \
55 .owner = SPINLOCK_OWNER_INIT, \ 76 .owner = SPINLOCK_OWNER_INIT, \
56 .owner_cpu = -1 } 77 .owner_cpu = -1, \
78 RW_DEP_MAP_INIT(lockname) }
57#else 79#else
58# define SPIN_LOCK_UNLOCKED \ 80# define __SPIN_LOCK_UNLOCKED(lockname) \
59 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } 81 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
60#define RW_LOCK_UNLOCKED \ 82 SPIN_DEP_MAP_INIT(lockname) }
61 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } 83#define __RW_LOCK_UNLOCKED(lockname) \
84 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
85 RW_DEP_MAP_INIT(lockname) }
62#endif 86#endif
63 87
64#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED 88#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
65#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED 89#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
90
91#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
92#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
66 93
67#endif /* __LINUX_SPINLOCK_TYPES_H */ 94#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198e..27644af20b7c 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -12,10 +12,14 @@
12 * Released under the General Public License (GPL). 12 * Released under the General Public License (GPL).
13 */ 13 */
14 14
15#ifdef CONFIG_DEBUG_SPINLOCK 15#if defined(CONFIG_DEBUG_SPINLOCK) || \
16 defined(CONFIG_DEBUG_LOCK_ALLOC)
16 17
17typedef struct { 18typedef struct {
18 volatile unsigned int slock; 19 volatile unsigned int slock;
20#ifdef CONFIG_DEBUG_LOCK_ALLOC
21 struct lockdep_map dep_map;
22#endif
19} raw_spinlock_t; 23} raw_spinlock_t;
20 24
21#define __RAW_SPIN_LOCK_UNLOCKED { 1 } 25#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
@@ -30,6 +34,9 @@ typedef struct { } raw_spinlock_t;
30 34
31typedef struct { 35typedef struct {
32 /* no debug version on UP */ 36 /* no debug version on UP */
37#ifdef CONFIG_DEBUG_LOCK_ALLOC
38 struct lockdep_map dep_map;
39#endif
33} raw_rwlock_t; 40} raw_rwlock_t;
34 41
35#define __RAW_RW_LOCK_UNLOCKED { } 42#define __RAW_RW_LOCK_UNLOCKED { }
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 31accf2f0b13..ea54c4c9a4ec 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,7 +18,6 @@
18 */ 18 */
19 19
20#ifdef CONFIG_DEBUG_SPINLOCK 20#ifdef CONFIG_DEBUG_SPINLOCK
21
22#define __raw_spin_is_locked(x) ((x)->slock == 0) 21#define __raw_spin_is_locked(x) ((x)->slock == 0)
23 22
24static inline void __raw_spin_lock(raw_spinlock_t *lock) 23static inline void __raw_spin_lock(raw_spinlock_t *lock)
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
new file mode 100644
index 000000000000..9cc81e572224
--- /dev/null
+++ b/include/linux/stacktrace.h
@@ -0,0 +1,20 @@
1#ifndef __LINUX_STACKTRACE_H
2#define __LINUX_STACKTRACE_H
3
4#ifdef CONFIG_STACKTRACE
5struct stack_trace {
6 unsigned int nr_entries, max_entries;
7 unsigned long *entries;
8};
9
10extern void save_stack_trace(struct stack_trace *trace,
11 struct task_struct *task, int all_contexts,
12 unsigned int skip);
13
14extern void print_stack_trace(struct stack_trace *trace, int spaces);
15#else
16# define save_stack_trace(trace, task, all, skip) do { } while (0)
 17# define print_stack_trace(trace, spaces) do { } while (0)
18#endif
19
20#endif
diff --git a/include/linux/sunrpc/Kbuild b/include/linux/sunrpc/Kbuild
new file mode 100644
index 000000000000..0d1d768a27bf
--- /dev/null
+++ b/include/linux/sunrpc/Kbuild
@@ -0,0 +1 @@
unifdef-y := debug.h
diff --git a/include/linux/swap.h b/include/linux/swap.h
index cf6ca6e377bd..5e59184c9096 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -189,6 +189,7 @@ extern long vm_total_pages;
189 189
190#ifdef CONFIG_NUMA 190#ifdef CONFIG_NUMA
191extern int zone_reclaim_mode; 191extern int zone_reclaim_mode;
192extern int sysctl_min_unmapped_ratio;
192extern int zone_reclaim(struct zone *, gfp_t, unsigned int); 193extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
193#else 194#else
194#define zone_reclaim_mode 0 195#define zone_reclaim_mode 0
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 46e4d8f2771f..e4b1a4d4dcf3 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -188,7 +188,7 @@ enum
188 VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */ 188 VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */
189 VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */ 189 VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
190 VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */ 190 VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
191 VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */ 191 VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */
192 VM_PANIC_ON_OOM=33, /* panic at out-of-memory */ 192 VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
193 VM_VDSO_ENABLED=34, /* map VDSO into new processes? */ 193 VM_VDSO_ENABLED=34, /* map VDSO into new processes? */
194}; 194};
diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild
new file mode 100644
index 000000000000..5251a505b2f1
--- /dev/null
+++ b/include/linux/tc_act/Kbuild
@@ -0,0 +1 @@
header-y += tc_gact.h tc_ipt.h tc_mirred.h tc_pedit.h
diff --git a/include/linux/tc_ematch/Kbuild b/include/linux/tc_ematch/Kbuild
new file mode 100644
index 000000000000..381e93018df6
--- /dev/null
+++ b/include/linux/tc_ematch/Kbuild
@@ -0,0 +1 @@
header-y := tc_em_cmp.h tc_em_meta.h tc_em_nbyte.h tc_em_text.h
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index dc7c621e4647..46919f9f5eb3 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -1,4 +1,4 @@
1#include <linux/version.h> 1#include <linux/utsrelease.h>
2#include <linux/module.h> 2#include <linux/module.h>
3 3
4/* Simply sanity version stamp for modules. */ 4/* Simply sanity version stamp for modules. */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 544e855c7c02..794be7af58ae 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -68,7 +68,7 @@ struct task_struct;
68 wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) 68 wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
69 69
70#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ 70#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
71 .lock = SPIN_LOCK_UNLOCKED, \ 71 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
72 .task_list = { &(name).task_list, &(name).task_list } } 72 .task_list = { &(name).task_list, &(name).task_list } }
73 73
74#define DECLARE_WAIT_QUEUE_HEAD(name) \ 74#define DECLARE_WAIT_QUEUE_HEAD(name) \
@@ -77,9 +77,15 @@ struct task_struct;
77#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ 77#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
78 { .flags = word, .bit_nr = bit, } 78 { .flags = word, .bit_nr = bit, }
79 79
80/*
81 * lockdep: we want one lock-class for all waitqueue locks.
82 */
83extern struct lock_class_key waitqueue_lock_key;
84
80static inline void init_waitqueue_head(wait_queue_head_t *q) 85static inline void init_waitqueue_head(wait_queue_head_t *q)
81{ 86{
82 spin_lock_init(&q->lock); 87 spin_lock_init(&q->lock);
88 lockdep_set_class(&q->lock, &waitqueue_lock_key);
83 INIT_LIST_HEAD(&q->task_list); 89 INIT_LIST_HEAD(&q->task_list);
84} 90}
85 91
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 957c21c16d62..9bca3539a1e5 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -63,6 +63,8 @@ extern void destroy_workqueue(struct workqueue_struct *wq);
63 63
64extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); 64extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
65extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay)); 65extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
66extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
67 struct work_struct *work, unsigned long delay);
66extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); 68extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
67 69
68extern int FASTCALL(schedule_work(struct work_struct *work)); 70extern int FASTCALL(schedule_work(struct work_struct *work));
diff --git a/include/mtd/Kbuild b/include/mtd/Kbuild
new file mode 100644
index 000000000000..e1da2a5b2a57
--- /dev/null
+++ b/include/mtd/Kbuild
@@ -0,0 +1,2 @@
1unifdef-y := mtd-abi.h
2header-y := inftl-user.h jffs2-user.h mtd-user.h nftl-user.h
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index 31329fce1ff5..1da3f7fa7993 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -133,7 +133,7 @@ struct nand_ecclayout {
133}; 133};
134 134
135/** 135/**
136 * struct mtd_ecc_stats - error correction status 136 * struct mtd_ecc_stats - error correction stats
137 * 137 *
138 * @corrected: number of corrected bits 138 * @corrected: number of corrected bits
139 * @failed: number of uncorrectable errors 139 * @failed: number of uncorrectable errors
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 5ba72d95280c..2fec827c8801 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -67,6 +67,9 @@ struct unix_skb_parms {
67#define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock) 67#define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock)
68#define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock) 68#define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock)
69#define unix_state_wlock(s) spin_lock(&unix_sk(s)->lock) 69#define unix_state_wlock(s) spin_lock(&unix_sk(s)->lock)
70#define unix_state_wlock_nested(s) \
71 spin_lock_nested(&unix_sk(s)->lock, \
72 SINGLE_DEPTH_NESTING)
70#define unix_state_wunlock(s) spin_unlock(&unix_sk(s)->lock) 73#define unix_state_wunlock(s) spin_unlock(&unix_sk(s)->lock)
71 74
72#ifdef __KERNEL__ 75#ifdef __KERNEL__
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 7cd528e9d668..69374cd1a857 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -182,14 +182,26 @@ typedef struct {
182 182
183typedef struct ax25_route { 183typedef struct ax25_route {
184 struct ax25_route *next; 184 struct ax25_route *next;
185 atomic_t ref; 185 atomic_t refcount;
186 ax25_address callsign; 186 ax25_address callsign;
187 struct net_device *dev; 187 struct net_device *dev;
188 ax25_digi *digipeat; 188 ax25_digi *digipeat;
189 char ip_mode; 189 char ip_mode;
190 struct timer_list timer;
191} ax25_route; 190} ax25_route;
192 191
192static inline void ax25_hold_route(ax25_route *ax25_rt)
193{
194 atomic_inc(&ax25_rt->refcount);
195}
196
197extern void __ax25_put_route(ax25_route *ax25_rt);
198
199static inline void ax25_put_route(ax25_route *ax25_rt)
200{
201 if (atomic_dec_and_test(&ax25_rt->refcount))
202 __ax25_put_route(ax25_rt);
203}
204
193typedef struct { 205typedef struct {
194 char slave; /* slave_mode? */ 206 char slave; /* slave_mode? */
195 struct timer_list slave_timer; /* timeout timer */ 207 struct timer_list slave_timer; /* timeout timer */
@@ -348,17 +360,11 @@ extern int ax25_check_iframes_acked(ax25_cb *, unsigned short);
348extern void ax25_rt_device_down(struct net_device *); 360extern void ax25_rt_device_down(struct net_device *);
349extern int ax25_rt_ioctl(unsigned int, void __user *); 361extern int ax25_rt_ioctl(unsigned int, void __user *);
350extern struct file_operations ax25_route_fops; 362extern struct file_operations ax25_route_fops;
363extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
351extern int ax25_rt_autobind(ax25_cb *, ax25_address *); 364extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
352extern ax25_route *ax25_rt_find_route(ax25_route *, ax25_address *,
353 struct net_device *);
354extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *); 365extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
355extern void ax25_rt_free(void); 366extern void ax25_rt_free(void);
356 367
357static inline void ax25_put_route(ax25_route *ax25_rt)
358{
359 atomic_dec(&ax25_rt->ref);
360}
361
362/* ax25_std_in.c */ 368/* ax25_std_in.c */
363extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int); 369extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
364 370
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 911ceb5cd263..771d17783c18 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -175,6 +175,6 @@ extern int hci_sock_cleanup(void);
175extern int bt_sysfs_init(void); 175extern int bt_sysfs_init(void);
176extern void bt_sysfs_cleanup(void); 176extern void bt_sysfs_cleanup(void);
177 177
178extern struct class bt_class; 178extern struct class *bt_class;
179 179
180#endif /* __BLUETOOTH_H */ 180#endif /* __BLUETOOTH_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index b06a2d2f63d2..b2bdb1aa0429 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -54,7 +54,8 @@
54/* HCI device quirks */ 54/* HCI device quirks */
55enum { 55enum {
56 HCI_QUIRK_RESET_ON_INIT, 56 HCI_QUIRK_RESET_ON_INIT,
57 HCI_QUIRK_RAW_DEVICE 57 HCI_QUIRK_RAW_DEVICE,
58 HCI_QUIRK_FIXUP_BUFFER_SIZE
58}; 59};
59 60
60/* HCI device flags */ 61/* HCI device flags */
@@ -100,9 +101,10 @@ enum {
100#define HCIINQUIRY _IOR('H', 240, int) 101#define HCIINQUIRY _IOR('H', 240, int)
101 102
102/* HCI timeouts */ 103/* HCI timeouts */
103#define HCI_CONN_TIMEOUT (HZ * 40) 104#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */
104#define HCI_DISCONN_TIMEOUT (HZ * 2) 105#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */
105#define HCI_CONN_IDLE_TIMEOUT (HZ * 60) 106#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
107#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
106 108
107/* HCI Packet types */ 109/* HCI Packet types */
108#define HCI_COMMAND_PKT 0x01 110#define HCI_COMMAND_PKT 0x01
@@ -144,7 +146,7 @@ enum {
144#define LMP_TACCURACY 0x10 146#define LMP_TACCURACY 0x10
145#define LMP_RSWITCH 0x20 147#define LMP_RSWITCH 0x20
146#define LMP_HOLD 0x40 148#define LMP_HOLD 0x40
147#define LMP_SNIF 0x80 149#define LMP_SNIFF 0x80
148 150
149#define LMP_PARK 0x01 151#define LMP_PARK 0x01
150#define LMP_RSSI 0x02 152#define LMP_RSSI 0x02
@@ -159,13 +161,21 @@ enum {
159#define LMP_PSCHEME 0x02 161#define LMP_PSCHEME 0x02
160#define LMP_PCONTROL 0x04 162#define LMP_PCONTROL 0x04
161 163
164#define LMP_SNIFF_SUBR 0x02
165
166/* Connection modes */
167#define HCI_CM_ACTIVE 0x0000
168#define HCI_CM_HOLD 0x0001
169#define HCI_CM_SNIFF 0x0002
170#define HCI_CM_PARK 0x0003
171
162/* Link policies */ 172/* Link policies */
163#define HCI_LP_RSWITCH 0x0001 173#define HCI_LP_RSWITCH 0x0001
164#define HCI_LP_HOLD 0x0002 174#define HCI_LP_HOLD 0x0002
165#define HCI_LP_SNIFF 0x0004 175#define HCI_LP_SNIFF 0x0004
166#define HCI_LP_PARK 0x0008 176#define HCI_LP_PARK 0x0008
167 177
168/* Link mode */ 178/* Link modes */
169#define HCI_LM_ACCEPT 0x8000 179#define HCI_LM_ACCEPT 0x8000
170#define HCI_LM_MASTER 0x0001 180#define HCI_LM_MASTER 0x0001
171#define HCI_LM_AUTH 0x0002 181#define HCI_LM_AUTH 0x0002
@@ -191,7 +201,7 @@ struct hci_rp_read_loc_version {
191} __attribute__ ((packed)); 201} __attribute__ ((packed));
192 202
193#define OCF_READ_LOCAL_FEATURES 0x0003 203#define OCF_READ_LOCAL_FEATURES 0x0003
194struct hci_rp_read_loc_features { 204struct hci_rp_read_local_features {
195 __u8 status; 205 __u8 status;
196 __u8 features[8]; 206 __u8 features[8];
197} __attribute__ ((packed)); 207} __attribute__ ((packed));
@@ -375,17 +385,32 @@ struct hci_cp_change_conn_link_key {
375} __attribute__ ((packed)); 385} __attribute__ ((packed));
376 386
377#define OCF_READ_REMOTE_FEATURES 0x001B 387#define OCF_READ_REMOTE_FEATURES 0x001B
378struct hci_cp_read_rmt_features { 388struct hci_cp_read_remote_features {
379 __le16 handle; 389 __le16 handle;
380} __attribute__ ((packed)); 390} __attribute__ ((packed));
381 391
382#define OCF_READ_REMOTE_VERSION 0x001D 392#define OCF_READ_REMOTE_VERSION 0x001D
383struct hci_cp_read_rmt_version { 393struct hci_cp_read_remote_version {
384 __le16 handle; 394 __le16 handle;
385} __attribute__ ((packed)); 395} __attribute__ ((packed));
386 396
387/* Link Policy */ 397/* Link Policy */
388#define OGF_LINK_POLICY 0x02 398#define OGF_LINK_POLICY 0x02
399
400#define OCF_SNIFF_MODE 0x0003
401struct hci_cp_sniff_mode {
402 __le16 handle;
403 __le16 max_interval;
404 __le16 min_interval;
405 __le16 attempt;
406 __le16 timeout;
407} __attribute__ ((packed));
408
409#define OCF_EXIT_SNIFF_MODE 0x0004
410struct hci_cp_exit_sniff_mode {
411 __le16 handle;
412} __attribute__ ((packed));
413
389#define OCF_ROLE_DISCOVERY 0x0009 414#define OCF_ROLE_DISCOVERY 0x0009
390struct hci_cp_role_discovery { 415struct hci_cp_role_discovery {
391 __le16 handle; 416 __le16 handle;
@@ -406,7 +431,7 @@ struct hci_rp_read_link_policy {
406 __le16 policy; 431 __le16 policy;
407} __attribute__ ((packed)); 432} __attribute__ ((packed));
408 433
409#define OCF_SWITCH_ROLE 0x000B 434#define OCF_SWITCH_ROLE 0x000B
410struct hci_cp_switch_role { 435struct hci_cp_switch_role {
411 bdaddr_t bdaddr; 436 bdaddr_t bdaddr;
412 __u8 role; 437 __u8 role;
@@ -422,6 +447,14 @@ struct hci_rp_write_link_policy {
422 __le16 handle; 447 __le16 handle;
423} __attribute__ ((packed)); 448} __attribute__ ((packed));
424 449
450#define OCF_SNIFF_SUBRATE 0x0011
451struct hci_cp_sniff_subrate {
452 __le16 handle;
453 __le16 max_latency;
454 __le16 min_remote_timeout;
455 __le16 min_local_timeout;
456} __attribute__ ((packed));
457
425/* Status params */ 458/* Status params */
426#define OGF_STATUS_PARAM 0x05 459#define OGF_STATUS_PARAM 0x05
427 460
@@ -581,15 +614,15 @@ struct hci_ev_link_key_notify {
581 __u8 key_type; 614 __u8 key_type;
582} __attribute__ ((packed)); 615} __attribute__ ((packed));
583 616
584#define HCI_EV_RMT_FEATURES 0x0B 617#define HCI_EV_REMOTE_FEATURES 0x0B
585struct hci_ev_rmt_features { 618struct hci_ev_remote_features {
586 __u8 status; 619 __u8 status;
587 __le16 handle; 620 __le16 handle;
588 __u8 features[8]; 621 __u8 features[8];
589} __attribute__ ((packed)); 622} __attribute__ ((packed));
590 623
591#define HCI_EV_RMT_VERSION 0x0C 624#define HCI_EV_REMOTE_VERSION 0x0C
592struct hci_ev_rmt_version { 625struct hci_ev_remote_version {
593 __u8 status; 626 __u8 status;
594 __le16 handle; 627 __le16 handle;
595 __u8 lmp_ver; 628 __u8 lmp_ver;
@@ -610,6 +643,16 @@ struct hci_ev_pscan_rep_mode {
610 __u8 pscan_rep_mode; 643 __u8 pscan_rep_mode;
611} __attribute__ ((packed)); 644} __attribute__ ((packed));
612 645
646#define HCI_EV_SNIFF_SUBRATE 0x2E
647struct hci_ev_sniff_subrate {
648 __u8 status;
649 __le16 handle;
650 __le16 max_tx_latency;
651 __le16 max_rx_latency;
652 __le16 max_remote_timeout;
653 __le16 max_local_timeout;
654} __attribute__ ((packed));
655
613/* Internal events generated by Bluetooth stack */ 656/* Internal events generated by Bluetooth stack */
614#define HCI_EV_STACK_INTERNAL 0xFD 657#define HCI_EV_STACK_INTERNAL 0xFD
615struct hci_ev_stack_internal { 658struct hci_ev_stack_internal {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index bb9f81dc8723..d84855fe7336 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -31,10 +31,7 @@
31#define HCI_PROTO_L2CAP 0 31#define HCI_PROTO_L2CAP 0
32#define HCI_PROTO_SCO 1 32#define HCI_PROTO_SCO 1
33 33
34#define HCI_INIT_TIMEOUT (HZ * 10)
35
36/* HCI Core structures */ 34/* HCI Core structures */
37
38struct inquiry_data { 35struct inquiry_data {
39 bdaddr_t bdaddr; 36 bdaddr_t bdaddr;
40 __u8 pscan_rep_mode; 37 __u8 pscan_rep_mode;
@@ -81,6 +78,10 @@ struct hci_dev {
81 __u16 link_policy; 78 __u16 link_policy;
82 __u16 link_mode; 79 __u16 link_mode;
83 80
81 __u32 idle_timeout;
82 __u16 sniff_min_interval;
83 __u16 sniff_max_interval;
84
84 unsigned long quirks; 85 unsigned long quirks;
85 86
86 atomic_t cmd_cnt; 87 atomic_t cmd_cnt;
@@ -123,7 +124,8 @@ struct hci_dev {
123 124
124 atomic_t promisc; 125 atomic_t promisc;
125 126
126 struct class_device class_dev; 127 struct device *parent;
128 struct device dev;
127 129
128 struct module *owner; 130 struct module *owner;
129 131
@@ -145,18 +147,24 @@ struct hci_conn {
145 bdaddr_t dst; 147 bdaddr_t dst;
146 __u16 handle; 148 __u16 handle;
147 __u16 state; 149 __u16 state;
150 __u8 mode;
148 __u8 type; 151 __u8 type;
149 __u8 out; 152 __u8 out;
150 __u8 dev_class[3]; 153 __u8 dev_class[3];
154 __u8 features[8];
155 __u16 interval;
156 __u16 link_policy;
151 __u32 link_mode; 157 __u32 link_mode;
158 __u8 power_save;
152 unsigned long pend; 159 unsigned long pend;
153 160
154 unsigned int sent; 161 unsigned int sent;
155 162
156 struct sk_buff_head data_q; 163 struct sk_buff_head data_q;
157 164
158 struct timer_list timer; 165 struct timer_list disc_timer;
159 166 struct timer_list idle_timer;
167
160 struct hci_dev *hdev; 168 struct hci_dev *hdev;
161 void *l2cap_data; 169 void *l2cap_data;
162 void *sco_data; 170 void *sco_data;
@@ -211,7 +219,8 @@ void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
211enum { 219enum {
212 HCI_CONN_AUTH_PEND, 220 HCI_CONN_AUTH_PEND,
213 HCI_CONN_ENCRYPT_PEND, 221 HCI_CONN_ENCRYPT_PEND,
214 HCI_CONN_RSWITCH_PEND 222 HCI_CONN_RSWITCH_PEND,
223 HCI_CONN_MODE_CHANGE_PEND,
215}; 224};
216 225
217static inline void hci_conn_hash_init(struct hci_dev *hdev) 226static inline void hci_conn_hash_init(struct hci_dev *hdev)
@@ -286,31 +295,27 @@ int hci_conn_encrypt(struct hci_conn *conn);
286int hci_conn_change_link_key(struct hci_conn *conn); 295int hci_conn_change_link_key(struct hci_conn *conn);
287int hci_conn_switch_role(struct hci_conn *conn, uint8_t role); 296int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
288 297
289static inline void hci_conn_set_timer(struct hci_conn *conn, unsigned long timeout) 298void hci_conn_enter_active_mode(struct hci_conn *conn);
290{ 299void hci_conn_enter_sniff_mode(struct hci_conn *conn);
291 mod_timer(&conn->timer, jiffies + timeout);
292}
293
294static inline void hci_conn_del_timer(struct hci_conn *conn)
295{
296 del_timer(&conn->timer);
297}
298 300
299static inline void hci_conn_hold(struct hci_conn *conn) 301static inline void hci_conn_hold(struct hci_conn *conn)
300{ 302{
301 atomic_inc(&conn->refcnt); 303 atomic_inc(&conn->refcnt);
302 hci_conn_del_timer(conn); 304 del_timer(&conn->disc_timer);
303} 305}
304 306
305static inline void hci_conn_put(struct hci_conn *conn) 307static inline void hci_conn_put(struct hci_conn *conn)
306{ 308{
307 if (atomic_dec_and_test(&conn->refcnt)) { 309 if (atomic_dec_and_test(&conn->refcnt)) {
310 unsigned long timeo;
308 if (conn->type == ACL_LINK) { 311 if (conn->type == ACL_LINK) {
309 unsigned long timeo = (conn->out) ? 312 timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
310 HCI_DISCONN_TIMEOUT : HCI_DISCONN_TIMEOUT * 2; 313 if (!conn->out)
311 hci_conn_set_timer(conn, timeo); 314 timeo *= 2;
315 del_timer(&conn->idle_timer);
312 } else 316 } else
313 hci_conn_set_timer(conn, HZ / 100); 317 timeo = msecs_to_jiffies(10);
318 mod_timer(&conn->disc_timer, jiffies + timeo);
314 } 319 }
315} 320}
316 321
@@ -408,11 +413,13 @@ static inline int hci_recv_frame(struct sk_buff *skb)
408int hci_register_sysfs(struct hci_dev *hdev); 413int hci_register_sysfs(struct hci_dev *hdev);
409void hci_unregister_sysfs(struct hci_dev *hdev); 414void hci_unregister_sysfs(struct hci_dev *hdev);
410 415
411#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->class_dev.dev = (pdev)) 416#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
412 417
413/* ----- LMP capabilities ----- */ 418/* ----- LMP capabilities ----- */
414#define lmp_rswitch_capable(dev) (dev->features[0] & LMP_RSWITCH) 419#define lmp_rswitch_capable(dev) ((dev)->features[0] & LMP_RSWITCH)
415#define lmp_encrypt_capable(dev) (dev->features[0] & LMP_ENCRYPT) 420#define lmp_encrypt_capable(dev) ((dev)->features[0] & LMP_ENCRYPT)
421#define lmp_sniff_capable(dev) ((dev)->features[0] & LMP_SNIFF)
422#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
416 423
417/* ----- HCI protocols ----- */ 424/* ----- HCI protocols ----- */
418struct hci_proto { 425struct hci_proto {
diff --git a/include/net/sock.h b/include/net/sock.h
index 7b3d6b856946..324b3ea233d6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -44,6 +44,7 @@
44#include <linux/timer.h> 44#include <linux/timer.h>
45#include <linux/cache.h> 45#include <linux/cache.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/lockdep.h>
47#include <linux/netdevice.h> 48#include <linux/netdevice.h>
48#include <linux/skbuff.h> /* struct sk_buff */ 49#include <linux/skbuff.h> /* struct sk_buff */
49#include <linux/security.h> 50#include <linux/security.h>
@@ -78,14 +79,17 @@ typedef struct {
78 spinlock_t slock; 79 spinlock_t slock;
79 struct sock_iocb *owner; 80 struct sock_iocb *owner;
80 wait_queue_head_t wq; 81 wait_queue_head_t wq;
82 /*
83 * We express the mutex-alike socket_lock semantics
84 * to the lock validator by explicitly managing
85 * the slock as a lock variant (in addition to
86 * the slock itself):
87 */
88#ifdef CONFIG_DEBUG_LOCK_ALLOC
89 struct lockdep_map dep_map;
90#endif
81} socket_lock_t; 91} socket_lock_t;
82 92
83#define sock_lock_init(__sk) \
84do { spin_lock_init(&((__sk)->sk_lock.slock)); \
85 (__sk)->sk_lock.owner = NULL; \
86 init_waitqueue_head(&((__sk)->sk_lock.wq)); \
87} while(0)
88
89struct sock; 93struct sock;
90struct proto; 94struct proto;
91 95
@@ -747,6 +751,9 @@ extern void FASTCALL(release_sock(struct sock *sk));
747 751
748/* BH context may only use the following locking interface. */ 752/* BH context may only use the following locking interface. */
749#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) 753#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
754#define bh_lock_sock_nested(__sk) \
755 spin_lock_nested(&((__sk)->sk_lock.slock), \
756 SINGLE_DEPTH_NESTING)
750#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) 757#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
751 758
752extern struct sock *sk_alloc(int family, 759extern struct sock *sk_alloc(int family,
diff --git a/include/rdma/Kbuild b/include/rdma/Kbuild
new file mode 100644
index 000000000000..eb710ba9b1a0
--- /dev/null
+++ b/include/rdma/Kbuild
@@ -0,0 +1 @@
header-y := ib_user_mad.h
diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild
new file mode 100644
index 000000000000..14a033d73314
--- /dev/null
+++ b/include/scsi/Kbuild
@@ -0,0 +1,2 @@
1header-y += scsi.h
2unifdef-y := scsi_ioctl.h sg.h
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 253797c60095..55ebf035e620 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -47,10 +47,19 @@ enum iscsi_uevent_e {
47 ISCSI_UEVENT_TRANSPORT_EP_POLL = UEVENT_BASE + 13, 47 ISCSI_UEVENT_TRANSPORT_EP_POLL = UEVENT_BASE + 13,
48 ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT = UEVENT_BASE + 14, 48 ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT = UEVENT_BASE + 14,
49 49
50 ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
51
50 /* up events */ 52 /* up events */
51 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, 53 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
52 ISCSI_KEVENT_CONN_ERROR = KEVENT_BASE + 2, 54 ISCSI_KEVENT_CONN_ERROR = KEVENT_BASE + 2,
53 ISCSI_KEVENT_IF_ERROR = KEVENT_BASE + 3, 55 ISCSI_KEVENT_IF_ERROR = KEVENT_BASE + 3,
56 ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4,
57};
58
59enum iscsi_tgt_dscvr {
60 ISCSI_TGT_DSCVR_SEND_TARGETS = 1,
61 ISCSI_TGT_DSCVR_ISNS = 2,
62 ISCSI_TGT_DSCVR_SLP = 3,
54}; 63};
55 64
56struct iscsi_uevent { 65struct iscsi_uevent {
@@ -116,6 +125,17 @@ struct iscsi_uevent {
116 struct msg_transport_disconnect { 125 struct msg_transport_disconnect {
117 uint64_t ep_handle; 126 uint64_t ep_handle;
118 } ep_disconnect; 127 } ep_disconnect;
128 struct msg_tgt_dscvr {
129 enum iscsi_tgt_dscvr type;
130 uint32_t host_no;
131 /*
132 * enable = 1 to establish a new connection
133 * with the server. enable = 0 to disconnect
134 * from the server. Used primarily to switch
135 * from one iSNS server to another.
136 */
137 uint32_t enable;
138 } tgt_dscvr;
119 } u; 139 } u;
120 union { 140 union {
121 /* messages k -> u */ 141 /* messages k -> u */
@@ -138,6 +158,10 @@ struct iscsi_uevent {
138 uint32_t cid; 158 uint32_t cid;
139 uint32_t error; /* enum iscsi_err */ 159 uint32_t error; /* enum iscsi_err */
140 } connerror; 160 } connerror;
161 struct msg_session_destroyed {
162 uint32_t host_no;
163 uint32_t sid;
164 } d_session;
141 struct msg_transport_connect_ret { 165 struct msg_transport_connect_ret {
142 uint64_t handle; 166 uint64_t handle;
143 } ep_connect_ret; 167 } ep_connect_ret;
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index cbf7e58bd6f9..ba2760802ded 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -157,6 +157,11 @@ struct iscsi_conn {
157 int max_xmit_dlength; /* target_max_recv_dsl */ 157 int max_xmit_dlength; /* target_max_recv_dsl */
158 int hdrdgst_en; 158 int hdrdgst_en;
159 int datadgst_en; 159 int datadgst_en;
160 int ifmarker_en;
161 int ofmarker_en;
162 /* values userspace uses to id a conn */
163 int persistent_port;
164 char *persistent_address;
160 165
161 /* MIB-statistics */ 166 /* MIB-statistics */
162 uint64_t txdata_octets; 167 uint64_t txdata_octets;
@@ -196,8 +201,8 @@ struct iscsi_session {
196 int pdu_inorder_en; 201 int pdu_inorder_en;
197 int dataseq_inorder_en; 202 int dataseq_inorder_en;
198 int erl; 203 int erl;
199 int ifmarker_en; 204 int tpgt;
200 int ofmarker_en; 205 char *targetname;
201 206
202 /* control data */ 207 /* control data */
203 struct iscsi_transport *tt; 208 struct iscsi_transport *tt;
@@ -240,6 +245,10 @@ iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
240extern void iscsi_session_teardown(struct iscsi_cls_session *); 245extern void iscsi_session_teardown(struct iscsi_cls_session *);
241extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *); 246extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
242extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *); 247extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
248extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
249 enum iscsi_param param, char *buf, int buflen);
250extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
251 enum iscsi_param param, char *buf);
243 252
244#define session_to_cls(_sess) \ 253#define session_to_cls(_sess) \
245 hostdata_session(_sess->host->hostdata) 254 hostdata_session(_sess->host->hostdata)
@@ -255,6 +264,8 @@ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
255extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *, 264extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
256 int); 265 int);
257extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err); 266extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
267extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
268 enum iscsi_param param, char *buf);
258 269
259/* 270/*
260 * pdu and task processing 271 * pdu and task processing
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index e46cd404bd7d..371f70d9aa92 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -143,7 +143,7 @@ struct scsi_cmnd {
143 143
144extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t); 144extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
145extern void scsi_put_command(struct scsi_cmnd *); 145extern void scsi_put_command(struct scsi_cmnd *);
146extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int); 146extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
147extern void scsi_finish_command(struct scsi_cmnd *cmd); 147extern void scsi_finish_command(struct scsi_cmnd *cmd);
148extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd); 148extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
149 149
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a42efd6e4be8..b3dd90f3e858 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -542,6 +542,9 @@ struct Scsi_Host {
542 */ 542 */
543 unsigned ordered_tag:1; 543 unsigned ordered_tag:1;
544 544
545 /* task mgmt function in progress */
546 unsigned tmf_in_progress:1;
547
545 /* 548 /*
546 * Optional work queue to be utilized by the transport 549 * Optional work queue to be utilized by the transport
547 */ 550 */
@@ -619,7 +622,8 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
619{ 622{
620 return shost->shost_state == SHOST_RECOVERY || 623 return shost->shost_state == SHOST_RECOVERY ||
621 shost->shost_state == SHOST_CANCEL_RECOVERY || 624 shost->shost_state == SHOST_CANCEL_RECOVERY ||
622 shost->shost_state == SHOST_DEL_RECOVERY; 625 shost->shost_state == SHOST_DEL_RECOVERY ||
626 shost->tmf_in_progress;
623} 627}
624 628
625extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); 629extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index b684426a5900..5a3df1d7085f 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -34,6 +34,7 @@ struct iscsi_cls_conn;
34struct iscsi_conn; 34struct iscsi_conn;
35struct iscsi_cmd_task; 35struct iscsi_cmd_task;
36struct iscsi_mgmt_task; 36struct iscsi_mgmt_task;
37struct sockaddr;
37 38
38/** 39/**
39 * struct iscsi_transport - iSCSI Transport template 40 * struct iscsi_transport - iSCSI Transport template
@@ -46,7 +47,12 @@ struct iscsi_mgmt_task;
46 * @bind_conn: associate this connection with existing iSCSI session 47 * @bind_conn: associate this connection with existing iSCSI session
47 * and specified transport descriptor 48 * and specified transport descriptor
48 * @destroy_conn: destroy inactive iSCSI connection 49 * @destroy_conn: destroy inactive iSCSI connection
49 * @set_param: set iSCSI Data-Path operational parameter 50 * @set_param: set iSCSI parameter. Return 0 on success, -ENODATA
51 * when param is not supported, and a -Exx value on other
52 * error.
53 * @get_param get iSCSI parameter. Must return number of bytes
54 * copied to buffer on success, -ENODATA when param
55 * is not supported, and a -Exx value on other error
50 * @start_conn: set connection to be operational 56 * @start_conn: set connection to be operational
51 * @stop_conn: suspend/recover/terminate connection 57 * @stop_conn: suspend/recover/terminate connection
52 * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text. 58 * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
@@ -97,15 +103,11 @@ struct iscsi_transport {
97 void (*stop_conn) (struct iscsi_cls_conn *conn, int flag); 103 void (*stop_conn) (struct iscsi_cls_conn *conn, int flag);
98 void (*destroy_conn) (struct iscsi_cls_conn *conn); 104 void (*destroy_conn) (struct iscsi_cls_conn *conn);
99 int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param, 105 int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param,
100 uint32_t value); 106 char *buf, int buflen);
101 int (*get_conn_param) (struct iscsi_cls_conn *conn, 107 int (*get_conn_param) (struct iscsi_cls_conn *conn,
102 enum iscsi_param param, uint32_t *value); 108 enum iscsi_param param, char *buf);
103 int (*get_session_param) (struct iscsi_cls_session *session, 109 int (*get_session_param) (struct iscsi_cls_session *session,
104 enum iscsi_param param, uint32_t *value); 110 enum iscsi_param param, char *buf);
105 int (*get_conn_str_param) (struct iscsi_cls_conn *conn,
106 enum iscsi_param param, char *buf);
107 int (*get_session_str_param) (struct iscsi_cls_session *session,
108 enum iscsi_param param, char *buf);
109 int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 111 int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
110 char *data, uint32_t data_size); 112 char *data, uint32_t data_size);
111 void (*get_stats) (struct iscsi_cls_conn *conn, 113 void (*get_stats) (struct iscsi_cls_conn *conn,
@@ -127,6 +129,8 @@ struct iscsi_transport {
127 uint64_t *ep_handle); 129 uint64_t *ep_handle);
128 int (*ep_poll) (uint64_t ep_handle, int timeout_ms); 130 int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
129 void (*ep_disconnect) (uint64_t ep_handle); 131 void (*ep_disconnect) (uint64_t ep_handle);
132 int (*tgt_dscvr) (enum iscsi_tgt_dscvr type, uint32_t host_no,
133 uint32_t enable, struct sockaddr *dst_addr);
130}; 134};
131 135
132/* 136/*
@@ -155,13 +159,6 @@ struct iscsi_cls_conn {
155 struct iscsi_transport *transport; 159 struct iscsi_transport *transport;
156 uint32_t cid; /* connection id */ 160 uint32_t cid; /* connection id */
157 161
158 /* portal/group values we got during discovery */
159 char *persistent_address;
160 int persistent_port;
161 /* portal/group values we are currently using */
162 char *address;
163 int port;
164
165 int active; /* must be accessed with the connlock */ 162 int active; /* must be accessed with the connlock */
166 struct device dev; /* sysfs transport/container device */ 163 struct device dev; /* sysfs transport/container device */
167 struct mempool_zone *z_error; 164 struct mempool_zone *z_error;
@@ -185,16 +182,11 @@ struct iscsi_cls_session {
185 struct list_head host_list; 182 struct list_head host_list;
186 struct iscsi_transport *transport; 183 struct iscsi_transport *transport;
187 184
188 /* iSCSI values used as unique id by userspace. */
189 char *targetname;
190 int tpgt;
191
192 /* recovery fields */ 185 /* recovery fields */
193 int recovery_tmo; 186 int recovery_tmo;
194 struct work_struct recovery_work; 187 struct work_struct recovery_work;
195 188
196 int target_id; 189 int target_id;
197 int channel;
198 190
199 int sid; /* session id */ 191 int sid; /* session id */
200 void *dd_data; /* LLD private data */ 192 void *dd_data; /* LLD private data */
@@ -207,8 +199,10 @@ struct iscsi_cls_session {
207#define iscsi_session_to_shost(_session) \ 199#define iscsi_session_to_shost(_session) \
208 dev_to_shost(_session->dev.parent) 200 dev_to_shost(_session->dev.parent)
209 201
202#define starget_to_session(_stgt) \
203 iscsi_dev_to_session(_stgt->dev.parent)
204
210struct iscsi_host { 205struct iscsi_host {
211 int next_target_id;
212 struct list_head sessions; 206 struct list_head sessions;
213 struct mutex mutex; 207 struct mutex mutex;
214}; 208};
@@ -216,8 +210,17 @@ struct iscsi_host {
216/* 210/*
217 * session and connection functions that can be used by HW iSCSI LLDs 211 * session and connection functions that can be used by HW iSCSI LLDs
218 */ 212 */
213extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
214 struct iscsi_transport *transport);
215extern int iscsi_add_session(struct iscsi_cls_session *session,
216 unsigned int target_id);
217extern int iscsi_if_create_session_done(struct iscsi_cls_conn *conn);
218extern int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn);
219extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost, 219extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
220 struct iscsi_transport *t, int channel); 220 struct iscsi_transport *t,
221 unsigned int target_id);
222extern void iscsi_remove_session(struct iscsi_cls_session *session);
223extern void iscsi_free_session(struct iscsi_cls_session *session);
221extern int iscsi_destroy_session(struct iscsi_cls_session *session); 224extern int iscsi_destroy_session(struct iscsi_cls_session *session);
222extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, 225extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
223 uint32_t cid); 226 uint32_t cid);
@@ -225,4 +228,5 @@ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
225extern void iscsi_unblock_session(struct iscsi_cls_session *session); 228extern void iscsi_unblock_session(struct iscsi_cls_session *session);
226extern void iscsi_block_session(struct iscsi_cls_session *session); 229extern void iscsi_block_session(struct iscsi_cls_session *session);
227 230
231
228#endif 232#endif
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 93cfb4bf4211..e3c503cd175e 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/transport_class.h> 4#include <linux/transport_class.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/mutex.h>
6 7
7struct scsi_transport_template; 8struct scsi_transport_template;
8struct sas_rphy; 9struct sas_rphy;
@@ -55,7 +56,6 @@ struct sas_phy {
55 enum sas_linkrate minimum_linkrate; 56 enum sas_linkrate minimum_linkrate;
56 enum sas_linkrate maximum_linkrate_hw; 57 enum sas_linkrate maximum_linkrate_hw;
57 enum sas_linkrate maximum_linkrate; 58 enum sas_linkrate maximum_linkrate;
58 u8 port_identifier;
59 59
60 /* internal state */ 60 /* internal state */
61 unsigned int local_attached : 1; 61 unsigned int local_attached : 1;
@@ -66,8 +66,8 @@ struct sas_phy {
66 u32 loss_of_dword_sync_count; 66 u32 loss_of_dword_sync_count;
67 u32 phy_reset_problem_count; 67 u32 phy_reset_problem_count;
68 68
69 /* the other end of the link */ 69 /* for the list of phys belonging to a port */
70 struct sas_rphy *rphy; 70 struct list_head port_siblings;
71}; 71};
72 72
73#define dev_to_phy(d) \ 73#define dev_to_phy(d) \
@@ -124,6 +124,24 @@ struct sas_expander_device {
124#define rphy_to_expander_device(r) \ 124#define rphy_to_expander_device(r) \
125 container_of((r), struct sas_expander_device, rphy) 125 container_of((r), struct sas_expander_device, rphy)
126 126
127struct sas_port {
128 struct device dev;
129
130 u8 port_identifier;
131 int num_phys;
132
133 /* the other end of the link */
134 struct sas_rphy *rphy;
135
136 struct mutex phy_list_mutex;
137 struct list_head phy_list;
138};
139
140#define dev_to_sas_port(d) \
141 container_of((d), struct sas_port, dev)
142#define transport_class_to_sas_port(cdev) \
143 dev_to_sas_port((cdev)->dev)
144
127/* The functions by which the transport class and the driver communicate */ 145/* The functions by which the transport class and the driver communicate */
128struct sas_function_template { 146struct sas_function_template {
129 int (*get_linkerrors)(struct sas_phy *); 147 int (*get_linkerrors)(struct sas_phy *);
@@ -133,6 +151,7 @@ struct sas_function_template {
133}; 151};
134 152
135 153
154void sas_remove_children(struct device *);
136extern void sas_remove_host(struct Scsi_Host *); 155extern void sas_remove_host(struct Scsi_Host *);
137 156
138extern struct sas_phy *sas_phy_alloc(struct device *, int); 157extern struct sas_phy *sas_phy_alloc(struct device *, int);
@@ -141,13 +160,21 @@ extern int sas_phy_add(struct sas_phy *);
141extern void sas_phy_delete(struct sas_phy *); 160extern void sas_phy_delete(struct sas_phy *);
142extern int scsi_is_sas_phy(const struct device *); 161extern int scsi_is_sas_phy(const struct device *);
143 162
144extern struct sas_rphy *sas_end_device_alloc(struct sas_phy *); 163extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
145extern struct sas_rphy *sas_expander_alloc(struct sas_phy *, enum sas_device_type); 164extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
146void sas_rphy_free(struct sas_rphy *); 165void sas_rphy_free(struct sas_rphy *);
147extern int sas_rphy_add(struct sas_rphy *); 166extern int sas_rphy_add(struct sas_rphy *);
148extern void sas_rphy_delete(struct sas_rphy *); 167extern void sas_rphy_delete(struct sas_rphy *);
149extern int scsi_is_sas_rphy(const struct device *); 168extern int scsi_is_sas_rphy(const struct device *);
150 169
170struct sas_port *sas_port_alloc(struct device *, int);
171int sas_port_add(struct sas_port *);
172void sas_port_free(struct sas_port *);
173void sas_port_delete(struct sas_port *);
174void sas_port_add_phy(struct sas_port *, struct sas_phy *);
175void sas_port_delete_phy(struct sas_port *, struct sas_phy *);
176int scsi_is_sas_port(const struct device *);
177
151extern struct scsi_transport_template * 178extern struct scsi_transport_template *
152sas_attach_transport(struct sas_function_template *); 179sas_attach_transport(struct sas_function_template *);
153extern void sas_release_transport(struct scsi_transport_template *); 180extern void sas_release_transport(struct scsi_transport_template *);
diff --git a/include/sound/Kbuild b/include/sound/Kbuild
new file mode 100644
index 000000000000..3a5a3df61496
--- /dev/null
+++ b/include/sound/Kbuild
@@ -0,0 +1,2 @@
1header-y := asound_fm.h hdsp.h hdspm.h sfnt_info.h sscape_ioctl.h
2unifdef-y := asequencer.h asound.h emu10k1.h sb16_csp.h
diff --git a/include/video/Kbuild b/include/video/Kbuild
new file mode 100644
index 000000000000..76a60737cc15
--- /dev/null
+++ b/include/video/Kbuild
@@ -0,0 +1 @@
unifdef-y := sisfb.h