Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpi_bus.h | 3
-rw-r--r--  include/asm-alpha/atomic.h | 64
-rw-r--r--  include/asm-alpha/kdebug.h | 1
-rw-r--r--  include/asm-alpha/local.h | 126
-rw-r--r--  include/asm-alpha/pgtable.h | 4
-rw-r--r--  include/asm-alpha/system.h | 226
-rw-r--r--  include/asm-arm/atomic.h | 1
-rw-r--r--  include/asm-arm/kdebug.h | 1
-rw-r--r--  include/asm-arm/kexec.h | 2
-rw-r--r--  include/asm-arm/pgtable-nommu.h | 4
-rw-r--r--  include/asm-arm/pgtable.h | 4
-rw-r--r--  include/asm-arm/system.h | 4
-rw-r--r--  include/asm-arm26/atomic.h | 1
-rw-r--r--  include/asm-arm26/kdebug.h | 1
-rw-r--r--  include/asm-arm26/pgtable.h | 4
-rw-r--r--  include/asm-arm26/system.h | 2
-rw-r--r--  include/asm-avr32/kdebug.h | 25
-rw-r--r--  include/asm-avr32/pgtable.h | 4
-rw-r--r--  include/asm-blackfin/system.h | 1
-rw-r--r--  include/asm-cris/kdebug.h | 1
-rw-r--r--  include/asm-frv/atomic.h | 91
-rw-r--r--  include/asm-frv/kdebug.h | 1
-rw-r--r--  include/asm-frv/pgtable.h | 4
-rw-r--r--  include/asm-frv/semaphore.h | 14
-rw-r--r--  include/asm-frv/system.h | 70
-rw-r--r--  include/asm-generic/atomic.h | 140
-rw-r--r--  include/asm-generic/kdebug.h | 8
-rw-r--r--  include/asm-generic/local.h | 33
-rw-r--r--  include/asm-h8300/kdebug.h | 1
-rw-r--r--  include/asm-h8300/pgtable.h | 4
-rw-r--r--  include/asm-h8300/system.h | 1
-rw-r--r--  include/asm-i386/atomic.h | 45
-rw-r--r--  include/asm-i386/cmpxchg.h | 293
-rw-r--r--  include/asm-i386/elf.h | 2
-rw-r--r--  include/asm-i386/ioctls.h | 4
-rw-r--r--  include/asm-i386/kdebug.h | 25
-rw-r--r--  include/asm-i386/kexec.h | 2
-rw-r--r--  include/asm-i386/local.h | 205
-rw-r--r--  include/asm-i386/pgtable.h | 6
-rw-r--r--  include/asm-i386/serial.h | 16
-rw-r--r--  include/asm-i386/system.h | 234
-rw-r--r--  include/asm-i386/termbits.h | 14
-rw-r--r--  include/asm-i386/termios.h | 6
-rw-r--r--  include/asm-i386/unistd.h | 3
-rw-r--r--  include/asm-ia64/atomic.h | 53
-rw-r--r--  include/asm-ia64/kdebug.h | 27
-rw-r--r--  include/asm-ia64/kexec.h | 2
-rw-r--r--  include/asm-ia64/local.h | 51
-rw-r--r--  include/asm-ia64/pgtable.h | 4
-rw-r--r--  include/asm-m32r/atomic.h | 23
-rw-r--r--  include/asm-m32r/kdebug.h | 1
-rw-r--r--  include/asm-m32r/pgtable.h | 4
-rw-r--r--  include/asm-m32r/system.h | 2
-rw-r--r--  include/asm-m68k/atomic.h | 31
-rw-r--r--  include/asm-m68k/kdebug.h | 1
-rw-r--r--  include/asm-m68k/pgtable.h | 4
-rw-r--r--  include/asm-m68k/system.h | 1
-rw-r--r--  include/asm-m68knommu/atomic.h | 25
-rw-r--r--  include/asm-m68knommu/kdebug.h | 1
-rw-r--r--  include/asm-m68knommu/pgtable.h | 4
-rw-r--r--  include/asm-m68knommu/system.h | 1
-rw-r--r--  include/asm-mips/atomic.h | 57
-rw-r--r--  include/asm-mips/kdebug.h | 1
-rw-r--r--  include/asm-mips/kexec.h | 2
-rw-r--r--  include/asm-mips/local.h | 304
-rw-r--r--  include/asm-mips/mach-au1x00/au1550_spi.h | 16
-rw-r--r--  include/asm-mips/pgtable.h | 4
-rw-r--r--  include/asm-mips/system.h | 126
-rw-r--r--  include/asm-parisc/atomic.h | 56
-rw-r--r--  include/asm-parisc/kdebug.h | 1
-rw-r--r--  include/asm-parisc/local.h | 41
-rw-r--r--  include/asm-parisc/pgtable.h | 4
-rw-r--r--  include/asm-powerpc/atomic.h | 7
-rw-r--r--  include/asm-powerpc/bitops.h | 1
-rw-r--r--  include/asm-powerpc/iommu.h | 14
-rw-r--r--  include/asm-powerpc/kdebug.h | 34
-rw-r--r--  include/asm-powerpc/kexec.h | 2
-rw-r--r--  include/asm-powerpc/kprobes.h | 7
-rw-r--r--  include/asm-powerpc/local.h | 201
-rw-r--r--  include/asm-powerpc/machdep.h | 19
-rw-r--r--  include/asm-powerpc/mmu-44x.h | 78
-rw-r--r--  include/asm-powerpc/mmu.h | 7
-rw-r--r--  include/asm-powerpc/mpc52xx.h | 11
-rw-r--r--  include/asm-powerpc/mpic.h | 20
-rw-r--r--  include/asm-powerpc/of_device.h | 2
-rw-r--r--  include/asm-powerpc/page.h | 10
-rw-r--r--  include/asm-powerpc/page_32.h | 2
-rw-r--r--  include/asm-powerpc/parport.h | 5
-rw-r--r--  include/asm-powerpc/pgalloc-32.h | 41
-rw-r--r--  include/asm-powerpc/pgalloc-64.h | 152
-rw-r--r--  include/asm-powerpc/pgalloc.h | 154
-rw-r--r--  include/asm-powerpc/pgtable-4k.h | 3
-rw-r--r--  include/asm-powerpc/pgtable-64k.h | 5
-rw-r--r--  include/asm-powerpc/pgtable-ppc32.h | 813
-rw-r--r--  include/asm-powerpc/pgtable-ppc64.h | 492
-rw-r--r--  include/asm-powerpc/pgtable.h | 493
-rw-r--r--  include/asm-powerpc/pmac_feature.h | 2
-rw-r--r--  include/asm-powerpc/prom.h | 34
-rw-r--r--  include/asm-powerpc/ps3.h | 33
-rw-r--r--  include/asm-powerpc/suspend.h | 9
-rw-r--r--  include/asm-powerpc/system.h | 130
-rw-r--r--  include/asm-powerpc/tsi108.h | 12
-rw-r--r--  include/asm-powerpc/tsi108_pci.h | 45
-rw-r--r--  include/asm-powerpc/udbg.h | 1
-rw-r--r--  include/asm-ppc/kdebug.h | 1
-rw-r--r--  include/asm-ppc/pgtable.h | 4
-rw-r--r--  include/asm-ppc/system.h | 2
-rw-r--r--  include/asm-s390/kdebug.h | 30
-rw-r--r--  include/asm-s390/kexec.h | 2
-rw-r--r--  include/asm-s390/qdio.h | 1
-rw-r--r--  include/asm-sh/kdebug.h | 1
-rw-r--r--  include/asm-sh/kexec.h | 2
-rw-r--r--  include/asm-sh/pgtable.h | 4
-rw-r--r--  include/asm-sh/system.h | 10
-rw-r--r--  include/asm-sh64/kdebug.h | 1
-rw-r--r--  include/asm-sh64/pgtable.h | 4
-rw-r--r--  include/asm-sh64/system.h | 2
-rw-r--r--  include/asm-sparc/kdebug.h | 4
-rw-r--r--  include/asm-sparc/system.h | 1
-rw-r--r--  include/asm-sparc64/Kbuild | 1
-rw-r--r--  include/asm-sparc64/atomic.h | 53
-rw-r--r--  include/asm-sparc64/const.h | 19
-rw-r--r--  include/asm-sparc64/kdebug.h | 23
-rw-r--r--  include/asm-sparc64/local.h | 41
-rw-r--r--  include/asm-sparc64/lsu.h | 2
-rw-r--r--  include/asm-sparc64/mmu.h | 2
-rw-r--r--  include/asm-sparc64/page.h | 2
-rw-r--r--  include/asm-sparc64/pgtable.h | 2
-rw-r--r--  include/asm-sparc64/pstate.h | 2
-rw-r--r--  include/asm-sparc64/sfafsr.h | 2
-rw-r--r--  include/asm-sparc64/system.h | 1
-rw-r--r--  include/asm-um/cmpxchg.h | 6
-rw-r--r--  include/asm-um/kdebug.h | 1
-rw-r--r--  include/asm-v850/kdebug.h | 1
-rw-r--r--  include/asm-v850/system.h | 1
-rw-r--r--  include/asm-x86_64/Kbuild | 1
-rw-r--r--  include/asm-x86_64/atomic.h | 65
-rw-r--r--  include/asm-x86_64/cmpxchg.h | 134
-rw-r--r--  include/asm-x86_64/kdebug.h | 25
-rw-r--r--  include/asm-x86_64/kexec.h | 2
-rw-r--r--  include/asm-x86_64/local.h | 196
-rw-r--r--  include/asm-x86_64/page.h | 9
-rw-r--r--  include/asm-x86_64/pgtable.h | 17
-rw-r--r--  include/asm-x86_64/serial.h | 16
-rw-r--r--  include/asm-x86_64/system.h | 96
-rw-r--r--  include/asm-x86_64/termbits.h | 2
-rw-r--r--  include/asm-x86_64/unistd.h | 2
-rw-r--r--  include/asm-xtensa/atomic.h | 23
-rw-r--r--  include/asm-xtensa/kdebug.h | 1
-rw-r--r--  include/asm-xtensa/system.h | 2
-rw-r--r--  include/linux/Kbuild | 7
-rw-r--r--  include/linux/awe_voice.h | 525
-rw-r--r--  include/linux/byteorder/generic.h | 25
-rw-r--r--  include/linux/byteorder/swab.h | 108
-rw-r--r--  include/linux/clockchips.h | 10
-rw-r--r--  include/linux/clocksource.h | 15
-rw-r--r--  include/linux/compat_ioctl.h | 830
-rw-r--r--  include/linux/console.h | 7
-rw-r--r--  include/linux/console_struct.h | 3
-rw-r--r--  include/linux/const.h (renamed from include/asm-x86_64/const.h) | 9
-rw-r--r--  include/linux/cpu.h | 3
-rw-r--r--  include/linux/cyclades.h | 229
-rw-r--r--  include/linux/dcache.h | 6
-rw-r--r--  include/linux/device.h | 4
-rw-r--r--  include/linux/display.h | 61
-rw-r--r--  include/linux/ds1wm.h | 11
-rw-r--r--  include/linux/efi.h | 1
-rw-r--r--  include/linux/ext3_fs.h | 1
-rw-r--r--  include/linux/fb.h | 57
-rw-r--r--  include/linux/font.h | 3
-rw-r--r--  include/linux/fs.h | 15
-rw-r--r--  include/linux/futex.h | 29
-rw-r--r--  include/linux/init.h | 3
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/interrupt.h | 38
-rw-r--r--  include/linux/ioctl32.h | 17
-rw-r--r--  include/linux/ipc.h | 11
-rw-r--r--  include/linux/irq.h | 4
-rw-r--r--  include/linux/isdn/capiutil.h | 1
-rw-r--r--  include/linux/isdn_divertif.h | 5
-rw-r--r--  include/linux/kallsyms.h | 13
-rw-r--r--  include/linux/kdebug.h | 20
-rw-r--r--  include/linux/kexec.h | 17
-rw-r--r--  include/linux/kprobes.h | 22
-rw-r--r--  include/linux/libata.h | 40
-rw-r--r--  include/linux/list.h | 11
-rw-r--r--  include/linux/loop.h | 2
-rw-r--r--  include/linux/mc146818rtc.h | 7
-rw-r--r--  include/linux/mnt_namespace.h | 5
-rw-r--r--  include/linux/module.h | 30
-rw-r--r--  include/linux/msdos_fs.h | 3
-rw-r--r--  include/linux/nfs_fs_sb.h | 2
-rw-r--r--  include/linux/nsproxy.h | 3
-rw-r--r--  include/linux/pagemap.h | 11
-rw-r--r--  include/linux/parport.h | 8
-rw-r--r--  include/linux/parport_pc.h | 3
-rw-r--r--  include/linux/phantom.h | 42
-rw-r--r--  include/linux/pid_namespace.h | 2
-rw-r--r--  include/linux/pmu.h | 8
-rw-r--r--  include/linux/pnp.h | 3
-rw-r--r--  include/linux/poison.h | 4
-rw-r--r--  include/linux/proc_fs.h | 3
-rw-r--r--  include/linux/quota.h | 4
-rw-r--r--  include/linux/quotaops.h | 3
-rw-r--r--  include/linux/reiserfs_fs_sb.h | 3
-rw-r--r--  include/linux/rtc.h | 39
-rw-r--r--  include/linux/sched.h | 35
-rw-r--r--  include/linux/spi/Kbuild | 1
-rw-r--r--  include/linux/spi/spi.h | 82
-rw-r--r--  include/linux/spi/spidev.h | 124
-rw-r--r--  include/linux/spinlock_types.h | 6
-rw-r--r--  include/linux/stacktrace.h | 6
-rw-r--r--  include/linux/stat.h | 3
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  include/linux/svga.h | 1
-rw-r--r--  include/linux/sysdev.h | 1
-rw-r--r--  include/linux/time.h | 3
-rw-r--r--  include/linux/timer.h | 1
-rw-r--r--  include/linux/tty.h | 2
-rw-r--r--  include/linux/uinput.h | 2
-rw-r--r--  include/linux/utsname.h | 19
-rw-r--r--  include/linux/vmalloc.h | 1
-rw-r--r--  include/linux/vt_kern.h | 3
-rw-r--r--  include/linux/workqueue.h | 6
-rw-r--r--  include/math-emu/extended.h | 396
-rw-r--r--  include/net/sock.h | 9
-rw-r--r--  include/video/mach64.h | 1
-rw-r--r--  include/video/permedia2.h | 4
-rw-r--r--  include/video/tgafb.h | 47
229 files changed, 5251 insertions, 4058 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 16c3c441256e..9cfd5b1a48eb 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -303,6 +303,9 @@ struct acpi_device {
303#define to_acpi_device(d) container_of(d, struct acpi_device, dev) 303#define to_acpi_device(d) container_of(d, struct acpi_device, dev)
304#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv) 304#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv)
305 305
306/* acpi_device.dev.bus == &acpi_bus_type */
307extern struct bus_type acpi_bus_type;
308
306/* 309/*
307 * Events 310 * Events
308 * ------ 311 * ------
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index fc77f7413083..f5cb7b878af2 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -2,6 +2,7 @@
2#define _ALPHA_ATOMIC_H 2#define _ALPHA_ATOMIC_H
3 3
4#include <asm/barrier.h> 4#include <asm/barrier.h>
5#include <asm/system.h>
5 6
6/* 7/*
7 * Atomic operations that C can't guarantee us. Useful for 8 * Atomic operations that C can't guarantee us. Useful for
@@ -175,19 +176,64 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
175 return result; 176 return result;
176} 177}
177 178
178#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) 179#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
180#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
181
182#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
179#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 183#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
180 184
181#define atomic_add_unless(v, a, u) \ 185/**
182({ \ 186 * atomic_add_unless - add unless the number is a given value
183 int c, old; \ 187 * @v: pointer of type atomic_t
184 c = atomic_read(v); \ 188 * @a: the amount to add to v...
185 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 189 * @u: ...unless v is equal to u.
186 c = old; \ 190 *
187 c != (u); \ 191 * Atomically adds @a to @v, so long as it was not @u.
188}) 192 * Returns non-zero if @v was not @u, and zero otherwise.
193 */
194static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
195{
196 int c, old;
197 c = atomic_read(v);
198 for (;;) {
199 if (unlikely(c == (u)))
200 break;
201 old = atomic_cmpxchg((v), c, c + (a));
202 if (likely(old == c))
203 break;
204 c = old;
205 }
206 return c != (u);
207}
208
189#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 209#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
190 210
211/**
212 * atomic64_add_unless - add unless the number is a given value
213 * @v: pointer of type atomic64_t
214 * @a: the amount to add to v...
215 * @u: ...unless v is equal to u.
216 *
217 * Atomically adds @a to @v, so long as it was not @u.
218 * Returns non-zero if @v was not @u, and zero otherwise.
219 */
220static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
221{
222 long c, old;
223 c = atomic64_read(v);
224 for (;;) {
225 if (unlikely(c == (u)))
226 break;
227 old = atomic64_cmpxchg((v), c, c + (a));
228 if (likely(old == c))
229 break;
230 c = old;
231 }
232 return c != (u);
233}
234
235#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
236
191#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 237#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
192#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) 238#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
193 239
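
For context, the atomic_add_unless()/atomic_inc_not_zero() helpers rewritten above are typically used to take a reference only while an object is still live. A minimal usage sketch follows (illustrative only, not part of this patch; struct my_obj and my_obj_get() are made-up names):

	/* Take a reference unless the refcount has already dropped to zero.
	 * atomic_inc_not_zero() returns non-zero when the increment happened. */
	struct my_obj {
		atomic_t refcount;
	};

	static int my_obj_get(struct my_obj *obj)
	{
		return atomic_inc_not_zero(&obj->refcount);	/* 0 => object is being torn down */
	}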
diff --git a/include/asm-alpha/kdebug.h b/include/asm-alpha/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-alpha/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-alpha/local.h b/include/asm-alpha/local.h
index 90a510fa358e..6ad3ea696421 100644
--- a/include/asm-alpha/local.h
+++ b/include/asm-alpha/local.h
@@ -4,37 +4,115 @@
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6 6
7typedef atomic64_t local_t; 7typedef struct
8{
9 atomic_long_t a;
10} local_t;
8 11
9#define LOCAL_INIT(i) ATOMIC64_INIT(i) 12#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
10#define local_read(v) atomic64_read(v) 13#define local_read(l) atomic_long_read(&(l)->a)
11#define local_set(v,i) atomic64_set(v,i) 14#define local_set(l,i) atomic_long_set(&(l)->a, (i))
15#define local_inc(l) atomic_long_inc(&(l)->a)
16#define local_dec(l) atomic_long_dec(&(l)->a)
17#define local_add(i,l) atomic_long_add((i),(&(l)->a))
18#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
12 19
13#define local_inc(v) atomic64_inc(v) 20static __inline__ long local_add_return(long i, local_t * l)
14#define local_dec(v) atomic64_dec(v) 21{
15#define local_add(i, v) atomic64_add(i, v) 22 long temp, result;
16#define local_sub(i, v) atomic64_sub(i, v) 23 __asm__ __volatile__(
24 "1: ldq_l %0,%1\n"
25 " addq %0,%3,%2\n"
26 " addq %0,%3,%0\n"
27 " stq_c %0,%1\n"
28 " beq %0,2f\n"
29 ".subsection 2\n"
30 "2: br 1b\n"
31 ".previous"
32 :"=&r" (temp), "=m" (l->a.counter), "=&r" (result)
33 :"Ir" (i), "m" (l->a.counter) : "memory");
34 return result;
35}
17 36
18#define __local_inc(v) ((v)->counter++) 37static __inline__ long local_sub_return(long i, local_t * l)
19#define __local_dec(v) ((v)->counter++) 38{
20#define __local_add(i,v) ((v)->counter+=(i)) 39 long temp, result;
21#define __local_sub(i,v) ((v)->counter-=(i)) 40 __asm__ __volatile__(
41 "1: ldq_l %0,%1\n"
42 " subq %0,%3,%2\n"
43 " subq %0,%3,%0\n"
44 " stq_c %0,%1\n"
45 " beq %0,2f\n"
46 ".subsection 2\n"
47 "2: br 1b\n"
48 ".previous"
49 :"=&r" (temp), "=m" (l->a.counter), "=&r" (result)
50 :"Ir" (i), "m" (l->a.counter) : "memory");
51 return result;
52}
53
54#define local_cmpxchg(l, o, n) \
55 (cmpxchg_local(&((l)->a.counter), (o), (n)))
56#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
57
58/**
59 * local_add_unless - add unless the number is a given value
60 * @l: pointer of type local_t
61 * @a: the amount to add to l...
62 * @u: ...unless l is equal to u.
63 *
64 * Atomically adds @a to @l, so long as it was not @u.
65 * Returns non-zero if @l was not @u, and zero otherwise.
66 */
67#define local_add_unless(l, a, u) \
68({ \
69 long c, old; \
70 c = local_read(l); \
71 for (;;) { \
72 if (unlikely(c == (u))) \
73 break; \
74 old = local_cmpxchg((l), c, c + (a)); \
75 if (likely(old == c)) \
76 break; \
77 c = old; \
78 } \
79 c != (u); \
80})
81#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
82
83#define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
84
85#define local_dec_return(l) local_sub_return(1,(l))
86
87#define local_inc_return(l) local_add_return(1,(l))
88
89#define local_sub_and_test(i,l) (local_sub_return((i), (l)) == 0)
90
91#define local_inc_and_test(l) (local_add_return(1, (l)) == 0)
92
93#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
94
95/* Verify if faster than atomic ops */
96#define __local_inc(l) ((l)->a.counter++)
97#define __local_dec(l) ((l)->a.counter++)
98#define __local_add(i,l) ((l)->a.counter+=(i))
99#define __local_sub(i,l) ((l)->a.counter-=(i))
22 100
23/* Use these for per-cpu local_t variables: on some archs they are 101/* Use these for per-cpu local_t variables: on some archs they are
24 * much more efficient than these naive implementations. Note they take 102 * much more efficient than these naive implementations. Note they take
25 * a variable, not an address. 103 * a variable, not an address.
26 */ 104 */
27#define cpu_local_read(v) local_read(&__get_cpu_var(v)) 105#define cpu_local_read(l) local_read(&__get_cpu_var(l))
28#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) 106#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
29 107
30#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) 108#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
31#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) 109#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
32#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) 110#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
33#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) 111#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
34 112
35#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v)) 113#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
36#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v)) 114#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
37#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v)) 115#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
38#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v)) 116#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
39 117
40#endif /* _ALPHA_LOCAL_H */ 118#endif /* _ALPHA_LOCAL_H */
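
As a rough illustration of what the reworked local_t API is meant for, a per-CPU event counter might look like the sketch below (illustrative only; nr_events, count_event() and total_events() are made-up names, and the per-CPU helpers come from <linux/percpu.h>):

	/* Each CPU increments its own copy without locks or SMP-safe RMW ops;
	 * a reader sums all copies for the global total. */
	static DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);

	static void count_event(void)
	{
		cpu_local_inc(nr_events);		/* interrupt-safe on this CPU */
	}

	static long total_events(void)
	{
		long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += local_read(&per_cpu(nr_events, cpu));
		return sum;
	}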
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 49ac9bee7ced..616d20662ff3 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -345,10 +345,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
345#define io_remap_pfn_range(vma, start, pfn, size, prot) \ 345#define io_remap_pfn_range(vma, start, pfn, size, prot) \
346 remap_pfn_range(vma, start, pfn, size, prot) 346 remap_pfn_range(vma, start, pfn, size, prot)
347 347
348#define MK_IOSPACE_PFN(space, pfn) (pfn)
349#define GET_IOSPACE(pfn) 0
350#define GET_PFN(pfn) (pfn)
351
352#define pte_ERROR(e) \ 348#define pte_ERROR(e) \
353 printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) 349 printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
354#define pmd_ERROR(e) \ 350#define pmd_ERROR(e) \
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 03e9c0e5ed74..cf1021a97b2e 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -443,8 +443,110 @@ extern void __xchg_called_with_bad_pointer(void);
443 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ 443 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
444 }) 444 })
445 445
446#define tas(ptr) (xchg((ptr),1)) 446static inline unsigned long
447__xchg_u8_local(volatile char *m, unsigned long val)
448{
449 unsigned long ret, tmp, addr64;
450
451 __asm__ __volatile__(
452 " andnot %4,7,%3\n"
453 " insbl %1,%4,%1\n"
454 "1: ldq_l %2,0(%3)\n"
455 " extbl %2,%4,%0\n"
456 " mskbl %2,%4,%2\n"
457 " or %1,%2,%2\n"
458 " stq_c %2,0(%3)\n"
459 " beq %2,2f\n"
460 ".subsection 2\n"
461 "2: br 1b\n"
462 ".previous"
463 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
464 : "r" ((long)m), "1" (val) : "memory");
447 465
466 return ret;
467}
468
469static inline unsigned long
470__xchg_u16_local(volatile short *m, unsigned long val)
471{
472 unsigned long ret, tmp, addr64;
473
474 __asm__ __volatile__(
475 " andnot %4,7,%3\n"
476 " inswl %1,%4,%1\n"
477 "1: ldq_l %2,0(%3)\n"
478 " extwl %2,%4,%0\n"
479 " mskwl %2,%4,%2\n"
480 " or %1,%2,%2\n"
481 " stq_c %2,0(%3)\n"
482 " beq %2,2f\n"
483 ".subsection 2\n"
484 "2: br 1b\n"
485 ".previous"
486 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
487 : "r" ((long)m), "1" (val) : "memory");
488
489 return ret;
490}
491
492static inline unsigned long
493__xchg_u32_local(volatile int *m, unsigned long val)
494{
495 unsigned long dummy;
496
497 __asm__ __volatile__(
498 "1: ldl_l %0,%4\n"
499 " bis $31,%3,%1\n"
500 " stl_c %1,%2\n"
501 " beq %1,2f\n"
502 ".subsection 2\n"
503 "2: br 1b\n"
504 ".previous"
505 : "=&r" (val), "=&r" (dummy), "=m" (*m)
506 : "rI" (val), "m" (*m) : "memory");
507
508 return val;
509}
510
511static inline unsigned long
512__xchg_u64_local(volatile long *m, unsigned long val)
513{
514 unsigned long dummy;
515
516 __asm__ __volatile__(
517 "1: ldq_l %0,%4\n"
518 " bis $31,%3,%1\n"
519 " stq_c %1,%2\n"
520 " beq %1,2f\n"
521 ".subsection 2\n"
522 "2: br 1b\n"
523 ".previous"
524 : "=&r" (val), "=&r" (dummy), "=m" (*m)
525 : "rI" (val), "m" (*m) : "memory");
526
527 return val;
528}
529
530#define __xchg_local(ptr, x, size) \
531({ \
532 unsigned long __xchg__res; \
533 volatile void *__xchg__ptr = (ptr); \
534 switch (size) { \
535 case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \
536 case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \
537 case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \
538 case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \
539 default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
540 } \
541 __xchg__res; \
542})
543
544#define xchg_local(ptr,x) \
545 ({ \
546 __typeof__(*(ptr)) _x_ = (x); \
547 (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
548 sizeof(*(ptr))); \
549 })
448 550
449/* 551/*
450 * Atomic compare and exchange. Compare OLD with MEM, if identical, 552 * Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -596,6 +698,128 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
596 (unsigned long)_n_, sizeof(*(ptr))); \ 698 (unsigned long)_n_, sizeof(*(ptr))); \
597 }) 699 })
598 700
701static inline unsigned long
702__cmpxchg_u8_local(volatile char *m, long old, long new)
703{
704 unsigned long prev, tmp, cmp, addr64;
705
706 __asm__ __volatile__(
707 " andnot %5,7,%4\n"
708 " insbl %1,%5,%1\n"
709 "1: ldq_l %2,0(%4)\n"
710 " extbl %2,%5,%0\n"
711 " cmpeq %0,%6,%3\n"
712 " beq %3,2f\n"
713 " mskbl %2,%5,%2\n"
714 " or %1,%2,%2\n"
715 " stq_c %2,0(%4)\n"
716 " beq %2,3f\n"
717 "2:\n"
718 ".subsection 2\n"
719 "3: br 1b\n"
720 ".previous"
721 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
722 : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
723
724 return prev;
725}
726
727static inline unsigned long
728__cmpxchg_u16_local(volatile short *m, long old, long new)
729{
730 unsigned long prev, tmp, cmp, addr64;
731
732 __asm__ __volatile__(
733 " andnot %5,7,%4\n"
734 " inswl %1,%5,%1\n"
735 "1: ldq_l %2,0(%4)\n"
736 " extwl %2,%5,%0\n"
737 " cmpeq %0,%6,%3\n"
738 " beq %3,2f\n"
739 " mskwl %2,%5,%2\n"
740 " or %1,%2,%2\n"
741 " stq_c %2,0(%4)\n"
742 " beq %2,3f\n"
743 "2:\n"
744 ".subsection 2\n"
745 "3: br 1b\n"
746 ".previous"
747 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
748 : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
749
750 return prev;
751}
752
753static inline unsigned long
754__cmpxchg_u32_local(volatile int *m, int old, int new)
755{
756 unsigned long prev, cmp;
757
758 __asm__ __volatile__(
759 "1: ldl_l %0,%5\n"
760 " cmpeq %0,%3,%1\n"
761 " beq %1,2f\n"
762 " mov %4,%1\n"
763 " stl_c %1,%2\n"
764 " beq %1,3f\n"
765 "2:\n"
766 ".subsection 2\n"
767 "3: br 1b\n"
768 ".previous"
769 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
770 : "r"((long) old), "r"(new), "m"(*m) : "memory");
771
772 return prev;
773}
774
775static inline unsigned long
776__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new)
777{
778 unsigned long prev, cmp;
779
780 __asm__ __volatile__(
781 "1: ldq_l %0,%5\n"
782 " cmpeq %0,%3,%1\n"
783 " beq %1,2f\n"
784 " mov %4,%1\n"
785 " stq_c %1,%2\n"
786 " beq %1,3f\n"
787 "2:\n"
788 ".subsection 2\n"
789 "3: br 1b\n"
790 ".previous"
791 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
792 : "r"((long) old), "r"(new), "m"(*m) : "memory");
793
794 return prev;
795}
796
797static __always_inline unsigned long
798__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
799 int size)
800{
801 switch (size) {
802 case 1:
803 return __cmpxchg_u8_local(ptr, old, new);
804 case 2:
805 return __cmpxchg_u16_local(ptr, old, new);
806 case 4:
807 return __cmpxchg_u32_local(ptr, old, new);
808 case 8:
809 return __cmpxchg_u64_local(ptr, old, new);
810 }
811 __cmpxchg_called_with_bad_pointer();
812 return old;
813}
814
815#define cmpxchg_local(ptr,o,n) \
816 ({ \
817 __typeof__(*(ptr)) _o_ = (o); \
818 __typeof__(*(ptr)) _n_ = (n); \
819 (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
820 (unsigned long)_n_, sizeof(*(ptr))); \
821 })
822
599#endif /* __ASSEMBLY__ */ 823#endif /* __ASSEMBLY__ */
600 824
601#define arch_align_stack(x) (x) 825#define arch_align_stack(x) (x)
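
To show where the new cmpxchg_local()/xchg_local() variants fit, here is a sketch of updating data that only the local CPU ever touches (illustrative only; hi_water and note_usage() are made-up names, and the caller is assumed to run with preemption disabled):

	/* Track a per-CPU high-water mark.  cmpxchg_local() can be cheaper than
	 * cmpxchg() because it does not need to be safe against other CPUs. */
	static DEFINE_PER_CPU(unsigned long, hi_water);

	static void note_usage(unsigned long now)
	{
		unsigned long *p = &__get_cpu_var(hi_water);
		unsigned long old;

		do {
			old = *p;
			if (now <= old)
				return;			/* mark already high enough */
		} while (cmpxchg_local(p, old, now) != old);
	}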
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index f266c2795124..3b59f94b5a3d 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -12,6 +12,7 @@
12#define __ASM_ARM_ATOMIC_H 12#define __ASM_ARM_ATOMIC_H
13 13
14#include <linux/compiler.h> 14#include <linux/compiler.h>
15#include <asm/system.h>
15 16
16typedef struct { volatile int counter; } atomic_t; 17typedef struct { volatile int counter; } atomic_t;
17 18
diff --git a/include/asm-arm/kdebug.h b/include/asm-arm/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-arm/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-arm/kexec.h b/include/asm-arm/kexec.h
index 8c1c6162a80c..b5b030ef633d 100644
--- a/include/asm-arm/kexec.h
+++ b/include/asm-arm/kexec.h
@@ -16,8 +16,6 @@
16 16
17#ifndef __ASSEMBLY__ 17#ifndef __ASSEMBLY__
18 18
19#define MAX_NOTE_BYTES 1024
20
21struct kimage; 19struct kimage;
22/* Provide a dummy definition to avoid build failures. */ 20/* Provide a dummy definition to avoid build failures. */
23static inline void crash_setup_regs(struct pt_regs *newregs, 21static inline void crash_setup_regs(struct pt_regs *newregs,
diff --git a/include/asm-arm/pgtable-nommu.h b/include/asm-arm/pgtable-nommu.h
index 7b1c9acdf79a..0c8be19fd66b 100644
--- a/include/asm-arm/pgtable-nommu.h
+++ b/include/asm-arm/pgtable-nommu.h
@@ -83,10 +83,6 @@ extern int is_in_rom(unsigned long);
83#define io_remap_page_range remap_page_range 83#define io_remap_page_range remap_page_range
84#define io_remap_pfn_range remap_pfn_range 84#define io_remap_pfn_range remap_pfn_range
85 85
86#define MK_IOSPACE_PFN(space, pfn) (pfn)
87#define GET_IOSPACE(pfn) 0
88#define GET_PFN(pfn) (pfn)
89
90 86
91/* 87/*
92 * All 32bit addresses are effectively valid for vmalloc... 88 * All 32bit addresses are effectively valid for vmalloc...
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index 7b2bafce21a2..21dec9f258d8 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -395,10 +395,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
395#define io_remap_pfn_range(vma,from,pfn,size,prot) \ 395#define io_remap_pfn_range(vma,from,pfn,size,prot) \
396 remap_pfn_range(vma, from, pfn, size, prot) 396 remap_pfn_range(vma, from, pfn, size, prot)
397 397
398#define MK_IOSPACE_PFN(space, pfn) (pfn)
399#define GET_IOSPACE(pfn) 0
400#define GET_PFN(pfn) (pfn)
401
402#define pgtable_cache_init() do { } while (0) 398#define pgtable_cache_init() do { } while (0)
403 399
404#endif /* !__ASSEMBLY__ */ 400#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 63b3080bdac4..f2da3b6e3a83 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -93,7 +93,7 @@ void die(const char *msg, struct pt_regs *regs, int err)
93 __attribute__((noreturn)); 93 __attribute__((noreturn));
94 94
95struct siginfo; 95struct siginfo;
96void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, 96void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
97 unsigned long err, unsigned long trap); 97 unsigned long err, unsigned long trap);
98 98
99void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, 99void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
@@ -103,8 +103,6 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
103#define xchg(ptr,x) \ 103#define xchg(ptr,x) \
104 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 104 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
105 105
106#define tas(ptr) (xchg((ptr),1))
107
108extern asmlinkage void __backtrace(void); 106extern asmlinkage void __backtrace(void);
109extern asmlinkage void c_backtrace(unsigned long fp, int pmode); 107extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
110 108
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 97e944fe1cff..d6dd42374cf3 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -20,7 +20,6 @@
20#ifndef __ASM_ARM_ATOMIC_H 20#ifndef __ASM_ARM_ATOMIC_H
21#define __ASM_ARM_ATOMIC_H 21#define __ASM_ARM_ATOMIC_H
22 22
23
24#ifdef CONFIG_SMP 23#ifdef CONFIG_SMP
25#error SMP is NOT supported 24#error SMP is NOT supported
26#endif 25#endif
diff --git a/include/asm-arm26/kdebug.h b/include/asm-arm26/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-arm26/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-arm26/pgtable.h b/include/asm-arm26/pgtable.h
index 63a8881fae13..2b20e9f08857 100644
--- a/include/asm-arm26/pgtable.h
+++ b/include/asm-arm26/pgtable.h
@@ -297,10 +297,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
297#define io_remap_pfn_range(vma,from,pfn,size,prot) \ 297#define io_remap_pfn_range(vma,from,pfn,size,prot) \
298 remap_pfn_range(vma, from, pfn, size, prot) 298 remap_pfn_range(vma, from, pfn, size, prot)
299 299
300#define MK_IOSPACE_PFN(space, pfn) (pfn)
301#define GET_IOSPACE(pfn) 0
302#define GET_PFN(pfn) (pfn)
303
304#endif /* !__ASSEMBLY__ */ 300#endif /* !__ASSEMBLY__ */
305 301
306#endif /* _ASMARM_PGTABLE_H */ 302#endif /* _ASMARM_PGTABLE_H */
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index 00ae32aa1dba..4703593b3bb5 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -52,8 +52,6 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
52#define xchg(ptr,x) \ 52#define xchg(ptr,x) \
53 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 53 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
54 54
55#define tas(ptr) (xchg((ptr),1))
56
57extern asmlinkage void __backtrace(void); 55extern asmlinkage void __backtrace(void);
58 56
59#define set_cr(x) \ 57#define set_cr(x) \
diff --git a/include/asm-avr32/kdebug.h b/include/asm-avr32/kdebug.h
index f583b643ffb2..de419278fc39 100644
--- a/include/asm-avr32/kdebug.h
+++ b/include/asm-avr32/kdebug.h
@@ -3,19 +3,6 @@
3 3
4#include <linux/notifier.h> 4#include <linux/notifier.h>
5 5
6struct pt_regs;
7
8struct die_args {
9 struct pt_regs *regs;
10 int trapnr;
11};
12
13int register_die_notifier(struct notifier_block *nb);
14int unregister_die_notifier(struct notifier_block *nb);
15int register_page_fault_notifier(struct notifier_block *nb);
16int unregister_page_fault_notifier(struct notifier_block *nb);
17extern struct atomic_notifier_head avr32_die_chain;
18
19/* Grossly misnamed. */ 6/* Grossly misnamed. */
20enum die_val { 7enum die_val {
21 DIE_FAULT, 8 DIE_FAULT,
@@ -24,15 +11,7 @@ enum die_val {
24 DIE_PAGE_FAULT, 11 DIE_PAGE_FAULT,
25}; 12};
26 13
27static inline int notify_die(enum die_val val, struct pt_regs *regs, 14int register_page_fault_notifier(struct notifier_block *nb);
28 int trap, int sig) 15int unregister_page_fault_notifier(struct notifier_block *nb);
29{
30 struct die_args args = {
31 .regs = regs,
32 .trapnr = trap,
33 };
34
35 return atomic_notifier_call_chain(&avr32_die_chain, val, &args);
36}
37 16
38#endif /* __ASM_AVR32_KDEBUG_H */ 17#endif /* __ASM_AVR32_KDEBUG_H */
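
For background, the die-notifier interface that these per-arch kdebug.h changes converge on (via linux/kdebug.h) is normally consumed as in the sketch below (illustrative only; my_die_handler and my_die_nb are made-up names):

	/* A handler on the die notifier chain; it receives a struct die_args
	 * describing the event and must return a NOTIFY_* value. */
	#include <linux/kdebug.h>
	#include <linux/notifier.h>

	static int my_die_handler(struct notifier_block *nb, unsigned long val,
				  void *data)
	{
		struct die_args *args = data;

		printk(KERN_DEBUG "die event %lu, trap %d\n", val, args->trapnr);
		return NOTIFY_DONE;	/* let the default handling continue */
	}

	static struct notifier_block my_die_nb = {
		.notifier_call = my_die_handler,
	};

	/* register_die_notifier(&my_die_nb) from module init,
	 * unregister_die_notifier(&my_die_nb) on exit. */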
diff --git a/include/asm-avr32/pgtable.h b/include/asm-avr32/pgtable.h
index 6b8ca9db2bd5..f6cc2b0f75c3 100644
--- a/include/asm-avr32/pgtable.h
+++ b/include/asm-avr32/pgtable.h
@@ -394,10 +394,6 @@ typedef pte_t *pte_addr_t;
394#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 394#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
395 remap_pfn_range(vma, vaddr, pfn, size, prot) 395 remap_pfn_range(vma, vaddr, pfn, size, prot)
396 396
397#define MK_IOSPACE_PFN(space, pfn) (pfn)
398#define GET_IOSPACE(pfn) 0
399#define GET_PFN(pfn) (pfn)
400
401/* No page table caches to initialize (?) */ 397/* No page table caches to initialize (?) */
402#define pgtable_cache_init() do { } while(0) 398#define pgtable_cache_init() do { } while(0)
403 399
diff --git a/include/asm-blackfin/system.h b/include/asm-blackfin/system.h
index 758bac7c1e74..b5bf6e7cb5e8 100644
--- a/include/asm-blackfin/system.h
+++ b/include/asm-blackfin/system.h
@@ -138,7 +138,6 @@ extern unsigned long irq_flags;
138#endif 138#endif
139 139
140#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 140#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
141#define tas(ptr) ((void)xchg((ptr),1))
142 141
143struct __xchg_dummy { 142struct __xchg_dummy {
144 unsigned long a[100]; 143 unsigned long a[100];
diff --git a/include/asm-cris/kdebug.h b/include/asm-cris/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-cris/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 066386ac238e..d425d8d0ad77 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -16,6 +16,7 @@
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18#include <asm/spr-regs.h> 18#include <asm/spr-regs.h>
19#include <asm/system.h>
19 20
20#ifdef CONFIG_SMP 21#ifdef CONFIG_SMP
21#error not SMP safe 22#error not SMP safe
@@ -258,85 +259,23 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
258 259
259#define tas(ptr) (xchg((ptr), 1)) 260#define tas(ptr) (xchg((ptr), 1))
260 261
261/*****************************************************************************/
262/*
263 * compare and conditionally exchange value with memory
264 * - if (*ptr == test) then orig = *ptr; *ptr = test;
265 * - if (*ptr != test) then orig = *ptr;
266 */
267#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
268
269#define cmpxchg(ptr, test, new) \
270({ \
271 __typeof__(ptr) __xg_ptr = (ptr); \
272 __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
273 __typeof__(*(ptr)) __xg_test = (test); \
274 __typeof__(*(ptr)) __xg_new = (new); \
275 \
276 switch (sizeof(__xg_orig)) { \
277 case 4: \
278 asm volatile( \
279 "0: \n" \
280 " orcc gr0,gr0,gr0,icc3 \n" \
281 " ckeq icc3,cc7 \n" \
282 " ld.p %M0,%1 \n" \
283 " orcr cc7,cc7,cc3 \n" \
284 " sub%I4cc %1,%4,%2,icc0 \n" \
285 " bne icc0,#0,1f \n" \
286 " cst.p %3,%M0 ,cc3,#1 \n" \
287 " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
288 " beq icc3,#0,0b \n" \
289 "1: \n" \
290 : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
291 : "r"(__xg_new), "NPr"(__xg_test) \
292 : "memory", "cc7", "cc3", "icc3", "icc0" \
293 ); \
294 break; \
295 \
296 default: \
297 __xg_orig = 0; \
298 asm volatile("break"); \
299 break; \
300 } \
301 \
302 __xg_orig; \
303})
304
305#else
306
307extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
308
309#define cmpxchg(ptr, test, new) \
310({ \
311 __typeof__(ptr) __xg_ptr = (ptr); \
312 __typeof__(*(ptr)) __xg_orig; \
313 __typeof__(*(ptr)) __xg_test = (test); \
314 __typeof__(*(ptr)) __xg_new = (new); \
315 \
316 switch (sizeof(__xg_orig)) { \
317 case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
318 default: \
319 __xg_orig = 0; \
320 asm volatile("break"); \
321 break; \
322 } \
323 \
324 __xg_orig; \
325})
326
327#endif
328
329#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) 262#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
330#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 263#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
331 264
332#define atomic_add_unless(v, a, u) \ 265static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
333({ \ 266{
334 int c, old; \ 267 int c, old;
335 c = atomic_read(v); \ 268 c = atomic_read(v);
336 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 269 for (;;) {
337 c = old; \ 270 if (unlikely(c == (u)))
338 c != (u); \ 271 break;
339}) 272 old = atomic_cmpxchg((v), c, c + (a));
273 if (likely(old == c))
274 break;
275 c = old;
276 }
277 return c != (u);
278}
340 279
341#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 280#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
342 281
diff --git a/include/asm-frv/kdebug.h b/include/asm-frv/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-frv/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 8a05aa168616..2687c7715120 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -509,10 +509,6 @@ static inline int pte_file(pte_t pte)
509#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 509#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
510 remap_pfn_range(vma, vaddr, pfn, size, prot) 510 remap_pfn_range(vma, vaddr, pfn, size, prot)
511 511
512#define MK_IOSPACE_PFN(space, pfn) (pfn)
513#define GET_IOSPACE(pfn) 0
514#define GET_PFN(pfn) (pfn)
515
516#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 512#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
517#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY 513#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
518#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 514#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
index 907c5c3643cc..09586528e007 100644
--- a/include/asm-frv/semaphore.h
+++ b/include/asm-frv/semaphore.h
@@ -20,8 +20,6 @@
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/rwsem.h> 21#include <linux/rwsem.h>
22 22
23#define SEMAPHORE_DEBUG 0
24
25/* 23/*
26 * the semaphore definition 24 * the semaphore definition
27 * - if counter is >0 then there are tokens available on the semaphore for down to collect 25 * - if counter is >0 then there are tokens available on the semaphore for down to collect
@@ -32,12 +30,12 @@ struct semaphore {
32 unsigned counter; 30 unsigned counter;
33 spinlock_t wait_lock; 31 spinlock_t wait_lock;
34 struct list_head wait_list; 32 struct list_head wait_list;
35#if SEMAPHORE_DEBUG 33#ifdef CONFIG_DEBUG_SEMAPHORE
36 unsigned __magic; 34 unsigned __magic;
37#endif 35#endif
38}; 36};
39 37
40#if SEMAPHORE_DEBUG 38#ifdef CONFIG_DEBUG_SEMAPHORE
41# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic 39# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
42#else 40#else
43# define __SEM_DEBUG_INIT(name) 41# define __SEM_DEBUG_INIT(name)
@@ -76,7 +74,7 @@ static inline void down(struct semaphore *sem)
76{ 74{
77 unsigned long flags; 75 unsigned long flags;
78 76
79#if SEMAPHORE_DEBUG 77#ifdef CONFIG_DEBUG_SEMAPHORE
80 CHECK_MAGIC(sem->__magic); 78 CHECK_MAGIC(sem->__magic);
81#endif 79#endif
82 80
@@ -95,7 +93,7 @@ static inline int down_interruptible(struct semaphore *sem)
95 unsigned long flags; 93 unsigned long flags;
96 int ret = 0; 94 int ret = 0;
97 95
98#if SEMAPHORE_DEBUG 96#ifdef CONFIG_DEBUG_SEMAPHORE
99 CHECK_MAGIC(sem->__magic); 97 CHECK_MAGIC(sem->__magic);
100#endif 98#endif
101 99
@@ -119,7 +117,7 @@ static inline int down_trylock(struct semaphore *sem)
119 unsigned long flags; 117 unsigned long flags;
120 int success = 0; 118 int success = 0;
121 119
122#if SEMAPHORE_DEBUG 120#ifdef CONFIG_DEBUG_SEMAPHORE
123 CHECK_MAGIC(sem->__magic); 121 CHECK_MAGIC(sem->__magic);
124#endif 122#endif
125 123
@@ -136,7 +134,7 @@ static inline void up(struct semaphore *sem)
136{ 134{
137 unsigned long flags; 135 unsigned long flags;
138 136
139#if SEMAPHORE_DEBUG 137#ifdef CONFIG_DEBUG_SEMAPHORE
140 CHECK_MAGIC(sem->__magic); 138 CHECK_MAGIC(sem->__magic);
141#endif 139#endif
142 140
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index 1166899317d7..be303b3eef40 100644
--- a/include/asm-frv/system.h
+++ b/include/asm-frv/system.h
@@ -13,7 +13,6 @@
13#define _ASM_SYSTEM_H 13#define _ASM_SYSTEM_H
14 14
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <asm/atomic.h>
17 16
18struct thread_struct; 17struct thread_struct;
19 18
@@ -197,4 +196,73 @@ extern void free_initmem(void);
197 196
198#define arch_align_stack(x) (x) 197#define arch_align_stack(x) (x)
199 198
199/*****************************************************************************/
200/*
201 * compare and conditionally exchange value with memory
202 * - if (*ptr == test) then orig = *ptr; *ptr = test;
203 * - if (*ptr != test) then orig = *ptr;
204 */
205#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
206
207#define cmpxchg(ptr, test, new) \
208({ \
209 __typeof__(ptr) __xg_ptr = (ptr); \
210 __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
211 __typeof__(*(ptr)) __xg_test = (test); \
212 __typeof__(*(ptr)) __xg_new = (new); \
213 \
214 switch (sizeof(__xg_orig)) { \
215 case 4: \
216 asm volatile( \
217 "0: \n" \
218 " orcc gr0,gr0,gr0,icc3 \n" \
219 " ckeq icc3,cc7 \n" \
220 " ld.p %M0,%1 \n" \
221 " orcr cc7,cc7,cc3 \n" \
222 " sub%I4cc %1,%4,%2,icc0 \n" \
223 " bne icc0,#0,1f \n" \
224 " cst.p %3,%M0 ,cc3,#1 \n" \
225 " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
226 " beq icc3,#0,0b \n" \
227 "1: \n" \
228 : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
229 : "r"(__xg_new), "NPr"(__xg_test) \
230 : "memory", "cc7", "cc3", "icc3", "icc0" \
231 ); \
232 break; \
233 \
234 default: \
235 __xg_orig = 0; \
236 asm volatile("break"); \
237 break; \
238 } \
239 \
240 __xg_orig; \
241})
242
243#else
244
245extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
246
247#define cmpxchg(ptr, test, new) \
248({ \
249 __typeof__(ptr) __xg_ptr = (ptr); \
250 __typeof__(*(ptr)) __xg_orig; \
251 __typeof__(*(ptr)) __xg_test = (test); \
252 __typeof__(*(ptr)) __xg_new = (new); \
253 \
254 switch (sizeof(__xg_orig)) { \
255 case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
256 default: \
257 __xg_orig = 0; \
258 asm volatile("break"); \
259 break; \
260 } \
261 \
262 __xg_orig; \
263})
264
265#endif
266
267
200#endif /* _ASM_SYSTEM_H */ 268#endif /* _ASM_SYSTEM_H */
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index b7e4a0467cb1..85fd0aa27a8c 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -66,6 +66,76 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
66 atomic64_sub(i, v); 66 atomic64_sub(i, v);
67} 67}
68 68
69static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
70{
71 atomic64_t *v = (atomic64_t *)l;
72
73 return atomic64_sub_and_test(i, v);
74}
75
76static inline int atomic_long_dec_and_test(atomic_long_t *l)
77{
78 atomic64_t *v = (atomic64_t *)l;
79
80 return atomic64_dec_and_test(v);
81}
82
83static inline int atomic_long_inc_and_test(atomic_long_t *l)
84{
85 atomic64_t *v = (atomic64_t *)l;
86
87 return atomic64_inc_and_test(v);
88}
89
90static inline int atomic_long_add_negative(long i, atomic_long_t *l)
91{
92 atomic64_t *v = (atomic64_t *)l;
93
94 return atomic64_add_negative(i, v);
95}
96
97static inline long atomic_long_add_return(long i, atomic_long_t *l)
98{
99 atomic64_t *v = (atomic64_t *)l;
100
101 return (long)atomic64_add_return(i, v);
102}
103
104static inline long atomic_long_sub_return(long i, atomic_long_t *l)
105{
106 atomic64_t *v = (atomic64_t *)l;
107
108 return (long)atomic64_sub_return(i, v);
109}
110
111static inline long atomic_long_inc_return(atomic_long_t *l)
112{
113 atomic64_t *v = (atomic64_t *)l;
114
115 return (long)atomic64_inc_return(v);
116}
117
118static inline long atomic_long_dec_return(atomic_long_t *l)
119{
120 atomic64_t *v = (atomic64_t *)l;
121
122 return (long)atomic64_dec_return(v);
123}
124
125static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
126{
127 atomic64_t *v = (atomic64_t *)l;
128
129 return (long)atomic64_add_unless(v, a, u);
130}
131
132#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
133
 134#define atomic_long_cmpxchg(l, old, new) \
 135 (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
 136#define atomic_long_xchg(l, new) \
 137 (atomic64_xchg((atomic64_t *)(l), (new)))
138
69#else /* BITS_PER_LONG == 64 */ 139#else /* BITS_PER_LONG == 64 */
70 140
71typedef atomic_t atomic_long_t; 141typedef atomic_t atomic_long_t;
@@ -113,6 +183,76 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
113 atomic_sub(i, v); 183 atomic_sub(i, v);
114} 184}
115 185
186static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
187{
188 atomic_t *v = (atomic_t *)l;
189
190 return atomic_sub_and_test(i, v);
191}
192
193static inline int atomic_long_dec_and_test(atomic_long_t *l)
194{
195 atomic_t *v = (atomic_t *)l;
196
197 return atomic_dec_and_test(v);
198}
199
200static inline int atomic_long_inc_and_test(atomic_long_t *l)
201{
202 atomic_t *v = (atomic_t *)l;
203
204 return atomic_inc_and_test(v);
205}
206
207static inline int atomic_long_add_negative(long i, atomic_long_t *l)
208{
209 atomic_t *v = (atomic_t *)l;
210
211 return atomic_add_negative(i, v);
212}
213
214static inline long atomic_long_add_return(long i, atomic_long_t *l)
215{
216 atomic_t *v = (atomic_t *)l;
217
218 return (long)atomic_add_return(i, v);
219}
220
221static inline long atomic_long_sub_return(long i, atomic_long_t *l)
222{
223 atomic_t *v = (atomic_t *)l;
224
225 return (long)atomic_sub_return(i, v);
226}
227
228static inline long atomic_long_inc_return(atomic_long_t *l)
229{
230 atomic_t *v = (atomic_t *)l;
231
232 return (long)atomic_inc_return(v);
233}
234
235static inline long atomic_long_dec_return(atomic_long_t *l)
236{
237 atomic_t *v = (atomic_t *)l;
238
239 return (long)atomic_dec_return(v);
240}
241
242static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
243{
244 atomic_t *v = (atomic_t *)l;
245
246 return (long)atomic_add_unless(v, a, u);
247}
248
249#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
250
251#define atomic_long_cmpxchg(l, old, new) \
252 (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
 253#define atomic_long_xchg(l, new) \
 254 (atomic_xchg((atomic_t *)(l), (new)))
255
116#endif /* BITS_PER_LONG == 64 */ 256#endif /* BITS_PER_LONG == 64 */
117 257
118#endif /* _ASM_GENERIC_ATOMIC_H */ 258#endif /* _ASM_GENERIC_ATOMIC_H */
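
The point of the atomic_long_* wrappers extended above is that callers can be written once and transparently get atomic_t on 32-bit kernels and atomic64_t on 64-bit kernels. A minimal sketch (illustrative only; bytes_written and the two helpers are made-up names):

	/* A native-word-size statistics counter. */
	static atomic_long_t bytes_written = ATOMIC_LONG_INIT(0);

	static void account_write(long n)
	{
		atomic_long_add(n, &bytes_written);
	}

	static long bytes_written_so_far(void)
	{
		return atomic_long_read(&bytes_written);
	}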
diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h
new file mode 100644
index 000000000000..2b799c90b2d4
--- /dev/null
+++ b/include/asm-generic/kdebug.h
@@ -0,0 +1,8 @@
1#ifndef _ASM_GENERIC_KDEBUG_H
2#define _ASM_GENERIC_KDEBUG_H
3
4enum die_val {
5 DIE_UNUSED,
6};
7
8#endif /* _ASM_GENERIC_KDEBUG_H */
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index ab469297272c..33d7d04e4119 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -33,6 +33,19 @@ typedef struct
33#define local_add(i,l) atomic_long_add((i),(&(l)->a)) 33#define local_add(i,l) atomic_long_add((i),(&(l)->a))
34#define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) 34#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
35 35
36#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
37#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
38#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
39#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
40#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
41#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
42#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
43
44#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
45#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
46#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
47#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
48
36/* Non-atomic variants, ie. preemption disabled and won't be touched 49/* Non-atomic variants, ie. preemption disabled and won't be touched
37 * in interrupt, etc. Some archs can optimize this case well. */ 50 * in interrupt, etc. Some archs can optimize this case well. */
38#define __local_inc(l) local_set((l), local_read(l) + 1) 51#define __local_inc(l) local_set((l), local_read(l) + 1)
@@ -44,19 +57,19 @@ typedef struct
44 * much more efficient than these naive implementations. Note they take 57 * much more efficient than these naive implementations. Note they take
45 * a variable (eg. mystruct.foo), not an address. 58 * a variable (eg. mystruct.foo), not an address.
46 */ 59 */
47#define cpu_local_read(v) local_read(&__get_cpu_var(v)) 60#define cpu_local_read(l) local_read(&__get_cpu_var(l))
48#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) 61#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
49#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) 62#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
50#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) 63#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
51#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) 64#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
52#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) 65#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
53 66
54/* Non-atomic increments, ie. preemption disabled and won't be touched 67/* Non-atomic increments, ie. preemption disabled and won't be touched
55 * in interrupt, etc. Some archs can optimize this case well. 68 * in interrupt, etc. Some archs can optimize this case well.
56 */ 69 */
57#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v)) 70#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
58#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v)) 71#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
59#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v)) 72#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
60#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v)) 73#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
61 74
62#endif /* _ASM_GENERIC_LOCAL_H */ 75#endif /* _ASM_GENERIC_LOCAL_H */
diff --git a/include/asm-h8300/kdebug.h b/include/asm-h8300/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-h8300/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-h8300/pgtable.h b/include/asm-h8300/pgtable.h
index ddd07f485dd8..a09230a08e02 100644
--- a/include/asm-h8300/pgtable.h
+++ b/include/asm-h8300/pgtable.h
@@ -55,10 +55,6 @@ extern int is_in_rom(unsigned long);
55#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 55#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
56 remap_pfn_range(vma, vaddr, pfn, size, prot) 56 remap_pfn_range(vma, vaddr, pfn, size, prot)
57 57
58#define MK_IOSPACE_PFN(space, pfn) (pfn)
59#define GET_IOSPACE(pfn) 0
60#define GET_PFN(pfn) (pfn)
61
62/* 58/*
63 * All 32bit addresses are effectively valid for vmalloc... 59 * All 32bit addresses are effectively valid for vmalloc...
64 * Sort of meaningless for non-VM targets. 60 * Sort of meaningless for non-VM targets.
diff --git a/include/asm-h8300/system.h b/include/asm-h8300/system.h
index 5084a9d42922..7807018f8500 100644
--- a/include/asm-h8300/system.h
+++ b/include/asm-h8300/system.h
@@ -98,7 +98,6 @@ asmlinkage void resume(void);
98#endif 98#endif
99 99
100#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 100#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
101#define tas(ptr) (xchg((ptr),1))
102 101
103struct __xchg_dummy { unsigned long a[100]; }; 102struct __xchg_dummy { unsigned long a[100]; };
104#define __xg(x) ((volatile struct __xchg_dummy *)(x)) 103#define __xg(x) ((volatile struct __xchg_dummy *)(x))
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 4dd272331361..0baa2f89463c 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <asm/processor.h> 5#include <asm/processor.h>
6#include <asm/cmpxchg.h>
6 7
7/* 8/*
8 * Atomic operations that C can't guarantee us. Useful for 9 * Atomic operations that C can't guarantee us. Useful for
@@ -51,7 +52,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
51} 52}
52 53
53/** 54/**
54 * atomic_sub - subtract the atomic variable 55 * atomic_sub - subtract integer from atomic variable
55 * @i: integer value to subtract 56 * @i: integer value to subtract
56 * @v: pointer of type atomic_t 57 * @v: pointer of type atomic_t
57 * 58 *
@@ -170,7 +171,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
170} 171}
171 172
172/** 173/**
173 * atomic_add_return - add and return 174 * atomic_add_return - add integer and return
174 * @v: pointer of type atomic_t 175 * @v: pointer of type atomic_t
175 * @i: integer value to add 176 * @i: integer value to add
176 * 177 *
@@ -202,13 +203,20 @@ no_xadd: /* Legacy 386 processor */
202#endif 203#endif
203} 204}
204 205
206/**
207 * atomic_sub_return - subtract integer and return
208 * @v: pointer of type atomic_t
209 * @i: integer value to subtract
210 *
211 * Atomically subtracts @i from @v and returns @v - @i
212 */
205static __inline__ int atomic_sub_return(int i, atomic_t *v) 213static __inline__ int atomic_sub_return(int i, atomic_t *v)
206{ 214{
207 return atomic_add_return(-i,v); 215 return atomic_add_return(-i,v);
208} 216}
209 217
210#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) 218#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
211#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 219#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
212 220
213/** 221/**
214 * atomic_add_unless - add unless the number is already a given value 222 * atomic_add_unless - add unless the number is already a given value
@@ -219,20 +227,21 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
219 * Atomically adds @a to @v, so long as @v was not already @u. 227 * Atomically adds @a to @v, so long as @v was not already @u.
220 * Returns non-zero if @v was not @u, and zero otherwise. 228 * Returns non-zero if @v was not @u, and zero otherwise.
221 */ 229 */
222#define atomic_add_unless(v, a, u) \ 230static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
223({ \ 231{
224 int c, old; \ 232 int c, old;
225 c = atomic_read(v); \ 233 c = atomic_read(v);
226 for (;;) { \ 234 for (;;) {
227 if (unlikely(c == (u))) \ 235 if (unlikely(c == (u)))
228 break; \ 236 break;
229 old = atomic_cmpxchg((v), c, c + (a)); \ 237 old = atomic_cmpxchg((v), c, c + (a));
230 if (likely(old == c)) \ 238 if (likely(old == c))
231 break; \ 239 break;
232 c = old; \ 240 c = old;
233 } \ 241 }
234 c != (u); \ 242 return c != (u);
235}) 243}
244
236#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 245#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
237 246
238#define atomic_inc_return(v) (atomic_add_return(1,v)) 247#define atomic_inc_return(v) (atomic_add_return(1,v))
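
The cmpxchg()-based loop that atomic_add_unless() now uses is the same pattern callers apply to plain words; a sketch of the canonical retry loop follows (illustrative only; shared_max and record_max() are made-up names, and a CPU with cmpxchg support is assumed):

	/* Raise a shared maximum without a lock: reread, bail out if there is
	 * nothing to do, retry if another CPU changed the value between the
	 * read and the cmpxchg. */
	static unsigned long shared_max;

	static void record_max(unsigned long val)
	{
		unsigned long old;

		do {
			old = shared_max;
			if (val <= old)
				return;			/* current maximum already larger */
		} while (cmpxchg(&shared_max, old, val) != old);
	}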
diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h
new file mode 100644
index 000000000000..7adcef0cd53b
--- /dev/null
+++ b/include/asm-i386/cmpxchg.h
@@ -0,0 +1,293 @@
1#ifndef __ASM_CMPXCHG_H
2#define __ASM_CMPXCHG_H
3
4#include <linux/bitops.h> /* for LOCK_PREFIX */
5
6#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
7
8struct __xchg_dummy { unsigned long a[100]; };
9#define __xg(x) ((struct __xchg_dummy *)(x))
10
11
12#ifdef CONFIG_X86_CMPXCHG64
13
14/*
15 * The semantics of XCHGCMP8B are a bit strange, this is why
16 * there is a loop and the loading of %%eax and %%edx has to
17 * be inside. This inlines well in most cases, the cached
18 * cost is around ~38 cycles. (in the future we might want
19 * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
20 * might have an implicit FPU-save as a cost, so it's not
21 * clear which path to go.)
22 *
23 * cmpxchg8b must be used with the lock prefix here to allow
24 * the instruction to be executed atomically, see page 3-102
25 * of the instruction set reference 24319102.pdf. We need
26 * the reader side to see the coherent 64bit value.
27 */
28static inline void __set_64bit (unsigned long long * ptr,
29 unsigned int low, unsigned int high)
30{
31 __asm__ __volatile__ (
32 "\n1:\t"
33 "movl (%0), %%eax\n\t"
34 "movl 4(%0), %%edx\n\t"
35 "lock cmpxchg8b (%0)\n\t"
36 "jnz 1b"
37 : /* no outputs */
38 : "D"(ptr),
39 "b"(low),
40 "c"(high)
41 : "ax","dx","memory");
42}
43
44static inline void __set_64bit_constant (unsigned long long *ptr,
45 unsigned long long value)
46{
47 __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
48}
49#define ll_low(x) *(((unsigned int*)&(x))+0)
50#define ll_high(x) *(((unsigned int*)&(x))+1)
51
52static inline void __set_64bit_var (unsigned long long *ptr,
53 unsigned long long value)
54{
55 __set_64bit(ptr,ll_low(value), ll_high(value));
56}
57
58#define set_64bit(ptr,value) \
59(__builtin_constant_p(value) ? \
60 __set_64bit_constant(ptr, value) : \
61 __set_64bit_var(ptr, value) )
62
63#define _set_64bit(ptr,value) \
64(__builtin_constant_p(value) ? \
65 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
66 __set_64bit(ptr, ll_low(value), ll_high(value)) )
67
68#endif
69
70/*
71 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
72 * Note 2: xchg has side effect, so that attribute volatile is necessary,
73 * but generally the primitive is invalid, *ptr is output argument. --ANK
74 */
75static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
76{
77 switch (size) {
78 case 1:
79 __asm__ __volatile__("xchgb %b0,%1"
80 :"=q" (x)
81 :"m" (*__xg(ptr)), "0" (x)
82 :"memory");
83 break;
84 case 2:
85 __asm__ __volatile__("xchgw %w0,%1"
86 :"=r" (x)
87 :"m" (*__xg(ptr)), "0" (x)
88 :"memory");
89 break;
90 case 4:
91 __asm__ __volatile__("xchgl %0,%1"
92 :"=r" (x)
93 :"m" (*__xg(ptr)), "0" (x)
94 :"memory");
95 break;
96 }
97 return x;
98}
99
100/*
101 * Atomic compare and exchange. Compare OLD with MEM, if identical,
102 * store NEW in MEM. Return the initial value in MEM. Success is
103 * indicated by comparing RETURN with OLD.
104 */
105
106#ifdef CONFIG_X86_CMPXCHG
107#define __HAVE_ARCH_CMPXCHG 1
108#define cmpxchg(ptr,o,n)\
109 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
110 (unsigned long)(n),sizeof(*(ptr))))
111#define sync_cmpxchg(ptr,o,n)\
112 ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
113 (unsigned long)(n),sizeof(*(ptr))))
114#define cmpxchg_local(ptr,o,n)\
115 ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
116 (unsigned long)(n),sizeof(*(ptr))))
117#endif
118
119static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
120 unsigned long new, int size)
121{
122 unsigned long prev;
123 switch (size) {
124 case 1:
125 __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
126 : "=a"(prev)
127 : "q"(new), "m"(*__xg(ptr)), "0"(old)
128 : "memory");
129 return prev;
130 case 2:
131 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
132 : "=a"(prev)
133 : "r"(new), "m"(*__xg(ptr)), "0"(old)
134 : "memory");
135 return prev;
136 case 4:
137 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
138 : "=a"(prev)
139 : "r"(new), "m"(*__xg(ptr)), "0"(old)
140 : "memory");
141 return prev;
142 }
143 return old;
144}
145
146/*
147 * Always use locked operations when touching memory shared with a
148 * hypervisor, since the system may be SMP even if the guest kernel
149 * isn't.
150 */
151static inline unsigned long __sync_cmpxchg(volatile void *ptr,
152 unsigned long old,
153 unsigned long new, int size)
154{
155 unsigned long prev;
156 switch (size) {
157 case 1:
158 __asm__ __volatile__("lock; cmpxchgb %b1,%2"
159 : "=a"(prev)
160 : "q"(new), "m"(*__xg(ptr)), "0"(old)
161 : "memory");
162 return prev;
163 case 2:
164 __asm__ __volatile__("lock; cmpxchgw %w1,%2"
165 : "=a"(prev)
166 : "r"(new), "m"(*__xg(ptr)), "0"(old)
167 : "memory");
168 return prev;
169 case 4:
170 __asm__ __volatile__("lock; cmpxchgl %1,%2"
171 : "=a"(prev)
172 : "r"(new), "m"(*__xg(ptr)), "0"(old)
173 : "memory");
174 return prev;
175 }
176 return old;
177}
178
179static inline unsigned long __cmpxchg_local(volatile void *ptr,
180 unsigned long old, unsigned long new, int size)
181{
182 unsigned long prev;
183 switch (size) {
184 case 1:
185 __asm__ __volatile__("cmpxchgb %b1,%2"
186 : "=a"(prev)
187 : "q"(new), "m"(*__xg(ptr)), "0"(old)
188 : "memory");
189 return prev;
190 case 2:
191 __asm__ __volatile__("cmpxchgw %w1,%2"
192 : "=a"(prev)
193 : "r"(new), "m"(*__xg(ptr)), "0"(old)
194 : "memory");
195 return prev;
196 case 4:
197 __asm__ __volatile__("cmpxchgl %1,%2"
198 : "=a"(prev)
199 : "r"(new), "m"(*__xg(ptr)), "0"(old)
200 : "memory");
201 return prev;
202 }
203 return old;
204}
205
206#ifndef CONFIG_X86_CMPXCHG
207/*
 208 * Building a kernel capable of running on an 80386. It may be necessary to
209 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
210 * a function for each of the sizes we support.
211 */
212
213extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
214extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
215extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
216
217static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
218 unsigned long new, int size)
219{
220 switch (size) {
221 case 1:
222 return cmpxchg_386_u8(ptr, old, new);
223 case 2:
224 return cmpxchg_386_u16(ptr, old, new);
225 case 4:
226 return cmpxchg_386_u32(ptr, old, new);
227 }
228 return old;
229}
230
231#define cmpxchg(ptr,o,n) \
232({ \
233 __typeof__(*(ptr)) __ret; \
234 if (likely(boot_cpu_data.x86 > 3)) \
235 __ret = __cmpxchg((ptr), (unsigned long)(o), \
236 (unsigned long)(n), sizeof(*(ptr))); \
237 else \
238 __ret = cmpxchg_386((ptr), (unsigned long)(o), \
239 (unsigned long)(n), sizeof(*(ptr))); \
240 __ret; \
241})
242#define cmpxchg_local(ptr,o,n) \
243({ \
244 __typeof__(*(ptr)) __ret; \
245 if (likely(boot_cpu_data.x86 > 3)) \
246 __ret = __cmpxchg_local((ptr), (unsigned long)(o), \
247 (unsigned long)(n), sizeof(*(ptr))); \
248 else \
249 __ret = cmpxchg_386((ptr), (unsigned long)(o), \
250 (unsigned long)(n), sizeof(*(ptr))); \
251 __ret; \
252})
253#endif
254
255#ifdef CONFIG_X86_CMPXCHG64
256
257static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
258 unsigned long long new)
259{
260 unsigned long long prev;
261 __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
262 : "=A"(prev)
263 : "b"((unsigned long)new),
264 "c"((unsigned long)(new >> 32)),
265 "m"(*__xg(ptr)),
266 "0"(old)
267 : "memory");
268 return prev;
269}
270
271static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
272 unsigned long long old, unsigned long long new)
273{
274 unsigned long long prev;
275 __asm__ __volatile__("cmpxchg8b %3"
276 : "=A"(prev)
277 : "b"((unsigned long)new),
278 "c"((unsigned long)(new >> 32)),
279 "m"(*__xg(ptr)),
280 "0"(old)
281 : "memory");
282 return prev;
283}
284
285#define cmpxchg64(ptr,o,n)\
286 ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
287 (unsigned long long)(n)))
288#define cmpxchg64_local(ptr,o,n)\
289 ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
290 (unsigned long long)(n)))
291#endif
292
293#endif
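For reference, the usual way this new header gets consumed is the compare-and-swap retry loop. The snippet below is a sketch only (the counter and its bound are made up), not code from the patch:

#include <asm/cmpxchg.h>

/* Atomically increment *ctr, but never beyond max; returns the new value. */
static unsigned long saturating_inc(unsigned long *ctr, unsigned long max)
{
	unsigned long old, new;

	do {
		old = *ctr;
		new = (old < max) ? old + 1 : max;
		/* cmpxchg() returns what was really in *ctr before the store. */
	} while (cmpxchg(ctr, old, new) != old);

	return new;
}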
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index d304ab4161ff..b32df3a332da 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -9,8 +9,6 @@
9#include <asm/user.h> 9#include <asm/user.h>
10#include <asm/auxvec.h> 10#include <asm/auxvec.h>
11 11
12#include <linux/utsname.h>
13
14#define R_386_NONE 0 12#define R_386_NONE 0
15#define R_386_32 1 13#define R_386_32 1
16#define R_386_PC32 2 14#define R_386_PC32 2
diff --git a/include/asm-i386/ioctls.h b/include/asm-i386/ioctls.h
index f962fadab0fa..ef5878762dc9 100644
--- a/include/asm-i386/ioctls.h
+++ b/include/asm-i386/ioctls.h
@@ -47,6 +47,10 @@
47#define TIOCSBRK 0x5427 /* BSD compatibility */ 47#define TIOCSBRK 0x5427 /* BSD compatibility */
48#define TIOCCBRK 0x5428 /* BSD compatibility */ 48#define TIOCCBRK 0x5428 /* BSD compatibility */
49#define TIOCGSID 0x5429 /* Return the session ID of FD */ 49#define TIOCGSID 0x5429 /* Return the session ID of FD */
50#define TCGETS2 _IOR('T',0x2A, struct termios2)
51#define TCSETS2 _IOW('T',0x2B, struct termios2)
52#define TCSETSW2 _IOW('T',0x2C, struct termios2)
53#define TCSETSF2 _IOW('T',0x2D, struct termios2)
50#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ 54#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
51#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ 55#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
52 56
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
index d18cdb9fc9a6..05c3117788b9 100644
--- a/include/asm-i386/kdebug.h
+++ b/include/asm-i386/kdebug.h
@@ -9,19 +9,8 @@
9 9
10struct pt_regs; 10struct pt_regs;
11 11
12struct die_args {
13 struct pt_regs *regs;
14 const char *str;
15 long err;
16 int trapnr;
17 int signr;
18};
19
20extern int register_die_notifier(struct notifier_block *);
21extern int unregister_die_notifier(struct notifier_block *);
22extern int register_page_fault_notifier(struct notifier_block *); 12extern int register_page_fault_notifier(struct notifier_block *);
23extern int unregister_page_fault_notifier(struct notifier_block *); 13extern int unregister_page_fault_notifier(struct notifier_block *);
24extern struct atomic_notifier_head i386die_chain;
25 14
26 15
27/* Grossly misnamed. */ 16/* Grossly misnamed. */
@@ -38,20 +27,8 @@ enum die_val {
38 DIE_GPF, 27 DIE_GPF,
39 DIE_CALL, 28 DIE_CALL,
40 DIE_NMI_IPI, 29 DIE_NMI_IPI,
30 DIE_NMI_POST,
41 DIE_PAGE_FAULT, 31 DIE_PAGE_FAULT,
42}; 32};
43 33
44static inline int notify_die(enum die_val val, const char *str,
45 struct pt_regs *regs, long err, int trap, int sig)
46{
47 struct die_args args = {
48 .regs = regs,
49 .str = str,
50 .err = err,
51 .trapnr = trap,
52 .signr = sig
53 };
54 return atomic_notifier_call_chain(&i386die_chain, val, &args);
55}
56
57#endif 34#endif
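With die_args and notify_die() gone from the per-arch header, callers keep registering on the die chain through the generic interface. A hedged sketch follows (it assumes the generic <linux/kdebug.h> wrapper introduced alongside this series; the handler name is hypothetical):

#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kernel.h>

static int my_die_handler(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	struct die_args *args = data;	/* now declared in the generic header */

	if (val == DIE_OOPS)
		printk(KERN_INFO "oops: %s (err %ld)\n", args->str, args->err);
	return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
	.notifier_call = my_die_handler,
};

/* register_die_notifier(&my_die_nb) at init, unregister_die_notifier() at exit. */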
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index bcb5b21de2d2..4b9dc9e6b701 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -45,8 +45,6 @@
45/* We can also handle crash dumps from 64 bit kernel. */ 45/* We can also handle crash dumps from 64 bit kernel. */
46#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64) 46#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
47 47
48#define MAX_NOTE_BYTES 1024
49
50/* CPU does not save ss and esp on stack if execution is already 48/* CPU does not save ss and esp on stack if execution is already
51 * running in kernel mode at the time of NMI occurrence. This code 49 * running in kernel mode at the time of NMI occurrence. This code
52 * fixes it. 50 * fixes it.
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index 12060e22f7e2..e13d3e98823f 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -2,47 +2,198 @@
2#define _ARCH_I386_LOCAL_H 2#define _ARCH_I386_LOCAL_H
3 3
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5#include <asm/system.h>
6#include <asm/atomic.h>
5 7
6typedef struct 8typedef struct
7{ 9{
8 volatile long counter; 10 atomic_long_t a;
9} local_t; 11} local_t;
10 12
11#define LOCAL_INIT(i) { (i) } 13#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
12 14
13#define local_read(v) ((v)->counter) 15#define local_read(l) atomic_long_read(&(l)->a)
14#define local_set(v,i) (((v)->counter) = (i)) 16#define local_set(l,i) atomic_long_set(&(l)->a, (i))
15 17
16static __inline__ void local_inc(local_t *v) 18static __inline__ void local_inc(local_t *l)
17{ 19{
18 __asm__ __volatile__( 20 __asm__ __volatile__(
19 "incl %0" 21 "incl %0"
20 :"+m" (v->counter)); 22 :"+m" (l->a.counter));
21} 23}
22 24
23static __inline__ void local_dec(local_t *v) 25static __inline__ void local_dec(local_t *l)
24{ 26{
25 __asm__ __volatile__( 27 __asm__ __volatile__(
26 "decl %0" 28 "decl %0"
27 :"+m" (v->counter)); 29 :"+m" (l->a.counter));
28} 30}
29 31
30static __inline__ void local_add(long i, local_t *v) 32static __inline__ void local_add(long i, local_t *l)
31{ 33{
32 __asm__ __volatile__( 34 __asm__ __volatile__(
33 "addl %1,%0" 35 "addl %1,%0"
34 :"+m" (v->counter) 36 :"+m" (l->a.counter)
35 :"ir" (i)); 37 :"ir" (i));
36} 38}
37 39
38static __inline__ void local_sub(long i, local_t *v) 40static __inline__ void local_sub(long i, local_t *l)
39{ 41{
40 __asm__ __volatile__( 42 __asm__ __volatile__(
41 "subl %1,%0" 43 "subl %1,%0"
42 :"+m" (v->counter) 44 :"+m" (l->a.counter)
43 :"ir" (i)); 45 :"ir" (i));
44} 46}
45 47
48/**
49 * local_sub_and_test - subtract value from variable and test result
50 * @i: integer value to subtract
51 * @l: pointer of type local_t
52 *
53 * Atomically subtracts @i from @l and returns
54 * true if the result is zero, or false for all
55 * other cases.
56 */
57static __inline__ int local_sub_and_test(long i, local_t *l)
58{
59 unsigned char c;
60
61 __asm__ __volatile__(
62 "subl %2,%0; sete %1"
63 :"+m" (l->a.counter), "=qm" (c)
64 :"ir" (i) : "memory");
65 return c;
66}
67
68/**
69 * local_dec_and_test - decrement and test
70 * @l: pointer of type local_t
71 *
72 * Atomically decrements @l by 1 and
73 * returns true if the result is 0, or false for all other
74 * cases.
75 */
76static __inline__ int local_dec_and_test(local_t *l)
77{
78 unsigned char c;
79
80 __asm__ __volatile__(
81 "decl %0; sete %1"
82 :"+m" (l->a.counter), "=qm" (c)
83 : : "memory");
84 return c != 0;
85}
86
87/**
88 * local_inc_and_test - increment and test
89 * @l: pointer of type local_t
90 *
91 * Atomically increments @l by 1
92 * and returns true if the result is zero, or false for all
93 * other cases.
94 */
95static __inline__ int local_inc_and_test(local_t *l)
96{
97 unsigned char c;
98
99 __asm__ __volatile__(
100 "incl %0; sete %1"
101 :"+m" (l->a.counter), "=qm" (c)
102 : : "memory");
103 return c != 0;
104}
105
106/**
107 * local_add_negative - add and test if negative
108 * @l: pointer of type local_t
109 * @i: integer value to add
110 *
111 * Atomically adds @i to @l and returns true
112 * if the result is negative, or false when
113 * result is greater than or equal to zero.
114 */
115static __inline__ int local_add_negative(long i, local_t *l)
116{
117 unsigned char c;
118
119 __asm__ __volatile__(
120 "addl %2,%0; sets %1"
121 :"+m" (l->a.counter), "=qm" (c)
122 :"ir" (i) : "memory");
123 return c;
124}
125
126/**
127 * local_add_return - add and return
128 * @l: pointer of type local_t
129 * @i: integer value to add
130 *
131 * Atomically adds @i to @l and returns @i + @l
132 */
133static __inline__ long local_add_return(long i, local_t *l)
134{
135 long __i;
136#ifdef CONFIG_M386
137 unsigned long flags;
138 if(unlikely(boot_cpu_data.x86==3))
139 goto no_xadd;
140#endif
141 /* Modern 486+ processor */
142 __i = i;
143 __asm__ __volatile__(
144 "xaddl %0, %1;"
145 :"+r" (i), "+m" (l->a.counter)
146 : : "memory");
147 return i + __i;
148
149#ifdef CONFIG_M386
150no_xadd: /* Legacy 386 processor */
151 local_irq_save(flags);
152 __i = local_read(l);
153 local_set(l, i + __i);
154 local_irq_restore(flags);
155 return i + __i;
156#endif
157}
158
159static __inline__ long local_sub_return(long i, local_t *l)
160{
161 return local_add_return(-i,l);
162}
163
164#define local_inc_return(l) (local_add_return(1,l))
165#define local_dec_return(l) (local_sub_return(1,l))
166
167#define local_cmpxchg(l, o, n) \
168 (cmpxchg_local(&((l)->a.counter), (o), (n)))
169/* Always has a lock prefix */
170#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
171
172/**
173 * local_add_unless - add unless the number is a given value
174 * @l: pointer of type local_t
175 * @a: the amount to add to l...
176 * @u: ...unless l is equal to u.
177 *
178 * Atomically adds @a to @l, so long as it was not @u.
179 * Returns non-zero if @l was not @u, and zero otherwise.
180 */
181#define local_add_unless(l, a, u) \
182({ \
183 long c, old; \
184 c = local_read(l); \
185 for (;;) { \
186 if (unlikely(c == (u))) \
187 break; \
188 old = local_cmpxchg((l), c, c + (a)); \
189 if (likely(old == c)) \
190 break; \
191 c = old; \
192 } \
193 c != (u); \
194})
195#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
196
46/* On x86, these are no better than the atomic variants. */ 197/* On x86, these are no better than the atomic variants. */
47#define __local_inc(l) local_inc(l) 198#define __local_inc(l) local_inc(l)
48#define __local_dec(l) local_dec(l) 199#define __local_dec(l) local_dec(l)
@@ -56,27 +207,27 @@ static __inline__ void local_sub(long i, local_t *v)
56 207
57/* Need to disable preemption for the cpu local counters otherwise we could 208/* Need to disable preemption for the cpu local counters otherwise we could
58 still access a variable of a previous CPU in a non atomic way. */ 209 still access a variable of a previous CPU in a non atomic way. */
59#define cpu_local_wrap_v(v) \ 210#define cpu_local_wrap_v(l) \
60 ({ local_t res__; \ 211 ({ local_t res__; \
61 preempt_disable(); \ 212 preempt_disable(); \
62 res__ = (v); \ 213 res__ = (l); \
63 preempt_enable(); \ 214 preempt_enable(); \
64 res__; }) 215 res__; })
65#define cpu_local_wrap(v) \ 216#define cpu_local_wrap(l) \
66 ({ preempt_disable(); \ 217 ({ preempt_disable(); \
67 v; \ 218 l; \
68 preempt_enable(); }) \ 219 preempt_enable(); }) \
69 220
70#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v))) 221#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
71#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i))) 222#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
72#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v))) 223#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
73#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v))) 224#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
74#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v))) 225#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
75#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v))) 226#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
76 227
77#define __cpu_local_inc(v) cpu_local_inc(v) 228#define __cpu_local_inc(l) cpu_local_inc(l)
78#define __cpu_local_dec(v) cpu_local_dec(v) 229#define __cpu_local_dec(l) cpu_local_dec(l)
79#define __cpu_local_add(i, v) cpu_local_add((i), (v)) 230#define __cpu_local_add(i, l) cpu_local_add((i), (l))
80#define __cpu_local_sub(i, v) cpu_local_sub((i), (v)) 231#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
81 232
82#endif /* _ARCH_I386_LOCAL_H */ 233#endif /* _ARCH_I386_LOCAL_H */
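A short sketch of how the reworked local_t is meant to be used for per-CPU statistics (the counter name is hypothetical, not from the patch):

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, pkt_count) = LOCAL_INIT(0);

static void note_packet(void)
{
	/* Preemption-safe: cpu_local_inc() wraps local_inc() in preempt_disable(). */
	cpu_local_inc(pkt_count);
}

static long packets_on(int cpu)
{
	return local_read(&per_cpu(pkt_count, cpu));
}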
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index e16359f81a40..edce9d51a676 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -243,8 +243,6 @@ static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; re
243static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } 243static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
244static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } 244static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
245 245
246extern void vmalloc_sync_all(void);
247
248#ifdef CONFIG_X86_PAE 246#ifdef CONFIG_X86_PAE
249# include <asm/pgtable-3level.h> 247# include <asm/pgtable-3level.h>
250#else 248#else
@@ -544,10 +542,6 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
544#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 542#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
545 remap_pfn_range(vma, vaddr, pfn, size, prot) 543 remap_pfn_range(vma, vaddr, pfn, size, prot)
546 544
547#define MK_IOSPACE_PFN(space, pfn) (pfn)
548#define GET_IOSPACE(pfn) 0
549#define GET_PFN(pfn) (pfn)
550
551#include <asm-generic/pgtable.h> 545#include <asm-generic/pgtable.h>
552 546
553#endif /* _I386_PGTABLE_H */ 547#endif /* _I386_PGTABLE_H */
diff --git a/include/asm-i386/serial.h b/include/asm-i386/serial.h
index bd67480ca109..57a4306cdf63 100644
--- a/include/asm-i386/serial.h
+++ b/include/asm-i386/serial.h
@@ -11,19 +11,3 @@
11 * megabits/second; but this requires the faster clock. 11 * megabits/second; but this requires the faster clock.
12 */ 12 */
13#define BASE_BAUD ( 1843200 / 16 ) 13#define BASE_BAUD ( 1843200 / 16 )
14
15/* Standard COM flags (except for COM4, because of the 8514 problem) */
16#ifdef CONFIG_SERIAL_DETECT_IRQ
17#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
18#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
19#else
20#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
21#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
22#endif
23
24#define SERIAL_PORT_DFNS \
25 /* UART CLK PORT IRQ FLAGS */ \
26 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
27 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
28 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
29 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index c3a58c08c495..94ed3686a5f3 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -4,7 +4,7 @@
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <asm/segment.h> 5#include <asm/segment.h>
6#include <asm/cpufeature.h> 6#include <asm/cpufeature.h>
7#include <linux/bitops.h> /* for LOCK_PREFIX */ 7#include <asm/cmpxchg.h>
8 8
9#ifdef __KERNEL__ 9#ifdef __KERNEL__
10 10
@@ -195,238 +195,6 @@ static inline unsigned long get_limit(unsigned long segment)
195 195
196#define nop() __asm__ __volatile__ ("nop") 196#define nop() __asm__ __volatile__ ("nop")
197 197
198#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
199
200#define tas(ptr) (xchg((ptr),1))
201
202struct __xchg_dummy { unsigned long a[100]; };
203#define __xg(x) ((struct __xchg_dummy *)(x))
204
205
206#ifdef CONFIG_X86_CMPXCHG64
207
208/*
209 * The semantics of XCHGCMP8B are a bit strange, this is why
210 * there is a loop and the loading of %%eax and %%edx has to
211 * be inside. This inlines well in most cases, the cached
212 * cost is around ~38 cycles. (in the future we might want
213 * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
214 * might have an implicit FPU-save as a cost, so it's not
215 * clear which path to go.)
216 *
217 * cmpxchg8b must be used with the lock prefix here to allow
218 * the instruction to be executed atomically, see page 3-102
219 * of the instruction set reference 24319102.pdf. We need
220 * the reader side to see the coherent 64bit value.
221 */
222static inline void __set_64bit (unsigned long long * ptr,
223 unsigned int low, unsigned int high)
224{
225 __asm__ __volatile__ (
226 "\n1:\t"
227 "movl (%0), %%eax\n\t"
228 "movl 4(%0), %%edx\n\t"
229 "lock cmpxchg8b (%0)\n\t"
230 "jnz 1b"
231 : /* no outputs */
232 : "D"(ptr),
233 "b"(low),
234 "c"(high)
235 : "ax","dx","memory");
236}
237
238static inline void __set_64bit_constant (unsigned long long *ptr,
239 unsigned long long value)
240{
241 __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
242}
243#define ll_low(x) *(((unsigned int*)&(x))+0)
244#define ll_high(x) *(((unsigned int*)&(x))+1)
245
246static inline void __set_64bit_var (unsigned long long *ptr,
247 unsigned long long value)
248{
249 __set_64bit(ptr,ll_low(value), ll_high(value));
250}
251
252#define set_64bit(ptr,value) \
253(__builtin_constant_p(value) ? \
254 __set_64bit_constant(ptr, value) : \
255 __set_64bit_var(ptr, value) )
256
257#define _set_64bit(ptr,value) \
258(__builtin_constant_p(value) ? \
259 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
260 __set_64bit(ptr, ll_low(value), ll_high(value)) )
261
262#endif
263
264/*
265 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
266 * Note 2: xchg has side effect, so that attribute volatile is necessary,
267 * but generally the primitive is invalid, *ptr is output argument. --ANK
268 */
269static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
270{
271 switch (size) {
272 case 1:
273 __asm__ __volatile__("xchgb %b0,%1"
274 :"=q" (x)
275 :"m" (*__xg(ptr)), "0" (x)
276 :"memory");
277 break;
278 case 2:
279 __asm__ __volatile__("xchgw %w0,%1"
280 :"=r" (x)
281 :"m" (*__xg(ptr)), "0" (x)
282 :"memory");
283 break;
284 case 4:
285 __asm__ __volatile__("xchgl %0,%1"
286 :"=r" (x)
287 :"m" (*__xg(ptr)), "0" (x)
288 :"memory");
289 break;
290 }
291 return x;
292}
293
294/*
295 * Atomic compare and exchange. Compare OLD with MEM, if identical,
296 * store NEW in MEM. Return the initial value in MEM. Success is
297 * indicated by comparing RETURN with OLD.
298 */
299
300#ifdef CONFIG_X86_CMPXCHG
301#define __HAVE_ARCH_CMPXCHG 1
302#define cmpxchg(ptr,o,n)\
303 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
304 (unsigned long)(n),sizeof(*(ptr))))
305#define sync_cmpxchg(ptr,o,n)\
306 ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
307 (unsigned long)(n),sizeof(*(ptr))))
308#endif
309
310static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
311 unsigned long new, int size)
312{
313 unsigned long prev;
314 switch (size) {
315 case 1:
316 __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
317 : "=a"(prev)
318 : "q"(new), "m"(*__xg(ptr)), "0"(old)
319 : "memory");
320 return prev;
321 case 2:
322 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
323 : "=a"(prev)
324 : "r"(new), "m"(*__xg(ptr)), "0"(old)
325 : "memory");
326 return prev;
327 case 4:
328 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
329 : "=a"(prev)
330 : "r"(new), "m"(*__xg(ptr)), "0"(old)
331 : "memory");
332 return prev;
333 }
334 return old;
335}
336
337/*
338 * Always use locked operations when touching memory shared with a
339 * hypervisor, since the system may be SMP even if the guest kernel
340 * isn't.
341 */
342static inline unsigned long __sync_cmpxchg(volatile void *ptr,
343 unsigned long old,
344 unsigned long new, int size)
345{
346 unsigned long prev;
347 switch (size) {
348 case 1:
349 __asm__ __volatile__("lock; cmpxchgb %b1,%2"
350 : "=a"(prev)
351 : "q"(new), "m"(*__xg(ptr)), "0"(old)
352 : "memory");
353 return prev;
354 case 2:
355 __asm__ __volatile__("lock; cmpxchgw %w1,%2"
356 : "=a"(prev)
357 : "r"(new), "m"(*__xg(ptr)), "0"(old)
358 : "memory");
359 return prev;
360 case 4:
361 __asm__ __volatile__("lock; cmpxchgl %1,%2"
362 : "=a"(prev)
363 : "r"(new), "m"(*__xg(ptr)), "0"(old)
364 : "memory");
365 return prev;
366 }
367 return old;
368}
369
370#ifndef CONFIG_X86_CMPXCHG
371/*
372 * Building a kernel capable running on 80386. It may be necessary to
373 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
374 * a function for each of the sizes we support.
375 */
376
377extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
378extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
379extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
380
381static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
382 unsigned long new, int size)
383{
384 switch (size) {
385 case 1:
386 return cmpxchg_386_u8(ptr, old, new);
387 case 2:
388 return cmpxchg_386_u16(ptr, old, new);
389 case 4:
390 return cmpxchg_386_u32(ptr, old, new);
391 }
392 return old;
393}
394
395#define cmpxchg(ptr,o,n) \
396({ \
397 __typeof__(*(ptr)) __ret; \
398 if (likely(boot_cpu_data.x86 > 3)) \
399 __ret = __cmpxchg((ptr), (unsigned long)(o), \
400 (unsigned long)(n), sizeof(*(ptr))); \
401 else \
402 __ret = cmpxchg_386((ptr), (unsigned long)(o), \
403 (unsigned long)(n), sizeof(*(ptr))); \
404 __ret; \
405})
406#endif
407
408#ifdef CONFIG_X86_CMPXCHG64
409
410static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
411 unsigned long long new)
412{
413 unsigned long long prev;
414 __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
415 : "=A"(prev)
416 : "b"((unsigned long)new),
417 "c"((unsigned long)(new >> 32)),
418 "m"(*__xg(ptr)),
419 "0"(old)
420 : "memory");
421 return prev;
422}
423
424#define cmpxchg64(ptr,o,n)\
425 ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
426 (unsigned long long)(n)))
427
428#endif
429
430/* 198/*
431 * Force strict CPU ordering. 199 * Force strict CPU ordering.
432 * And yes, this is required on UP too when we're talking 200 * And yes, this is required on UP too when we're talking
diff --git a/include/asm-i386/termbits.h b/include/asm-i386/termbits.h
index 2e6237693814..a21700352e7b 100644
--- a/include/asm-i386/termbits.h
+++ b/include/asm-i386/termbits.h
@@ -17,6 +17,17 @@ struct termios {
17 cc_t c_cc[NCCS]; /* control characters */ 17 cc_t c_cc[NCCS]; /* control characters */
18}; 18};
19 19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
20struct ktermios { 31struct ktermios {
21 tcflag_t c_iflag; /* input mode flags */ 32 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */ 33 tcflag_t c_oflag; /* output mode flags */
@@ -129,6 +140,7 @@ struct ktermios {
129#define HUPCL 0002000 140#define HUPCL 0002000
130#define CLOCAL 0004000 141#define CLOCAL 0004000
131#define CBAUDEX 0010000 142#define CBAUDEX 0010000
143#define BOTHER 0010000
132#define B57600 0010001 144#define B57600 0010001
133#define B115200 0010002 145#define B115200 0010002
134#define B230400 0010003 146#define B230400 0010003
@@ -148,6 +160,8 @@ struct ktermios {
148#define CMSPAR 010000000000 /* mark or space (stick) parity */ 160#define CMSPAR 010000000000 /* mark or space (stick) parity */
149#define CRTSCTS 020000000000 /* flow control */ 161#define CRTSCTS 020000000000 /* flow control */
150 162
163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164
151/* c_lflag bits */ 165/* c_lflag bits */
152#define ISIG 0000001 166#define ISIG 0000001
153#define ICANON 0000002 167#define ICANON 0000002
diff --git a/include/asm-i386/termios.h b/include/asm-i386/termios.h
index 7c99678a8f86..f520b7c16fa2 100644
--- a/include/asm-i386/termios.h
+++ b/include/asm-i386/termios.h
@@ -81,8 +81,10 @@ struct termio {
81 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ 81 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
82}) 82})
83 83
84#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) 84#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
85#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) 85#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
86#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
87#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
86 88
87#endif /* __KERNEL__ */ 89#endif /* __KERNEL__ */
88 90
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 833fa1704ff9..bd21e795197c 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -325,10 +325,11 @@
325#define __NR_move_pages 317 325#define __NR_move_pages 317
326#define __NR_getcpu 318 326#define __NR_getcpu 318
327#define __NR_epoll_pwait 319 327#define __NR_epoll_pwait 319
328#define __NR_utimensat 320
328 329
329#ifdef __KERNEL__ 330#ifdef __KERNEL__
330 331
331#define NR_syscalls 320 332#define NR_syscalls 321
332 333
333#define __ARCH_WANT_IPC_PARSE_VERSION 334#define __ARCH_WANT_IPC_PARSE_VERSION
334#define __ARCH_WANT_OLD_READDIR 335#define __ARCH_WANT_OLD_READDIR
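Until libc grows a wrapper, the new syscall number can be exercised directly. This is a sketch only, assuming headers that already carry __NR_utimensat; passing a NULL times array asks the kernel to set both timestamps to the current time:

#include <unistd.h>
#include <sys/syscall.h>	/* syscall(), __NR_utimensat */
#include <fcntl.h>		/* AT_FDCWD */

/* Touch both timestamps of "path" to "now". */
static int touch_now(const char *path)
{
	return syscall(__NR_utimensat, AT_FDCWD, path, (void *)0, 0);
}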
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 569ec7574baf..1fc3b83325da 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -15,6 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16 16
17#include <asm/intrinsics.h> 17#include <asm/intrinsics.h>
18#include <asm/system.h>
18 19
19/* 20/*
20 * On IA-64, counter must always be volatile to ensure that the 21 * On IA-64, counter must always be volatile to ensure that the
@@ -88,25 +89,47 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
88 return new; 89 return new;
89} 90}
90 91
91#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) 92#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
92#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 93#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
93 94
94#define atomic_add_unless(v, a, u) \ 95#define atomic64_cmpxchg(v, old, new) \
95({ \ 96 (cmpxchg(&((v)->counter), old, new))
96 int c, old; \ 97#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
97 c = atomic_read(v); \ 98
98 for (;;) { \ 99static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
99 if (unlikely(c == (u))) \ 100{
100 break; \ 101 int c, old;
101 old = atomic_cmpxchg((v), c, c + (a)); \ 102 c = atomic_read(v);
102 if (likely(old == c)) \ 103 for (;;) {
103 break; \ 104 if (unlikely(c == (u)))
104 c = old; \ 105 break;
105 } \ 106 old = atomic_cmpxchg((v), c, c + (a));
106 c != (u); \ 107 if (likely(old == c))
107}) 108 break;
109 c = old;
110 }
111 return c != (u);
112}
113
108#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 114#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
109 115
116static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
117{
118 long c, old;
119 c = atomic64_read(v);
120 for (;;) {
121 if (unlikely(c == (u)))
122 break;
123 old = atomic64_cmpxchg((v), c, c + (a));
124 if (likely(old == c))
125 break;
126 c = old;
127 }
128 return c != (u);
129}
130
131#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
132
110#define atomic_add_return(i,v) \ 133#define atomic_add_return(i,v) \
111({ \ 134({ \
112 int __ia64_aar_i = (i); \ 135 int __ia64_aar_i = (i); \
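The freshly added atomic64_cmpxchg() enables the usual 64-bit lock-free update loops. A sketch (high-water-mark tracking; the function name is hypothetical):

#include <asm/atomic.h>

static void record_max(atomic64_t *max, long val)
{
	long cur = atomic64_read(max);

	while (val > cur) {
		long old = atomic64_cmpxchg(max, cur, val);
		if (old == cur)
			break;		/* we installed the new maximum */
		cur = old;		/* lost the race; re-evaluate */
	}
}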
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index aed7142f9e4a..ba211e011a1d 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -28,21 +28,8 @@
28 */ 28 */
29#include <linux/notifier.h> 29#include <linux/notifier.h>
30 30
31struct pt_regs;
32
33struct die_args {
34 struct pt_regs *regs;
35 const char *str;
36 long err;
37 int trapnr;
38 int signr;
39};
40
41extern int register_die_notifier(struct notifier_block *);
42extern int unregister_die_notifier(struct notifier_block *);
43extern int register_page_fault_notifier(struct notifier_block *); 31extern int register_page_fault_notifier(struct notifier_block *);
44extern int unregister_page_fault_notifier(struct notifier_block *); 32extern int unregister_page_fault_notifier(struct notifier_block *);
45extern struct atomic_notifier_head ia64die_chain;
46 33
47enum die_val { 34enum die_val {
48 DIE_BREAK = 1, 35 DIE_BREAK = 1,
@@ -74,18 +61,4 @@ enum die_val {
74 DIE_KDUMP_LEAVE, 61 DIE_KDUMP_LEAVE,
75}; 62};
76 63
77static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs,
78 long err, int trap, int sig)
79{
80 struct die_args args = {
81 .regs = regs,
82 .str = str,
83 .err = err,
84 .trapnr = trap,
85 .signr = sig
86 };
87
88 return atomic_notifier_call_chain(&ia64die_chain, val, &args);
89}
90
91#endif 64#endif
diff --git a/include/asm-ia64/kexec.h b/include/asm-ia64/kexec.h
index 41299ddfee30..541be835fc5a 100644
--- a/include/asm-ia64/kexec.h
+++ b/include/asm-ia64/kexec.h
@@ -14,8 +14,6 @@
14/* The native architecture */ 14/* The native architecture */
15#define KEXEC_ARCH KEXEC_ARCH_IA_64 15#define KEXEC_ARCH KEXEC_ARCH_IA_64
16 16
17#define MAX_NOTE_BYTES 1024
18
19#define kexec_flush_icache_page(page) do { \ 17#define kexec_flush_icache_page(page) do { \
20 unsigned long page_addr = (unsigned long)page_address(page); \ 18 unsigned long page_addr = (unsigned long)page_address(page); \
21 flush_icache_range(page_addr, page_addr + PAGE_SIZE); \ 19 flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
diff --git a/include/asm-ia64/local.h b/include/asm-ia64/local.h
index dc519092ef4d..c11c530f74d0 100644
--- a/include/asm-ia64/local.h
+++ b/include/asm-ia64/local.h
@@ -1,50 +1 @@
1#ifndef _ASM_IA64_LOCAL_H #include <asm-generic/local.h>
2#define _ASM_IA64_LOCAL_H
3
4/*
5 * Copyright (C) 2003 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/percpu.h>
10
11typedef struct {
12 atomic64_t val;
13} local_t;
14
15#define LOCAL_INIT(i) ((local_t) { { (i) } })
16#define local_read(l) atomic64_read(&(l)->val)
17#define local_set(l, i) atomic64_set(&(l)->val, i)
18#define local_inc(l) atomic64_inc(&(l)->val)
19#define local_dec(l) atomic64_dec(&(l)->val)
20#define local_add(i, l) atomic64_add((i), &(l)->val)
21#define local_sub(i, l) atomic64_sub((i), &(l)->val)
22
23/* Non-atomic variants, i.e., preemption disabled and won't be touched in interrupt, etc. */
24
25#define __local_inc(l) (++(l)->val.counter)
26#define __local_dec(l) (--(l)->val.counter)
27#define __local_add(i,l) ((l)->val.counter += (i))
28#define __local_sub(i,l) ((l)->val.counter -= (i))
29
30/*
31 * Use these for per-cpu local_t variables. Note they take a variable (eg. mystruct.foo),
32 * not an address.
33 */
34#define cpu_local_read(v) local_read(&__ia64_per_cpu_var(v))
35#define cpu_local_set(v, i) local_set(&__ia64_per_cpu_var(v), (i))
36#define cpu_local_inc(v) local_inc(&__ia64_per_cpu_var(v))
37#define cpu_local_dec(v) local_dec(&__ia64_per_cpu_var(v))
38#define cpu_local_add(i, v) local_add((i), &__ia64_per_cpu_var(v))
39#define cpu_local_sub(i, v) local_sub((i), &__ia64_per_cpu_var(v))
40
41/*
42 * Non-atomic increments, i.e., preemption disabled and won't be touched in interrupt,
43 * etc.
44 */
45#define __cpu_local_inc(v) __local_inc(&__ia64_per_cpu_var(v))
46#define __cpu_local_dec(v) __local_dec(&__ia64_per_cpu_var(v))
47#define __cpu_local_add(i, v) __local_add((i), &__ia64_per_cpu_var(v))
48#define __cpu_local_sub(i, v) __local_sub((i), &__ia64_per_cpu_var(v))
49
50#endif /* _ASM_IA64_LOCAL_H */
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 553182747722..670b706411b8 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -485,10 +485,6 @@ extern void paging_init (void);
485#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 485#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
486 remap_pfn_range(vma, vaddr, pfn, size, prot) 486 remap_pfn_range(vma, vaddr, pfn, size, prot)
487 487
488#define MK_IOSPACE_PFN(space, pfn) (pfn)
489#define GET_IOSPACE(pfn) 0
490#define GET_PFN(pfn) (pfn)
491
492/* 488/*
493 * ZERO_PAGE is a global shared page that is always zero: used 489 * ZERO_PAGE is a global shared page that is always zero: used
494 * for zero-mapped memory areas etc.. 490 * for zero-mapped memory areas etc..
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index f5a7d7301c72..3a38ffe4a4f4 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -253,14 +253,21 @@ static __inline__ int atomic_dec_return(atomic_t *v)
253 * Atomically adds @a to @v, so long as it was not @u. 253 * Atomically adds @a to @v, so long as it was not @u.
254 * Returns non-zero if @v was not @u, and zero otherwise. 254 * Returns non-zero if @v was not @u, and zero otherwise.
255 */ 255 */
256#define atomic_add_unless(v, a, u) \ 256static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
257({ \ 257{
258 int c, old; \ 258 int c, old;
259 c = atomic_read(v); \ 259 c = atomic_read(v);
260 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 260 for (;;) {
261 c = old; \ 261 if (unlikely(c == (u)))
262 c != (u); \ 262 break;
263}) 263 old = atomic_cmpxchg((v), c, c + (a));
264 if (likely(old == c))
265 break;
266 c = old;
267 }
268 return c != (u);
269}
270
264#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 271#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
265 272
266static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr) 273static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
diff --git a/include/asm-m32r/kdebug.h b/include/asm-m32r/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-m32r/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h
index 1c15ba7ce319..8b2a2f17e695 100644
--- a/include/asm-m32r/pgtable.h
+++ b/include/asm-m32r/pgtable.h
@@ -381,10 +381,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
381#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 381#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
382 remap_pfn_range(vma, vaddr, pfn, size, prot) 382 remap_pfn_range(vma, vaddr, pfn, size, prot)
383 383
384#define MK_IOSPACE_PFN(space, pfn) (pfn)
385#define GET_IOSPACE(pfn) 0
386#define GET_PFN(pfn) (pfn)
387
388#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 384#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
389#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY 385#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
390#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 386#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 99ee09889ff7..06cdece35865 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -122,8 +122,6 @@ static inline void local_irq_disable(void)
122#define xchg(ptr,x) \ 122#define xchg(ptr,x) \
123 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 123 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
124 124
125#define tas(ptr) (xchg((ptr),1))
126
127#ifdef CONFIG_SMP 125#ifdef CONFIG_SMP
128extern void __xchg_called_with_bad_pointer(void); 126extern void __xchg_called_with_bad_pointer(void);
129#endif 127#endif
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index d5eed64cb833..4915294fea63 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -2,7 +2,7 @@
2#define __ARCH_M68K_ATOMIC__ 2#define __ARCH_M68K_ATOMIC__
3 3
4 4
5#include <asm/system.h> /* local_irq_XXX() */ 5#include <asm/system.h>
6 6
7/* 7/*
8 * Atomic operations that C can't guarantee us. Useful for 8 * Atomic operations that C can't guarantee us. Useful for
@@ -170,20 +170,21 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
170 __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask)); 170 __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
171} 171}
172 172
173#define atomic_add_unless(v, a, u) \ 173static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
174({ \ 174{
175 int c, old; \ 175 int c, old;
176 c = atomic_read(v); \ 176 c = atomic_read(v);
177 for (;;) { \ 177 for (;;) {
178 if (unlikely(c == (u))) \ 178 if (unlikely(c == (u)))
179 break; \ 179 break;
180 old = atomic_cmpxchg((v), c, c + (a)); \ 180 old = atomic_cmpxchg((v), c, c + (a));
181 if (likely(old == c)) \ 181 if (likely(old == c))
182 break; \ 182 break;
183 c = old; \ 183 c = old;
184 } \ 184 }
185 c != (u); \ 185 return c != (u);
186}) 186}
187
187#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 188#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
188 189
189/* Atomic operations are already serializing */ 190/* Atomic operations are already serializing */
diff --git a/include/asm-m68k/kdebug.h b/include/asm-m68k/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-m68k/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h
index f3aa05377987..555b87a1f7e3 100644
--- a/include/asm-m68k/pgtable.h
+++ b/include/asm-m68k/pgtable.h
@@ -143,10 +143,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
143#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 143#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
144 remap_pfn_range(vma, vaddr, pfn, size, prot) 144 remap_pfn_range(vma, vaddr, pfn, size, prot)
145 145
146#define MK_IOSPACE_PFN(space, pfn) (pfn)
147#define GET_IOSPACE(pfn) 0
148#define GET_PFN(pfn) (pfn)
149
150/* MMU-specific headers */ 146/* MMU-specific headers */
151 147
152#ifdef CONFIG_SUN3 148#ifdef CONFIG_SUN3
diff --git a/include/asm-m68k/system.h b/include/asm-m68k/system.h
index 243dd13e6bfc..198878b53a61 100644
--- a/include/asm-m68k/system.h
+++ b/include/asm-m68k/system.h
@@ -88,7 +88,6 @@ static inline int irqs_disabled(void)
88 88
89 89
90#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 90#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
91#define tas(ptr) (xchg((ptr),1))
92 91
93struct __xchg_dummy { unsigned long a[100]; }; 92struct __xchg_dummy { unsigned long a[100]; };
94#define __xg(x) ((volatile struct __xchg_dummy *)(x)) 93#define __xg(x) ((volatile struct __xchg_dummy *)(x))
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 6c4e4b63e454..d5632a305dae 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -1,7 +1,7 @@
1#ifndef __ARCH_M68KNOMMU_ATOMIC__ 1#ifndef __ARCH_M68KNOMMU_ATOMIC__
2#define __ARCH_M68KNOMMU_ATOMIC__ 2#define __ARCH_M68KNOMMU_ATOMIC__
3 3
4#include <asm/system.h> /* local_irq_XXX() */ 4#include <asm/system.h>
5 5
6/* 6/*
7 * Atomic operations that C can't guarantee us. Useful for 7 * Atomic operations that C can't guarantee us. Useful for
@@ -131,14 +131,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
131#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) 131#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
132#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 132#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
133 133
134#define atomic_add_unless(v, a, u) \ 134static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
135({ \ 135{
136 int c, old; \ 136 int c, old;
137 c = atomic_read(v); \ 137 c = atomic_read(v);
138 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 138 for (;;) {
139 c = old; \ 139 if (unlikely(c == (u)))
140 c != (u); \ 140 break;
141}) 141 old = atomic_cmpxchg((v), c, c + (a));
142 if (likely(old == c))
143 break;
144 c = old;
145 }
146 return c != (u);
147}
148
142#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 149#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
143 150
144#define atomic_dec_return(v) atomic_sub_return(1,(v)) 151#define atomic_dec_return(v) atomic_sub_return(1,(v))
diff --git a/include/asm-m68knommu/kdebug.h b/include/asm-m68knommu/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-m68knommu/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-m68knommu/pgtable.h b/include/asm-m68knommu/pgtable.h
index 549ad231efad..9dfbbc24aa71 100644
--- a/include/asm-m68knommu/pgtable.h
+++ b/include/asm-m68knommu/pgtable.h
@@ -59,10 +59,6 @@ extern int is_in_rom(unsigned long);
59#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 59#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
60 remap_pfn_range(vma, vaddr, pfn, size, prot) 60 remap_pfn_range(vma, vaddr, pfn, size, prot)
61 61
62#define MK_IOSPACE_PFN(space, pfn) (pfn)
63#define GET_IOSPACE(pfn) 0
64#define GET_PFN(pfn) (pfn)
65
66/* 62/*
67 * All 32bit addresses are effectively valid for vmalloc... 63 * All 32bit addresses are effectively valid for vmalloc...
68 * Sort of meaningless for non-VM targets. 64 * Sort of meaningless for non-VM targets.
diff --git a/include/asm-m68knommu/system.h b/include/asm-m68knommu/system.h
index 2a814498672d..5e5ed18bb78f 100644
--- a/include/asm-m68knommu/system.h
+++ b/include/asm-m68knommu/system.h
@@ -120,7 +120,6 @@ asmlinkage void resume(void);
120#endif 120#endif
121 121
122#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 122#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
123#define tas(ptr) (xchg((ptr),1))
124 123
125struct __xchg_dummy { unsigned long a[100]; }; 124struct __xchg_dummy { unsigned long a[100]; };
126#define __xg(x) ((volatile struct __xchg_dummy *)(x)) 125#define __xg(x) ((volatile struct __xchg_dummy *)(x))
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 1ac50b6c47ad..62daa746a9c9 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -18,6 +18,7 @@
18#include <asm/barrier.h> 18#include <asm/barrier.h>
19#include <asm/cpu-features.h> 19#include <asm/cpu-features.h>
20#include <asm/war.h> 20#include <asm/war.h>
21#include <asm/system.h>
21 22
22typedef struct { volatile int counter; } atomic_t; 23typedef struct { volatile int counter; } atomic_t;
23 24
@@ -306,8 +307,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
306 return result; 307 return result;
307} 308}
308 309
309#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) 310#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
310#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 311#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
311 312
312/** 313/**
313 * atomic_add_unless - add unless the number is a given value 314 * atomic_add_unless - add unless the number is a given value
@@ -318,14 +319,20 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
318 * Atomically adds @a to @v, so long as it was not @u. 319 * Atomically adds @a to @v, so long as it was not @u.
319 * Returns non-zero if @v was not @u, and zero otherwise. 320 * Returns non-zero if @v was not @u, and zero otherwise.
320 */ 321 */
321#define atomic_add_unless(v, a, u) \ 322static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
322({ \ 323{
323 int c, old; \ 324 int c, old;
324 c = atomic_read(v); \ 325 c = atomic_read(v);
325 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 326 for (;;) {
326 c = old; \ 327 if (unlikely(c == (u)))
327 c != (u); \ 328 break;
328}) 329 old = atomic_cmpxchg((v), c, c + (a));
330 if (likely(old == c))
331 break;
332 c = old;
333 }
334 return c != (u);
335}
329#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 336#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
330 337
331#define atomic_dec_return(v) atomic_sub_return(1,(v)) 338#define atomic_dec_return(v) atomic_sub_return(1,(v))
@@ -681,6 +688,36 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
681 return result; 688 return result;
682} 689}
683 690
691#define atomic64_cmpxchg(v, o, n) \
692 (((__typeof__((v)->counter)))cmpxchg(&((v)->counter), (o), (n)))
693#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
694
695/**
696 * atomic64_add_unless - add unless the number is a given value
697 * @v: pointer of type atomic64_t
698 * @a: the amount to add to v...
699 * @u: ...unless v is equal to u.
700 *
701 * Atomically adds @a to @v, so long as it was not @u.
702 * Returns non-zero if @v was not @u, and zero otherwise.
703 */
704static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
705{
706 long c, old;
707 c = atomic64_read(v);
708 for (;;) {
709 if (unlikely(c == (u)))
710 break;
711 old = atomic64_cmpxchg((v), c, c + (a));
712 if (likely(old == c))
713 break;
714 c = old;
715 }
716 return c != (u);
717}
718
719#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
720
684#define atomic64_dec_return(v) atomic64_sub_return(1,(v)) 721#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
685#define atomic64_inc_return(v) atomic64_add_return(1,(v)) 722#define atomic64_inc_return(v) atomic64_add_return(1,(v))
686 723
diff --git a/include/asm-mips/kdebug.h b/include/asm-mips/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-mips/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-mips/kexec.h b/include/asm-mips/kexec.h
index b25267ebcb09..cdbab43b7d3a 100644
--- a/include/asm-mips/kexec.h
+++ b/include/asm-mips/kexec.h
@@ -21,8 +21,6 @@
21/* The native architecture */ 21/* The native architecture */
22#define KEXEC_ARCH KEXEC_ARCH_MIPS 22#define KEXEC_ARCH KEXEC_ARCH_MIPS
23 23
24#define MAX_NOTE_BYTES 1024
25
26static inline void crash_setup_regs(struct pt_regs *newregs, 24static inline void crash_setup_regs(struct pt_regs *newregs,
27 struct pt_regs *oldregs) 25 struct pt_regs *oldregs)
28{ 26{
diff --git a/include/asm-mips/local.h b/include/asm-mips/local.h
index 9e2d43bae388..ed882c88e0ca 100644
--- a/include/asm-mips/local.h
+++ b/include/asm-mips/local.h
@@ -1,60 +1,288 @@
1#ifndef _ASM_LOCAL_H 1#ifndef _ARCH_MIPS_LOCAL_H
2#define _ASM_LOCAL_H 2#define _ARCH_MIPS_LOCAL_H
3 3
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5#include <linux/bitops.h>
5#include <asm/atomic.h> 6#include <asm/atomic.h>
7#include <asm/war.h>
6 8
7#ifdef CONFIG_32BIT 9typedef struct
10{
11 atomic_long_t a;
12} local_t;
8 13
9typedef atomic_t local_t; 14#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
10 15
11#define LOCAL_INIT(i) ATOMIC_INIT(i) 16#define local_read(l) atomic_long_read(&(l)->a)
12#define local_read(v) atomic_read(v) 17#define local_set(l,i) atomic_long_set(&(l)->a, (i))
13#define local_set(v,i) atomic_set(v,i)
14 18
15#define local_inc(v) atomic_inc(v) 19#define local_add(i,l) atomic_long_add((i),(&(l)->a))
16#define local_dec(v) atomic_dec(v) 20#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
17#define local_add(i, v) atomic_add(i, v) 21#define local_inc(l) atomic_long_inc(&(l)->a)
18#define local_sub(i, v) atomic_sub(i, v) 22#define local_dec(l) atomic_long_dec(&(l)->a)
19 23
20#endif 24/*
25 * Same as above, but return the result value
26 */
27static __inline__ long local_add_return(long i, local_t * l)
28{
29 unsigned long result;
30
31 if (cpu_has_llsc && R10000_LLSC_WAR) {
32 unsigned long temp;
33
34 __asm__ __volatile__(
35 " .set mips3 \n"
36 "1:" __LL "%1, %2 # local_add_return \n"
37 " addu %0, %1, %3 \n"
38 __SC "%0, %2 \n"
39 " beqzl %0, 1b \n"
40 " addu %0, %1, %3 \n"
41 " .set mips0 \n"
42 : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
43 : "Ir" (i), "m" (l->a.counter)
44 : "memory");
45 } else if (cpu_has_llsc) {
46 unsigned long temp;
47
48 __asm__ __volatile__(
49 " .set mips3 \n"
50 "1:" __LL "%1, %2 # local_add_return \n"
51 " addu %0, %1, %3 \n"
52 __SC "%0, %2 \n"
53 " beqz %0, 1b \n"
54 " addu %0, %1, %3 \n"
55 " .set mips0 \n"
56 : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
57 : "Ir" (i), "m" (l->a.counter)
58 : "memory");
59 } else {
60 unsigned long flags;
21 61
22#ifdef CONFIG_64BIT 62 local_irq_save(flags);
63 result = l->a.counter;
64 result += i;
65 l->a.counter = result;
66 local_irq_restore(flags);
67 }
23 68
24typedef atomic64_t local_t; 69 return result;
70}
25 71
26#define LOCAL_INIT(i) ATOMIC64_INIT(i) 72static __inline__ long local_sub_return(long i, local_t * l)
27#define local_read(v) atomic64_read(v) 73{
28#define local_set(v,i) atomic64_set(v,i) 74 unsigned long result;
29 75
30#define local_inc(v) atomic64_inc(v) 76 if (cpu_has_llsc && R10000_LLSC_WAR) {
31#define local_dec(v) atomic64_dec(v) 77 unsigned long temp;
32#define local_add(i, v) atomic64_add(i, v)
33#define local_sub(i, v) atomic64_sub(i, v)
34 78
35#endif 79 __asm__ __volatile__(
80 " .set mips3 \n"
81 "1:" __LL "%1, %2 # local_sub_return \n"
82 " subu %0, %1, %3 \n"
83 __SC "%0, %2 \n"
84 " beqzl %0, 1b \n"
85 " subu %0, %1, %3 \n"
86 " .set mips0 \n"
87 : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
88 : "Ir" (i), "m" (l->a.counter)
89 : "memory");
90 } else if (cpu_has_llsc) {
91 unsigned long temp;
36 92
37#define __local_inc(v) ((v)->counter++) 93 __asm__ __volatile__(
38#define __local_dec(v) ((v)->counter--) 94 " .set mips3 \n"
39#define __local_add(i,v) ((v)->counter+=(i)) 95 "1:" __LL "%1, %2 # local_sub_return \n"
40#define __local_sub(i,v) ((v)->counter-=(i)) 96 " subu %0, %1, %3 \n"
97 __SC "%0, %2 \n"
98 " beqz %0, 1b \n"
99 " subu %0, %1, %3 \n"
100 " .set mips0 \n"
101 : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
102 : "Ir" (i), "m" (l->a.counter)
103 : "memory");
104 } else {
105 unsigned long flags;
106
107 local_irq_save(flags);
108 result = l->a.counter;
109 result -= i;
110 l->a.counter = result;
111 local_irq_restore(flags);
112 }
113
114 return result;
115}
41 116
42/* 117/*
43 * Use these for per-cpu local_t variables: on some archs they are 118 * local_sub_if_positive - conditionally subtract integer from atomic variable
119 * @i: integer value to subtract
120 * @l: pointer of type local_t
121 *
122 * Atomically test @l and subtract @i if @l is greater than or equal to @i.
123 * The function returns the old value of @l minus @i.
124 */
125static __inline__ long local_sub_if_positive(long i, local_t * l)
126{
127 unsigned long result;
128
129 if (cpu_has_llsc && R10000_LLSC_WAR) {
130 unsigned long temp;
131
132 __asm__ __volatile__(
133 " .set mips3 \n"
134 "1:" __LL "%1, %2 # local_sub_if_positive\n"
135 " dsubu %0, %1, %3 \n"
136 " bltz %0, 1f \n"
137 __SC "%0, %2 \n"
138 " .set noreorder \n"
139 " beqzl %0, 1b \n"
140 " dsubu %0, %1, %3 \n"
141 " .set reorder \n"
142 "1: \n"
143 " .set mips0 \n"
144 : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
145 : "Ir" (i), "m" (l->a.counter)
146 : "memory");
147 } else if (cpu_has_llsc) {
148 unsigned long temp;
149
150 __asm__ __volatile__(
151 " .set mips3 \n"
152 "1:" __LL "%1, %2 # local_sub_if_positive\n"
153 " dsubu %0, %1, %3 \n"
154 " bltz %0, 1f \n"
155 __SC "%0, %2 \n"
156 " .set noreorder \n"
157 " beqz %0, 1b \n"
158 " dsubu %0, %1, %3 \n"
159 " .set reorder \n"
160 "1: \n"
161 " .set mips0 \n"
162 : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
163 : "Ir" (i), "m" (l->a.counter)
164 : "memory");
165 } else {
166 unsigned long flags;
167
168 local_irq_save(flags);
169 result = l->a.counter;
170 result -= i;
171 if (result >= 0)
172 l->a.counter = result;
173 local_irq_restore(flags);
174 }
175
176 return result;
177}
178
179#define local_cmpxchg(l, o, n) \
180 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
181#define local_xchg(l, n) (xchg_local(&((l)->a.counter),(n)))
182
183/**
184 * local_add_unless - add unless the number is a given value
185 * @l: pointer of type local_t
186 * @a: the amount to add to l...
187 * @u: ...unless l is equal to u.
188 *
189 * Atomically adds @a to @l, so long as it was not @u.
190 * Returns non-zero if @l was not @u, and zero otherwise.
191 */
192#define local_add_unless(l, a, u) \
193({ \
194 long c, old; \
195 c = local_read(l); \
196 while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \
197 c = old; \
198 c != (u); \
199})
200#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
201
202#define local_dec_return(l) local_sub_return(1,(l))
203#define local_inc_return(l) local_add_return(1,(l))
204
205/*
206 * local_sub_and_test - subtract value from variable and test result
207 * @i: integer value to subtract
208 * @l: pointer of type local_t
209 *
210 * Atomically subtracts @i from @l and returns
211 * true if the result is zero, or false for all
212 * other cases.
213 */
214#define local_sub_and_test(i,l) (local_sub_return((i), (l)) == 0)
215
216/*
217 * local_inc_and_test - increment and test
218 * @l: pointer of type local_t
219 *
220 * Atomically increments @l by 1
221 * and returns true if the result is zero, or false for all
222 * other cases.
223 */
224#define local_inc_and_test(l) (local_inc_return(l) == 0)
225
226/*
227 * local_dec_and_test - decrement by 1 and test
228 * @l: pointer of type local_t
229 *
230 * Atomically decrements @l by 1 and
231 * returns true if the result is 0, or false for all other
232 * cases.
233 */
234#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
235
236/*
237 * local_dec_if_positive - decrement by 1 if old value positive
238 * @l: pointer of type local_t
239 */
240#define local_dec_if_positive(l) local_sub_if_positive(1, l)
241
242/*
243 * local_add_negative - add and test if negative
244 * @l: pointer of type local_t
245 * @i: integer value to add
246 *
247 * Atomically adds @i to @l and returns true
248 * if the result is negative, or false when
249 * result is greater than or equal to zero.
250 */
251#define local_add_negative(i,l) (local_add_return(i, (l)) < 0)
252
253/* Use these for per-cpu local_t variables: on some archs they are
44 * much more efficient than these naive implementations. Note they take 254 * much more efficient than these naive implementations. Note they take
45 * a variable, not an address. 255 * a variable, not an address.
46 */ 256 */
47#define cpu_local_read(v) local_read(&__get_cpu_var(v))
48#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
49 257
50#define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) 258#define __local_inc(l) ((l)->a.counter++)
51#define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) 259#define __local_dec(l) ((l)->a.counter--)
52#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) 260#define __local_add(i,l) ((l)->a.counter+=(i))
53#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) 261#define __local_sub(i,l) ((l)->a.counter-=(i))
262
263/* Need to disable preemption for the cpu local counters, otherwise we could
264   still access a variable of a previous CPU in a non-atomic way. */
265#define cpu_local_wrap_v(l) \
266 ({ local_t res__; \
267 preempt_disable(); \
268 res__ = (l); \
269 preempt_enable(); \
270 res__; })
271#define cpu_local_wrap(l) \
272 ({ preempt_disable(); \
273 l; \
274 preempt_enable(); }) \
275
276#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
277#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
278#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
279#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
280#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
281#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
54 282
55#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v)) 283#define __cpu_local_inc(l) cpu_local_inc(l)
56#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v)) 284#define __cpu_local_dec(l) cpu_local_dec(l)
57#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v)) 285#define __cpu_local_add(i, l) cpu_local_add((i), (l))
58#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v)) 286#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
59 287
60#endif /* _ASM_LOCAL_H */ 288#endif /* _ARCH_MIPS_LOCAL_H */
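
The new MIPS local_t wraps an atomic_long_t, and the cpu_local_* accessors bracket every access with preempt_disable()/preempt_enable(). A minimal usage sketch, assuming only the interfaces visible in the hunk above; the counter name and call sites are invented for illustration:

#include <linux/percpu.h>
#include <asm/local.h>

/* hypothetical per-CPU event counter */
static DEFINE_PER_CPU(local_t, sample_events) = LOCAL_INIT(0);

static void note_sample_event(void)
{
	/* preemption handling is done inside cpu_local_inc() */
	cpu_local_inc(sample_events);
}

static long read_sample_events_on(int cpu)
{
	/* reading another CPU's counter is an ordinary atomic read */
	return local_read(&per_cpu(sample_events, cpu));
}
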
diff --git a/include/asm-mips/mach-au1x00/au1550_spi.h b/include/asm-mips/mach-au1x00/au1550_spi.h
new file mode 100644
index 000000000000..c2f0466523ec
--- /dev/null
+++ b/include/asm-mips/mach-au1x00/au1550_spi.h
@@ -0,0 +1,16 @@
1/*
2 * au1550_spi.h - au1550 psc spi controller driver - platform data struct
3 */
4
5#ifndef _AU1550_SPI_H_
6#define _AU1550_SPI_H_
7
8struct au1550_spi_info {
9 s16 bus_num; /* defines which PSC and IRQ to use */
10 u32 mainclk_hz; /* main input clock frequency of PSC */
11 u16 num_chipselect; /* number of chipselects supported */
12 void (*activate_cs)(struct au1550_spi_info *spi, int cs, int polarity);
13 void (*deactivate_cs)(struct au1550_spi_info *spi, int cs, int polarity);
14};
15
16#endif
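
struct au1550_spi_info is plain platform data that a board file fills in and attaches to the PSC platform device. A hedged sketch of such a board entry; the board name, clock rate and chip-select hooks are assumptions, not part of this header:

#include <linux/types.h>
#include <asm/mach-au1x00/au1550_spi.h>

/* hypothetical chip-select hooks for a board with GPIO-driven CS lines */
static void myboard_activate_cs(struct au1550_spi_info *spi, int cs, int pol)
{
	/* drive the GPIO wired to chip-select 'cs' to the requested polarity */
}

static void myboard_deactivate_cs(struct au1550_spi_info *spi, int cs, int pol)
{
	/* return the chip-select line to its idle level */
}

static struct au1550_spi_info myboard_spi_pdata = {
	.bus_num	= 0,		/* use PSC0 */
	.mainclk_hz	= 48000000,	/* board-specific PSC input clock */
	.num_chipselect	= 2,
	.activate_cs	= myboard_activate_cs,
	.deactivate_cs	= myboard_deactivate_cs,
};
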
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 0d3295f57a95..27d77d981937 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -387,10 +387,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
387 remap_pfn_range(vma, vaddr, pfn, size, prot) 387 remap_pfn_range(vma, vaddr, pfn, size, prot)
388#endif 388#endif
389 389
390#define MK_IOSPACE_PFN(space, pfn) (pfn)
391#define GET_IOSPACE(pfn) 0
392#define GET_PFN(pfn) (pfn)
393
394#include <asm-generic/pgtable.h> 390#include <asm-generic/pgtable.h>
395 391
396/* 392/*
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 290887077e44..30f23a2b46ca 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -201,7 +201,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
201} 201}
202 202
203#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 203#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
204#define tas(ptr) (xchg((ptr),1))
205 204
206#define __HAVE_ARCH_CMPXCHG 1 205#define __HAVE_ARCH_CMPXCHG 1
207 206
@@ -262,6 +261,58 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
262 return retval; 261 return retval;
263} 262}
264 263
264static inline unsigned long __cmpxchg_u32_local(volatile int * m,
265 unsigned long old, unsigned long new)
266{
267 __u32 retval;
268
269 if (cpu_has_llsc && R10000_LLSC_WAR) {
270 __asm__ __volatile__(
271 " .set push \n"
272 " .set noat \n"
273 " .set mips3 \n"
274 "1: ll %0, %2 # __cmpxchg_u32_local \n"
275 " bne %0, %z3, 2f \n"
276 " .set mips0 \n"
277 " move $1, %z4 \n"
278 " .set mips3 \n"
279 " sc $1, %1 \n"
280 " beqzl $1, 1b \n"
281 "2: \n"
282 " .set pop \n"
283 : "=&r" (retval), "=R" (*m)
284 : "R" (*m), "Jr" (old), "Jr" (new)
285 : "memory");
286 } else if (cpu_has_llsc) {
287 __asm__ __volatile__(
288 " .set push \n"
289 " .set noat \n"
290 " .set mips3 \n"
291 "1: ll %0, %2 # __cmpxchg_u32_local \n"
292 " bne %0, %z3, 2f \n"
293 " .set mips0 \n"
294 " move $1, %z4 \n"
295 " .set mips3 \n"
296 " sc $1, %1 \n"
297 " beqz $1, 1b \n"
298 "2: \n"
299 " .set pop \n"
300 : "=&r" (retval), "=R" (*m)
301 : "R" (*m), "Jr" (old), "Jr" (new)
302 : "memory");
303 } else {
304 unsigned long flags;
305
306 local_irq_save(flags);
307 retval = *m;
308 if (retval == old)
309 *m = new;
310 local_irq_restore(flags); /* implies memory barrier */
311 }
312
313 return retval;
314}
315
265#ifdef CONFIG_64BIT 316#ifdef CONFIG_64BIT
266static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, 317static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
267 unsigned long new) 318 unsigned long new)
@@ -315,10 +366,62 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
315 366
316 return retval; 367 return retval;
317} 368}
369
370static inline unsigned long __cmpxchg_u64_local(volatile int * m,
371 unsigned long old, unsigned long new)
372{
373 __u64 retval;
374
375 if (cpu_has_llsc && R10000_LLSC_WAR) {
376 __asm__ __volatile__(
377 " .set push \n"
378 " .set noat \n"
379 " .set mips3 \n"
380 "1: lld %0, %2 # __cmpxchg_u64_local \n"
381 " bne %0, %z3, 2f \n"
382 " move $1, %z4 \n"
383 " scd $1, %1 \n"
384 " beqzl $1, 1b \n"
385 "2: \n"
386 " .set pop \n"
387 : "=&r" (retval), "=R" (*m)
388 : "R" (*m), "Jr" (old), "Jr" (new)
389 : "memory");
390 } else if (cpu_has_llsc) {
391 __asm__ __volatile__(
392 " .set push \n"
393 " .set noat \n"
394 " .set mips3 \n"
395 "1: lld %0, %2 # __cmpxchg_u64_local \n"
396 " bne %0, %z3, 2f \n"
397 " move $1, %z4 \n"
398 " scd $1, %1 \n"
399 " beqz $1, 1b \n"
400 "2: \n"
401 " .set pop \n"
402 : "=&r" (retval), "=R" (*m)
403 : "R" (*m), "Jr" (old), "Jr" (new)
404 : "memory");
405 } else {
406 unsigned long flags;
407
408 local_irq_save(flags);
409 retval = *m;
410 if (retval == old)
411 *m = new;
412 local_irq_restore(flags); /* implies memory barrier */
413 }
414
415 return retval;
416}
417
318#else 418#else
319extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels( 419extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
320 volatile int * m, unsigned long old, unsigned long new); 420 volatile int * m, unsigned long old, unsigned long new);
321#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels 421#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
422extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels(
423 volatile int * m, unsigned long old, unsigned long new);
424#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels
322#endif 425#endif
323 426
324/* This function doesn't exist, so you'll get a linker error 427/* This function doesn't exist, so you'll get a linker error
@@ -338,7 +441,26 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
338 return old; 441 return old;
339} 442}
340 443
341#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr)))) 444static inline unsigned long __cmpxchg_local(volatile void * ptr,
445 unsigned long old, unsigned long new, int size)
446{
447 switch (size) {
448 case 4:
449 return __cmpxchg_u32_local(ptr, old, new);
450 case 8:
451 return __cmpxchg_u64_local(ptr, old, new);
452 }
453 __cmpxchg_called_with_bad_pointer();
454 return old;
455}
456
457#define cmpxchg(ptr,old,new) \
458 ((__typeof__(*(ptr)))__cmpxchg((ptr), \
459 (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
460
461#define cmpxchg_local(ptr,old,new) \
462 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
463 (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
342 464
343extern void set_handler (unsigned long offset, void *addr, unsigned long len); 465extern void set_handler (unsigned long offset, void *addr, unsigned long len);
344extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len); 466extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
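
cmpxchg_local() keeps the calling convention of cmpxchg() but only has to be atomic with respect to the local CPU, which is what local_cmpxchg() in the local.h hunk above is built on. A sketch of the usual retry loop, assuming the caller already has preemption disabled; the per-CPU high-water mark is invented for illustration:

#include <linux/percpu.h>
#include <asm/system.h>

static DEFINE_PER_CPU(unsigned long, hiwater);

/* must be called with preemption disabled, e.g. from interrupt context */
static void update_local_hiwater(unsigned long new)
{
	unsigned long *p = &__get_cpu_var(hiwater);
	unsigned long old;

	do {
		old = *p;
		if (new <= old)
			return;		/* current mark is already higher */
	} while (cmpxchg_local(p, old, new) != old);
}
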
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 7d57d34fcca8..e894ee35074b 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -163,7 +163,7 @@ static __inline__ int atomic_read(const atomic_t *v)
163} 163}
164 164
165/* exported interface */ 165/* exported interface */
166#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) 166#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
167#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 167#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
168 168
169/** 169/**
@@ -175,14 +175,21 @@ static __inline__ int atomic_read(const atomic_t *v)
175 * Atomically adds @a to @v, so long as it was not @u. 175 * Atomically adds @a to @v, so long as it was not @u.
176 * Returns non-zero if @v was not @u, and zero otherwise. 176 * Returns non-zero if @v was not @u, and zero otherwise.
177 */ 177 */
178#define atomic_add_unless(v, a, u) \ 178static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
179({ \ 179{
180 int c, old; \ 180 int c, old;
181 c = atomic_read(v); \ 181 c = atomic_read(v);
182 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 182 for (;;) {
183 c = old; \ 183 if (unlikely(c == (u)))
184 c != (u); \ 184 break;
185}) 185 old = atomic_cmpxchg((v), c, c + (a));
186 if (likely(old == c))
187 break;
188 c = old;
189 }
190 return c != (u);
191}
192
186#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 193#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
187 194
188#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v)))) 195#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v))))
@@ -270,6 +277,37 @@ atomic64_read(const atomic64_t *v)
270#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0) 277#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
271#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0) 278#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0)
272 279
280/* exported interface */
281#define atomic64_cmpxchg(v, o, n) \
282 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
283#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
284
285/**
286 * atomic64_add_unless - add unless the number is a given value
287 * @v: pointer of type atomic64_t
288 * @a: the amount to add to v...
289 * @u: ...unless v is equal to u.
290 *
291 * Atomically adds @a to @v, so long as it was not @u.
292 * Returns non-zero if @v was not @u, and zero otherwise.
293 */
294static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
295{
296 long c, old;
297 c = atomic64_read(v);
298 for (;;) {
299 if (unlikely(c == (u)))
300 break;
301 old = atomic64_cmpxchg((v), c, c + (a));
302 if (likely(old == c))
303 break;
304 c = old;
305 }
306 return c != (u);
307}
308
309#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
310
273#endif /* CONFIG_64BIT */ 311#endif /* CONFIG_64BIT */
274 312
275#include <asm-generic/atomic.h> 313#include <asm-generic/atomic.h>
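
The macro version is turned into a real inline so the retry loop can carry likely()/unlikely() hints. A short usage sketch of atomic_inc_not_zero(), the idiom atomic_add_unless() mainly exists for; the lookup object is hypothetical:

#include <asm/atomic.h>

struct cached_object {
	atomic_t refcount;
	/* ... payload ... */
};

/* take a reference only if the object has not already dropped to zero */
static int cached_object_get(struct cached_object *obj)
{
	return atomic_inc_not_zero(&obj->refcount);
}
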
diff --git a/include/asm-parisc/kdebug.h b/include/asm-parisc/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-parisc/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-parisc/local.h b/include/asm-parisc/local.h
index d0f550912755..c11c530f74d0 100644
--- a/include/asm-parisc/local.h
+++ b/include/asm-parisc/local.h
@@ -1,40 +1 @@
1#ifndef _ARCH_PARISC_LOCAL_H #include <asm-generic/local.h>
2#define _ARCH_PARISC_LOCAL_H
3
4#include <linux/percpu.h>
5#include <asm/atomic.h>
6
7typedef atomic_long_t local_t;
8
9#define LOCAL_INIT(i) ATOMIC_LONG_INIT(i)
10#define local_read(v) atomic_long_read(v)
11#define local_set(v,i) atomic_long_set(v,i)
12
13#define local_inc(v) atomic_long_inc(v)
14#define local_dec(v) atomic_long_dec(v)
15#define local_add(i, v) atomic_long_add(i, v)
16#define local_sub(i, v) atomic_long_sub(i, v)
17
18#define __local_inc(v) ((v)->counter++)
19#define __local_dec(v) ((v)->counter--)
20#define __local_add(i,v) ((v)->counter+=(i))
21#define __local_sub(i,v) ((v)->counter-=(i))
22
23/* Use these for per-cpu local_t variables: on some archs they are
24 * much more efficient than these naive implementations. Note they take
25 * a variable, not an address.
26 */
27#define cpu_local_read(v) local_read(&__get_cpu_var(v))
28#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
29
30#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
31#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
32#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
33#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
34
35#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
36#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
37#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
38#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
39
40#endif /* _ARCH_PARISC_LOCAL_H */
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index d7e1b10da5c6..beb2adb979d9 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -528,10 +528,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
528 528
529#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE) 529#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
530 530
531#define MK_IOSPACE_PFN(space, pfn) (pfn)
532#define GET_IOSPACE(pfn) 0
533#define GET_PFN(pfn) (pfn)
534
535/* We provide our own get_unmapped_area to provide cache coherency */ 531/* We provide our own get_unmapped_area to provide cache coherency */
536 532
537#define HAVE_ARCH_UNMAPPED_AREA 533#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 2ce4b6b7b348..c44810b9d322 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -11,6 +11,7 @@ typedef struct { volatile int counter; } atomic_t;
11#include <linux/compiler.h> 11#include <linux/compiler.h>
12#include <asm/synch.h> 12#include <asm/synch.h>
13#include <asm/asm-compat.h> 13#include <asm/asm-compat.h>
14#include <asm/system.h>
14 15
15#define ATOMIC_INIT(i) { (i) } 16#define ATOMIC_INIT(i) { (i) }
16 17
@@ -165,8 +166,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
165 return t; 166 return t;
166} 167}
167 168
168#define atomic_cmpxchg(v, o, n) \ 169#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
169 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
170#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 170#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
171 171
172/** 172/**
@@ -414,8 +414,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
414 return t; 414 return t;
415} 415}
416 416
417#define atomic64_cmpxchg(v, o, n) \ 417#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
418 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
419#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 418#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
420 419
421/** 420/**
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index 8f757f6246e4..8144a2788db6 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -39,7 +39,6 @@
39#ifdef __KERNEL__ 39#ifdef __KERNEL__
40 40
41#include <linux/compiler.h> 41#include <linux/compiler.h>
42#include <asm/atomic.h>
43#include <asm/asm-compat.h> 42#include <asm/asm-compat.h>
44#include <asm/synch.h> 43#include <asm/synch.h>
45 44
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h
index b2e56b30306a..870967e47204 100644
--- a/include/asm-powerpc/iommu.h
+++ b/include/asm-powerpc/iommu.h
@@ -26,6 +26,7 @@
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/device.h> 27#include <linux/device.h>
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <asm/machdep.h>
29#include <asm/types.h> 30#include <asm/types.h>
30#include <asm/bitops.h> 31#include <asm/bitops.h>
31 32
@@ -109,6 +110,19 @@ static inline void pci_iommu_init(void) { }
109#endif 110#endif
110 111
111extern void alloc_dart_table(void); 112extern void alloc_dart_table(void);
113#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
114static inline void iommu_save(void)
115{
116 if (ppc_md.iommu_save)
117 ppc_md.iommu_save();
118}
119
120static inline void iommu_restore(void)
121{
122 if (ppc_md.iommu_restore)
123 ppc_md.iommu_restore();
124}
125#endif
112 126
113#endif /* __KERNEL__ */ 127#endif /* __KERNEL__ */
114#endif /* _ASM_IOMMU_H */ 128#endif /* _ASM_IOMMU_H */
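
iommu_save()/iommu_restore() only check and forward to the new optional ppc_md.iommu_save/iommu_restore hooks (added to machdep_calls later in this diff). A hedged sketch of a caller in a platform suspend path, assuming CONFIG_PPC64 and CONFIG_PM; the surrounding function names are invented:

#include <asm/iommu.h>

/* hypothetical platform suspend/resume helpers */
static int myplat_suspend_devices(void)
{
	iommu_save();		/* no-op unless ppc_md.iommu_save is set */
	return 0;
}

static void myplat_resume_devices(void)
{
	iommu_restore();
}
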
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h
index 532bfee934f4..295f0162c608 100644
--- a/include/asm-powerpc/kdebug.h
+++ b/include/asm-powerpc/kdebug.h
@@ -6,20 +6,19 @@
6 6
7#include <linux/notifier.h> 7#include <linux/notifier.h>
8 8
9struct pt_regs; 9/*
10 10 * These are only here because kprobes.c wants them to implement a
11struct die_args { 11 * blatant layering violation. Will hopefully go away soon once all
12 struct pt_regs *regs; 12 * architectures are updated.
13 const char *str; 13 */
14 long err; 14static inline int register_page_fault_notifier(struct notifier_block *nb)
15 int trapnr; 15{
16 int signr; 16 return 0;
17}; 17}
18 18static inline int unregister_page_fault_notifier(struct notifier_block *nb)
19extern int register_die_notifier(struct notifier_block *); 19{
20extern int unregister_die_notifier(struct notifier_block *); 20 return 0;
21extern int register_page_fault_notifier(struct notifier_block *); 21}
22extern int unregister_page_fault_notifier(struct notifier_block *);
23extern struct atomic_notifier_head powerpc_die_chain; 22extern struct atomic_notifier_head powerpc_die_chain;
24 23
25/* Grossly misnamed. */ 24/* Grossly misnamed. */
@@ -29,14 +28,7 @@ enum die_val {
29 DIE_DABR_MATCH, 28 DIE_DABR_MATCH,
30 DIE_BPT, 29 DIE_BPT,
31 DIE_SSTEP, 30 DIE_SSTEP,
32 DIE_PAGE_FAULT,
33}; 31};
34 32
35static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
36{
37 struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
38 return atomic_notifier_call_chain(&powerpc_die_chain, val, &args);
39}
40
41#endif /* __KERNEL__ */ 33#endif /* __KERNEL__ */
42#endif /* _ASM_POWERPC_KDEBUG_H */ 34#endif /* _ASM_POWERPC_KDEBUG_H */
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
index 11cbdf81fd2e..b6f817b8ba3d 100644
--- a/include/asm-powerpc/kexec.h
+++ b/include/asm-powerpc/kexec.h
@@ -108,8 +108,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
108 struct pt_regs *oldregs) { } 108 struct pt_regs *oldregs) { }
109#endif /* !__powerpc64 __ */ 109#endif /* !__powerpc64 __ */
110 110
111#define MAX_NOTE_BYTES 1024
112
113extern void kexec_smp_wait(void); /* get and clear naca physid, wait for 111extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
114 master to copy new code to 0 */ 112 master to copy new code to 0 */
115extern int crashing_cpu; 113extern int crashing_cpu;
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index f850ca7020ed..b0e40ff32ee0 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -64,6 +64,12 @@ typedef unsigned int kprobe_opcode_t;
64 addr = *(kprobe_opcode_t **)addr; \ 64 addr = *(kprobe_opcode_t **)addr; \
65 } else if (name[0] != '.') \ 65 } else if (name[0] != '.') \
66 addr = *(kprobe_opcode_t **)addr; \ 66 addr = *(kprobe_opcode_t **)addr; \
67 } else { \
68 char dot_name[KSYM_NAME_LEN+1]; \
69 dot_name[0] = '.'; \
70 dot_name[1] = '\0'; \
71 strncat(dot_name, name, KSYM_NAME_LEN); \
72 addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
67 } \ 73 } \
68} 74}
69 75
@@ -110,5 +116,6 @@ struct kprobe_ctlblk {
110 116
111extern int kprobe_exceptions_notify(struct notifier_block *self, 117extern int kprobe_exceptions_notify(struct notifier_block *self,
112 unsigned long val, void *data); 118 unsigned long val, void *data);
119extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
113#endif /* __KERNEL__ */ 120#endif /* __KERNEL__ */
114#endif /* _ASM_POWERPC_KPROBES_H */ 121#endif /* _ASM_POWERPC_KPROBES_H */
diff --git a/include/asm-powerpc/local.h b/include/asm-powerpc/local.h
index c11c530f74d0..612d83276653 100644
--- a/include/asm-powerpc/local.h
+++ b/include/asm-powerpc/local.h
@@ -1 +1,200 @@
1#include <asm-generic/local.h> 1#ifndef _ARCH_POWERPC_LOCAL_H
2#define _ARCH_POWERPC_LOCAL_H
3
4#include <linux/percpu.h>
5#include <asm/atomic.h>
6
7typedef struct
8{
9 atomic_long_t a;
10} local_t;
11
12#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13
14#define local_read(l) atomic_long_read(&(l)->a)
15#define local_set(l,i) atomic_long_set(&(l)->a, (i))
16
17#define local_add(i,l) atomic_long_add((i),(&(l)->a))
18#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
19#define local_inc(l) atomic_long_inc(&(l)->a)
20#define local_dec(l) atomic_long_dec(&(l)->a)
21
22static __inline__ long local_add_return(long a, local_t *l)
23{
24 long t;
25
26 __asm__ __volatile__(
27"1:" PPC_LLARX "%0,0,%2 # local_add_return\n\
28 add %0,%1,%0\n"
29 PPC405_ERR77(0,%2)
30 PPC_STLCX "%0,0,%2 \n\
31 bne- 1b"
32 : "=&r" (t)
33 : "r" (a), "r" (&(l->a.counter))
34 : "cc", "memory");
35
36 return t;
37}
38
39#define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
40
41static __inline__ long local_sub_return(long a, local_t *l)
42{
43 long t;
44
45 __asm__ __volatile__(
46"1:" PPC_LLARX "%0,0,%2 # local_sub_return\n\
47 subf %0,%1,%0\n"
48 PPC405_ERR77(0,%2)
49 PPC_STLCX "%0,0,%2 \n\
50 bne- 1b"
51 : "=&r" (t)
52 : "r" (a), "r" (&(l->a.counter))
53 : "cc", "memory");
54
55 return t;
56}
57
58static __inline__ long local_inc_return(local_t *l)
59{
60 long t;
61
62 __asm__ __volatile__(
63"1:" PPC_LLARX "%0,0,%1 # local_inc_return\n\
64 addic %0,%0,1\n"
65 PPC405_ERR77(0,%1)
66 PPC_STLCX "%0,0,%1 \n\
67 bne- 1b"
68 : "=&r" (t)
69 : "r" (&(l->a.counter))
70 : "cc", "memory");
71
72 return t;
73}
74
75/*
76 * local_inc_and_test - increment and test
77 * @l: pointer of type local_t
78 *
79 * Atomically increments @l by 1
80 * and returns true if the result is zero, or false for all
81 * other cases.
82 */
83#define local_inc_and_test(l) (local_inc_return(l) == 0)
84
85static __inline__ long local_dec_return(local_t *l)
86{
87 long t;
88
89 __asm__ __volatile__(
90"1:" PPC_LLARX "%0,0,%1 # local_dec_return\n\
91 addic %0,%0,-1\n"
92 PPC405_ERR77(0,%1)
93 PPC_STLCX "%0,0,%1\n\
94 bne- 1b"
95 : "=&r" (t)
96 : "r" (&(l->a.counter))
97 : "cc", "memory");
98
99 return t;
100}
101
102#define local_cmpxchg(l, o, n) \
103 (cmpxchg_local(&((l)->a.counter), (o), (n)))
104#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
105
106/**
107 * local_add_unless - add unless the number is a given value
108 * @l: pointer of type local_t
109 * @a: the amount to add to l...
110 * @u: ...unless l is equal to u.
111 *
112 * Atomically adds @a to @l, so long as it was not @u.
113 * Returns non-zero if @l was not @u, and zero otherwise.
114 */
115static __inline__ int local_add_unless(local_t *l, long a, long u)
116{
117 long t;
118
119 __asm__ __volatile__ (
120"1:" PPC_LLARX "%0,0,%1 # local_add_unless\n\
121 cmpw 0,%0,%3 \n\
122 beq- 2f \n\
123 add %0,%2,%0 \n"
124 PPC405_ERR77(0,%2)
125 PPC_STLCX "%0,0,%1 \n\
126 bne- 1b \n"
127" subf %0,%2,%0 \n\
1282:"
129 : "=&r" (t)
130 : "r" (&(l->a.counter)), "r" (a), "r" (u)
131 : "cc", "memory");
132
133 return t != u;
134}
135
136#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
137
138#define local_sub_and_test(a, l) (local_sub_return((a), (l)) == 0)
139#define local_dec_and_test(l) (local_dec_return((l)) == 0)
140
141/*
142 * Atomically test *l and decrement if it is greater than 0.
143 * The function returns the old value of *l minus 1.
144 */
145static __inline__ long local_dec_if_positive(local_t *l)
146{
147 long t;
148
149 __asm__ __volatile__(
150"1:" PPC_LLARX "%0,0,%1 # local_dec_if_positive\n\
151 cmpwi %0,1\n\
152 addi %0,%0,-1\n\
153 blt- 2f\n"
154 PPC405_ERR77(0,%1)
155 PPC_STLCX "%0,0,%1\n\
156 bne- 1b"
157 "\n\
1582:" : "=&b" (t)
159 : "r" (&(l->a.counter))
160 : "cc", "memory");
161
162 return t;
163}
164
165/* Use these for per-cpu local_t variables: on some archs they are
166 * much more efficient than these naive implementations. Note they take
167 * a variable, not an address.
168 */
169
170#define __local_inc(l) ((l)->a.counter++)
171#define __local_dec(l) ((l)->a.counter--)
172#define __local_add(i,l) ((l)->a.counter+=(i))
173#define __local_sub(i,l) ((l)->a.counter-=(i))
174
175/* Need to disable preemption for the cpu local counters, otherwise we could
176   still access a variable of a previous CPU in a non-atomic way. */
177#define cpu_local_wrap_v(l) \
178 ({ local_t res__; \
179 preempt_disable(); \
180 res__ = (l); \
181 preempt_enable(); \
182 res__; })
183#define cpu_local_wrap(l) \
184 ({ preempt_disable(); \
185 l; \
186 preempt_enable(); }) \
187
188#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
189#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
190#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
191#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
192#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
193#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
194
195#define __cpu_local_inc(l) cpu_local_inc(l)
196#define __cpu_local_dec(l) cpu_local_dec(l)
197#define __cpu_local_add(i, l) cpu_local_add((i), (l))
198#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
199
200#endif /* _ARCH_POWERPC_LOCAL_H */
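
As on MIPS, the powerpc local_t wraps an atomic_long_t updated with larx/stcx. sequences, so updates are safe against interrupts on the owning CPU but must only be issued by that CPU. A minimal sketch that pins the counter with get_cpu_var()/put_cpu_var(); the sequence counter itself is illustrative:

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, seq_counter) = LOCAL_INIT(0);

/* hand out a per-CPU sequence number, safe against local interrupts */
static long next_seq(void)
{
	long seq;

	seq = local_inc_return(&get_cpu_var(seq_counter));
	put_cpu_var(seq_counter);
	return seq;
}
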
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index b204926ce913..6cf1a831f550 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -91,6 +91,11 @@ struct machdep_calls {
91 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, 91 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
92 unsigned long flags); 92 unsigned long flags);
93 void (*iounmap)(volatile void __iomem *token); 93 void (*iounmap)(volatile void __iomem *token);
94
95#ifdef CONFIG_PM
96 void (*iommu_save)(void);
97 void (*iommu_restore)(void);
98#endif
94#endif /* CONFIG_PPC64 */ 99#endif /* CONFIG_PPC64 */
95 100
96 int (*probe)(void); 101 int (*probe)(void);
@@ -115,6 +120,14 @@ struct machdep_calls {
115 /* To setup PHBs when using automatic OF platform driver for PCI */ 120 /* To setup PHBs when using automatic OF platform driver for PCI */
116 int (*pci_setup_phb)(struct pci_controller *host); 121 int (*pci_setup_phb)(struct pci_controller *host);
117 122
123#ifdef CONFIG_PCI_MSI
124 int (*msi_check_device)(struct pci_dev* dev,
125 int nvec, int type);
126 int (*setup_msi_irqs)(struct pci_dev *dev,
127 int nvec, int type);
128 void (*teardown_msi_irqs)(struct pci_dev *dev);
129#endif
130
118 void (*restart)(char *cmd); 131 void (*restart)(char *cmd);
119 void (*power_off)(void); 132 void (*power_off)(void);
120 void (*halt)(void); 133 void (*halt)(void);
@@ -240,14 +253,10 @@ struct machdep_calls {
240 */ 253 */
241 void (*machine_kexec)(struct kimage *image); 254 void (*machine_kexec)(struct kimage *image);
242#endif /* CONFIG_KEXEC */ 255#endif /* CONFIG_KEXEC */
243
244#ifdef CONFIG_PCI_MSI
245 int (*enable_msi)(struct pci_dev *pdev);
246 void (*disable_msi)(struct pci_dev *pdev);
247#endif /* CONFIG_PCI_MSI */
248}; 256};
249 257
250extern void power4_idle(void); 258extern void power4_idle(void);
259extern void power4_cpu_offline_powersave(void);
251extern void ppc6xx_idle(void); 260extern void ppc6xx_idle(void);
252 261
253/* 262/*
diff --git a/include/asm-powerpc/mmu-44x.h b/include/asm-powerpc/mmu-44x.h
new file mode 100644
index 000000000000..d5ce7a8dfe9f
--- /dev/null
+++ b/include/asm-powerpc/mmu-44x.h
@@ -0,0 +1,78 @@
1#ifndef _ASM_POWERPC_MMU_44X_H_
2#define _ASM_POWERPC_MMU_44X_H_
3/*
4 * PPC440 support
5 */
6
7#define PPC44x_MMUCR_TID 0x000000ff
8#define PPC44x_MMUCR_STS 0x00010000
9
10#define PPC44x_TLB_PAGEID 0
11#define PPC44x_TLB_XLAT 1
12#define PPC44x_TLB_ATTRIB 2
13
14/* Page identification fields */
15#define PPC44x_TLB_EPN_MASK 0xfffffc00 /* Effective Page Number */
16#define PPC44x_TLB_VALID 0x00000200 /* Valid flag */
17#define PPC44x_TLB_TS 0x00000100 /* Translation address space */
18#define PPC44x_TLB_1K 0x00000000 /* Page sizes */
19#define PPC44x_TLB_4K 0x00000010
20#define PPC44x_TLB_16K 0x00000020
21#define PPC44x_TLB_64K 0x00000030
22#define PPC44x_TLB_256K 0x00000040
23#define PPC44x_TLB_1M 0x00000050
24#define PPC44x_TLB_16M 0x00000070
25#define PPC44x_TLB_256M 0x00000090
26
27/* Translation fields */
28#define PPC44x_TLB_RPN_MASK 0xfffffc00 /* Real Page Number */
29#define PPC44x_TLB_ERPN_MASK 0x0000000f
30
31/* Storage attribute and access control fields */
32#define PPC44x_TLB_ATTR_MASK 0x0000ff80
33#define PPC44x_TLB_U0 0x00008000 /* User 0 */
34#define PPC44x_TLB_U1 0x00004000 /* User 1 */
35#define PPC44x_TLB_U2 0x00002000 /* User 2 */
36#define PPC44x_TLB_U3 0x00001000 /* User 3 */
37#define PPC44x_TLB_W 0x00000800 /* Caching is write-through */
38#define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */
39#define PPC44x_TLB_M 0x00000200 /* Memory is coherent */
40#define PPC44x_TLB_G 0x00000100 /* Memory is guarded */
41#define PPC44x_TLB_E 0x00000080 /* Memory is little endian */
42
43#define PPC44x_TLB_PERM_MASK 0x0000003f
44#define PPC44x_TLB_UX 0x00000020 /* User execution */
45#define PPC44x_TLB_UW 0x00000010 /* User write */
46#define PPC44x_TLB_UR 0x00000008 /* User read */
47#define PPC44x_TLB_SX 0x00000004 /* Super execution */
48#define PPC44x_TLB_SW 0x00000002 /* Super write */
49#define PPC44x_TLB_SR 0x00000001 /* Super read */
50
51/* Number of TLB entries */
52#define PPC44x_TLB_SIZE 64
53
54#ifndef __ASSEMBLY__
55
56typedef unsigned long long phys_addr_t;
57
58extern phys_addr_t fixup_bigphys_addr(phys_addr_t, phys_addr_t);
59
60typedef struct {
61 unsigned long id;
62 unsigned long vdso_base;
63} mm_context_t;
64
65#endif /* !__ASSEMBLY__ */
66
67#ifndef CONFIG_PPC_EARLY_DEBUG_44x
68#define PPC44x_EARLY_TLBS 1
69#else
70#define PPC44x_EARLY_TLBS 2
71#define PPC44x_EARLY_DEBUG_VIRTADDR (ASM_CONST(0xf0000000) \
72 | (ASM_CONST(CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW) & 0xffff))
73#endif
74
75/* Size of the TLBs used for pinning in lowmem */
76#define PPC_PIN_SIZE (1 << 28) /* 256M */
77
78#endif /* _ASM_POWERPC_MMU_44X_H_ */
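
The TLB attribute and permission bits compose by ORing into the values written to the TLB words. A worked sketch of building the words for a pinned, cacheable, supervisor-only 256MB kernel mapping from the new constants; the macro names are invented:

/* normally pulled in indirectly via <asm/mmu.h> */
#include <asm/mmu-44x.h>

/* word 2: supervisor read/write/execute, coherent, caching enabled */
#define MYBOARD_PIN_ATTR	(PPC44x_TLB_SX | PPC44x_TLB_SW | \
				 PPC44x_TLB_SR | PPC44x_TLB_M)

/* word 0: valid 256MB entry covering effective address 'ea' */
#define MYBOARD_PIN_PAGEID(ea)	(((ea) & PPC44x_TLB_EPN_MASK) | \
				 PPC44x_TLB_VALID | PPC44x_TLB_256M)
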
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 06b3e6d336cb..fe510fff8907 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -5,9 +5,12 @@
5#ifdef CONFIG_PPC64 5#ifdef CONFIG_PPC64
6/* 64-bit classic hash table MMU */ 6/* 64-bit classic hash table MMU */
7# include <asm/mmu-hash64.h> 7# include <asm/mmu-hash64.h>
8#elif defined(CONFIG_44x)
9/* 44x-style software loaded TLB */
10# include <asm/mmu-44x.h>
8#else 11#else
9/* 32-bit. FIXME: split up the 32-bit MMU types, and revise for 12/* Other 32-bit. FIXME: split up the other 32-bit MMU types, and
10 * arch/powerpc */ 13 * revise for arch/powerpc */
11# include <asm-ppc/mmu.h> 14# include <asm-ppc/mmu.h>
12#endif 15#endif
13 16
diff --git a/include/asm-powerpc/mpc52xx.h b/include/asm-powerpc/mpc52xx.h
index 7afd5bf94528..c4631f6dd4f9 100644
--- a/include/asm-powerpc/mpc52xx.h
+++ b/include/asm-powerpc/mpc52xx.h
@@ -253,5 +253,16 @@ extern int __init mpc52xx_add_bridge(struct device_node *node);
253 253
254#endif /* __ASSEMBLY__ */ 254#endif /* __ASSEMBLY__ */
255 255
256#ifdef CONFIG_PM
257struct mpc52xx_suspend {
258 void (*board_suspend_prepare)(void __iomem *mbar);
259 void (*board_resume_finish)(void __iomem *mbar);
260};
261
262extern struct mpc52xx_suspend mpc52xx_suspend;
263extern int __init mpc52xx_pm_init(void);
264extern int mpc52xx_set_wakeup_gpio(u8 pin, u8 level);
265#endif /* CONFIG_PM */
266
256#endif /* __ASM_POWERPC_MPC52xx_H__ */ 267#endif /* __ASM_POWERPC_MPC52xx_H__ */
257 268
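
The new mpc52xx_suspend hooks let a board insert its own steps around the common 52xx suspend code. A hedged sketch of a board wiring them up; the board name, the hook bodies and the init hookup are assumptions for illustration only:

#include <linux/init.h>
#include <asm/mpc52xx.h>

/* hypothetical board hooks around the common mpc52xx suspend path */
static void myboard_suspend_prepare(void __iomem *mbar)
{
	/* park board-specific pins, mask unwanted wakeup sources, ... */
}

static void myboard_resume_finish(void __iomem *mbar)
{
	/* undo whatever myboard_suspend_prepare() set up */
}

static int __init myboard_pm_init(void)
{
	mpc52xx_suspend.board_suspend_prepare = myboard_suspend_prepare;
	mpc52xx_suspend.board_resume_finish = myboard_resume_finish;
	return mpc52xx_pm_init();
}
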
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
index e4d5fc5362a0..2ffb06abe881 100644
--- a/include/asm-powerpc/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -3,6 +3,7 @@
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#include <linux/irq.h> 5#include <linux/irq.h>
6#include <linux/sysdev.h>
6#include <asm/dcr.h> 7#include <asm/dcr.h>
7 8
8/* 9/*
@@ -228,6 +229,14 @@ struct mpic_reg_bank {
228#endif /* CONFIG_PPC_DCR */ 229#endif /* CONFIG_PPC_DCR */
229}; 230};
230 231
232struct mpic_irq_save {
233 u32 vecprio,
234 dest;
235#ifdef CONFIG_MPIC_U3_HT_IRQS
236 u32 fixup_data;
237#endif
238};
239
231/* The instance data of a given MPIC */ 240/* The instance data of a given MPIC */
232struct mpic 241struct mpic
233{ 242{
@@ -292,8 +301,19 @@ struct mpic
292 u32 *hw_set; 301 u32 *hw_set;
293#endif 302#endif
294 303
304#ifdef CONFIG_PCI_MSI
305 spinlock_t bitmap_lock;
306 unsigned long *hwirq_bitmap;
307#endif
308
295 /* link */ 309 /* link */
296 struct mpic *next; 310 struct mpic *next;
311
312 struct sys_device sysdev;
313
314#ifdef CONFIG_PM
315 struct mpic_irq_save *save_data;
316#endif
297}; 317};
298 318
299/* 319/*
diff --git a/include/asm-powerpc/of_device.h b/include/asm-powerpc/of_device.h
index 4f1aabe0ce73..e9af49eb1aa8 100644
--- a/include/asm-powerpc/of_device.h
+++ b/include/asm-powerpc/of_device.h
@@ -32,6 +32,8 @@ extern int of_device_register(struct of_device *ofdev);
32extern void of_device_unregister(struct of_device *ofdev); 32extern void of_device_unregister(struct of_device *ofdev);
33extern void of_release_dev(struct device *dev); 33extern void of_release_dev(struct device *dev);
34 34
35extern ssize_t of_device_get_modalias(struct of_device *ofdev,
36 char *str, ssize_t len);
35extern int of_device_uevent(struct device *dev, 37extern int of_device_uevent(struct device *dev,
36 char **envp, int num_envp, char *buffer, int buffer_size); 38 char **envp, int num_envp, char *buffer, int buffer_size);
37 39
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index b4d38b0b15f8..10c51f457d48 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -121,6 +121,7 @@ typedef struct { pte_t pte; } real_pte_t;
121#endif 121#endif
122 122
123/* PMD level */ 123/* PMD level */
124#ifdef CONFIG_PPC64
124typedef struct { unsigned long pmd; } pmd_t; 125typedef struct { unsigned long pmd; } pmd_t;
125#define pmd_val(x) ((x).pmd) 126#define pmd_val(x) ((x).pmd)
126#define __pmd(x) ((pmd_t) { (x) }) 127#define __pmd(x) ((pmd_t) { (x) })
@@ -130,7 +131,8 @@ typedef struct { unsigned long pmd; } pmd_t;
130typedef struct { unsigned long pud; } pud_t; 131typedef struct { unsigned long pud; } pud_t;
131#define pud_val(x) ((x).pud) 132#define pud_val(x) ((x).pud)
132#define __pud(x) ((pud_t) { (x) }) 133#define __pud(x) ((pud_t) { (x) })
133#endif 134#endif /* !CONFIG_PPC_64K_PAGES */
135#endif /* CONFIG_PPC64 */
134 136
135/* PGD level */ 137/* PGD level */
136typedef struct { unsigned long pgd; } pgd_t; 138typedef struct { unsigned long pgd; } pgd_t;
@@ -159,15 +161,17 @@ typedef unsigned long real_pte_t;
159#endif 161#endif
160 162
161 163
164#ifdef CONFIG_PPC64
162typedef unsigned long pmd_t; 165typedef unsigned long pmd_t;
163#define pmd_val(x) (x) 166#define pmd_val(x) (x)
164#define __pmd(x) (x) 167#define __pmd(x) (x)
165 168
166#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_64K_PAGES) 169#ifndef CONFIG_PPC_64K_PAGES
167typedef unsigned long pud_t; 170typedef unsigned long pud_t;
168#define pud_val(x) (x) 171#define pud_val(x) (x)
169#define __pud(x) (x) 172#define __pud(x) (x)
170#endif 173#endif /* !CONFIG_PPC_64K_PAGES */
174#endif /* CONFIG_PPC64 */
171 175
172typedef unsigned long pgd_t; 176typedef unsigned long pgd_t;
173#define pgd_val(x) (x) 177#define pgd_val(x) (x)
diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h
index 07f6d3cf5e5a..374d0db37e1c 100644
--- a/include/asm-powerpc/page_32.h
+++ b/include/asm-powerpc/page_32.h
@@ -14,11 +14,9 @@
14#ifdef CONFIG_PTE_64BIT 14#ifdef CONFIG_PTE_64BIT
15typedef unsigned long long pte_basic_t; 15typedef unsigned long long pte_basic_t;
16#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */ 16#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
17#define PTE_FMT "%16Lx"
18#else 17#else
19typedef unsigned long pte_basic_t; 18typedef unsigned long pte_basic_t;
20#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */ 19#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
21#define PTE_FMT "%.8lx"
22#endif 20#endif
23 21
24struct page; 22struct page;
diff --git a/include/asm-powerpc/parport.h b/include/asm-powerpc/parport.h
index b37b81e37278..414c50e2e881 100644
--- a/include/asm-powerpc/parport.h
+++ b/include/asm-powerpc/parport.h
@@ -12,11 +12,6 @@
12 12
13#include <asm/prom.h> 13#include <asm/prom.h>
14 14
15extern struct parport *parport_pc_probe_port (unsigned long int base,
16 unsigned long int base_hi,
17 int irq, int dma,
18 struct pci_dev *dev);
19
20static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) 15static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
21{ 16{
22 struct device_node *np; 17 struct device_node *np;
diff --git a/include/asm-powerpc/pgalloc-32.h b/include/asm-powerpc/pgalloc-32.h
new file mode 100644
index 000000000000..e1307432163c
--- /dev/null
+++ b/include/asm-powerpc/pgalloc-32.h
@@ -0,0 +1,41 @@
1#ifndef _ASM_POWERPC_PGALLOC_32_H
2#define _ASM_POWERPC_PGALLOC_32_H
3
4#include <linux/threads.h>
5
6extern void __bad_pte(pmd_t *pmd);
7
8extern pgd_t *pgd_alloc(struct mm_struct *mm);
9extern void pgd_free(pgd_t *pgd);
10
11/*
12 * We don't have any real pmd's, and this code never triggers because
13 * the pgd will always be present..
14 */
15/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
16#define pmd_free(x) do { } while (0)
17#define __pmd_free_tlb(tlb,x) do { } while (0)
18/* #define pgd_populate(mm, pmd, pte) BUG() */
19
20#ifndef CONFIG_BOOKE
21#define pmd_populate_kernel(mm, pmd, pte) \
22 (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
23#define pmd_populate(mm, pmd, pte) \
24 (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
25#else
26#define pmd_populate_kernel(mm, pmd, pte) \
27 (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
28#define pmd_populate(mm, pmd, pte) \
29 (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
30#endif
31
32extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
33extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
34extern void pte_free_kernel(pte_t *pte);
35extern void pte_free(struct page *pte);
36
37#define __pte_free_tlb(tlb, pte) pte_free((pte))
38
39#define check_pgt_cache() do { } while (0)
40
41#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
new file mode 100644
index 000000000000..30b50cf56e2c
--- /dev/null
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -0,0 +1,152 @@
1#ifndef _ASM_POWERPC_PGALLOC_64_H
2#define _ASM_POWERPC_PGALLOC_64_H
3/*
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/mm.h>
11#include <linux/slab.h>
12#include <linux/cpumask.h>
13#include <linux/percpu.h>
14
15extern struct kmem_cache *pgtable_cache[];
16
17#ifdef CONFIG_PPC_64K_PAGES
18#define PTE_CACHE_NUM 0
19#define PMD_CACHE_NUM 1
20#define PGD_CACHE_NUM 2
21#define HUGEPTE_CACHE_NUM 3
22#else
23#define PTE_CACHE_NUM 0
24#define PMD_CACHE_NUM 1
25#define PUD_CACHE_NUM 1
26#define PGD_CACHE_NUM 0
27#define HUGEPTE_CACHE_NUM 2
28#endif
29
30static inline pgd_t *pgd_alloc(struct mm_struct *mm)
31{
32 return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
33}
34
35static inline void pgd_free(pgd_t *pgd)
36{
37 kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
38}
39
40#ifndef CONFIG_PPC_64K_PAGES
41
42#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
43
44static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
45{
46 return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
47 GFP_KERNEL|__GFP_REPEAT);
48}
49
50static inline void pud_free(pud_t *pud)
51{
52 kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
53}
54
55static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
56{
57 pud_set(pud, (unsigned long)pmd);
58}
59
60#define pmd_populate(mm, pmd, pte_page) \
61 pmd_populate_kernel(mm, pmd, page_address(pte_page))
62#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
63
64
65#else /* CONFIG_PPC_64K_PAGES */
66
67#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
68
69static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
70 pte_t *pte)
71{
72 pmd_set(pmd, (unsigned long)pte);
73}
74
75#define pmd_populate(mm, pmd, pte_page) \
76 pmd_populate_kernel(mm, pmd, page_address(pte_page))
77
78#endif /* CONFIG_PPC_64K_PAGES */
79
80static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
81{
82 return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
83 GFP_KERNEL|__GFP_REPEAT);
84}
85
86static inline void pmd_free(pmd_t *pmd)
87{
88 kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
89}
90
91static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
92 unsigned long address)
93{
94 return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
95 GFP_KERNEL|__GFP_REPEAT);
96}
97
98static inline struct page *pte_alloc_one(struct mm_struct *mm,
99 unsigned long address)
100{
101 return virt_to_page(pte_alloc_one_kernel(mm, address));
102}
103
104static inline void pte_free_kernel(pte_t *pte)
105{
106 kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
107}
108
109static inline void pte_free(struct page *ptepage)
110{
111 pte_free_kernel(page_address(ptepage));
112}
113
114#define PGF_CACHENUM_MASK 0x3
115
116typedef struct pgtable_free {
117 unsigned long val;
118} pgtable_free_t;
119
120static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
121 unsigned long mask)
122{
123 BUG_ON(cachenum > PGF_CACHENUM_MASK);
124
125 return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
126}
127
128static inline void pgtable_free(pgtable_free_t pgf)
129{
130 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
131 int cachenum = pgf.val & PGF_CACHENUM_MASK;
132
133 kmem_cache_free(pgtable_cache[cachenum], p);
134}
135
136extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
137
138#define __pte_free_tlb(tlb, ptepage) \
139 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
140 PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
141#define __pmd_free_tlb(tlb, pmd) \
142 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
143 PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
144#ifndef CONFIG_PPC_64K_PAGES
145#define __pud_free_tlb(tlb, pud) \
146 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
147 PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
148#endif /* CONFIG_PPC_64K_PAGES */
149
150#define check_pgt_cache() do { } while (0)
151
152#endif /* _ASM_POWERPC_PGALLOC_64_H */
diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h
index b0830db68f8a..b4505ed0f0f2 100644
--- a/include/asm-powerpc/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -2,159 +2,11 @@
2#define _ASM_POWERPC_PGALLOC_H 2#define _ASM_POWERPC_PGALLOC_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#ifndef CONFIG_PPC64 5#ifdef CONFIG_PPC64
6#include <asm-ppc/pgalloc.h> 6#include <asm/pgalloc-64.h>
7#else 7#else
8 8#include <asm/pgalloc-32.h>
9#include <linux/mm.h>
10#include <linux/slab.h>
11#include <linux/cpumask.h>
12#include <linux/percpu.h>
13
14extern struct kmem_cache *pgtable_cache[];
15
16#ifdef CONFIG_PPC_64K_PAGES
17#define PTE_CACHE_NUM 0
18#define PMD_CACHE_NUM 1
19#define PGD_CACHE_NUM 2
20#define HUGEPTE_CACHE_NUM 3
21#else
22#define PTE_CACHE_NUM 0
23#define PMD_CACHE_NUM 1
24#define PUD_CACHE_NUM 1
25#define PGD_CACHE_NUM 0
26#define HUGEPTE_CACHE_NUM 2
27#endif 9#endif
28 10
29/*
30 * This program is free software; you can redistribute it and/or
31 * modify it under the terms of the GNU General Public License
32 * as published by the Free Software Foundation; either version
33 * 2 of the License, or (at your option) any later version.
34 */
35
36static inline pgd_t *pgd_alloc(struct mm_struct *mm)
37{
38 return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
39}
40
41static inline void pgd_free(pgd_t *pgd)
42{
43 kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
44}
45
46#ifndef CONFIG_PPC_64K_PAGES
47
48#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
49
50static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
51{
52 return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
53 GFP_KERNEL|__GFP_REPEAT);
54}
55
56static inline void pud_free(pud_t *pud)
57{
58 kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
59}
60
61static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
62{
63 pud_set(pud, (unsigned long)pmd);
64}
65
66#define pmd_populate(mm, pmd, pte_page) \
67 pmd_populate_kernel(mm, pmd, page_address(pte_page))
68#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
69
70
71#else /* CONFIG_PPC_64K_PAGES */
72
73#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
74
75static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
76 pte_t *pte)
77{
78 pmd_set(pmd, (unsigned long)pte);
79}
80
81#define pmd_populate(mm, pmd, pte_page) \
82 pmd_populate_kernel(mm, pmd, page_address(pte_page))
83
84#endif /* CONFIG_PPC_64K_PAGES */
85
86static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
87{
88 return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
89 GFP_KERNEL|__GFP_REPEAT);
90}
91
92static inline void pmd_free(pmd_t *pmd)
93{
94 kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
95}
96
97static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
98 unsigned long address)
99{
100 return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
101 GFP_KERNEL|__GFP_REPEAT);
102}
103
104static inline struct page *pte_alloc_one(struct mm_struct *mm,
105 unsigned long address)
106{
107 return virt_to_page(pte_alloc_one_kernel(mm, address));
108}
109
110static inline void pte_free_kernel(pte_t *pte)
111{
112 kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
113}
114
115static inline void pte_free(struct page *ptepage)
116{
117 pte_free_kernel(page_address(ptepage));
118}
119
120#define PGF_CACHENUM_MASK 0x3
121
122typedef struct pgtable_free {
123 unsigned long val;
124} pgtable_free_t;
125
126static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
127 unsigned long mask)
128{
129 BUG_ON(cachenum > PGF_CACHENUM_MASK);
130
131 return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
132}
133
134static inline void pgtable_free(pgtable_free_t pgf)
135{
136 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
137 int cachenum = pgf.val & PGF_CACHENUM_MASK;
138
139 kmem_cache_free(pgtable_cache[cachenum], p);
140}
141
142extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
143
144#define __pte_free_tlb(tlb, ptepage) \
145 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
146 PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
147#define __pmd_free_tlb(tlb, pmd) \
148 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
149 PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
150#ifndef CONFIG_PPC_64K_PAGES
151#define __pud_free_tlb(tlb, pud) \
152 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
153 PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
154#endif /* CONFIG_PPC_64K_PAGES */
155
156#define check_pgt_cache() do { } while (0)
157
158#endif /* CONFIG_PPC64 */
159#endif /* __KERNEL__ */ 11#endif /* __KERNEL__ */
160#endif /* _ASM_POWERPC_PGALLOC_H */ 12#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index a28fa8bc01da..1744d6ac12a2 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -1,3 +1,5 @@
1#ifndef _ASM_POWERPC_PGTABLE_4K_H
2#define _ASM_POWERPC_PGTABLE_4K_H
1/* 3/*
2 * Entries per page directory level. The PTE level must use a 64b record 4 * Entries per page directory level. The PTE level must use a 64b record
3 * for each page table entry. The PMD and PGD level use a 32b record for 5 * for each page table entry. The PMD and PGD level use a 32b record for
@@ -100,3 +102,4 @@
100 102
101#define remap_4k_pfn(vma, addr, pfn, prot) \ 103#define remap_4k_pfn(vma, addr, pfn, prot) \
102 remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) 104 remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
105#endif /* _ASM_POWERPC_PGTABLE_4K_H */
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 5e84f070eaf7..16ef4978520d 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -1,6 +1,5 @@
1#ifndef _ASM_POWERPC_PGTABLE_64K_H 1#ifndef _ASM_POWERPC_PGTABLE_64K_H
2#define _ASM_POWERPC_PGTABLE_64K_H 2#define _ASM_POWERPC_PGTABLE_64K_H
3#ifdef __KERNEL__
4 3
5#include <asm-generic/pgtable-nopud.h> 4#include <asm-generic/pgtable-nopud.h>
6 5
@@ -65,8 +64,6 @@
65/* Bits to mask out from a PGD/PUD to get to the PMD page */ 64/* Bits to mask out from a PGD/PUD to get to the PMD page */
66#define PUD_MASKED_BITS 0x1ff 65#define PUD_MASKED_BITS 0x1ff
67 66
68#ifndef __ASSEMBLY__
69
70/* Manipulate "rpte" values */ 67/* Manipulate "rpte" values */
71#define __real_pte(e,p) ((real_pte_t) { \ 68#define __real_pte(e,p) ((real_pte_t) { \
72 (e), pte_val(*((p) + PTRS_PER_PTE)) }) 69 (e), pte_val(*((p) + PTRS_PER_PTE)) })
@@ -98,6 +95,4 @@
98 remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ 95 remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
99 __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)) 96 __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
100 97
101#endif /* __ASSEMBLY__ */
102#endif /* __KERNEL__ */
103#endif /* _ASM_POWERPC_PGTABLE_64K_H */ 98#endif /* _ASM_POWERPC_PGTABLE_64K_H */
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
new file mode 100644
index 000000000000..09662a24f226
--- /dev/null
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -0,0 +1,813 @@
1#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
2#define _ASM_POWERPC_PGTABLE_PPC32_H
3
4#include <asm-generic/pgtable-nopmd.h>
5
6#ifndef __ASSEMBLY__
7#include <linux/sched.h>
8#include <linux/threads.h>
9#include <asm/processor.h> /* For TASK_SIZE */
10#include <asm/mmu.h>
11#include <asm/page.h>
12#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
13struct mm_struct;
14
15extern unsigned long va_to_phys(unsigned long address);
16extern pte_t *va_to_pte(unsigned long address);
17extern unsigned long ioremap_bot, ioremap_base;
18#endif /* __ASSEMBLY__ */
19
20/*
21 * The PowerPC MMU uses a hash table containing PTEs, together with
22 * a set of 16 segment registers (on 32-bit implementations), to define
23 * the virtual to physical address mapping.
24 *
25 * We use the hash table as an extended TLB, i.e. a cache of currently
26 * active mappings. We maintain a two-level page table tree, much
27 * like that used by the i386, for the sake of the Linux memory
28 * management code. Low-level assembler code in hashtable.S
29 * (procedure hash_page) is responsible for extracting ptes from the
30 * tree and putting them into the hash table when necessary, and
31 * updating the accessed and modified bits in the page table tree.
32 */
33
34/*
35 * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
36 * We also use the two level tables, but we can put the real bits in them
37 * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
38 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
39 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
40 * based upon user/super access. The TLB does not have accessed nor write
41 * protect. We assume that if the TLB gets loaded with an entry it is
42 * accessed, and overload the changed bit for write protect. We use
43 * two bits in the software pte that are supposed to be set to zero in
44 * the TLB entry (24 and 25) for these indicators. Although the level 1
45 * descriptor contains the guarded and writethrough/copyback bits, we can
46 * set these at the page level since they get copied from the Mx_TWC
47 * register when the TLB entry is loaded. We will use bit 27 for guard, since
48 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
49 * These will get masked from the level 2 descriptor at TLB load time, and
50 * copied to the MD_TWC before it gets loaded.
51 * Large page sizes added. We currently support two sizes, 4K and 8M.
52 * This also allows a TLB handler optimization because we can directly
53 * load the PMD into MD_TWC. The 8M pages are only used for kernel
54 * mapping of well known areas. The PMD (PGD) entries contain control
55 * flags in addition to the address, so care must be taken that the
56 * software no longer assumes these are only pointers.
57 */
58
59/*
60 * At present, all PowerPC 400-class processors share a similar TLB
61 * architecture. The instruction and data sides share a unified,
62 * 64-entry, fully-associative TLB which is maintained totally under
63 * software control. In addition, the instruction side has a
64 * hardware-managed, 4-entry, fully-associative TLB which serves as a
65 * first level to the shared TLB. These two TLBs are known as the UTLB
66 * and ITLB, respectively (see "mmu.h" for definitions).
67 */
68
69/*
70 * The normal case is that PTEs are 32-bits and we have a 1-page
71 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
72 *
73 * For any >32-bit physical address platform, we can use the following
74 * two level page table layout where the pgdir is 8KB and the MS 13 bits
75 * are an index to the second level table. The combined pgdir/pmd first
76 * level has 2048 entries and the second level has 512 64-bit PTE entries.
77 * -Matt
78 */
79/* PGDIR_SHIFT determines what a top-level page table entry can map */
80#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
81#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
82#define PGDIR_MASK (~(PGDIR_SIZE-1))
83
84/*
85 * entries per page directory level: our page-table tree is two-level, so
86 * we don't really have any PMD directory.
87 */
88#define PTRS_PER_PTE (1 << PTE_SHIFT)
89#define PTRS_PER_PMD 1
90#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
91
92#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
93#define FIRST_USER_ADDRESS 0
94
95#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
96#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
97
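/*
 * Illustrative sketch (not part of the original header): how a 32-bit
 * virtual address decomposes under the two-level layout above. The
 * concrete widths assume the common case of PAGE_SHIFT == 12 and
 * PTE_SHIFT == 10 (4-byte PTEs), giving PGDIR_SHIFT == 22, 1024 PGD
 * entries and 1024 PTEs per page-table page.
 */
#ifndef __ASSEMBLY__
static inline void example_split_va(unsigned long va, unsigned long *pgd_idx,
				    unsigned long *pte_idx, unsigned long *off)
{
	*pgd_idx = va >> PGDIR_SHIFT;				/* top bits */
	*pte_idx = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* middle bits */
	*off	 = va & (PAGE_SIZE - 1);			/* byte in page */
}
#endif /* __ASSEMBLY__ */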
98#define pte_ERROR(e) \
99 printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
100 (unsigned long long)pte_val(e))
101#define pgd_ERROR(e) \
102 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
103
104/*
105 * Just any arbitrary offset to the start of the vmalloc VM area: the
106 * current 16MB value just means that there will be a 16MB "hole" after the
107 * physical memory until the kernel virtual memory starts. That means that
108 * any out-of-bounds memory accesses will hopefully be caught.
109 * The vmalloc() routines leave a hole of 4kB between each vmalloced
110 * area for the same reason. ;)
111 *
112 * We no longer map larger than phys RAM with the BATs so we don't have
113 * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
114 * about clashes between our early calls to ioremap() that start growing down
115 * from ioremap_base being run into the VM area allocations (growing upwards
116 * from VMALLOC_START). For this reason we have ioremap_bot to check when
117 * we actually run into our mappings setup in the early boot with the VM
118 * system. This really does become a problem for machines with good amounts
119 * of RAM. -- Cort
120 */
121#define VMALLOC_OFFSET (0x1000000) /* 16M */
122#ifdef PPC_PIN_SIZE
123#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
124#else
125#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
126#endif
127#define VMALLOC_END ioremap_bot
128
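/*
 * Worked example (illustrative, not part of the original header): with
 * VMALLOC_OFFSET == 0x1000000 and, say, high_memory == 0xc7800000, the
 * non-PPC_PIN_SIZE form above gives
 *	(0xc7800000 + 0x1000000) & ~0xffffff == 0xc8000000
 * i.e. VMALLOC_START is rounded to a 16MB boundary above high_memory.
 */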
129/*
130 * Bits in a linux-style PTE. These match the bits in the
131 * (hardware-defined) PowerPC PTE as closely as possible.
132 */
133
134#if defined(CONFIG_40x)
135
136/* There are several potential gotchas here. The 40x hardware TLBLO
137 field looks like this:
138
139 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
140 RPN..................... 0 0 EX WR ZSEL....... W I M G
141
142 Where possible we make the Linux PTE bits match up with this
143
144 - bits 20 and 21 must be cleared, because we use 4k pages (40x can
145 support down to 1k pages); this is done in the TLBMiss exception
146 handler.
147 - We use only zones 0 (for kernel pages) and 1 (for user pages)
148 of the 16 available. Bits 24-26 of the TLB are cleared in the TLB
149 miss handler. Bit 27 is PAGE_USER, thus selecting the correct
150 zone.
151 - PRESENT *must* be in the bottom two bits because swap cache
152 entries use the top 30 bits. Because 40x doesn't support SMP
153 anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
154 is cleared in the TLB miss handler before the TLB entry is loaded.
155 - All other bits of the PTE are loaded into TLBLO without
156 modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
157 software PTE bits. We actually use bits 21, 24, 25, and
158 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
159 PRESENT.
160*/
161
162/* Definitions for 40x embedded chips. */
163#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
164#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
165#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
166#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
167#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
168#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
169#define _PAGE_RW 0x040 /* software: Writes permitted */
170#define _PAGE_DIRTY 0x080 /* software: dirty page */
171#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
172#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
173#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
174
175#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
176#define _PMD_BAD 0x802
177#define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */
178#define _PMD_SIZE_4M 0x0c0
179#define _PMD_SIZE_16M 0x0e0
180#define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4))
181
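/*
 * Worked example (illustrative, not part of the original header): for a
 * large-page PMD entry, PMD_PAGE_SIZE() above recovers the mapping size
 * from the _PMD_SIZE field:
 *	PMD_PAGE_SIZE(_PMD_SIZE_4M)  == 1024 << (0x0c0 >> 4) == 4MB
 *	PMD_PAGE_SIZE(_PMD_SIZE_16M) == 1024 << (0x0e0 >> 4) == 16MB
 */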
182#elif defined(CONFIG_44x)
183/*
184 * Definitions for PPC440
185 *
186 * Because of the 3 word TLB entries to support 36-bit addressing,
187 * the attributes are difficult to map in such a fashion that they
188 * are easily loaded during exception processing. I decided to
189 * organize the entry so the ERPN is the only portion in the
190 * upper word of the PTE and the attribute bits below are packed
191 * in as sensibly as they can be in the area below a 4KB page size
192 * oriented RPN. This at least makes it easy to load the RPN and
193 * ERPN fields in the TLB. -Matt
194 *
195 * Note that these bits preclude future use of a page size
196 * less than 4KB.
197 *
198 *
199 * The PPC 440 core has the following TLB attribute fields:
200 *
201 * TLB1:
202 * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
203 * RPN................................. - - - - - - ERPN.......
204 *
205 * TLB2:
206 * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
207 * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
208 *
209 * There are some constraints and options in deciding how to map software
210 * bits into the TLB entry.
211 *
212 * - PRESENT *must* be in the bottom three bits because swap cache
213 * entries use the top 29 bits for TLB2.
214 *
215 * - FILE *must* be in the bottom three bits because swap cache
216 * entries use the top 29 bits for TLB2.
217 *
218 * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it
219 * doesn't support SMP. So we can use this as software bit, like
220 * DIRTY.
221 *
222 * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
223 * for memory protection related functions (see PTE structure in
224 * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the
225 * above bits. Note that the bit values are CPU specific, not architecture
226 * specific.
227 *
228 * The kernel PTE entry holds an arch-dependent swp_entry structure under
229 * certain situations. In other words, in such situations some portion of
230 * the PTE bits are used as a swp_entry. In the PPC implementation, the
231 * 3rd-24th LSBs are shared with swp_entry; however, the lowest three LSBs
232 * still hold protection values. That means the three protection bits are
233 * reserved for both the PTE and the SWAP entry in the three least
234 * significant bits.
235 *
236 * There are three protection bits available for SWAP entry:
237 * _PAGE_PRESENT
238 * _PAGE_FILE
239 * _PAGE_HASHPTE (if HW has)
240 *
241 * So those three bits have to be in the lowest three bits of the PTE.
242 *
243 */
244
245#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
246#define _PAGE_RW 0x00000002 /* S: Write permission */
247#define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */
248#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
249#define _PAGE_HWWRITE 0x00000010 /* H: Dirty & RW */
250#define _PAGE_HWEXEC 0x00000020 /* H: Execute permission */
251#define _PAGE_USER 0x00000040 /* S: User page */
252#define _PAGE_ENDIAN 0x00000080 /* H: E bit */
253#define _PAGE_GUARDED 0x00000100 /* H: G bit */
254#define _PAGE_DIRTY 0x00000200 /* S: Page dirty */
255#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
256#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
257
258/* TODO: Add large page lowmem mapping support */
259#define _PMD_PRESENT 0
260#define _PMD_PRESENT_MASK (PAGE_MASK)
261#define _PMD_BAD (~PAGE_MASK)
262
263/* ERPN in a PTE never gets cleared, ignore it */
264#define _PTE_NONE_MASK 0xffffffff00000000ULL
265
266#elif defined(CONFIG_FSL_BOOKE)
267/*
268 MMU Assist Register 3:
269
270 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
271 RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR
272
273 - PRESENT *must* be in the bottom three bits because swap cache
274 entries use the top 29 bits.
275
276 - FILE *must* be in the bottom three bits because swap cache
277 entries use the top 29 bits.
278*/
279
280/* Definitions for FSL Book-E Cores */
281#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
282#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
283#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
284#define _PAGE_ACCESSED 0x00004 /* S: Page referenced */
285#define _PAGE_HWWRITE 0x00008 /* H: Dirty & RW, set in exception */
286#define _PAGE_RW 0x00010 /* S: Write permission */
287#define _PAGE_HWEXEC 0x00020 /* H: UX permission */
288
289#define _PAGE_ENDIAN 0x00040 /* H: E bit */
290#define _PAGE_GUARDED 0x00080 /* H: G bit */
291#define _PAGE_COHERENT 0x00100 /* H: M bit */
292#define _PAGE_NO_CACHE 0x00200 /* H: I bit */
293#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
294
295#ifdef CONFIG_PTE_64BIT
296#define _PAGE_DIRTY 0x08000 /* S: Page dirty */
297
298/* ERPN in a PTE never gets cleared, ignore it */
299#define _PTE_NONE_MASK 0xffffffffffff0000ULL
300#else
301#define _PAGE_DIRTY 0x00800 /* S: Page dirty */
302#endif
303
304#define _PMD_PRESENT 0
305#define _PMD_PRESENT_MASK (PAGE_MASK)
306#define _PMD_BAD (~PAGE_MASK)
307
308#elif defined(CONFIG_8xx)
309/* Definitions for 8xx embedded chips. */
310#define _PAGE_PRESENT 0x0001 /* Page is valid */
311#define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */
312#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
313#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
314
315/* These five software bits must be masked out when the entry is loaded
316 * into the TLB.
317 */
318#define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */
319#define _PAGE_GUARDED 0x0010 /* software: guarded access */
320#define _PAGE_DIRTY 0x0020 /* software: page changed */
321#define _PAGE_RW 0x0040 /* software: user write access allowed */
322#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
323
324/* Setting any bits in the nibble with the following two controls will
325 * require a TLB exception handler change. It is assumed unused bits
326 * are always zero.
327 */
328#define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */
329#define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */
330
331#define _PMD_PRESENT 0x0001
332#define _PMD_BAD 0x0ff0
333#define _PMD_PAGE_MASK 0x000c
334#define _PMD_PAGE_8M 0x000c
335
336/*
337 * The 8xx TLB miss handler allegedly sets _PAGE_ACCESSED in the PTE
338 * for an address even if _PAGE_PRESENT is not set, as a performance
339 * optimization. This is a bug if you ever want to use swap unless
340 * _PAGE_ACCESSED is 2, which it isn't, or unless you have 8xx-specific
341 * definitions for __swp_entry etc. below, which would be gross.
342 * -- paulus
343 */
344#define _PTE_NONE_MASK _PAGE_ACCESSED
345
346#else /* CONFIG_6xx */
347/* Definitions for 60x, 740/750, etc. */
348#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
349#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
350#define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
351#define _PAGE_USER 0x004 /* usermode access allowed */
352#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
353#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
354#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
355#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
356#define _PAGE_DIRTY 0x080 /* C: page changed */
357#define _PAGE_ACCESSED 0x100 /* R: page referenced */
358#define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
359#define _PAGE_RW 0x400 /* software: user write access allowed */
360
361#define _PTE_NONE_MASK _PAGE_HASHPTE
362
363#define _PMD_PRESENT 0
364#define _PMD_PRESENT_MASK (PAGE_MASK)
365#define _PMD_BAD (~PAGE_MASK)
366#endif
367
368/*
369 * Some bits are only used on some cpu families...
370 */
371#ifndef _PAGE_HASHPTE
372#define _PAGE_HASHPTE 0
373#endif
374#ifndef _PTE_NONE_MASK
375#define _PTE_NONE_MASK 0
376#endif
377#ifndef _PAGE_SHARED
378#define _PAGE_SHARED 0
379#endif
380#ifndef _PAGE_HWWRITE
381#define _PAGE_HWWRITE 0
382#endif
383#ifndef _PAGE_HWEXEC
384#define _PAGE_HWEXEC 0
385#endif
386#ifndef _PAGE_EXEC
387#define _PAGE_EXEC 0
388#endif
389#ifndef _PMD_PRESENT_MASK
390#define _PMD_PRESENT_MASK _PMD_PRESENT
391#endif
392#ifndef _PMD_SIZE
393#define _PMD_SIZE 0
394#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
395#endif
396
397#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
398
399/*
400 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
401 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
402 * to have it in the Linux PTE, and in fact the bit could be reused for
403 * another purpose. -- paulus.
404 */
405
406#ifdef CONFIG_44x
407#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
408#else
409#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
410#endif
411#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
412#define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
413
414#ifdef CONFIG_PPC_STD_MMU
415/* On standard PPC MMU, no user access implies kernel read/write access,
416 * so to write-protect kernel memory we must turn on user access */
417#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
418#else
419#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED)
420#endif
421
422#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
423#define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC)
424
425#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH)
426/* We want the debuggers to be able to set breakpoints anywhere, so
427 * don't write protect the kernel text */
428#define _PAGE_RAM_TEXT _PAGE_RAM
429#else
430#define _PAGE_RAM_TEXT (_PAGE_KERNEL_RO | _PAGE_HWEXEC)
431#endif
432
433#define PAGE_NONE __pgprot(_PAGE_BASE)
434#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
435#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
436#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
437#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
438#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
439#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
440
441#define PAGE_KERNEL __pgprot(_PAGE_RAM)
442#define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO)
443
444/*
445 * The PowerPC can only do execute protection on a segment (256MB) basis,
446 * not on a page basis. So we consider execute permission the same as read.
447 * Also, write permissions imply read permissions.
448 * This is the closest we can get..
449 */
450#define __P000 PAGE_NONE
451#define __P001 PAGE_READONLY_X
452#define __P010 PAGE_COPY
453#define __P011 PAGE_COPY_X
454#define __P100 PAGE_READONLY
455#define __P101 PAGE_READONLY_X
456#define __P110 PAGE_COPY
457#define __P111 PAGE_COPY_X
458
459#define __S000 PAGE_NONE
460#define __S001 PAGE_READONLY_X
461#define __S010 PAGE_SHARED
462#define __S011 PAGE_SHARED_X
463#define __S100 PAGE_READONLY
464#define __S101 PAGE_READONLY_X
465#define __S110 PAGE_SHARED
466#define __S111 PAGE_SHARED_X
467
468#ifndef __ASSEMBLY__
469/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
470 * kernel without large page PMD support */
471extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
472
473/*
474 * Conversions between PTE values and page frame numbers.
475 */
476
477/* In some cases we want to additionally adjust where the pfn is in the pte to
478 * allow room for more flags */
479#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
480#define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8)
481#else
482#define PFN_SHIFT_OFFSET (PAGE_SHIFT)
483#endif
484
485#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
486#define pte_page(x) pfn_to_page(pte_pfn(x))
487
488#define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
489 pgprot_val(prot))
490#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
491
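/*
 * Illustrative sketch (not part of the original header): a pfn/pte round
 * trip with the helpers above. PFN_SHIFT_OFFSET leaves room for extra
 * flag bits on 36-bit-physical FSL Book-E parts; on everything else it
 * is just PAGE_SHIFT.
 */
static inline int example_pfn_roundtrip(unsigned long pfn)
{
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);

	return pte_pfn(pte) == pfn;	/* expected to hold for any valid pfn */
}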
492/*
493 * ZERO_PAGE is a global shared page that is always zero: used
494 * for zero-mapped memory areas etc..
495 */
496extern unsigned long empty_zero_page[1024];
497#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
498
499#endif /* __ASSEMBLY__ */
500
501#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
502#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
503#define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
504
505#define pmd_none(pmd) (!pmd_val(pmd))
506#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
507#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
508#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
509
510#ifndef __ASSEMBLY__
511/*
512 * The following only work if pte_present() is true.
513 * Undefined behaviour if not..
514 */
515static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
516static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
517static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
518static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
519static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
520static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
521
522static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
523static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
524
525static inline pte_t pte_rdprotect(pte_t pte) {
526 pte_val(pte) &= ~_PAGE_USER; return pte; }
527static inline pte_t pte_wrprotect(pte_t pte) {
528 pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
529static inline pte_t pte_exprotect(pte_t pte) {
530 pte_val(pte) &= ~_PAGE_EXEC; return pte; }
531static inline pte_t pte_mkclean(pte_t pte) {
532 pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
533static inline pte_t pte_mkold(pte_t pte) {
534 pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
535
536static inline pte_t pte_mkread(pte_t pte) {
537 pte_val(pte) |= _PAGE_USER; return pte; }
538static inline pte_t pte_mkexec(pte_t pte) {
539 pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
540static inline pte_t pte_mkwrite(pte_t pte) {
541 pte_val(pte) |= _PAGE_RW; return pte; }
542static inline pte_t pte_mkdirty(pte_t pte) {
543 pte_val(pte) |= _PAGE_DIRTY; return pte; }
544static inline pte_t pte_mkyoung(pte_t pte) {
545 pte_val(pte) |= _PAGE_ACCESSED; return pte; }
546
547static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
548{
549 pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
550 return pte;
551}
552
553/*
554 * When flushing the tlb entry for a page, we also need to flush the hash
555 * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
556 */
557extern int flush_hash_pages(unsigned context, unsigned long va,
558 unsigned long pmdval, int count);
559
560/* Add an HPTE to the hash table */
561extern void add_hash_page(unsigned context, unsigned long va,
562 unsigned long pmdval);
563
564/*
565 * Atomic PTE updates.
566 *
567 * pte_update clears and sets bits atomically, and returns
568 * the old pte value. In the 64-bit PTE case we lock around the
569 * low PTE word since we expect ALL flag bits to be there
570 */
571#ifndef CONFIG_PTE_64BIT
572static inline unsigned long pte_update(pte_t *p, unsigned long clr,
573 unsigned long set)
574{
575 unsigned long old, tmp;
576
577 __asm__ __volatile__("\
5781: lwarx %0,0,%3\n\
579 andc %1,%0,%4\n\
580 or %1,%1,%5\n"
581 PPC405_ERR77(0,%3)
582" stwcx. %1,0,%3\n\
583 bne- 1b"
584 : "=&r" (old), "=&r" (tmp), "=m" (*p)
585 : "r" (p), "r" (clr), "r" (set), "m" (*p)
586 : "cc" );
587 return old;
588}
589#else
590static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
591 unsigned long set)
592{
593 unsigned long long old;
594 unsigned long tmp;
595
596 __asm__ __volatile__("\
5971: lwarx %L0,0,%4\n\
598 lwzx %0,0,%3\n\
599 andc %1,%L0,%5\n\
600 or %1,%1,%6\n"
601 PPC405_ERR77(0,%3)
602" stwcx. %1,0,%4\n\
603 bne- 1b"
604 : "=&r" (old), "=&r" (tmp), "=m" (*p)
605 : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
606 : "cc" );
607 return old;
608}
609#endif
610
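/*
 * Illustrative sketch (not part of the original header): the effect of
 * the 32-bit-PTE pte_update() above, written as plain (and therefore
 * non-atomic) C. The real version must use the lwarx/stwcx. reservation
 * instructions so the read-modify-write of the PTE cannot race with
 * concurrent updates.
 */
static inline unsigned long example_pte_update_nonatomic(pte_t *p,
							  unsigned long clr,
							  unsigned long set)
{
	unsigned long old = pte_val(*p);

	*p = __pte((old & ~clr) | set);
	return old;
}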
611/*
612 * set_pte stores a linux PTE into the linux page table.
613 * On machines which use an MMU hash table we avoid changing the
614 * _PAGE_HASHPTE bit.
615 */
616static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
617 pte_t *ptep, pte_t pte)
618{
619#if _PAGE_HASHPTE != 0
620 pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
621#else
622 *ptep = pte;
623#endif
624}
625
626/*
627 * 2.6 calls this without flushing the TLB entry; that is wrong
628 * for our hash-based implementation, so we fix it up here
629 */
630#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
631static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
632{
633 unsigned long old;
634 old = pte_update(ptep, _PAGE_ACCESSED, 0);
635#if _PAGE_HASHPTE != 0
636 if (old & _PAGE_HASHPTE) {
637 unsigned long ptephys = __pa(ptep) & PAGE_MASK;
638 flush_hash_pages(context, addr, ptephys, 1);
639 }
640#endif
641 return (old & _PAGE_ACCESSED) != 0;
642}
643#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
644 __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
645
646#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
647static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
648 unsigned long addr, pte_t *ptep)
649{
650 return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
651}
652
653#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
654static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
655 pte_t *ptep)
656{
657 return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
658}
659
660#define __HAVE_ARCH_PTEP_SET_WRPROTECT
661static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
662 pte_t *ptep)
663{
664 pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
665}
666
667#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
668static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
669{
670 unsigned long bits = pte_val(entry) &
671 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
672 pte_update(ptep, 0, bits);
673}
674
675#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
676 do { \
677 __ptep_set_access_flags(__ptep, __entry, __dirty); \
678 flush_tlb_page_nohash(__vma, __address); \
679 } while(0)
680
681/*
682 * Macro to mark a page protection value as "uncacheable".
683 */
684#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
685
686struct file;
687extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
688 unsigned long size, pgprot_t vma_prot);
689#define __HAVE_PHYS_MEM_ACCESS_PROT
690
691#define __HAVE_ARCH_PTE_SAME
692#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
693
694/*
695 * Note that on Book E processors, the pmd contains the kernel virtual
696 * (lowmem) address of the pte page. The physical address is less useful
697 * because everything runs with translation enabled (even the TLB miss
698 * handler). On everything else the pmd contains the physical address
699 * of the pte page. -- paulus
700 */
701#ifndef CONFIG_BOOKE
702#define pmd_page_vaddr(pmd) \
703 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
704#define pmd_page(pmd) \
705 (mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
706#else
707#define pmd_page_vaddr(pmd) \
708 ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
709#define pmd_page(pmd) \
710 (mem_map + (__pa(pmd_val(pmd)) >> PAGE_SHIFT))
711#endif
712
713/* to find an entry in a kernel page-table-directory */
714#define pgd_offset_k(address) pgd_offset(&init_mm, address)
715
716/* to find an entry in a page-table-directory */
717#define pgd_index(address) ((address) >> PGDIR_SHIFT)
718#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
719
720/* Find an entry in the third-level page table.. */
721#define pte_index(address) \
722 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
723#define pte_offset_kernel(dir, addr) \
724 ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
725#define pte_offset_map(dir, addr) \
726 ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
727#define pte_offset_map_nested(dir, addr) \
728 ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
729
730#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
731#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
732
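/*
 * Illustrative sketch (not part of the original header): using the
 * macros above to walk the two-level table and find the pte mapping a
 * kernel virtual address. With <asm-generic/pgtable-nopmd.h> the pud
 * and pmd levels are folded, so the pud/pmd "entries" are really the
 * pgd entry itself. No locking is shown.
 */
static inline pte_t *example_find_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud = pud_offset(pgd, addr);	/* folded level */
	pmd_t *pmd = pmd_offset(pud, addr);	/* folded level */

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}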
733extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
734
735extern void paging_init(void);
736
737/*
738 * Encode and decode a swap entry.
739 * Note that the bits we use in a PTE for representing a swap entry
740 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
741 * _PAGE_HASHPTE bit (if used). -- paulus
742 */
743#define __swp_type(entry) ((entry).val & 0x1f)
744#define __swp_offset(entry) ((entry).val >> 5)
745#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
746#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
747#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
748
749/* Encode and decode a nonlinear file mapping entry */
750#define PTE_FILE_MAX_BITS 29
751#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
752#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
753
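/*
 * Worked example (illustrative, not part of the original header): a swap
 * entry of type 2 at offset 0x100 encodes, via the macros above, as
 *	__swp_entry(2, 0x100).val	== 2 | (0x100 << 5)	== 0x2002
 *	__swp_entry_to_pte(...) 	== 0x2002 << 3		== 0x10010
 * leaving bits 0-2 clear, so the result can never look like a present,
 * file or hashed PTE.
 */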
754/* CONFIG_APUS */
755/* For virtual address to physical address conversion */
756extern void cache_clear(__u32 addr, int length);
757extern void cache_push(__u32 addr, int length);
758extern int mm_end_of_chunk (unsigned long addr, int len);
759extern unsigned long iopa(unsigned long addr);
760extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
761
762/* Values for nocacheflag and cmode */
763/* These are not used by the APUS kernel_map, but prevent
764 compilation errors. */
765#define KERNELMAP_FULL_CACHING 0
766#define KERNELMAP_NOCACHE_SER 1
767#define KERNELMAP_NOCACHE_NONSER 2
768#define KERNELMAP_NO_COPYBACK 3
769
770/*
771 * Map some physical address range into the kernel address space.
772 */
773extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
774 int nocacheflag, unsigned long *memavailp );
775
776/*
777 * Set cache mode of (kernel space) address range.
778 */
779extern void kernel_set_cachemode (unsigned long address, unsigned long size,
780 unsigned int cmode);
781
782/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
783#define kern_addr_valid(addr) (1)
784
785#ifdef CONFIG_PHYS_64BIT
786extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
787 unsigned long paddr, unsigned long size, pgprot_t prot);
788
789static inline int io_remap_pfn_range(struct vm_area_struct *vma,
790 unsigned long vaddr,
791 unsigned long pfn,
792 unsigned long size,
793 pgprot_t prot)
794{
795 phys_addr_t paddr64 = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
796 return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot);
797}
798#else
799#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
800 remap_pfn_range(vma, vaddr, pfn, size, prot)
801#endif
802
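/*
 * Illustrative sketch (not part of the original header): the usual way a
 * driver's mmap() method combines pgprot_noncached() with
 * io_remap_pfn_range() when exposing a device region to userspace.
 * "dev_phys" is a hypothetical physical base address supplied by the
 * caller, and struct vm_area_struct is assumed to be visible here.
 */
static inline int example_mmap_device(struct vm_area_struct *vma,
				      unsigned long dev_phys)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, dev_phys >> PAGE_SHIFT,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}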
803/*
804 * No page table caches to initialise
805 */
806#define pgtable_cache_init() do { } while (0)
807
808extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
809 pmd_t **pmdp);
810
811#endif /* !__ASSEMBLY__ */
812
813#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
new file mode 100644
index 000000000000..704c4e669fe0
--- /dev/null
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -0,0 +1,492 @@
1#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
2#define _ASM_POWERPC_PGTABLE_PPC64_H_
3/*
4 * This file contains the functions and defines necessary to modify and use
5 * the ppc64 hashed page table.
6 */
7
8#ifndef __ASSEMBLY__
9#include <linux/stddef.h>
10#include <asm/processor.h> /* For TASK_SIZE */
11#include <asm/mmu.h>
12#include <asm/page.h>
13#include <asm/tlbflush.h>
14struct mm_struct;
15#endif /* __ASSEMBLY__ */
16
17#ifdef CONFIG_PPC_64K_PAGES
18#include <asm/pgtable-64k.h>
19#else
20#include <asm/pgtable-4k.h>
21#endif
22
23#define FIRST_USER_ADDRESS 0
24
25/*
26 * Size of EA range mapped by our pagetables.
27 */
28#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
29 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
30#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
31
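/*
 * Worked example (illustrative, not part of the original header): with
 * the 4K-page geometry from pgtable-4k.h (PTE/PMD/PUD/PGD index sizes of
 * 9, 7, 7 and 9 bits and PAGE_SHIFT == 12) this gives
 *	PGTABLE_EADDR_SIZE == 9 + 7 + 7 + 9 + 12 == 44
 * so PGTABLE_RANGE covers 2^44 bytes (16TB) of effective address space.
 */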
32#if TASK_SIZE_USER64 > PGTABLE_RANGE
33#error TASK_SIZE_USER64 exceeds pagetable range
34#endif
35
36#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
37#error TASK_SIZE_USER64 exceeds user VSID range
38#endif
39
40/*
41 * Define the address range of the vmalloc VM area.
42 */
43#define VMALLOC_START ASM_CONST(0xD000000000000000)
44#define VMALLOC_SIZE ASM_CONST(0x80000000000)
45#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
46
47/*
48 * Define the address range of the imalloc VM area.
49 */
50#define PHBS_IO_BASE VMALLOC_END
51#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
52#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
53
54/*
55 * Region IDs
56 */
57#define REGION_SHIFT 60UL
58#define REGION_MASK (0xfUL << REGION_SHIFT)
59#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
60
61#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
62#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
63#define USER_REGION_ID (0UL)
64
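/*
 * Worked example (illustrative, not part of the original header):
 * REGION_ID() simply takes the top nibble of an effective address, so
 *	REGION_ID(VMALLOC_START) == 0xD000000000000000 >> 60 == 0xd
 * and, assuming the usual PAGE_OFFSET of 0xC000000000000000,
 * KERNEL_REGION_ID is 0xc while user addresses fall in region 0.
 */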
65/*
66 * Common bits in a linux-style PTE. These match the bits in the
67 * (hardware-defined) PowerPC PTE as closely as possible. Additional
68 * bits may be defined in pgtable-*.h
69 */
70#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
71#define _PAGE_USER 0x0002 /* matches one of the PP bits */
72#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
73#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
74#define _PAGE_GUARDED 0x0008
75#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
76#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
77#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
78#define _PAGE_DIRTY 0x0080 /* C: page changed */
79#define _PAGE_ACCESSED 0x0100 /* R: page referenced */
80#define _PAGE_RW 0x0200 /* software: user write access allowed */
81#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */
82#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
83
84#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
85
86#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
87
88/* __pgprot defined in asm-powerpc/page.h */
89#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
90
91#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
92#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
93#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
94#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
95#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
96#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
97#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
98#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
99 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
100#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
101
102#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
103#define HAVE_PAGE_AGP
104
105/* PTEIDX nibble */
106#define _PTEIDX_SECONDARY 0x8
107#define _PTEIDX_GROUP_IX 0x7
108
109
110/*
111 * POWER4 and newer have per page execute protection, older chips can only
112 * do this on a segment (256MB) basis.
113 *
114 * Also, write permissions imply read permissions.
115 * This is the closest we can get..
116 *
117 * Note due to the way vm flags are laid out, the bits are XWR
118 */
119#define __P000 PAGE_NONE
120#define __P001 PAGE_READONLY
121#define __P010 PAGE_COPY
122#define __P011 PAGE_COPY
123#define __P100 PAGE_READONLY_X
124#define __P101 PAGE_READONLY_X
125#define __P110 PAGE_COPY_X
126#define __P111 PAGE_COPY_X
127
128#define __S000 PAGE_NONE
129#define __S001 PAGE_READONLY
130#define __S010 PAGE_SHARED
131#define __S011 PAGE_SHARED
132#define __S100 PAGE_READONLY_X
133#define __S101 PAGE_READONLY_X
134#define __S110 PAGE_SHARED_X
135#define __S111 PAGE_SHARED_X
136
137#ifndef __ASSEMBLY__
138
139/*
140 * ZERO_PAGE is a global shared page that is always zero: used
141 * for zero-mapped memory areas etc..
142 */
143extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
144#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
145#endif /* __ASSEMBLY__ */
146
147#ifdef CONFIG_HUGETLB_PAGE
148
149#define HAVE_ARCH_UNMAPPED_AREA
150#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
151
152#endif
153
154#ifndef __ASSEMBLY__
155
156/*
157 * Conversion functions: convert a page and protection to a page entry,
158 * and a page entry and page directory to the page they refer to.
159 *
160 * mk_pte takes a (struct page *) as input
161 */
162#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
163
164static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
165{
166 pte_t pte;
167
168
169 pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
170 return pte;
171}
172
173#define pte_modify(_pte, newprot) \
174 (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
175
176#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
177#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
178
179/* pte_clear moved to later in this file */
180
181#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
182#define pte_page(x) pfn_to_page(pte_pfn(x))
183
184#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
185#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
186
187#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
188#define pmd_none(pmd) (!pmd_val(pmd))
189#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
190 || (pmd_val(pmd) & PMD_BAD_BITS))
191#define pmd_present(pmd) (pmd_val(pmd) != 0)
192#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
193#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
194#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
195
196#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval))
197#define pud_none(pud) (!pud_val(pud))
198#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
199 || (pud_val(pud) & PUD_BAD_BITS))
200#define pud_present(pud) (pud_val(pud) != 0)
201#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
202#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
203#define pud_page(pud) virt_to_page(pud_page_vaddr(pud))
204
205#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
206
207/*
208 * Find an entry in a page-table-directory. We combine the address region
209 * (the high order N bits) and the pgd portion of the address.
210 */
211/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
212#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
213
214#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
215
216#define pmd_offset(pudp,addr) \
217 (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
218
219#define pte_offset_kernel(dir,addr) \
220 (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
221
222#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
223#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
224#define pte_unmap(pte) do { } while(0)
225#define pte_unmap_nested(pte) do { } while(0)
226
227/* to find an entry in a kernel page-table-directory */
228/* This now only contains the vmalloc pages */
229#define pgd_offset_k(address) pgd_offset(&init_mm, address)
230
231/*
232 * The following only work if pte_present() is true.
233 * Undefined behaviour if not..
234 */
235static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;}
236static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
237static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
238static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
239static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
240static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
241
242static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
243static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
244
245static inline pte_t pte_rdprotect(pte_t pte) {
246 pte_val(pte) &= ~_PAGE_USER; return pte; }
247static inline pte_t pte_exprotect(pte_t pte) {
248 pte_val(pte) &= ~_PAGE_EXEC; return pte; }
249static inline pte_t pte_wrprotect(pte_t pte) {
250 pte_val(pte) &= ~(_PAGE_RW); return pte; }
251static inline pte_t pte_mkclean(pte_t pte) {
252 pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
253static inline pte_t pte_mkold(pte_t pte) {
254 pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
255static inline pte_t pte_mkread(pte_t pte) {
256 pte_val(pte) |= _PAGE_USER; return pte; }
257static inline pte_t pte_mkexec(pte_t pte) {
258 pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
259static inline pte_t pte_mkwrite(pte_t pte) {
260 pte_val(pte) |= _PAGE_RW; return pte; }
261static inline pte_t pte_mkdirty(pte_t pte) {
262 pte_val(pte) |= _PAGE_DIRTY; return pte; }
263static inline pte_t pte_mkyoung(pte_t pte) {
264 pte_val(pte) |= _PAGE_ACCESSED; return pte; }
265static inline pte_t pte_mkhuge(pte_t pte) {
266 return pte; }
267
268/* Atomic PTE updates */
269static inline unsigned long pte_update(struct mm_struct *mm,
270 unsigned long addr,
271 pte_t *ptep, unsigned long clr,
272 int huge)
273{
274 unsigned long old, tmp;
275
276 __asm__ __volatile__(
277 "1: ldarx %0,0,%3 # pte_update\n\
278 andi. %1,%0,%6\n\
279 bne- 1b \n\
280 andc %1,%0,%4 \n\
281 stdcx. %1,0,%3 \n\
282 bne- 1b"
283 : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
284 : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
285 : "cc" );
286
287 if (old & _PAGE_HASHPTE)
288 hpte_need_flush(mm, addr, ptep, old, huge);
289 return old;
290}
291
292static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
293 unsigned long addr, pte_t *ptep)
294{
295 unsigned long old;
296
297 if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
298 return 0;
299 old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
300 return (old & _PAGE_ACCESSED) != 0;
301}
302#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
303#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
304({ \
305 int __r; \
306 __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
307 __r; \
308})
309
310/*
311 * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
312 * moment we always flush but we need to fix hpte_update and test if the
313 * optimisation is worth it.
314 */
315static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
316 unsigned long addr, pte_t *ptep)
317{
318 unsigned long old;
319
320 if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
321 return 0;
322 old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0);
323 return (old & _PAGE_DIRTY) != 0;
324}
325#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
326#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \
327({ \
328 int __r; \
329 __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
330 __r; \
331})
332
333#define __HAVE_ARCH_PTEP_SET_WRPROTECT
334static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
335 pte_t *ptep)
336{
337 unsigned long old;
338
339 if ((pte_val(*ptep) & _PAGE_RW) == 0)
340 return;
341 old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
342}
343
344/*
345 * We currently remove entries from the hashtable regardless of whether
346 * the entry was young or dirty. The generic routines only flush if the
347 * entry was young or dirty which is not good enough.
348 *
349 * We should be more intelligent about this but for the moment we override
350 * these functions and force a tlb flush unconditionally
351 */
352#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
353#define ptep_clear_flush_young(__vma, __address, __ptep) \
354({ \
355 int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
356 __ptep); \
357 __young; \
358})
359
360#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
361#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
362({ \
363 int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
364 __ptep); \
365 __dirty; \
366})
367
368#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
369static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
370 unsigned long addr, pte_t *ptep)
371{
372 unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
373 return __pte(old);
374}
375
376static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
377 pte_t * ptep)
378{
379 pte_update(mm, addr, ptep, ~0UL, 0);
380}
381
382/*
383 * set_pte stores a linux PTE into the linux page table.
384 */
385static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
386 pte_t *ptep, pte_t pte)
387{
388 if (pte_present(*ptep))
389 pte_clear(mm, addr, ptep);
390 pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
391 *ptep = pte;
392}
393
394/* Set the dirty and/or accessed bits atomically in a linux PTE; this
395 * function doesn't need to flush the hash entry
396 */
397#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
398static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
399{
400 unsigned long bits = pte_val(entry) &
401 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
402 unsigned long old, tmp;
403
404 __asm__ __volatile__(
405 "1: ldarx %0,0,%4\n\
406 andi. %1,%0,%6\n\
407 bne- 1b \n\
408 or %0,%3,%0\n\
409 stdcx. %0,0,%4\n\
410 bne- 1b"
411 :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
412 :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
413 :"cc");
414}
415#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
416 do { \
417 __ptep_set_access_flags(__ptep, __entry, __dirty); \
418 flush_tlb_page_nohash(__vma, __address); \
419 } while(0)
420
421/*
422 * Macro to mark a page protection value as "uncacheable".
423 */
424#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
425
426struct file;
427extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
428 unsigned long size, pgprot_t vma_prot);
429#define __HAVE_PHYS_MEM_ACCESS_PROT
430
431#define __HAVE_ARCH_PTE_SAME
432#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
433
434#define pte_ERROR(e) \
435 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
436#define pmd_ERROR(e) \
437 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
438#define pgd_ERROR(e) \
439 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
440
441extern pgd_t swapper_pg_dir[];
442
443extern void paging_init(void);
444
445/* Encode and de-code a swap entry */
446#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
447#define __swp_offset(entry) ((entry).val >> 8)
448#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
449#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
450#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
451#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT)
452#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
453#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
454
455/*
456 * kern_addr_valid is intended to indicate whether an address is a valid
457 * kernel address. Most 32-bit archs define it as always true (like this)
458 * but most 64-bit archs actually perform a test. What should we do here?
459 * The only use is in fs/ncpfs/dir.c
460 */
461#define kern_addr_valid(addr) (1)
462
463#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
464 remap_pfn_range(vma, vaddr, pfn, size, prot)
465
466void pgtable_cache_init(void);
467
468/*
469 * find_linux_pte returns the address of a linux pte for a given
470 * effective address and directory. If not found, it returns zero.
471 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
472{
473 pgd_t *pg;
474 pud_t *pu;
475 pmd_t *pm;
476 pte_t *pt = NULL;
477
478 pg = pgdir + pgd_index(ea);
479 if (!pgd_none(*pg)) {
480 pu = pud_offset(pg, ea);
481 if (!pud_none(*pu)) {
482 pm = pmd_offset(pu, ea);
483 if (pmd_present(*pm))
484 pt = pte_offset_kernel(pm, ea);
485 }
486 }
487 return pt;
488}
489
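/*
 * Illustrative usage sketch (not part of the original header): resolving
 * a kernel virtual address to a pfn by walking the kernel page tables
 * (swapper_pg_dir) with find_linux_pte(). Real callers must ensure the
 * page tables cannot change underneath them while walking.
 */
static inline unsigned long example_kernel_ea_to_pfn(unsigned long ea)
{
	pte_t *ptep = find_linux_pte(swapper_pg_dir, ea);

	if (ptep == NULL || !pte_present(*ptep))
		return 0;
	return pte_pfn(*ptep);
}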
490#endif /* __ASSEMBLY__ */
491
492#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 19edb6982b81..78bf4ae712a6 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -2,502 +2,15 @@
2#define _ASM_POWERPC_PGTABLE_H
3#ifdef __KERNEL__
4
5#ifndef CONFIG_PPC64 5#if defined(CONFIG_PPC64)
6#include <asm-ppc/pgtable.h> 6# include <asm/pgtable-ppc64.h>
7#else 7#else
8 8# include <asm/pgtable-ppc32.h>
9/*
10 * This file contains the functions and defines necessary to modify and use
11 * the ppc64 hashed page table.
12 */
13
14#ifndef __ASSEMBLY__
15#include <linux/stddef.h>
16#include <asm/processor.h> /* For TASK_SIZE */
17#include <asm/mmu.h>
18#include <asm/page.h>
19#include <asm/tlbflush.h>
20struct mm_struct;
21#endif /* __ASSEMBLY__ */
22
23#ifdef CONFIG_PPC_64K_PAGES
24#include <asm/pgtable-64k.h>
25#else
26#include <asm/pgtable-4k.h>
27#endif
28
29#define FIRST_USER_ADDRESS 0
30
31/*
32 * Size of EA range mapped by our pagetables.
33 */
34#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
35 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
36#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
37
38#if TASK_SIZE_USER64 > PGTABLE_RANGE
39#error TASK_SIZE_USER64 exceeds pagetable range
40#endif
41
42#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
43#error TASK_SIZE_USER64 exceeds user VSID range
44#endif
45
46/*
47 * Define the address range of the vmalloc VM area.
48 */
49#define VMALLOC_START ASM_CONST(0xD000000000000000)
50#define VMALLOC_SIZE ASM_CONST(0x80000000000)
51#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
52
53/*
54 * Define the address range of the imalloc VM area.
55 */
56#define PHBS_IO_BASE VMALLOC_END
57#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
58#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
59
60/*
61 * Region IDs
62 */
63#define REGION_SHIFT 60UL
64#define REGION_MASK (0xfUL << REGION_SHIFT)
65#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
66
67#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
68#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
69#define USER_REGION_ID (0UL)
70
71/*
72 * Common bits in a linux-style PTE. These match the bits in the
73 * (hardware-defined) PowerPC PTE as closely as possible. Additional
74 * bits may be defined in pgtable-*.h
75 */
76#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
77#define _PAGE_USER 0x0002 /* matches one of the PP bits */
78#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
79#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
80#define _PAGE_GUARDED 0x0008
81#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
82#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
83#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
84#define _PAGE_DIRTY 0x0080 /* C: page changed */
85#define _PAGE_ACCESSED 0x0100 /* R: page referenced */
86#define _PAGE_RW 0x0200 /* software: user write access allowed */
87#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */
88#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
89
90#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
91
92#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
93
94/* __pgprot defined in asm-powerpc/page.h */
95#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
96
97#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
98#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
99#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
100#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
101#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
102#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
103#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
104#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
105 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
106#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
107
108#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
109#define HAVE_PAGE_AGP
110
111/* PTEIDX nibble */
112#define _PTEIDX_SECONDARY 0x8
113#define _PTEIDX_GROUP_IX 0x7
114
115
116/*
117 * POWER4 and newer have per page execute protection, older chips can only
118 * do this on a segment (256MB) basis.
119 *
120 * Also, write permissions imply read permissions.
121 * This is the closest we can get..
122 *
123 * Note due to the way vm flags are laid out, the bits are XWR
124 */
125#define __P000 PAGE_NONE
126#define __P001 PAGE_READONLY
127#define __P010 PAGE_COPY
128#define __P011 PAGE_COPY
129#define __P100 PAGE_READONLY_X
130#define __P101 PAGE_READONLY_X
131#define __P110 PAGE_COPY_X
132#define __P111 PAGE_COPY_X
133
134#define __S000 PAGE_NONE
135#define __S001 PAGE_READONLY
136#define __S010 PAGE_SHARED
137#define __S011 PAGE_SHARED
138#define __S100 PAGE_READONLY_X
139#define __S101 PAGE_READONLY_X
140#define __S110 PAGE_SHARED_X
141#define __S111 PAGE_SHARED_X
142
143#ifndef __ASSEMBLY__
144
145/*
146 * ZERO_PAGE is a global shared page that is always zero: used
147 * for zero-mapped memory areas etc..
148 */
149extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
150#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
151#endif /* __ASSEMBLY__ */
152
153#ifdef CONFIG_HUGETLB_PAGE
154
155#define HAVE_ARCH_UNMAPPED_AREA
156#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
157
158#endif
159
160#ifndef __ASSEMBLY__
161
162/*
163 * Conversion functions: convert a page and protection to a page entry,
164 * and a page entry and page directory to the page they refer to.
165 *
166 * mk_pte takes a (struct page *) as input
167 */
168#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
169
170static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
171{
172 pte_t pte;
173
174
175 pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
176 return pte;
177}
178
179#define pte_modify(_pte, newprot) \
180 (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
181
182#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
183#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
184
185/* pte_clear moved to later in this file */
186
187#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
188#define pte_page(x) pfn_to_page(pte_pfn(x))
189
190#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
191#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
192
193#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
194#define pmd_none(pmd) (!pmd_val(pmd))
195#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
196 || (pmd_val(pmd) & PMD_BAD_BITS))
197#define pmd_present(pmd) (pmd_val(pmd) != 0)
198#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
199#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
200#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
201
202#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval))
203#define pud_none(pud) (!pud_val(pud))
204#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
205 || (pud_val(pud) & PUD_BAD_BITS))
206#define pud_present(pud) (pud_val(pud) != 0)
207#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
208#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
209#define pud_page(pud) virt_to_page(pud_page_vaddr(pud))
210
211#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
212
213/*
214 * Find an entry in a page-table-directory. We combine the address region
215 * (the high order N bits) and the pgd portion of the address.
216 */
217/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
218#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
219
220#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
221
222#define pmd_offset(pudp,addr) \
223 (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
224
225#define pte_offset_kernel(dir,addr) \
226 (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
227
228#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
229#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
230#define pte_unmap(pte) do { } while(0)
231#define pte_unmap_nested(pte) do { } while(0)
232
233/* to find an entry in a kernel page-table-directory */
234/* This now only contains the vmalloc pages */
235#define pgd_offset_k(address) pgd_offset(&init_mm, address)
236
237/*
238 * The following only work if pte_present() is true.
239 * Undefined behaviour if not..
240 */
241static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;}
242static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
243static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
244static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
245static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
246static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
247
248static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
249static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
250
251static inline pte_t pte_rdprotect(pte_t pte) {
252 pte_val(pte) &= ~_PAGE_USER; return pte; }
253static inline pte_t pte_exprotect(pte_t pte) {
254 pte_val(pte) &= ~_PAGE_EXEC; return pte; }
255static inline pte_t pte_wrprotect(pte_t pte) {
256 pte_val(pte) &= ~(_PAGE_RW); return pte; }
257static inline pte_t pte_mkclean(pte_t pte) {
258 pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
259static inline pte_t pte_mkold(pte_t pte) {
260 pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
261static inline pte_t pte_mkread(pte_t pte) {
262 pte_val(pte) |= _PAGE_USER; return pte; }
263static inline pte_t pte_mkexec(pte_t pte) {
264 pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
265static inline pte_t pte_mkwrite(pte_t pte) {
266 pte_val(pte) |= _PAGE_RW; return pte; }
267static inline pte_t pte_mkdirty(pte_t pte) {
268 pte_val(pte) |= _PAGE_DIRTY; return pte; }
269static inline pte_t pte_mkyoung(pte_t pte) {
270 pte_val(pte) |= _PAGE_ACCESSED; return pte; }
271static inline pte_t pte_mkhuge(pte_t pte) {
272 return pte; }
273
274/* Atomic PTE updates */
275static inline unsigned long pte_update(struct mm_struct *mm,
276 unsigned long addr,
277 pte_t *ptep, unsigned long clr,
278 int huge)
279{
280 unsigned long old, tmp;
281
282 __asm__ __volatile__(
283 "1: ldarx %0,0,%3 # pte_update\n\
284 andi. %1,%0,%6\n\
285 bne- 1b \n\
286 andc %1,%0,%4 \n\
287 stdcx. %1,0,%3 \n\
288 bne- 1b"
289 : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
290 : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
291 : "cc" );
292
293 if (old & _PAGE_HASHPTE)
294 hpte_need_flush(mm, addr, ptep, old, huge);
295 return old;
296}
297
298static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
299 unsigned long addr, pte_t *ptep)
300{
301 unsigned long old;
302
303 if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
304 return 0;
305 old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
306 return (old & _PAGE_ACCESSED) != 0;
307}
308#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
309#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
310({ \
311 int __r; \
312 __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
313 __r; \
314})
315
316/*
317 * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
318 * moment we always flush but we need to fix hpte_update and test if the
319 * optimisation is worth it.
320 */
321static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
322 unsigned long addr, pte_t *ptep)
323{
324 unsigned long old;
325
326 if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
327 return 0;
328 old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0);
329 return (old & _PAGE_DIRTY) != 0;
330}
331#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
332#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \
333({ \
334 int __r; \
335 __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
336 __r; \
337})
338
339#define __HAVE_ARCH_PTEP_SET_WRPROTECT
340static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
341 pte_t *ptep)
342{
343 unsigned long old;
344
345 if ((pte_val(*ptep) & _PAGE_RW) == 0)
346 return;
347 old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
348}
349
350/*
351 * We currently remove entries from the hashtable regardless of whether
352 * the entry was young or dirty. The generic routines only flush if the
353 * entry was young or dirty, which is not good enough.
354 *
355 * We should be more intelligent about this, but for the moment we override
356 * these functions and force a TLB flush unconditionally.
357 */
358#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
359#define ptep_clear_flush_young(__vma, __address, __ptep) \
360({ \
361 int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
362 __ptep); \
363 __young; \
364})
365
366#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
367#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
368({ \
369 int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
370 __ptep); \
371 __dirty; \
372})
373
374#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
375static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
376 unsigned long addr, pte_t *ptep)
377{
378 unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
379 return __pte(old);
380}
381
382static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
383 pte_t * ptep)
384{
385 pte_update(mm, addr, ptep, ~0UL, 0);
386}
387
388/*
389 * set_pte stores a linux PTE into the linux page table.
390 */
391static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
392 pte_t *ptep, pte_t pte)
393{
394 if (pte_present(*ptep))
395 pte_clear(mm, addr, ptep);
396 pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
397 *ptep = pte;
398}
399
400/* Set the dirty and/or accessed bits atomically in a linux PTE; this
401 * function doesn't need to flush the hash entry.
402 */
403#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
404static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
405{
406 unsigned long bits = pte_val(entry) &
407 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
408 unsigned long old, tmp;
409
410 __asm__ __volatile__(
411 "1: ldarx %0,0,%4\n\
412 andi. %1,%0,%6\n\
413 bne- 1b \n\
414 or %0,%3,%0\n\
415 stdcx. %0,0,%4\n\
416 bne- 1b"
417 :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
418 :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
419 :"cc");
420}
421#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
422 do { \
423 __ptep_set_access_flags(__ptep, __entry, __dirty); \
424 flush_tlb_page_nohash(__vma, __address); \
425 } while(0)
426
427/*
428 * Macro to mark a page protection value as "uncacheable".
429 */
430#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
431
432struct file;
433extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
434 unsigned long size, pgprot_t vma_prot);
435#define __HAVE_PHYS_MEM_ACCESS_PROT
436
437#define __HAVE_ARCH_PTE_SAME
438#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
439
440#define pte_ERROR(e) \
441 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
442#define pmd_ERROR(e) \
443 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
444#define pgd_ERROR(e) \
445 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
446
447extern pgd_t swapper_pg_dir[];
448
449extern void paging_init(void);
450
451/* Encode and de-code a swap entry */
452#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
453#define __swp_offset(entry) ((entry).val >> 8)
454#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
455#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
456#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
457#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT)
458#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
459#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
460
461/*
462 * kern_addr_valid is intended to indicate whether an address is a valid
463 * kernel address. Most 32-bit archs define it as always true (like this)
464 * but most 64-bit archs actually perform a test. What should we do here?
465 * The only use is in fs/ncpfs/dir.c
466 */
467#define kern_addr_valid(addr) (1)
468
469#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
470 remap_pfn_range(vma, vaddr, pfn, size, prot)
471
472void pgtable_cache_init(void);
473
474/*
475 * find_linux_pte returns the address of a linux pte for a given
476 * effective address and directory. If not found, it returns zero.
477 */ static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
478{
479 pgd_t *pg;
480 pud_t *pu;
481 pmd_t *pm;
482 pte_t *pt = NULL;
483
484 pg = pgdir + pgd_index(ea);
485 if (!pgd_none(*pg)) {
486 pu = pud_offset(pg, ea);
487 if (!pud_none(*pu)) {
488 pm = pmd_offset(pu, ea);
489 if (pmd_present(*pm))
490 pt = pte_offset_kernel(pm, ea);
491 }
492 }
493 return pt;
494}
495
496
497#include <asm-generic/pgtable.h> 12#include <asm-generic/pgtable.h>
498
499#endif /* __ASSEMBLY__ */ 13#endif /* __ASSEMBLY__ */
500 14
501#endif /* CONFIG_PPC64 */
502#endif /* __KERNEL__ */ 15#endif /* __KERNEL__ */
503#endif /* _ASM_POWERPC_PGTABLE_H */ 16#endif /* _ASM_POWERPC_PGTABLE_H */
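
The pte_update() routine in the 64-bit pgtable header above is the core of the hash-MMU PTE handling: it spins in a ldarx/stdcx. loop while _PAGE_BUSY is held, clears the requested bits, and hands the old value to hpte_need_flush() when a stale hash entry may need removing. Below is a minimal userspace sketch of the same clear-bits-and-return-old pattern, with GCC __atomic builtins standing in for the PowerPC reservation instructions; the names pte_word_update, PAGE_BUSY_BIT and PAGE_DIRTY_BIT are invented for the illustration and are not kernel symbols.

#include <stdint.h>
#include <stdio.h>

#define PAGE_BUSY_BIT	(1UL << 0)	/* stand-in for _PAGE_BUSY  */
#define PAGE_DIRTY_BIT	(1UL << 1)	/* stand-in for _PAGE_DIRTY */

/* Atomically clear 'clr' in *pte_word and return the previous value,
 * waiting while the busy bit is set, as pte_update() does. */
static unsigned long pte_word_update(unsigned long *pte_word, unsigned long clr)
{
	unsigned long old = __atomic_load_n(pte_word, __ATOMIC_RELAXED);
	unsigned long new;

	do {
		while (old & PAGE_BUSY_BIT)	/* wait for the busy flag */
			old = __atomic_load_n(pte_word, __ATOMIC_RELAXED);
		new = old & ~clr;
	} while (!__atomic_compare_exchange_n(pte_word, &old, new, 0,
					      __ATOMIC_ACQ_REL, __ATOMIC_RELAXED));
	return old;
}

int main(void)
{
	unsigned long pte = PAGE_DIRTY_BIT | (0x1234UL << 12);
	unsigned long old = pte_word_update(&pte, PAGE_DIRTY_BIT);

	printf("old=%#lx new=%#lx\n", old, pte);
	return 0;
}
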
diff --git a/include/asm-powerpc/pmac_feature.h b/include/asm-powerpc/pmac_feature.h
index d3599cc9aa74..d43d91beba9b 100644
--- a/include/asm-powerpc/pmac_feature.h
+++ b/include/asm-powerpc/pmac_feature.h
@@ -146,7 +146,7 @@ struct device_node;
146static inline long pmac_call_feature(int selector, struct device_node* node, 146static inline long pmac_call_feature(int selector, struct device_node* node,
147 long param, long value) 147 long param, long value)
148{ 148{
149 if (!ppc_md.feature_call) 149 if (!ppc_md.feature_call || !machine_is(powermac))
150 return -ENODEV; 150 return -ENODEV;
151 return ppc_md.feature_call(selector, node, param, value); 151 return ppc_md.feature_call(selector, node, param, value);
152} 152}
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index ec400f608e16..6845af93ba91 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -20,7 +20,6 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <asm/irq.h> 21#include <asm/irq.h>
22#include <asm/atomic.h> 22#include <asm/atomic.h>
23#include <asm/io.h>
24 23
25/* Definitions used by the flattened device tree */ 24/* Definitions used by the flattened device tree */
26#define OF_DT_HEADER 0xd00dfeed /* marker */ 25#define OF_DT_HEADER 0xd00dfeed /* marker */
@@ -334,30 +333,17 @@ extern int of_irq_map_one(struct device_node *device, int index,
334struct pci_dev; 333struct pci_dev;
335extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); 334extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
336 335
337static inline int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) 336extern int of_irq_to_resource(struct device_node *dev, int index,
338{ 337 struct resource *r);
339 int irq = irq_of_parse_and_map(dev, index);
340
341 /* Only dereference the resource if both the
342 * resource and the irq are valid. */
343 if (r && irq != NO_IRQ) {
344 r->start = r->end = irq;
345 r->flags = IORESOURCE_IRQ;
346 }
347
348 return irq;
349}
350
351static inline void __iomem *of_iomap(struct device_node *np, int index)
352{
353 struct resource res;
354
355 if (of_address_to_resource(np, index, &res))
356 return NULL;
357
358 return ioremap(res.start, 1 + res.end - res.start);
359}
360 338
339/**
340 * of_iomap - Maps the memory mapped IO for a given device_node
341 * @device: the device whose io range will be mapped
342 * @index: index of the io range
343 *
344 * Returns a pointer to the mapped memory
345 */
346extern void __iomem *of_iomap(struct device_node *device, int index);
361 347
362#endif /* __KERNEL__ */ 348#endif /* __KERNEL__ */
363#endif /* _POWERPC_PROM_H */ 349#endif /* _POWERPC_PROM_H */
diff --git a/include/asm-powerpc/ps3.h b/include/asm-powerpc/ps3.h
index 821581a8b643..13c372df99e8 100644
--- a/include/asm-powerpc/ps3.h
+++ b/include/asm-powerpc/ps3.h
@@ -167,26 +167,31 @@ enum ps3_cpu_binding {
167 PS3_BINDING_CPU_1 = 1, 167 PS3_BINDING_CPU_1 = 1,
168}; 168};
169 169
170int ps3_alloc_io_irq(enum ps3_cpu_binding cpu, unsigned int interrupt_id, 170int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
171 unsigned int *virq); 171 unsigned int *virq);
172int ps3_free_io_irq(unsigned int virq); 172int ps3_virq_destroy(unsigned int virq);
173int ps3_alloc_event_irq(enum ps3_cpu_binding cpu, unsigned int *virq); 173int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
174int ps3_free_event_irq(unsigned int virq); 174 unsigned int *virq);
175int ps3_irq_plug_destroy(unsigned int virq);
176int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq);
177int ps3_event_receive_port_destroy(unsigned int virq);
175int ps3_send_event_locally(unsigned int virq); 178int ps3_send_event_locally(unsigned int virq);
176int ps3_connect_event_irq(enum ps3_cpu_binding cpu, 179
177 const struct ps3_device_id *did, unsigned int interrupt_id, 180int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
178 unsigned int *virq); 181 unsigned int *virq);
179int ps3_disconnect_event_irq(const struct ps3_device_id *did, 182int ps3_io_irq_destroy(unsigned int virq);
180 unsigned int interrupt_id, unsigned int virq); 183int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
181int ps3_alloc_vuart_irq(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
182 unsigned int *virq); 184 unsigned int *virq);
183int ps3_free_vuart_irq(unsigned int virq); 185int ps3_vuart_irq_destroy(unsigned int virq);
184int ps3_alloc_spe_irq(enum ps3_cpu_binding cpu, unsigned long spe_id, 186int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
185 unsigned int class, unsigned int *virq); 187 unsigned int class, unsigned int *virq);
186int ps3_free_spe_irq(unsigned int virq); 188int ps3_spe_irq_destroy(unsigned int virq);
187int ps3_alloc_irq(enum ps3_cpu_binding cpu, unsigned long outlet, 189
190int ps3_sb_event_receive_port_setup(enum ps3_cpu_binding cpu,
191 const struct ps3_device_id *did, unsigned int interrupt_id,
188 unsigned int *virq); 192 unsigned int *virq);
189int ps3_free_irq(unsigned int virq); 193int ps3_sb_event_receive_port_destroy(const struct ps3_device_id *did,
194 unsigned int interrupt_id, unsigned int virq);
190 195
191/* lv1 result codes */ 196/* lv1 result codes */
192 197
diff --git a/include/asm-powerpc/suspend.h b/include/asm-powerpc/suspend.h
new file mode 100644
index 000000000000..cbf2c9404c37
--- /dev/null
+++ b/include/asm-powerpc/suspend.h
@@ -0,0 +1,9 @@
1#ifndef __ASM_POWERPC_SUSPEND_H
2#define __ASM_POWERPC_SUSPEND_H
3
4static inline int arch_prepare_suspend(void) { return 0; }
5
6void save_processor_state(void);
7void restore_processor_state(void);
8
9#endif /* __ASM_POWERPC_SUSPEND_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index d3e0906ff2bc..09621f611dbc 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -7,7 +7,6 @@
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8 8
9#include <asm/hw_irq.h> 9#include <asm/hw_irq.h>
10#include <asm/atomic.h>
11 10
12/* 11/*
13 * Memory barrier. 12 * Memory barrier.
@@ -227,6 +226,29 @@ __xchg_u32(volatile void *p, unsigned long val)
227 return prev; 226 return prev;
228} 227}
229 228
229/*
230 * Atomic exchange
231 *
232 * Changes the memory location '*ptr' to be val and returns
233 * the previous value stored there.
234 */
235static __inline__ unsigned long
236__xchg_u32_local(volatile void *p, unsigned long val)
237{
238 unsigned long prev;
239
240 __asm__ __volatile__(
241"1: lwarx %0,0,%2 \n"
242 PPC405_ERR77(0,%2)
243" stwcx. %3,0,%2 \n\
244 bne- 1b"
245 : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
246 : "r" (p), "r" (val)
247 : "cc", "memory");
248
249 return prev;
250}
251
230#ifdef CONFIG_PPC64 252#ifdef CONFIG_PPC64
231static __inline__ unsigned long 253static __inline__ unsigned long
232__xchg_u64(volatile void *p, unsigned long val) 254__xchg_u64(volatile void *p, unsigned long val)
@@ -246,6 +268,23 @@ __xchg_u64(volatile void *p, unsigned long val)
246 268
247 return prev; 269 return prev;
248} 270}
271
272static __inline__ unsigned long
273__xchg_u64_local(volatile void *p, unsigned long val)
274{
275 unsigned long prev;
276
277 __asm__ __volatile__(
278"1: ldarx %0,0,%2 \n"
279 PPC405_ERR77(0,%2)
280" stdcx. %3,0,%2 \n\
281 bne- 1b"
282 : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
283 : "r" (p), "r" (val)
284 : "cc", "memory");
285
286 return prev;
287}
249#endif 288#endif
250 289
251/* 290/*
@@ -269,13 +308,32 @@ __xchg(volatile void *ptr, unsigned long x, unsigned int size)
269 return x; 308 return x;
270} 309}
271 310
311static __inline__ unsigned long
312__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
313{
314 switch (size) {
315 case 4:
316 return __xchg_u32_local(ptr, x);
317#ifdef CONFIG_PPC64
318 case 8:
319 return __xchg_u64_local(ptr, x);
320#endif
321 }
322 __xchg_called_with_bad_pointer();
323 return x;
324}
272#define xchg(ptr,x) \ 325#define xchg(ptr,x) \
273 ({ \ 326 ({ \
274 __typeof__(*(ptr)) _x_ = (x); \ 327 __typeof__(*(ptr)) _x_ = (x); \
275 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ 328 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
276 }) 329 })
277 330
278#define tas(ptr) (xchg((ptr),1)) 331#define xchg_local(ptr,x) \
332 ({ \
333 __typeof__(*(ptr)) _x_ = (x); \
334 (__typeof__(*(ptr))) __xchg_local((ptr), \
335 (unsigned long)_x_, sizeof(*(ptr))); \
336 })
279 337
280/* 338/*
281 * Compare and exchange - if *p == old, set it to new, 339 * Compare and exchange - if *p == old, set it to new,
@@ -306,6 +364,28 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
306 return prev; 364 return prev;
307} 365}
308 366
367static __inline__ unsigned long
368__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
369 unsigned long new)
370{
371 unsigned int prev;
372
373 __asm__ __volatile__ (
374"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
375 cmpw 0,%0,%3\n\
376 bne- 2f\n"
377 PPC405_ERR77(0,%2)
378" stwcx. %4,0,%2\n\
379 bne- 1b"
380 "\n\
3812:"
382 : "=&r" (prev), "+m" (*p)
383 : "r" (p), "r" (old), "r" (new)
384 : "cc", "memory");
385
386 return prev;
387}
388
309#ifdef CONFIG_PPC64 389#ifdef CONFIG_PPC64
310static __inline__ unsigned long 390static __inline__ unsigned long
311__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) 391__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
@@ -328,6 +408,27 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
328 408
329 return prev; 409 return prev;
330} 410}
411
412static __inline__ unsigned long
413__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
414 unsigned long new)
415{
416 unsigned long prev;
417
418 __asm__ __volatile__ (
419"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
420 cmpd 0,%0,%3\n\
421 bne- 2f\n\
422 stdcx. %4,0,%2\n\
423 bne- 1b"
424 "\n\
4252:"
426 : "=&r" (prev), "+m" (*p)
427 : "r" (p), "r" (old), "r" (new)
428 : "cc", "memory");
429
430 return prev;
431}
331#endif 432#endif
332 433
333/* This function doesn't exist, so you'll get a linker error 434/* This function doesn't exist, so you'll get a linker error
@@ -350,6 +451,22 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
350 return old; 451 return old;
351} 452}
352 453
454static __inline__ unsigned long
455__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
456 unsigned int size)
457{
458 switch (size) {
459 case 4:
460 return __cmpxchg_u32_local(ptr, old, new);
461#ifdef CONFIG_PPC64
462 case 8:
463 return __cmpxchg_u64_local(ptr, old, new);
464#endif
465 }
466 __cmpxchg_called_with_bad_pointer();
467 return old;
468}
469
353#define cmpxchg(ptr,o,n) \ 470#define cmpxchg(ptr,o,n) \
354 ({ \ 471 ({ \
355 __typeof__(*(ptr)) _o_ = (o); \ 472 __typeof__(*(ptr)) _o_ = (o); \
@@ -358,6 +475,15 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
358 (unsigned long)_n_, sizeof(*(ptr))); \ 475 (unsigned long)_n_, sizeof(*(ptr))); \
359 }) 476 })
360 477
478
479#define cmpxchg_local(ptr,o,n) \
480 ({ \
481 __typeof__(*(ptr)) _o_ = (o); \
482 __typeof__(*(ptr)) _n_ = (n); \
483 (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
484 (unsigned long)_n_, sizeof(*(ptr))); \
485 })
486
361#ifdef CONFIG_PPC64 487#ifdef CONFIG_PPC64
362/* 488/*
363 * We handle most unaligned accesses in hardware. On the other hand 489 * We handle most unaligned accesses in hardware. On the other hand
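
The __xchg_u32_local/__xchg_u64_local and __cmpxchg_*_local helpers added to asm-powerpc/system.h above have the same value semantics as xchg() and cmpxchg() but omit the barrier instructions, so they are only suitable for data that no other CPU touches, such as per-CPU counters. A rough userspace illustration of those semantics, using GCC __atomic builtins with relaxed ordering in place of the lwarx/stwcx. sequences, is sketched below; cpu_private_counter is a made-up variable, not a kernel symbol.

#include <stdio.h>

static unsigned long cpu_private_counter;

int main(void)
{
	unsigned long old, expected;

	/* xchg_local(): unconditionally store 5 and return what was there. */
	old = __atomic_exchange_n(&cpu_private_counter, 5UL, __ATOMIC_RELAXED);

	/* cmpxchg_local(): store 6 only if the location still holds 5.
	 * On failure 'expected' is updated with the value actually seen,
	 * which mirrors cmpxchg() returning the old contents. */
	expected = 5UL;
	__atomic_compare_exchange_n(&cpu_private_counter, &expected, 6UL, 0,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED);

	printf("old=%lu seen=%lu now=%lu\n", old, expected, cpu_private_counter);
	return 0;
}
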
diff --git a/include/asm-powerpc/tsi108.h b/include/asm-powerpc/tsi108.h
index 4e95d153be84..f8b60793b7a9 100644
--- a/include/asm-powerpc/tsi108.h
+++ b/include/asm-powerpc/tsi108.h
@@ -68,8 +68,17 @@
68#define TSI108_PB_ERRCS_ES (1 << 1) 68#define TSI108_PB_ERRCS_ES (1 << 1)
69#define TSI108_PB_ISR_PBS_RD_ERR (1 << 8) 69#define TSI108_PB_ISR_PBS_RD_ERR (1 << 8)
70 70
71#define TSI108_PCI_CFG_BASE_PHYS (0xfb000000)
72#define TSI108_PCI_CFG_SIZE (0x01000000) 71#define TSI108_PCI_CFG_SIZE (0x01000000)
72
73/*
74 * PHY Configuration Options
75 *
76 * Specify "bcm54xx" in the compatible property of your device tree phy
77 * nodes if your board uses the Broadcom PHYs
78 */
79#define TSI108_PHY_MV88E 0 /* Marvell 88Exxxx PHY */
80#define TSI108_PHY_BCM54XX 1 /* Broadcom BCM54xx PHY */
81
73/* Global variables */ 82/* Global variables */
74 83
75extern u32 tsi108_pci_cfg_base; 84extern u32 tsi108_pci_cfg_base;
@@ -93,6 +102,7 @@ typedef struct {
93 u16 phy; /* phy address */ 102 u16 phy; /* phy address */
94 u16 irq_num; /* irq number */ 103 u16 irq_num; /* irq number */
95 u8 mac_addr[6]; /* phy mac address */ 104 u8 mac_addr[6]; /* phy mac address */
105 u16 phy_type; /* type of phy on board */
96} hw_info; 106} hw_info;
97 107
98extern u32 get_vir_csrbase(void); 108extern u32 get_vir_csrbase(void);
diff --git a/include/asm-powerpc/tsi108_pci.h b/include/asm-powerpc/tsi108_pci.h
new file mode 100644
index 000000000000..a9f92f73232c
--- /dev/null
+++ b/include/asm-powerpc/tsi108_pci.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2007 IBM Corp
3 *
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation; either version 2 of
8 * the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
18 * MA 02111-1307 USA
19 */
20
21#ifndef _ASM_PPC_TSI108_PCI_H
22#define _ASM_PPC_TSI108_PCI_H
23
24#include <asm/tsi108.h>
25
26/* Register definitions */
27#define TSI108_PCI_P2O_BAR0 (TSI108_PCI_OFFSET + 0x10)
28#define TSI108_PCI_P2O_BAR0_UPPER (TSI108_PCI_OFFSET + 0x14)
29#define TSI108_PCI_P2O_BAR2 (TSI108_PCI_OFFSET + 0x18)
30#define TSI108_PCI_P2O_BAR2_UPPER (TSI108_PCI_OFFSET + 0x1c)
31#define TSI108_PCI_P2O_PAGE_SIZES (TSI108_PCI_OFFSET + 0x4c)
32#define TSI108_PCI_PFAB_BAR0 (TSI108_PCI_OFFSET + 0x204)
33#define TSI108_PCI_PFAB_BAR0_UPPER (TSI108_PCI_OFFSET + 0x208)
34#define TSI108_PCI_PFAB_IO (TSI108_PCI_OFFSET + 0x20c)
35#define TSI108_PCI_PFAB_IO_UPPER (TSI108_PCI_OFFSET + 0x210)
36#define TSI108_PCI_PFAB_MEM32 (TSI108_PCI_OFFSET + 0x214)
37#define TSI108_PCI_PFAB_PFM3 (TSI108_PCI_OFFSET + 0x220)
38#define TSI108_PCI_PFAB_PFM4 (TSI108_PCI_OFFSET + 0x230)
39
40extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary);
41extern void tsi108_pci_int_init(struct device_node *node);
42extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
43extern void tsi108_clear_pci_cfg_error(void);
44
45#endif /* _ASM_PPC_TSI108_PCI_H */
diff --git a/include/asm-powerpc/udbg.h b/include/asm-powerpc/udbg.h
index d03d8557f706..ce9d82fb7b68 100644
--- a/include/asm-powerpc/udbg.h
+++ b/include/asm-powerpc/udbg.h
@@ -47,6 +47,7 @@ extern void __init udbg_init_rtas_panel(void);
47extern void __init udbg_init_rtas_console(void); 47extern void __init udbg_init_rtas_console(void);
48extern void __init udbg_init_debug_beat(void); 48extern void __init udbg_init_debug_beat(void);
49extern void __init udbg_init_btext(void); 49extern void __init udbg_init_btext(void);
50extern void __init udbg_init_44x_as1(void);
50 51
51#endif /* __KERNEL__ */ 52#endif /* __KERNEL__ */
52#endif /* _ASM_POWERPC_UDBG_H */ 53#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/include/asm-ppc/kdebug.h b/include/asm-ppc/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-ppc/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index b1fdbf40dba2..bed452d4a5f0 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -827,10 +827,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
827 remap_pfn_range(vma, vaddr, pfn, size, prot) 827 remap_pfn_range(vma, vaddr, pfn, size, prot)
828#endif 828#endif
829 829
830#define MK_IOSPACE_PFN(space, pfn) (pfn)
831#define GET_IOSPACE(pfn) 0
832#define GET_PFN(pfn) (pfn)
833
834/* 830/*
835 * No page table caches to initialise 831 * No page table caches to initialise
836 */ 832 */
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index 738943584c01..d84a3cf4d033 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -6,7 +6,6 @@
6 6
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8 8
9#include <asm/atomic.h>
10#include <asm/hw_irq.h> 9#include <asm/hw_irq.h>
11 10
12/* 11/*
@@ -170,7 +169,6 @@ xchg_u32(volatile void *p, unsigned long val)
170extern void __xchg_called_with_bad_pointer(void); 169extern void __xchg_called_with_bad_pointer(void);
171 170
172#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 171#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
173#define tas(ptr) (xchg((ptr),1))
174 172
175static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) 173static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
176{ 174{
diff --git a/include/asm-s390/kdebug.h b/include/asm-s390/kdebug.h
index d2d7ad276148..04418af08f85 100644
--- a/include/asm-s390/kdebug.h
+++ b/include/asm-s390/kdebug.h
@@ -8,21 +8,6 @@
8 8
9struct pt_regs; 9struct pt_regs;
10 10
11struct die_args {
12 struct pt_regs *regs;
13 const char *str;
14 long err;
15 int trapnr;
16 int signr;
17};
18
19/* Note - you should never unregister because that can race with NMIs.
20 * If you really want to do it first unregister - then synchronize_sched
21 * - then free.
22 */
23extern int register_die_notifier(struct notifier_block *);
24extern int unregister_die_notifier(struct notifier_block *);
25
26/* 11/*
27 * These are only here because kprobes.c wants them to implement a 12 * These are only here because kprobes.c wants them to implement a
28 * blatant layering violation. Will hopefully go away soon once all 13 * blatant layering violation. Will hopefully go away soon once all
@@ -37,8 +22,6 @@ static inline int unregister_page_fault_notifier(struct notifier_block *nb)
37 return 0; 22 return 0;
38} 23}
39 24
40extern struct atomic_notifier_head s390die_chain;
41
42enum die_val { 25enum die_val {
43 DIE_OOPS = 1, 26 DIE_OOPS = 1,
44 DIE_BPT, 27 DIE_BPT,
@@ -54,19 +37,6 @@ enum die_val {
54 DIE_NMI_IPI, 37 DIE_NMI_IPI,
55}; 38};
56 39
57static inline int notify_die(enum die_val val, const char *str,
58 struct pt_regs *regs, long err, int trap, int sig)
59{
60 struct die_args args = {
61 .regs = regs,
62 .str = str,
63 .err = err,
64 .trapnr = trap,
65 .signr = sig
66 };
67 return atomic_notifier_call_chain(&s390die_chain, val, &args);
68}
69
70extern void die(const char *, struct pt_regs *, long); 40extern void die(const char *, struct pt_regs *, long);
71 41
72#endif 42#endif
diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h
index 9c35c8ad1afd..7592af708b41 100644
--- a/include/asm-s390/kexec.h
+++ b/include/asm-s390/kexec.h
@@ -34,8 +34,6 @@
34/* The native architecture */ 34/* The native architecture */
35#define KEXEC_ARCH KEXEC_ARCH_S390 35#define KEXEC_ARCH KEXEC_ARCH_S390
36 36
37#define MAX_NOTE_BYTES 1024
38
39/* Provide a dummy definition to avoid build failures. */ 37/* Provide a dummy definition to avoid build failures. */
40static inline void crash_setup_regs(struct pt_regs *newregs, 38static inline void crash_setup_regs(struct pt_regs *newregs,
41 struct pt_regs *oldregs) { } 39 struct pt_regs *oldregs) { }
diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h
index 127f72e77419..74db1dc10a7d 100644
--- a/include/asm-s390/qdio.h
+++ b/include/asm-s390/qdio.h
@@ -120,6 +120,7 @@ extern unsigned long qdio_get_status(int irq);
120#define QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT 0x08 /* no effect on 120#define QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT 0x08 /* no effect on
121 adapter interrupts */ 121 adapter interrupts */
122#define QDIO_FLAG_DONT_SIGA 0x10 122#define QDIO_FLAG_DONT_SIGA 0x10
123#define QDIO_FLAG_PCI_OUT 0x20
123 124
124extern int do_QDIO(struct ccw_device*, unsigned int flags, 125extern int do_QDIO(struct ccw_device*, unsigned int flags,
125 unsigned int queue_number, 126 unsigned int queue_number,
diff --git a/include/asm-sh/kdebug.h b/include/asm-sh/kdebug.h
index ef009baf5a11..493c20629747 100644
--- a/include/asm-sh/kdebug.h
+++ b/include/asm-sh/kdebug.h
@@ -2,6 +2,7 @@
2#define __ASM_SH_KDEBUG_H 2#define __ASM_SH_KDEBUG_H
3 3
4#include <linux/notifier.h> 4#include <linux/notifier.h>
5#include <asm-generic/kdebug.h>
5 6
6struct pt_regs; 7struct pt_regs;
7 8
diff --git a/include/asm-sh/kexec.h b/include/asm-sh/kexec.h
index da36a7548601..00f4260ef09b 100644
--- a/include/asm-sh/kexec.h
+++ b/include/asm-sh/kexec.h
@@ -26,8 +26,6 @@
26/* The native architecture */ 26/* The native architecture */
27#define KEXEC_ARCH KEXEC_ARCH_SH 27#define KEXEC_ARCH KEXEC_ARCH_SH
28 28
29#define MAX_NOTE_BYTES 1024
30
31static inline void crash_setup_regs(struct pt_regs *newregs, 29static inline void crash_setup_regs(struct pt_regs *newregs,
32 struct pt_regs *oldregs) 30 struct pt_regs *oldregs)
33{ 31{
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 184d7fcaaf10..5b523c7e7d99 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -568,10 +568,6 @@ typedef pte_t *pte_addr_t;
568#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 568#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
569 remap_pfn_range(vma, vaddr, pfn, size, prot) 569 remap_pfn_range(vma, vaddr, pfn, size, prot)
570 570
571#define MK_IOSPACE_PFN(space, pfn) (pfn)
572#define GET_IOSPACE(pfn) 0
573#define GET_PFN(pfn) (pfn)
574
575struct mm_struct; 571struct mm_struct;
576 572
577/* 573/*
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 127af304865f..e7e96ee0c8a5 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -82,16 +82,6 @@ static inline void sched_cacheflush(void)
82} 82}
83#endif 83#endif
84 84
85static inline unsigned long tas(volatile int *m)
86{
87 unsigned long retval;
88
89 __asm__ __volatile__ ("tas.b @%1\n\t"
90 "movt %0"
91 : "=r" (retval): "r" (m): "t", "memory");
92 return retval;
93}
94
95/* 85/*
96 * A brief note on ctrl_barrier(), the control register write barrier. 86 * A brief note on ctrl_barrier(), the control register write barrier.
97 * 87 *
diff --git a/include/asm-sh64/kdebug.h b/include/asm-sh64/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-sh64/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h
index 6b97c4cb1d64..b875482eb592 100644
--- a/include/asm-sh64/pgtable.h
+++ b/include/asm-sh64/pgtable.h
@@ -485,10 +485,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
485#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 485#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
486 remap_pfn_range(vma, vaddr, pfn, size, prot) 486 remap_pfn_range(vma, vaddr, pfn, size, prot)
487 487
488#define MK_IOSPACE_PFN(space, pfn) (pfn)
489#define GET_IOSPACE(pfn) 0
490#define GET_PFN(pfn) (pfn)
491
492#endif /* !__ASSEMBLY__ */ 488#endif /* !__ASSEMBLY__ */
493 489
494/* 490/*
diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h
index b1598c26fcb0..5ff94644e8c8 100644
--- a/include/asm-sh64/system.h
+++ b/include/asm-sh64/system.h
@@ -43,8 +43,6 @@ extern struct task_struct *sh64_switch_to(struct task_struct *prev,
43 43
44#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 44#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
45 45
46#define tas(ptr) (xchg((ptr), 1))
47
48extern void __xchg_called_with_bad_pointer(void); 46extern void __xchg_called_with_bad_pointer(void);
49 47
50#define mb() __asm__ __volatile__ ("synco": : :"memory") 48#define mb() __asm__ __volatile__ ("synco": : :"memory")
diff --git a/include/asm-sparc/kdebug.h b/include/asm-sparc/kdebug.h
index fba92485fdba..404d80767323 100644
--- a/include/asm-sparc/kdebug.h
+++ b/include/asm-sparc/kdebug.h
@@ -66,4 +66,8 @@ static inline void sp_enter_debugger(void)
66#define KDEBUG_DUNNO2_OFF 0x8 66#define KDEBUG_DUNNO2_OFF 0x8
67#define KDEBUG_TEACH_OFF 0xc 67#define KDEBUG_TEACH_OFF 0xc
68 68
69enum die_val {
70 DIE_UNUSED,
71};
72
69#endif /* !(_SPARC_KDEBUG_H) */ 73#endif /* !(_SPARC_KDEBUG_H) */
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 100c3eaf3c1f..8b6d9c9c8b93 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -241,7 +241,6 @@ static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned lon
241} 241}
242 242
243#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 243#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
244#define tas(ptr) (xchg((ptr),1))
245 244
246extern void __xchg_called_with_bad_pointer(void); 245extern void __xchg_called_with_bad_pointer(void);
247 246
diff --git a/include/asm-sparc64/Kbuild b/include/asm-sparc64/Kbuild
index a7f44408c93b..854fd3a65acf 100644
--- a/include/asm-sparc64/Kbuild
+++ b/include/asm-sparc64/Kbuild
@@ -8,7 +8,6 @@ header-y += apb.h
8header-y += asi.h 8header-y += asi.h
9header-y += bbc.h 9header-y += bbc.h
10header-y += bpp.h 10header-y += bpp.h
11header-y += const.h
12header-y += display7seg.h 11header-y += display7seg.h
13header-y += envctrl.h 12header-y += envctrl.h
14header-y += ipc.h 13header-y += ipc.h
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 2f0bec26a695..3fb4e1f7f186 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -9,6 +9,7 @@
9#define __ARCH_SPARC64_ATOMIC__ 9#define __ARCH_SPARC64_ATOMIC__
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/system.h>
12 13
13typedef struct { volatile int counter; } atomic_t; 14typedef struct { volatile int counter; } atomic_t;
14typedef struct { volatile __s64 counter; } atomic64_t; 15typedef struct { volatile __s64 counter; } atomic64_t;
@@ -70,25 +71,47 @@ extern int atomic64_sub_ret(int, atomic64_t *);
70#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) 71#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
71#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) 72#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
72 73
73#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) 74#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
74#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 75#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
75 76
76#define atomic_add_unless(v, a, u) \ 77static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
77({ \ 78{
78 int c, old; \ 79 int c, old;
79 c = atomic_read(v); \ 80 c = atomic_read(v);
80 for (;;) { \ 81 for (;;) {
81 if (unlikely(c == (u))) \ 82 if (unlikely(c == (u)))
82 break; \ 83 break;
83 old = atomic_cmpxchg((v), c, c + (a)); \ 84 old = atomic_cmpxchg((v), c, c + (a));
84 if (likely(old == c)) \ 85 if (likely(old == c))
85 break; \ 86 break;
86 c = old; \ 87 c = old;
87 } \ 88 }
88 likely(c != (u)); \ 89 return c != (u);
89}) 90}
91
90#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 92#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
91 93
94#define atomic64_cmpxchg(v, o, n) \
95 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
96#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
97
98static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
99{
100 long c, old;
101 c = atomic64_read(v);
102 for (;;) {
103 if (unlikely(c == (u)))
104 break;
105 old = atomic64_cmpxchg((v), c, c + (a));
106 if (likely(old == c))
107 break;
108 c = old;
109 }
110 return c != (u);
111}
112
113#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
114
92/* Atomic operations are already serializing */ 115/* Atomic operations are already serializing */
93#ifdef CONFIG_SMP 116#ifdef CONFIG_SMP
94#define smp_mb__before_atomic_dec() membar_storeload_loadload(); 117#define smp_mb__before_atomic_dec() membar_storeload_loadload();
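
The sparc64 change above (and the matching x86-64 change later in this series) turns atomic_add_unless() from a statement-expression macro into a real inline function built on a cmpxchg retry loop: read the counter, bail out if it already equals u, otherwise try to install c + a and retry if another CPU raced in between. A userspace rendition of the same loop, with a GCC __atomic builtin standing in for atomic_cmpxchg(), might look like this; add_unless() is a local name for the sketch, not the kernel function itself.

#include <stdio.h>

/* Add 'a' to *v unless *v == u; return nonzero if the add was done. */
static int add_unless(int *v, int a, int u)
{
	int c = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		if (c == u)
			return 0;
		/* On failure 'c' is refreshed with the current value of *v. */
		if (__atomic_compare_exchange_n(v, &c, c + a, 0,
						__ATOMIC_RELAXED, __ATOMIC_RELAXED))
			return 1;
	}
}

int main(void)
{
	int refcount = 1;

	/* The classic caller is inc-not-zero: take a reference only if the
	 * object has not already dropped to a refcount of zero. */
	if (add_unless(&refcount, 1, 0))
		printf("got reference, refcount=%d\n", refcount);
	return 0;
}
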
diff --git a/include/asm-sparc64/const.h b/include/asm-sparc64/const.h
deleted file mode 100644
index 8ad902b2ce04..000000000000
--- a/include/asm-sparc64/const.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/* const.h: Macros for dealing with constants. */
2
3#ifndef _SPARC64_CONST_H
4#define _SPARC64_CONST_H
5
6/* Some constant macros are used in both assembler and
7 * C code. Therefore we cannot annotate them always with
8 * 'UL' and other type specificers unilaterally. We
9 * use the following macros to deal with this.
10 */
11
12#ifdef __ASSEMBLY__
13#define _AC(X,Y) X
14#else
15#define _AC(X,Y) (X##Y)
16#endif
17
18
19#endif /* !(_SPARC64_CONST_H) */
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h
index 11251bdd00cb..f8032e73f384 100644
--- a/include/asm-sparc64/kdebug.h
+++ b/include/asm-sparc64/kdebug.h
@@ -7,19 +7,8 @@
7 7
8struct pt_regs; 8struct pt_regs;
9 9
10struct die_args {
11 struct pt_regs *regs;
12 const char *str;
13 long err;
14 int trapnr;
15 int signr;
16};
17
18extern int register_die_notifier(struct notifier_block *);
19extern int unregister_die_notifier(struct notifier_block *);
20extern int register_page_fault_notifier(struct notifier_block *); 10extern int register_page_fault_notifier(struct notifier_block *);
21extern int unregister_page_fault_notifier(struct notifier_block *); 11extern int unregister_page_fault_notifier(struct notifier_block *);
22extern struct atomic_notifier_head sparc64die_chain;
23 12
24extern void bad_trap(struct pt_regs *, long); 13extern void bad_trap(struct pt_regs *, long);
25 14
@@ -36,16 +25,4 @@ enum die_val {
36 DIE_PAGE_FAULT, 25 DIE_PAGE_FAULT,
37}; 26};
38 27
39static inline int notify_die(enum die_val val,char *str, struct pt_regs *regs,
40 long err, int trap, int sig)
41{
42 struct die_args args = { .regs = regs,
43 .str = str,
44 .err = err,
45 .trapnr = trap,
46 .signr = sig };
47
48 return atomic_notifier_call_chain(&sparc64die_chain, val, &args);
49}
50
51#endif 28#endif
diff --git a/include/asm-sparc64/local.h b/include/asm-sparc64/local.h
index dfde115ac892..c11c530f74d0 100644
--- a/include/asm-sparc64/local.h
+++ b/include/asm-sparc64/local.h
@@ -1,40 +1 @@
1#ifndef _ARCH_SPARC64_LOCAL_H #include <asm-generic/local.h>
2#define _ARCH_SPARC64_LOCAL_H
3
4#include <linux/percpu.h>
5#include <asm/atomic.h>
6
7typedef atomic64_t local_t;
8
9#define LOCAL_INIT(i) ATOMIC64_INIT(i)
10#define local_read(v) atomic64_read(v)
11#define local_set(v,i) atomic64_set(v,i)
12
13#define local_inc(v) atomic64_inc(v)
14#define local_dec(v) atomic64_dec(v)
15#define local_add(i, v) atomic64_add(i, v)
16#define local_sub(i, v) atomic64_sub(i, v)
17
18#define __local_inc(v) ((v)->counter++)
19#define __local_dec(v) ((v)->counter--)
20#define __local_add(i,v) ((v)->counter+=(i))
21#define __local_sub(i,v) ((v)->counter-=(i))
22
23/* Use these for per-cpu local_t variables: on some archs they are
24 * much more efficient than these naive implementations. Note they take
25 * a variable, not an address.
26 */
27#define cpu_local_read(v) local_read(&__get_cpu_var(v))
28#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
29
30#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
31#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
32#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
33#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
34
35#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
36#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
37#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
38#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
39
40#endif /* _ARCH_SPARC64_LOCAL_H */
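
local_t, which this hunk repoints at the asm-generic implementation (and which the x86-64 patch further below rebuilds on top of atomic_long_t), is a counter that only its owning CPU ever updates, so the fast path needs no lock prefix or barrier. A loose userspace analogue is a per-thread counter that each thread bumps without atomics and publishes only when it is done; the variable names here are invented for the example and do not correspond to any kernel API.

#include <pthread.h>
#include <stdio.h>

/* Each thread owns its own counter, so a plain increment is race-free,
 * much like local_inc() on a per-CPU local_t. */
static __thread unsigned long my_events;
static unsigned long totals[2];

static void *worker(void *arg)
{
	long id = (long)arg;

	for (int i = 0; i < 100000; i++)
		my_events++;		/* owner-only update, no atomics */
	totals[id] = my_events;		/* publish once, at the end */
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	printf("total=%lu\n", totals[0] + totals[1]);
	return 0;
}
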
diff --git a/include/asm-sparc64/lsu.h b/include/asm-sparc64/lsu.h
index e5329c7f5833..79f109840c39 100644
--- a/include/asm-sparc64/lsu.h
+++ b/include/asm-sparc64/lsu.h
@@ -2,7 +2,7 @@
2#ifndef _SPARC64_LSU_H 2#ifndef _SPARC64_LSU_H
3#define _SPARC64_LSU_H 3#define _SPARC64_LSU_H
4 4
5#include <asm/const.h> 5#include <linux/const.h>
6 6
7/* LSU Control Register */ 7/* LSU Control Register */
8#define LSU_CONTROL_PM _AC(0x000001fe00000000,UL) /* Phys-watchpoint byte mask*/ 8#define LSU_CONTROL_PM _AC(0x000001fe00000000,UL) /* Phys-watchpoint byte mask*/
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 70af4b6ce136..8abc58f0f9d7 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -1,8 +1,8 @@
1#ifndef __MMU_H 1#ifndef __MMU_H
2#define __MMU_H 2#define __MMU_H
3 3
4#include <linux/const.h>
4#include <asm/page.h> 5#include <asm/page.h>
5#include <asm/const.h>
6#include <asm/hypervisor.h> 6#include <asm/hypervisor.h>
7 7
8#define CTX_NR_BITS 13 8#define CTX_NR_BITS 13
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index ff736eafa64d..7af1077451ff 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -5,7 +5,7 @@
5 5
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7 7
8#include <asm/const.h> 8#include <linux/const.h>
9 9
10#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB) 10#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
11#define PAGE_SHIFT 13 11#define PAGE_SHIFT 13
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 46705ef47d27..9e80ad43b29c 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -15,13 +15,13 @@
15#include <asm-generic/pgtable-nopud.h> 15#include <asm-generic/pgtable-nopud.h>
16 16
17#include <linux/compiler.h> 17#include <linux/compiler.h>
18#include <linux/const.h>
18#include <asm/types.h> 19#include <asm/types.h>
19#include <asm/spitfire.h> 20#include <asm/spitfire.h>
20#include <asm/asi.h> 21#include <asm/asi.h>
21#include <asm/system.h> 22#include <asm/system.h>
22#include <asm/page.h> 23#include <asm/page.h>
23#include <asm/processor.h> 24#include <asm/processor.h>
24#include <asm/const.h>
25 25
26/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB). 26/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB).
27 * The page copy blockops can use 0x2000000 to 0x4000000. 27 * The page copy blockops can use 0x2000000 to 0x4000000.
diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h
index 49a7924a89ab..f3c45484c636 100644
--- a/include/asm-sparc64/pstate.h
+++ b/include/asm-sparc64/pstate.h
@@ -2,7 +2,7 @@
2#ifndef _SPARC64_PSTATE_H 2#ifndef _SPARC64_PSTATE_H
3#define _SPARC64_PSTATE_H 3#define _SPARC64_PSTATE_H
4 4
5#include <asm/const.h> 5#include <linux/const.h>
6 6
7/* The V9 PSTATE Register (with SpitFire extensions). 7/* The V9 PSTATE Register (with SpitFire extensions).
8 * 8 *
diff --git a/include/asm-sparc64/sfafsr.h b/include/asm-sparc64/sfafsr.h
index 2f792c20b53c..e96137b04a4f 100644
--- a/include/asm-sparc64/sfafsr.h
+++ b/include/asm-sparc64/sfafsr.h
@@ -1,7 +1,7 @@
1#ifndef _SPARC64_SFAFSR_H 1#ifndef _SPARC64_SFAFSR_H
2#define _SPARC64_SFAFSR_H 2#define _SPARC64_SFAFSR_H
3 3
4#include <asm/const.h> 4#include <linux/const.h>
5 5
6/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */ 6/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */
7 7
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 32281acb878b..8ba380ec6daa 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -253,7 +253,6 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
253} 253}
254 254
255#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 255#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
256#define tas(ptr) (xchg((ptr),1))
257 256
258extern void __xchg_called_with_bad_pointer(void); 257extern void __xchg_called_with_bad_pointer(void);
259 258
diff --git a/include/asm-um/cmpxchg.h b/include/asm-um/cmpxchg.h
new file mode 100644
index 000000000000..529376a99885
--- /dev/null
+++ b/include/asm-um/cmpxchg.h
@@ -0,0 +1,6 @@
1#ifndef __UM_CMPXCHG_H
2#define __UM_CMPXCHG_H
3
4#include "asm/arch/cmpxchg.h"
5
6#endif
diff --git a/include/asm-um/kdebug.h b/include/asm-um/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-um/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-v850/kdebug.h b/include/asm-v850/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-v850/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-v850/system.h b/include/asm-v850/system.h
index da39916f10b0..0de2481fd990 100644
--- a/include/asm-v850/system.h
+++ b/include/asm-v850/system.h
@@ -76,7 +76,6 @@ static inline int irqs_disabled (void)
76 76
77#define xchg(ptr, with) \ 77#define xchg(ptr, with) \
78 ((__typeof__ (*(ptr)))__xchg ((unsigned long)(with), (ptr), sizeof (*(ptr)))) 78 ((__typeof__ (*(ptr)))__xchg ((unsigned long)(with), (ptr), sizeof (*(ptr))))
79#define tas(ptr) (xchg ((ptr), 1))
80 79
81static inline unsigned long __xchg (unsigned long with, 80static inline unsigned long __xchg (unsigned long with,
82 __volatile__ void *ptr, int size) 81 __volatile__ void *ptr, int size)
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
index 89ad1fc27c8b..75a2deffca68 100644
--- a/include/asm-x86_64/Kbuild
+++ b/include/asm-x86_64/Kbuild
@@ -19,4 +19,3 @@ unifdef-y += mce.h
19unifdef-y += msr.h 19unifdef-y += msr.h
20unifdef-y += mtrr.h 20unifdef-y += mtrr.h
21unifdef-y += vsyscall.h 21unifdef-y += vsyscall.h
22unifdef-y += const.h
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 706ca4b60000..f2e64634fa48 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
2#define __ARCH_X86_64_ATOMIC__ 2#define __ARCH_X86_64_ATOMIC__
3 3
4#include <asm/alternative.h> 4#include <asm/alternative.h>
5#include <asm/cmpxchg.h>
5 6
6/* atomic_t should be 32 bit signed type */ 7/* atomic_t should be 32 bit signed type */
7 8
@@ -375,8 +376,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
375 long __i = i; 376 long __i = i;
376 __asm__ __volatile__( 377 __asm__ __volatile__(
377 LOCK_PREFIX "xaddq %0, %1;" 378 LOCK_PREFIX "xaddq %0, %1;"
378 :"=r"(i) 379 :"+r" (i), "+m" (v->counter)
379 :"m"(v->counter), "0"(i)); 380 : : "memory");
380 return i + __i; 381 return i + __i;
381} 382}
382 383
@@ -388,7 +389,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
388#define atomic64_inc_return(v) (atomic64_add_return(1,v)) 389#define atomic64_inc_return(v) (atomic64_add_return(1,v))
389#define atomic64_dec_return(v) (atomic64_sub_return(1,v)) 390#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
390 391
391#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) 392#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
393#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
394
395#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
392#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 396#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
393 397
394/** 398/**
@@ -400,22 +404,49 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
400 * Atomically adds @a to @v, so long as it was not @u. 404 * Atomically adds @a to @v, so long as it was not @u.
401 * Returns non-zero if @v was not @u, and zero otherwise. 405 * Returns non-zero if @v was not @u, and zero otherwise.
402 */ 406 */
403#define atomic_add_unless(v, a, u) \ 407static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
404({ \ 408{
405 int c, old; \ 409 int c, old;
406 c = atomic_read(v); \ 410 c = atomic_read(v);
407 for (;;) { \ 411 for (;;) {
408 if (unlikely(c == (u))) \ 412 if (unlikely(c == (u)))
409 break; \ 413 break;
410 old = atomic_cmpxchg((v), c, c + (a)); \ 414 old = atomic_cmpxchg((v), c, c + (a));
411 if (likely(old == c)) \ 415 if (likely(old == c))
412 break; \ 416 break;
413 c = old; \ 417 c = old;
414 } \ 418 }
415 c != (u); \ 419 return c != (u);
416}) 420}
421
417#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 422#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
418 423
424/**
425 * atomic64_add_unless - add unless the number is a given value
426 * @v: pointer of type atomic64_t
427 * @a: the amount to add to v...
428 * @u: ...unless v is equal to u.
429 *
430 * Atomically adds @a to @v, so long as it was not @u.
431 * Returns non-zero if @v was not @u, and zero otherwise.
432 */
433static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
434{
435 long c, old;
436 c = atomic64_read(v);
437 for (;;) {
438 if (unlikely(c == (u)))
439 break;
440 old = atomic64_cmpxchg((v), c, c + (a));
441 if (likely(old == c))
442 break;
443 c = old;
444 }
445 return c != (u);
446}
447
448#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
449
419/* These are x86-specific, used by some header files */ 450/* These are x86-specific, used by some header files */
420#define atomic_clear_mask(mask, addr) \ 451#define atomic_clear_mask(mask, addr) \
421__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ 452__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
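
Besides the add_unless conversion, the x86-64 atomic.h hunk above fixes the xaddq constraints in atomic64_add_return(): both the register and v->counter are read and written by xadd, so they become "+r" and "+m" operands and a "memory" clobber is added. The observable behaviour is plain fetch-and-add returning the new value; in portable C, with a GCC builtin standing in for the inline asm, that is simply:

#include <stdio.h>

int main(void)
{
	long counter = 40;

	/* atomic64_add_return(i, v) yields the new value, i.e. the old
	 * contents plus the increment. */
	long newval = __atomic_add_fetch(&counter, 2L, __ATOMIC_SEQ_CST);

	printf("counter=%ld returned=%ld\n", counter, newval);
	return 0;
}
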
diff --git a/include/asm-x86_64/cmpxchg.h b/include/asm-x86_64/cmpxchg.h
new file mode 100644
index 000000000000..09a6b6b6b74d
--- /dev/null
+++ b/include/asm-x86_64/cmpxchg.h
@@ -0,0 +1,134 @@
1#ifndef __ASM_CMPXCHG_H
2#define __ASM_CMPXCHG_H
3
4#include <asm/alternative.h> /* Provides LOCK_PREFIX */
5
6#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
7
8#define __xg(x) ((volatile long *)(x))
9
10static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
11{
12 *ptr = val;
13}
14
15#define _set_64bit set_64bit
16
17/*
18 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
19 * Note 2: xchg has side effect, so that attribute volatile is necessary,
20 * but generally the primitive is invalid, *ptr is output argument. --ANK
21 */
22static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
23{
24 switch (size) {
25 case 1:
26 __asm__ __volatile__("xchgb %b0,%1"
27 :"=q" (x)
28 :"m" (*__xg(ptr)), "0" (x)
29 :"memory");
30 break;
31 case 2:
32 __asm__ __volatile__("xchgw %w0,%1"
33 :"=r" (x)
34 :"m" (*__xg(ptr)), "0" (x)
35 :"memory");
36 break;
37 case 4:
38 __asm__ __volatile__("xchgl %k0,%1"
39 :"=r" (x)
40 :"m" (*__xg(ptr)), "0" (x)
41 :"memory");
42 break;
43 case 8:
44 __asm__ __volatile__("xchgq %0,%1"
45 :"=r" (x)
46 :"m" (*__xg(ptr)), "0" (x)
47 :"memory");
48 break;
49 }
50 return x;
51}
52
53/*
54 * Atomic compare and exchange. Compare OLD with MEM, if identical,
55 * store NEW in MEM. Return the initial value in MEM. Success is
56 * indicated by comparing RETURN with OLD.
57 */
58
59#define __HAVE_ARCH_CMPXCHG 1
60
61static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
62 unsigned long new, int size)
63{
64 unsigned long prev;
65 switch (size) {
66 case 1:
67 __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
68 : "=a"(prev)
69 : "q"(new), "m"(*__xg(ptr)), "0"(old)
70 : "memory");
71 return prev;
72 case 2:
73 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
74 : "=a"(prev)
75 : "r"(new), "m"(*__xg(ptr)), "0"(old)
76 : "memory");
77 return prev;
78 case 4:
79 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
80 : "=a"(prev)
81 : "r"(new), "m"(*__xg(ptr)), "0"(old)
82 : "memory");
83 return prev;
84 case 8:
85 __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
86 : "=a"(prev)
87 : "r"(new), "m"(*__xg(ptr)), "0"(old)
88 : "memory");
89 return prev;
90 }
91 return old;
92}
93
94static inline unsigned long __cmpxchg_local(volatile void *ptr,
95 unsigned long old, unsigned long new, int size)
96{
97 unsigned long prev;
98 switch (size) {
99 case 1:
100 __asm__ __volatile__("cmpxchgb %b1,%2"
101 : "=a"(prev)
102 : "q"(new), "m"(*__xg(ptr)), "0"(old)
103 : "memory");
104 return prev;
105 case 2:
106 __asm__ __volatile__("cmpxchgw %w1,%2"
107 : "=a"(prev)
108 : "r"(new), "m"(*__xg(ptr)), "0"(old)
109 : "memory");
110 return prev;
111 case 4:
112 __asm__ __volatile__("cmpxchgl %k1,%2"
113 : "=a"(prev)
114 : "r"(new), "m"(*__xg(ptr)), "0"(old)
115 : "memory");
116 return prev;
117 case 8:
118 __asm__ __volatile__("cmpxchgq %1,%2"
119 : "=a"(prev)
120 : "r"(new), "m"(*__xg(ptr)), "0"(old)
121 : "memory");
122 return prev;
123 }
124 return old;
125}
126
127#define cmpxchg(ptr,o,n)\
128 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
129 (unsigned long)(n),sizeof(*(ptr))))
130#define cmpxchg_local(ptr,o,n)\
131 ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
132 (unsigned long)(n),sizeof(*(ptr))))
133
134#endif
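
The new asm-x86_64/cmpxchg.h above keeps the long-standing note that xchg needs no lock prefix because the instruction is implicitly locked; that property is what made the old tas(ptr) macro, removed from several architectures elsewhere in this series, usable as a test-and-set lock primitive. A tiny userspace spinlock built on the same exchange idea, with a GCC builtin in place of the kernel's xchg(), could look roughly like this:

#include <pthread.h>
#include <stdio.h>

static int lockword;		/* 0 = free, 1 = held */
static long shared;

static void spin_lock(int *l)
{
	/* tas(): atomically store 1 and inspect the previous value;
	 * if it was already 1, somebody else holds the lock. */
	while (__atomic_exchange_n(l, 1, __ATOMIC_ACQUIRE))
		;	/* spin */
}

static void spin_unlock(int *l)
{
	__atomic_store_n(l, 0, __ATOMIC_RELEASE);
}

static void *worker(void *arg)
{
	for (int i = 0; i < 100000; i++) {
		spin_lock(&lockword);
		shared++;
		spin_unlock(&lockword);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("shared=%ld (expect 200000)\n", shared);
	return 0;
}
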
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index 2b0c088e2957..74feae945a26 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -5,19 +5,8 @@
5 5
6struct pt_regs; 6struct pt_regs;
7 7
8struct die_args {
9 struct pt_regs *regs;
10 const char *str;
11 long err;
12 int trapnr;
13 int signr;
14};
15
16extern int register_die_notifier(struct notifier_block *);
17extern int unregister_die_notifier(struct notifier_block *);
18extern int register_page_fault_notifier(struct notifier_block *); 8extern int register_page_fault_notifier(struct notifier_block *);
19extern int unregister_page_fault_notifier(struct notifier_block *); 9extern int unregister_page_fault_notifier(struct notifier_block *);
20extern struct atomic_notifier_head die_chain;
21 10
22/* Grossly misnamed. */ 11/* Grossly misnamed. */
23enum die_val { 12enum die_val {
@@ -33,22 +22,10 @@ enum die_val {
33 DIE_GPF, 22 DIE_GPF,
34 DIE_CALL, 23 DIE_CALL,
35 DIE_NMI_IPI, 24 DIE_NMI_IPI,
25 DIE_NMI_POST,
36 DIE_PAGE_FAULT, 26 DIE_PAGE_FAULT,
37}; 27};
38 28
39static inline int notify_die(enum die_val val, const char *str,
40 struct pt_regs *regs, long err, int trap, int sig)
41{
42 struct die_args args = {
43 .regs = regs,
44 .str = str,
45 .err = err,
46 .trapnr = trap,
47 .signr = sig
48 };
49 return atomic_notifier_call_chain(&die_chain, val, &args);
50}
51
52extern void printk_address(unsigned long address); 29extern void printk_address(unsigned long address);
53extern void die(const char *,struct pt_regs *,long); 30extern void die(const char *,struct pt_regs *,long);
54extern void __die(const char *,struct pt_regs *,long); 31extern void __die(const char *,struct pt_regs *,long);
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
index 5fab957e1091..738e581b67f8 100644
--- a/include/asm-x86_64/kexec.h
+++ b/include/asm-x86_64/kexec.h
@@ -48,8 +48,6 @@
48/* The native architecture */ 48/* The native architecture */
49#define KEXEC_ARCH KEXEC_ARCH_X86_64 49#define KEXEC_ARCH KEXEC_ARCH_X86_64
50 50
51#define MAX_NOTE_BYTES 1024
52
53/* 51/*
54 * Saving the registers of the cpu on which panic occured in 52 * Saving the registers of the cpu on which panic occured in
55 * crash_kexec to save a valid sp. The registers of other cpus 53 * crash_kexec to save a valid sp. The registers of other cpus
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index e769e6200225..e87492bb0693 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -2,49 +2,183 @@
2#define _ARCH_X8664_LOCAL_H 2#define _ARCH_X8664_LOCAL_H
3 3
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5#include <asm/atomic.h>
5 6
6typedef struct 7typedef struct
7{ 8{
8 volatile long counter; 9 atomic_long_t a;
9} local_t; 10} local_t;
10 11
11#define LOCAL_INIT(i) { (i) } 12#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
12 13
13#define local_read(v) ((v)->counter) 14#define local_read(l) atomic_long_read(&(l)->a)
14#define local_set(v,i) (((v)->counter) = (i)) 15#define local_set(l,i) atomic_long_set(&(l)->a, (i))
15 16
16static inline void local_inc(local_t *v) 17static inline void local_inc(local_t *l)
17{ 18{
18 __asm__ __volatile__( 19 __asm__ __volatile__(
19 "incq %0" 20 "incq %0"
20 :"=m" (v->counter) 21 :"=m" (l->a.counter)
21 :"m" (v->counter)); 22 :"m" (l->a.counter));
22} 23}
23 24
24static inline void local_dec(local_t *v) 25static inline void local_dec(local_t *l)
25{ 26{
26 __asm__ __volatile__( 27 __asm__ __volatile__(
27 "decq %0" 28 "decq %0"
28 :"=m" (v->counter) 29 :"=m" (l->a.counter)
29 :"m" (v->counter)); 30 :"m" (l->a.counter));
30} 31}
31 32
32static inline void local_add(long i, local_t *v) 33static inline void local_add(long i, local_t *l)
33{ 34{
34 __asm__ __volatile__( 35 __asm__ __volatile__(
35 "addq %1,%0" 36 "addq %1,%0"
36 :"=m" (v->counter) 37 :"=m" (l->a.counter)
37 :"ir" (i), "m" (v->counter)); 38 :"ir" (i), "m" (l->a.counter));
38} 39}
39 40
40static inline void local_sub(long i, local_t *v) 41static inline void local_sub(long i, local_t *l)
41{ 42{
42 __asm__ __volatile__( 43 __asm__ __volatile__(
43 "subq %1,%0" 44 "subq %1,%0"
44 :"=m" (v->counter) 45 :"=m" (l->a.counter)
45 :"ir" (i), "m" (v->counter)); 46 :"ir" (i), "m" (l->a.counter));
46} 47}
47 48
49/**
50 * local_sub_and_test - subtract value from variable and test result
51 * @i: integer value to subtract
52 * @l: pointer to type local_t
53 *
54 * Atomically subtracts @i from @l and returns
55 * true if the result is zero, or false for all
56 * other cases.
57 */
58static __inline__ int local_sub_and_test(long i, local_t *l)
59{
60 unsigned char c;
61
62 __asm__ __volatile__(
63 "subq %2,%0; sete %1"
64 :"=m" (l->a.counter), "=qm" (c)
65 :"ir" (i), "m" (l->a.counter) : "memory");
66 return c;
67}
68
69/**
70 * local_dec_and_test - decrement and test
71 * @l: pointer to type local_t
72 *
73 * Atomically decrements @l by 1 and
74 * returns true if the result is 0, or false for all other
75 * cases.
76 */
77static __inline__ int local_dec_and_test(local_t *l)
78{
79 unsigned char c;
80
81 __asm__ __volatile__(
82 "decq %0; sete %1"
83 :"=m" (l->a.counter), "=qm" (c)
84 :"m" (l->a.counter) : "memory");
85 return c != 0;
86}
87
88/**
89 * local_inc_and_test - increment and test
90 * @l: pointer to type local_t
91 *
92 * Atomically increments @l by 1
93 * and returns true if the result is zero, or false for all
94 * other cases.
95 */
96static __inline__ int local_inc_and_test(local_t *l)
97{
98 unsigned char c;
99
100 __asm__ __volatile__(
101 "incq %0; sete %1"
102 :"=m" (l->a.counter), "=qm" (c)
103 :"m" (l->a.counter) : "memory");
104 return c != 0;
105}
106
107/**
108 * local_add_negative - add and test if negative
109 * @i: integer value to add
110 * @l: pointer to type local_t
111 *
112 * Atomically adds @i to @l and returns true
113 * if the result is negative, or false when
114 * result is greater than or equal to zero.
115 */
116static __inline__ int local_add_negative(long i, local_t *l)
117{
118 unsigned char c;
119
120 __asm__ __volatile__(
121 "addq %2,%0; sets %1"
122 :"=m" (l->a.counter), "=qm" (c)
123 :"ir" (i), "m" (l->a.counter) : "memory");
124 return c;
125}
126
127/**
128 * local_add_return - add and return
129 * @i: integer value to add
130 * @l: pointer to type local_t
131 *
132 * Atomically adds @i to @l and returns @i + @l
133 */
134static __inline__ long local_add_return(long i, local_t *l)
135{
136 long __i = i;
137 __asm__ __volatile__(
138 "xaddq %0, %1;"
139 :"+r" (i), "+m" (l->a.counter)
140 : : "memory");
141 return i + __i;
142}
143
144static __inline__ long local_sub_return(long i, local_t *l)
145{
146 return local_add_return(-i,l);
147}
148
149#define local_inc_return(l) (local_add_return(1,l))
150#define local_dec_return(l) (local_sub_return(1,l))
151
152#define local_cmpxchg(l, o, n) \
153 (cmpxchg_local(&((l)->a.counter), (o), (n)))
154/* Always has a lock prefix */
155#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
156
157/**
158 * local_add_unless - add unless the number is a given value
159 * @l: pointer of type local_t
160 * @a: the amount to add to l...
161 * @u: ...unless l is equal to u.
162 *
163 * Atomically adds @a to @l, so long as it was not @u.
164 * Returns non-zero if @l was not @u, and zero otherwise.
165 */
166#define local_add_unless(l, a, u) \
167({ \
168 long c, old; \
169 c = local_read(l); \
170 for (;;) { \
171 if (unlikely(c == (u))) \
172 break; \
173 old = local_cmpxchg((l), c, c + (a)); \
174 if (likely(old == c)) \
175 break; \
176 c = old; \
177 } \
178 c != (u); \
179})
180#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
181
48/* On x86-64 these are better than the atomic variants on SMP kernels 182/* On x86-64 these are better than the atomic variants on SMP kernels
49 because they don't use a lock prefix. */ 183
50#define __local_inc(l) local_inc(l) 184#define __local_inc(l) local_inc(l)
@@ -62,27 +196,27 @@ static inline void local_sub(long i, local_t *v)
62 196
63/* Need to disable preemption for the cpu local counters otherwise we could 197/* Need to disable preemption for the cpu local counters otherwise we could
64 still access a variable of a previous CPU in a non atomic way. */ 198 still access a variable of a previous CPU in a non atomic way. */
65#define cpu_local_wrap_v(v) \ 199#define cpu_local_wrap_v(l) \
66 ({ local_t res__; \ 200 ({ local_t res__; \
67 preempt_disable(); \ 201 preempt_disable(); \
68 res__ = (v); \ 202 res__ = (l); \
69 preempt_enable(); \ 203 preempt_enable(); \
70 res__; }) 204 res__; })
71#define cpu_local_wrap(v) \ 205#define cpu_local_wrap(l) \
72 ({ preempt_disable(); \ 206 ({ preempt_disable(); \
73 v; \ 207 l; \
74 preempt_enable(); }) \ 208 preempt_enable(); }) \
75 209
76#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v))) 210#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
77#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i))) 211#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
78#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v))) 212#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
79#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v))) 213#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
80#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v))) 214#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
81#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v))) 215#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
82 216
83#define __cpu_local_inc(v) cpu_local_inc(v) 217#define __cpu_local_inc(l) cpu_local_inc(l)
84#define __cpu_local_dec(v) cpu_local_dec(v) 218#define __cpu_local_dec(l) cpu_local_dec(l)
85#define __cpu_local_add(i, v) cpu_local_add((i), (v)) 219#define __cpu_local_add(i, l) cpu_local_add((i), (l))
86#define __cpu_local_sub(i, v) cpu_local_sub((i), (v)) 220#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
87 221
88#endif /* _ARCH_I386_LOCAL_H */ 222#endif /* _ARCH_X8664_LOCAL_H */
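
As a sketch of what the reworked local_t API is for: a per-CPU event counter that the owning CPU bumps without a LOCK prefix, while a slow path sums the copies. This assumes a kernel build context, and pkt_count, note_packet() and total_packets() are made-up names, not part of the patch.

#include <linux/percpu.h>
#include <asm/local.h>

/* One counter per CPU, touched locklessly by the CPU that owns it. */
static DEFINE_PER_CPU(local_t, pkt_count);

static void note_packet(void)
{
	/* preemption-safe wrapper from this header */
	cpu_local_inc(pkt_count);
}

static long total_packets(void)
{
	long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(pkt_count, cpu));
	return sum;
}

The summing side tolerates slight skew; only the per-CPU increments need to be atomic with respect to interrupts on that CPU, which is exactly what the non-LOCK incq provides.
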
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 4d04e2479569..dee632fa457d 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -1,7 +1,7 @@
1#ifndef _X86_64_PAGE_H 1#ifndef _X86_64_PAGE_H
2#define _X86_64_PAGE_H 2#define _X86_64_PAGE_H
3 3
4#include <asm/const.h> 4#include <linux/const.h>
5 5
6/* PAGE_SHIFT determines the page size */ 6/* PAGE_SHIFT determines the page size */
7#define PAGE_SHIFT 12 7#define PAGE_SHIFT 12
@@ -79,9 +79,10 @@ extern unsigned long phys_base;
79 79
80#define __PHYSICAL_START CONFIG_PHYSICAL_START 80#define __PHYSICAL_START CONFIG_PHYSICAL_START
81#define __KERNEL_ALIGN 0x200000 81#define __KERNEL_ALIGN 0x200000
82
82#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) 83#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
83#define __START_KERNEL_map 0xffffffff80000000 84#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
84#define __PAGE_OFFSET 0xffff810000000000 85#define __PAGE_OFFSET _AC(0xffff810000000000, UL)
85 86
86/* to align the pointer to the (next) page boundary */ 87/* to align the pointer to the (next) page boundary */
87#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) 88#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
@@ -93,7 +94,7 @@ extern unsigned long phys_base;
93#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1) 94#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
94 95
95#define KERNEL_TEXT_SIZE (40*1024*1024) 96#define KERNEL_TEXT_SIZE (40*1024*1024)
96#define KERNEL_TEXT_START 0xffffffff80000000 97#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)
97#define PAGE_OFFSET __PAGE_OFFSET 98#define PAGE_OFFSET __PAGE_OFFSET
98 99
99#ifndef __ASSEMBLY__ 100#ifndef __ASSEMBLY__
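
The switch to _AC() is needed because page.h is included from both C and assembly: C wants the UL suffix so the constant has the right type, while the assembler cannot parse it. A rough sketch of what <linux/const.h> provides (see that header for the authoritative definition):

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* bare literal for .S files */
#else
#define __AC(X, Y)	(X##Y)		/* paste the type suffix on in C */
#define _AC(X, Y)	__AC(X, Y)
#endif

#define __PAGE_OFFSET	_AC(0xffff810000000000, UL)	/* as in the hunk above */
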
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index da3390faaea6..08b9831f2e14 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -1,7 +1,7 @@
1#ifndef _X86_64_PGTABLE_H 1#ifndef _X86_64_PGTABLE_H
2#define _X86_64_PGTABLE_H 2#define _X86_64_PGTABLE_H
3 3
4#include <asm/const.h> 4#include <linux/const.h>
5#ifndef __ASSEMBLY__ 5#ifndef __ASSEMBLY__
6 6
7/* 7/*
@@ -134,11 +134,11 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
134#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) 134#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
135#define FIRST_USER_ADDRESS 0 135#define FIRST_USER_ADDRESS 0
136 136
137#define MAXMEM 0x3fffffffffff 137#define MAXMEM _AC(0x3fffffffffff, UL)
138#define VMALLOC_START 0xffffc20000000000 138#define VMALLOC_START _AC(0xffffc20000000000, UL)
139#define VMALLOC_END 0xffffe1ffffffffff 139#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
140#define MODULES_VADDR 0xffffffff88000000 140#define MODULES_VADDR _AC(0xffffffff88000000, UL)
141#define MODULES_END 0xfffffffffff00000 141#define MODULES_END _AC(0xfffffffffff00000, UL)
142#define MODULES_LEN (MODULES_END - MODULES_VADDR) 142#define MODULES_LEN (MODULES_END - MODULES_VADDR)
143 143
144#define _PAGE_BIT_PRESENT 0 144#define _PAGE_BIT_PRESENT 0
@@ -411,17 +411,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
411 411
412extern spinlock_t pgd_lock; 412extern spinlock_t pgd_lock;
413extern struct list_head pgd_list; 413extern struct list_head pgd_list;
414void vmalloc_sync_all(void);
415 414
416extern int kern_addr_valid(unsigned long addr); 415extern int kern_addr_valid(unsigned long addr);
417 416
418#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 417#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
419 remap_pfn_range(vma, vaddr, pfn, size, prot) 418 remap_pfn_range(vma, vaddr, pfn, size, prot)
420 419
421#define MK_IOSPACE_PFN(space, pfn) (pfn)
422#define GET_IOSPACE(pfn) 0
423#define GET_PFN(pfn) (pfn)
424
425#define HAVE_ARCH_UNMAPPED_AREA 420#define HAVE_ARCH_UNMAPPED_AREA
426 421
427#define pgtable_cache_init() do { } while (0) 422#define pgtable_cache_init() do { } while (0)
diff --git a/include/asm-x86_64/serial.h b/include/asm-x86_64/serial.h
index b0496e0d72a6..8ebd765c674a 100644
--- a/include/asm-x86_64/serial.h
+++ b/include/asm-x86_64/serial.h
@@ -11,19 +11,3 @@
11 * megabits/second; but this requires the faster clock. 11 * megabits/second; but this requires the faster clock.
12 */ 12 */
13#define BASE_BAUD ( 1843200 / 16 ) 13#define BASE_BAUD ( 1843200 / 16 )
14
15/* Standard COM flags (except for COM4, because of the 8514 problem) */
16#ifdef CONFIG_SERIAL_DETECT_IRQ
17#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
18#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
19#else
20#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
21#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
22#endif
23
24#define SERIAL_PORT_DFNS \
25 /* UART CLK PORT IRQ FLAGS */ \
26 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
27 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
28 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
29 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 213b7fe5d998..b7b8021e8c43 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <asm/segment.h> 5#include <asm/segment.h>
6#include <asm/alternative.h> 6#include <asm/cmpxchg.h>
7 7
8#ifdef __KERNEL__ 8#ifdef __KERNEL__
9 9
@@ -124,100 +124,6 @@ static inline void sched_cacheflush(void)
124 124
125#define nop() __asm__ __volatile__ ("nop") 125#define nop() __asm__ __volatile__ ("nop")
126 126
127#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
128
129#define tas(ptr) (xchg((ptr),1))
130
131#define __xg(x) ((volatile long *)(x))
132
133static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
134{
135 *ptr = val;
136}
137
138#define _set_64bit set_64bit
139
140/*
141 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
142 * Note 2: xchg has side effect, so that attribute volatile is necessary,
143 * but generally the primitive is invalid, *ptr is output argument. --ANK
144 */
145static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
146{
147 switch (size) {
148 case 1:
149 __asm__ __volatile__("xchgb %b0,%1"
150 :"=q" (x)
151 :"m" (*__xg(ptr)), "0" (x)
152 :"memory");
153 break;
154 case 2:
155 __asm__ __volatile__("xchgw %w0,%1"
156 :"=r" (x)
157 :"m" (*__xg(ptr)), "0" (x)
158 :"memory");
159 break;
160 case 4:
161 __asm__ __volatile__("xchgl %k0,%1"
162 :"=r" (x)
163 :"m" (*__xg(ptr)), "0" (x)
164 :"memory");
165 break;
166 case 8:
167 __asm__ __volatile__("xchgq %0,%1"
168 :"=r" (x)
169 :"m" (*__xg(ptr)), "0" (x)
170 :"memory");
171 break;
172 }
173 return x;
174}
175
176/*
177 * Atomic compare and exchange. Compare OLD with MEM, if identical,
178 * store NEW in MEM. Return the initial value in MEM. Success is
179 * indicated by comparing RETURN with OLD.
180 */
181
182#define __HAVE_ARCH_CMPXCHG 1
183
184static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
185 unsigned long new, int size)
186{
187 unsigned long prev;
188 switch (size) {
189 case 1:
190 __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
191 : "=a"(prev)
192 : "q"(new), "m"(*__xg(ptr)), "0"(old)
193 : "memory");
194 return prev;
195 case 2:
196 __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
197 : "=a"(prev)
198 : "r"(new), "m"(*__xg(ptr)), "0"(old)
199 : "memory");
200 return prev;
201 case 4:
202 __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
203 : "=a"(prev)
204 : "r"(new), "m"(*__xg(ptr)), "0"(old)
205 : "memory");
206 return prev;
207 case 8:
208 __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
209 : "=a"(prev)
210 : "r"(new), "m"(*__xg(ptr)), "0"(old)
211 : "memory");
212 return prev;
213 }
214 return old;
215}
216
217#define cmpxchg(ptr,o,n)\
218 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
219 (unsigned long)(n),sizeof(*(ptr))))
220
221#ifdef CONFIG_SMP 127#ifdef CONFIG_SMP
222#define smp_mb() mb() 128#define smp_mb() mb()
223#define smp_rmb() rmb() 129#define smp_rmb() rmb()
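
The xchg()/tas()/cmpxchg() block removed here is replaced by the <asm/cmpxchg.h> include added at the top of the file. As a reminder of what tas() is for, a minimal userspace test-and-set lock built on an atomic exchange; GCC's __sync_lock_test_and_set() built-in stands in for xchg(ptr, 1), and the names are illustrative only.

#include <stdio.h>

static volatile int lock_word;

static void tas_lock(void)
{
	/* Spin until the exchange returns 0, i.e. we flipped the word
	 * from 0 to 1 ourselves. */
	while (__sync_lock_test_and_set(&lock_word, 1))
		;	/* busy-wait */
}

static void tas_unlock(void)
{
	__sync_lock_release(&lock_word);	/* store 0 with release semantics */
}

int main(void)
{
	tas_lock();
	puts("in the critical section");
	tas_unlock();
	return 0;
}
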
diff --git a/include/asm-x86_64/termbits.h b/include/asm-x86_64/termbits.h
index 6cfc3bb10c1a..7405756dd41b 100644
--- a/include/asm-x86_64/termbits.h
+++ b/include/asm-x86_64/termbits.h
@@ -160,7 +160,7 @@ struct ktermios {
160#define CMSPAR 010000000000 /* mark or space (stick) parity */ 160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */ 161#define CRTSCTS 020000000000 /* flow control */
162 162
163#define IBSHIFT 8 /* Shift from CBAUD to CIBAUD */ 163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164 164
165/* c_lflag bits */ 165/* c_lflag bits */
166#define ISIG 0000001 166#define ISIG 0000001
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 26e23e01c54a..595703949df3 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -619,6 +619,8 @@ __SYSCALL(__NR_sync_file_range, sys_sync_file_range)
619__SYSCALL(__NR_vmsplice, sys_vmsplice) 619__SYSCALL(__NR_vmsplice, sys_vmsplice)
620#define __NR_move_pages 279 620#define __NR_move_pages 279
621__SYSCALL(__NR_move_pages, sys_move_pages) 621__SYSCALL(__NR_move_pages, sys_move_pages)
622#define __NR_utimensat 280
623__SYSCALL(__NR_utimensat, sys_utimensat)
622 624
623#ifndef __NO_STUBS 625#ifndef __NO_STUBS
624#define __ARCH_WANT_OLD_READDIR 626#define __ARCH_WANT_OLD_READDIR
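
Until libc grows a wrapper, the new syscall can be exercised by number; 280 is the x86-64 slot registered above. A small sketch (the path is a placeholder), where a NULL times array asks the kernel to set both atime and mtime to the current time:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef AT_FDCWD
#define AT_FDCWD -100	/* "relative to the current directory", as the kernel defines it */
#endif

int main(void)
{
	/* utimensat(dirfd, path, times, flags); NULL times == "now" */
	if (syscall(280, AT_FDCWD, "somefile", NULL, 0) < 0)
		perror("utimensat");
	return 0;
}
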
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 5c2672021068..b3b23540f14d 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -234,14 +234,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
234 * Atomically adds @a to @v, so long as it was not @u. 234 * Atomically adds @a to @v, so long as it was not @u.
235 * Returns non-zero if @v was not @u, and zero otherwise. 235 * Returns non-zero if @v was not @u, and zero otherwise.
236 */ 236 */
237#define atomic_add_unless(v, a, u) \ 237static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
238({ \ 238{
239 int c, old; \ 239 int c, old;
240 c = atomic_read(v); \ 240 c = atomic_read(v);
241 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 241 for (;;) {
242 c = old; \ 242 if (unlikely(c == (u)))
243 c != (u); \ 243 break;
244}) 244 old = atomic_cmpxchg((v), c, c + (a));
245 if (likely(old == c))
246 break;
247 c = old;
248 }
249 return c != (u);
250}
251
245#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 252#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
246 253
247static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 254static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
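
The rewrite turns the statement-expression macro into a real inline function with likely/unlikely hints while keeping atomic_add_unless()'s semantics. Its main consumer is atomic_inc_not_zero() just below, the usual "take a reference only if the object is still live" helper; a kernel-flavoured sketch with made-up names:

#include <asm/atomic.h>

struct session {
	atomic_t refcnt;
};

/* Returns non-zero if a reference was taken; fails once the count has
 * already dropped to zero, so a dying object is never resurrected. */
static int session_get(struct session *s)
{
	return atomic_inc_not_zero(&s->refcnt);
}
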
diff --git a/include/asm-xtensa/kdebug.h b/include/asm-xtensa/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-xtensa/kdebug.h
@@ -0,0 +1 @@
#include <asm-generic/kdebug.h>
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
index 4aaed7fe6cfe..ddc970847ae9 100644
--- a/include/asm-xtensa/system.h
+++ b/include/asm-xtensa/system.h
@@ -183,8 +183,6 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
183 return tmp; 183 return tmp;
184} 184}
185 185
186#define tas(ptr) (xchg((ptr),1))
187
188#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 186#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
189 187
190/* 188/*
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 9f05279e7dd3..94cc04a143f2 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -4,6 +4,7 @@ header-y += hdlc/
4header-y += isdn/ 4header-y += isdn/
5header-y += nfsd/ 5header-y += nfsd/
6header-y += raid/ 6header-y += raid/
7header-y += spi/
7header-y += sunrpc/ 8header-y += sunrpc/
8header-y += tc_act/ 9header-y += tc_act/
9header-y += netfilter/ 10header-y += netfilter/
@@ -33,7 +34,6 @@ header-y += atmsvc.h
33header-y += atm_zatm.h 34header-y += atm_zatm.h
34header-y += auto_fs4.h 35header-y += auto_fs4.h
35header-y += auxvec.h 36header-y += auxvec.h
36header-y += awe_voice.h
37header-y += ax25.h 37header-y += ax25.h
38header-y += b1lli.h 38header-y += b1lli.h
39header-y += baycom.h 39header-y += baycom.h
@@ -46,6 +46,7 @@ header-y += coda_psdev.h
46header-y += coff.h 46header-y += coff.h
47header-y += comstats.h 47header-y += comstats.h
48header-y += consolemap.h 48header-y += consolemap.h
49header-y += const.h
49header-y += cycx_cfm.h 50header-y += cycx_cfm.h
50header-y += dlm_device.h 51header-y += dlm_device.h
51header-y += dm-ioctl.h 52header-y += dm-ioctl.h
@@ -91,7 +92,6 @@ header-y += ip_mp_alg.h
91header-y += ipsec.h 92header-y += ipsec.h
92header-y += ipx.h 93header-y += ipx.h
93header-y += irda.h 94header-y += irda.h
94header-y += isdn_divertif.h
95header-y += iso_fs.h 95header-y += iso_fs.h
96header-y += ixjuser.h 96header-y += ixjuser.h
97header-y += jffs2.h 97header-y += jffs2.h
@@ -121,6 +121,7 @@ header-y += pci_regs.h
121header-y += personality.h 121header-y += personality.h
122header-y += pfkeyv2.h 122header-y += pfkeyv2.h
123header-y += pg.h 123header-y += pg.h
124header-y += phantom.h
124header-y += pkt_cls.h 125header-y += pkt_cls.h
125header-y += pkt_sched.h 126header-y += pkt_sched.h
126header-y += posix_types.h 127header-y += posix_types.h
@@ -140,6 +141,7 @@ header-y += sockios.h
140header-y += som.h 141header-y += som.h
141header-y += sound.h 142header-y += sound.h
142header-y += synclink.h 143header-y += synclink.h
144header-y += taskstats.h
143header-y += telephony.h 145header-y += telephony.h
144header-y += termios.h 146header-y += termios.h
145header-y += ticable.h 147header-y += ticable.h
@@ -239,6 +241,7 @@ unifdef-y += ipv6.h
239unifdef-y += ipv6_route.h 241unifdef-y += ipv6_route.h
240unifdef-y += isdn.h 242unifdef-y += isdn.h
241unifdef-y += isdnif.h 243unifdef-y += isdnif.h
244unifdef-y += isdn_divertif.h
242unifdef-y += isdn_ppp.h 245unifdef-y += isdn_ppp.h
243unifdef-y += isicom.h 246unifdef-y += isicom.h
244unifdef-y += jbd.h 247unifdef-y += jbd.h
diff --git a/include/linux/awe_voice.h b/include/linux/awe_voice.h
deleted file mode 100644
index bf33f17bea99..000000000000
--- a/include/linux/awe_voice.h
+++ /dev/null
@@ -1,525 +0,0 @@
1/*
2 * include/linux/awe_voice.h
3 *
4 * Voice information definitions for the low level driver for the
5 * AWE32/SB32/AWE64 wave table synth.
6 * version 0.4.4; Jan. 4, 2000
7 *
8 * Copyright (C) 1996-2000 Takashi Iwai
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#ifndef AWE_VOICE_H
26#define AWE_VOICE_H
27
28#ifndef SAMPLE_TYPE_AWE32
29#define SAMPLE_TYPE_AWE32 0x20
30#endif
31
32#define _LINUX_PATCHKEY_H_INDIRECT
33#include <linux/patchkey.h>
34#undef _LINUX_PATCHKEY_H_INDIRECT
35
36/*----------------------------------------------------------------
37 * patch information record
38 *----------------------------------------------------------------*/
39
40/* patch interface header: 16 bytes */
41typedef struct awe_patch_info {
42 short key; /* use AWE_PATCH here */
43#define AWE_PATCH _PATCHKEY(0x07)
44
45 short device_no; /* synthesizer number */
46 unsigned short sf_id; /* file id (should be zero) */
47 short optarg; /* optional argument */
48 int len; /* data length (without this header) */
49
50 short type; /* patch operation type */
51#define AWE_LOAD_INFO 0 /* awe_voice_rec */
52#define AWE_LOAD_DATA 1 /* awe_sample_info */
53#define AWE_OPEN_PATCH 2 /* awe_open_parm */
54#define AWE_CLOSE_PATCH 3 /* none */
55#define AWE_UNLOAD_PATCH 4 /* none */
56#define AWE_REPLACE_DATA 5 /* awe_sample_info (optarg=#channels)*/
57#define AWE_MAP_PRESET 6 /* awe_voice_map */
58/*#define AWE_PROBE_INFO 7*/ /* awe_voice_map (pat only) */
59#define AWE_PROBE_DATA 8 /* optarg=sample */
60#define AWE_REMOVE_INFO 9 /* optarg=(bank<<8)|instr */
61#define AWE_LOAD_CHORUS_FX 0x10 /* awe_chorus_fx_rec (optarg=mode) */
62#define AWE_LOAD_REVERB_FX 0x11 /* awe_reverb_fx_rec (optarg=mode) */
63
64 short reserved; /* word alignment data */
65
66 /* the actual patch data begins after this */
67#if defined(AWE_COMPAT_030) && AWE_COMPAT_030
68 char data[0];
69#endif
70} awe_patch_info;
71
72/*#define AWE_PATCH_INFO_SIZE 16*/
73#define AWE_PATCH_INFO_SIZE sizeof(awe_patch_info)
74
75
76/*----------------------------------------------------------------
77 * open patch
78 *----------------------------------------------------------------*/
79
80#define AWE_PATCH_NAME_LEN 32
81
82typedef struct _awe_open_parm {
83 unsigned short type; /* sample type */
84#define AWE_PAT_TYPE_MISC 0
85#define AWE_PAT_TYPE_GM 1
86#define AWE_PAT_TYPE_GS 2
87#define AWE_PAT_TYPE_MT32 3
88#define AWE_PAT_TYPE_XG 4
89#define AWE_PAT_TYPE_SFX 5
90#define AWE_PAT_TYPE_GUS 6
91#define AWE_PAT_TYPE_MAP 7
92
93#define AWE_PAT_LOCKED 0x100 /* lock the samples */
94#define AWE_PAT_SHARED 0x200 /* sample is shared */
95
96 short reserved;
97 char name[AWE_PATCH_NAME_LEN];
98} awe_open_parm;
99
100/*#define AWE_OPEN_PARM_SIZE 28*/
101#define AWE_OPEN_PARM_SIZE sizeof(awe_open_parm)
102
103
104/*----------------------------------------------------------------
105 * raw voice information record
106 *----------------------------------------------------------------*/
107
108/* wave table envelope & effect parameters to control EMU8000 */
109typedef struct _awe_voice_parm {
110 unsigned short moddelay; /* modulation delay (0x8000) */
111 unsigned short modatkhld; /* modulation attack & hold time (0x7f7f) */
112 unsigned short moddcysus; /* modulation decay & sustain (0x7f7f) */
113 unsigned short modrelease; /* modulation release time (0x807f) */
114 short modkeyhold, modkeydecay; /* envelope change per key (not used) */
115 unsigned short voldelay; /* volume delay (0x8000) */
116 unsigned short volatkhld; /* volume attack & hold time (0x7f7f) */
117 unsigned short voldcysus; /* volume decay & sustain (0x7f7f) */
118 unsigned short volrelease; /* volume release time (0x807f) */
119 short volkeyhold, volkeydecay; /* envelope change per key (not used) */
120 unsigned short lfo1delay; /* LFO1 delay (0x8000) */
121 unsigned short lfo2delay; /* LFO2 delay (0x8000) */
122 unsigned short pefe; /* modulation pitch & cutoff (0x0000) */
123 unsigned short fmmod; /* LFO1 pitch & cutoff (0x0000) */
124 unsigned short tremfrq; /* LFO1 volume & freq (0x0000) */
125 unsigned short fm2frq2; /* LFO2 pitch & freq (0x0000) */
126 unsigned char cutoff; /* initial cutoff (0xff) */
127 unsigned char filterQ; /* initial filter Q [0-15] (0x0) */
128 unsigned char chorus; /* chorus send (0x00) */
129 unsigned char reverb; /* reverb send (0x00) */
130 unsigned short reserved[4]; /* not used */
131} awe_voice_parm;
132
133typedef struct _awe_voice_parm_block {
134 unsigned short moddelay; /* modulation delay (0x8000) */
135 unsigned char modatk, modhld;
136 unsigned char moddcy, modsus;
137 unsigned char modrel, moddummy;
138 short modkeyhold, modkeydecay; /* envelope change per key (not used) */
139 unsigned short voldelay; /* volume delay (0x8000) */
140 unsigned char volatk, volhld;
141 unsigned char voldcy, volsus;
142 unsigned char volrel, voldummy;
143 short volkeyhold, volkeydecay; /* envelope change per key (not used) */
144 unsigned short lfo1delay; /* LFO1 delay (0x8000) */
145 unsigned short lfo2delay; /* LFO2 delay (0x8000) */
146 unsigned char env1fc, env1pit;
147 unsigned char lfo1fc, lfo1pit;
148 unsigned char lfo1freq, lfo1vol;
149 unsigned char lfo2freq, lfo2pit;
150 unsigned char cutoff; /* initial cutoff (0xff) */
151 unsigned char filterQ; /* initial filter Q [0-15] (0x0) */
152 unsigned char chorus; /* chorus send (0x00) */
153 unsigned char reverb; /* reverb send (0x00) */
154 unsigned short reserved[4]; /* not used */
155} awe_voice_parm_block;
156
157#define AWE_VOICE_PARM_SIZE 48
158
159
160/* wave table parameters: 92 bytes */
161typedef struct _awe_voice_info {
162 unsigned short sf_id; /* file id (should be zero) */
163 unsigned short sample; /* sample id */
164 int start, end; /* sample offset correction */
165 int loopstart, loopend; /* loop offset correction */
166 short rate_offset; /* sample rate pitch offset */
167 unsigned short mode; /* sample mode */
168#define AWE_MODE_ROMSOUND 0x8000
169#define AWE_MODE_STEREO 1
170#define AWE_MODE_LOOPING 2
171#define AWE_MODE_NORELEASE 4 /* obsolete */
172#define AWE_MODE_INIT_PARM 8
173
174 short root; /* midi root key */
175 short tune; /* pitch tuning (in cents) */
176 signed char low, high; /* key note range */
177 signed char vellow, velhigh; /* velocity range */
178 signed char fixkey, fixvel; /* fixed key, velocity */
179 signed char pan, fixpan; /* panning, fixed panning */
180 short exclusiveClass; /* exclusive class (0 = none) */
181 unsigned char amplitude; /* sample volume (127 max) */
182 unsigned char attenuation; /* attenuation (0.375dB) */
183 short scaleTuning; /* pitch scale tuning(%), normally 100 */
184 awe_voice_parm parm; /* voice envelope parameters */
185 short index; /* internal index (set by driver) */
186} awe_voice_info;
187
188/*#define AWE_VOICE_INFO_SIZE 92*/
189#define AWE_VOICE_INFO_SIZE sizeof(awe_voice_info)
190
191/*----------------------------------------------------------------*/
192
193/* The info entry of awe_voice_rec is changed from 0 to 1
194 * for some compilers refusing zero size array.
195 * Due to this change, sizeof(awe_voice_rec) becomes different
196 * from older versions.
197 * Use AWE_VOICE_REC_SIZE instead.
198 */
199
200/* instrument info header: 4 bytes */
201typedef struct _awe_voice_rec_hdr {
202 unsigned char bank; /* midi bank number */
203 unsigned char instr; /* midi preset number */
204 char nvoices; /* number of voices */
205 char write_mode; /* write mode; normally 0 */
206#define AWE_WR_APPEND 0 /* append anyway */
207#define AWE_WR_EXCLUSIVE 1 /* skip if already exists */
208#define AWE_WR_REPLACE 2 /* replace if already exists */
209} awe_voice_rec_hdr;
210
211/*#define AWE_VOICE_REC_SIZE 4*/
212#define AWE_VOICE_REC_SIZE sizeof(awe_voice_rec_hdr)
213
214/* the standard patch structure for one sample */
215typedef struct _awe_voice_rec_patch {
216 awe_patch_info patch;
217 awe_voice_rec_hdr hdr;
218 awe_voice_info info;
219} awe_voice_rec_patch;
220
221
222/* obsolete data type */
223#if defined(AWE_COMPAT_030) && AWE_COMPAT_030
224#define AWE_INFOARRAY_SIZE 0
225#else
226#define AWE_INFOARRAY_SIZE 1
227#endif
228
229typedef struct _awe_voice_rec {
230 unsigned char bank; /* midi bank number */
231 unsigned char instr; /* midi preset number */
232 short nvoices; /* number of voices */
233 /* voice information follows here */
234 awe_voice_info info[AWE_INFOARRAY_SIZE];
235} awe_voice_rec;
236
237
238/*----------------------------------------------------------------
239 * sample wave information
240 *----------------------------------------------------------------*/
241
242/* wave table sample header: 32 bytes */
243typedef struct awe_sample_info {
244 unsigned short sf_id; /* file id (should be zero) */
245 unsigned short sample; /* sample id */
246 int start, end; /* start & end offset */
247 int loopstart, loopend; /* loop start & end offset */
248 int size; /* size (0 = ROM) */
249 short checksum_flag; /* use check sum = 1 */
250 unsigned short mode_flags; /* mode flags */
251#define AWE_SAMPLE_8BITS 1 /* wave data is 8bits */
252#define AWE_SAMPLE_UNSIGNED 2 /* wave data is unsigned */
253#define AWE_SAMPLE_NO_BLANK 4 /* no blank loop is attached */
254#define AWE_SAMPLE_SINGLESHOT 8 /* single-shot w/o loop */
255#define AWE_SAMPLE_BIDIR_LOOP 16 /* bidirectional looping */
256#define AWE_SAMPLE_STEREO_LEFT 32 /* stereo left sound */
257#define AWE_SAMPLE_STEREO_RIGHT 64 /* stereo right sound */
258#define AWE_SAMPLE_REVERSE_LOOP 128 /* reverse looping */
259 unsigned int checksum; /* check sum */
260#if defined(AWE_COMPAT_030) && AWE_COMPAT_030
261 unsigned short data[0]; /* sample data follows here */
262#endif
263} awe_sample_info;
264
265/*#define AWE_SAMPLE_INFO_SIZE 32*/
266#define AWE_SAMPLE_INFO_SIZE sizeof(awe_sample_info)
267
268
269/*----------------------------------------------------------------
270 * voice preset mapping
271 *----------------------------------------------------------------*/
272
273typedef struct awe_voice_map {
274 int map_bank, map_instr, map_key; /* key = -1 means all keys */
275 int src_bank, src_instr, src_key;
276} awe_voice_map;
277
278#define AWE_VOICE_MAP_SIZE sizeof(awe_voice_map)
279
280
281/*----------------------------------------------------------------
282 * awe hardware controls
283 *----------------------------------------------------------------*/
284
285#define _AWE_DEBUG_MODE 0x00
286#define _AWE_REVERB_MODE 0x01
287#define _AWE_CHORUS_MODE 0x02
288#define _AWE_REMOVE_LAST_SAMPLES 0x03
289#define _AWE_INITIALIZE_CHIP 0x04
290#define _AWE_SEND_EFFECT 0x05
291#define _AWE_TERMINATE_CHANNEL 0x06
292#define _AWE_TERMINATE_ALL 0x07
293#define _AWE_INITIAL_VOLUME 0x08
294#define _AWE_INITIAL_ATTEN _AWE_INITIAL_VOLUME
295#define _AWE_RESET_CHANNEL 0x09
296#define _AWE_CHANNEL_MODE 0x0a
297#define _AWE_DRUM_CHANNELS 0x0b
298#define _AWE_MISC_MODE 0x0c
299#define _AWE_RELEASE_ALL 0x0d
300#define _AWE_NOTEOFF_ALL 0x0e
301#define _AWE_CHN_PRESSURE 0x0f
302/*#define _AWE_GET_CURRENT_MODE 0x10*/
303#define _AWE_EQUALIZER 0x11
304/*#define _AWE_GET_MISC_MODE 0x12*/
305/*#define _AWE_GET_FONTINFO 0x13*/
306
307#define _AWE_MODE_FLAG 0x80
308#define _AWE_COOKED_FLAG 0x40 /* not supported */
309#define _AWE_MODE_VALUE_MASK 0x3F
310
311/*----------------------------------------------------------------*/
312
313#define _AWE_SET_CMD(p,dev,voice,cmd,p1,p2) \
314{((char*)(p))[0] = SEQ_PRIVATE;\
315 ((char*)(p))[1] = dev;\
316 ((char*)(p))[2] = _AWE_MODE_FLAG|(cmd);\
317 ((char*)(p))[3] = voice;\
318 ((unsigned short*)(p))[2] = p1;\
319 ((unsigned short*)(p))[3] = p2;}
320
321/* buffered access */
322#define _AWE_CMD(dev, voice, cmd, p1, p2) \
323{_SEQ_NEEDBUF(8);\
324 _AWE_SET_CMD(_seqbuf + _seqbufptr, dev, voice, cmd, p1, p2);\
325 _SEQ_ADVBUF(8);}
326
327/* direct access */
328#define _AWE_CMD_NOW(seqfd,dev,voice,cmd,p1,p2) \
329{struct seq_event_rec tmp;\
330 _AWE_SET_CMD(&tmp, dev, voice, cmd, p1, p2);\
331 ioctl(seqfd, SNDCTL_SEQ_OUTOFBAND, &tmp);}
332
333/*----------------------------------------------------------------*/
334
335/* set debugging mode */
336#define AWE_DEBUG_MODE(dev,p1) _AWE_CMD(dev, 0, _AWE_DEBUG_MODE, p1, 0)
337/* set reverb mode; from 0 to 7 */
338#define AWE_REVERB_MODE(dev,p1) _AWE_CMD(dev, 0, _AWE_REVERB_MODE, p1, 0)
339/* set chorus mode; from 0 to 7 */
340#define AWE_CHORUS_MODE(dev,p1) _AWE_CMD(dev, 0, _AWE_CHORUS_MODE, p1, 0)
341
342/* reset channel */
343#define AWE_RESET_CHANNEL(dev,ch) _AWE_CMD(dev, ch, _AWE_RESET_CHANNEL, 0, 0)
344#define AWE_RESET_CONTROL(dev,ch) _AWE_CMD(dev, ch, _AWE_RESET_CHANNEL, 1, 0)
345
346/* send an effect to all layers */
347#define AWE_SEND_EFFECT(dev,voice,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,type,value)
348#define AWE_ADD_EFFECT(dev,voice,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((type)|0x80),value)
349#define AWE_UNSET_EFFECT(dev,voice,type) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((type)|0x40),0)
350/* send an effect to a layer */
351#define AWE_SEND_LAYER_EFFECT(dev,voice,layer,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((layer+1)<<8|(type)),value)
352#define AWE_ADD_LAYER_EFFECT(dev,voice,layer,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((layer+1)<<8|(type)|0x80),value)
353#define AWE_UNSET_LAYER_EFFECT(dev,voice,layer,type) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((layer+1)<<8|(type)|0x40),0)
354
355/* terminate sound on the channel/voice */
356#define AWE_TERMINATE_CHANNEL(dev,voice) _AWE_CMD(dev,voice,_AWE_TERMINATE_CHANNEL,0,0)
357/* terminate all sounds */
358#define AWE_TERMINATE_ALL(dev) _AWE_CMD(dev, 0, _AWE_TERMINATE_ALL, 0, 0)
359/* release all sounds (w/o sustain effect) */
360#define AWE_RELEASE_ALL(dev) _AWE_CMD(dev, 0, _AWE_RELEASE_ALL, 0, 0)
361/* note off all sounds (w sustain effect) */
362#define AWE_NOTEOFF_ALL(dev) _AWE_CMD(dev, 0, _AWE_NOTEOFF_ALL, 0, 0)
363
364/* set initial attenuation */
365#define AWE_INITIAL_VOLUME(dev,atten) _AWE_CMD(dev, 0, _AWE_INITIAL_VOLUME, atten, 0)
366#define AWE_INITIAL_ATTEN AWE_INITIAL_VOLUME
367/* relative attenuation */
368#define AWE_SET_ATTEN(dev,atten) _AWE_CMD(dev, 0, _AWE_INITIAL_VOLUME, atten, 1)
369
370/* set channel playing mode; mode=0/1/2 */
371#define AWE_SET_CHANNEL_MODE(dev,mode) _AWE_CMD(dev, 0, _AWE_CHANNEL_MODE, mode, 0)
372#define AWE_PLAY_INDIRECT 0 /* indirect voice mode (default) */
373#define AWE_PLAY_MULTI 1 /* multi note voice mode */
374#define AWE_PLAY_DIRECT 2 /* direct single voice mode */
375#define AWE_PLAY_MULTI2 3 /* sequencer2 mode; used internally */
376
377/* set drum channel mask; channels is 32bit long value */
378#define AWE_DRUM_CHANNELS(dev,channels) _AWE_CMD(dev, 0, _AWE_DRUM_CHANNELS, ((channels) & 0xffff), ((channels) >> 16))
379
380/* set bass and treble control; values are from 0 to 11 */
381#define AWE_EQUALIZER(dev,bass,treble) _AWE_CMD(dev, 0, _AWE_EQUALIZER, bass, treble)
382
383/* remove last loaded samples */
384#define AWE_REMOVE_LAST_SAMPLES(seqfd,dev) _AWE_CMD_NOW(seqfd, dev, 0, _AWE_REMOVE_LAST_SAMPLES, 0, 0)
385/* initialize emu8000 chip */
386#define AWE_INITIALIZE_CHIP(seqfd,dev) _AWE_CMD_NOW(seqfd, dev, 0, _AWE_INITIALIZE_CHIP, 0, 0)
387
388/* set miscellaneous modes; meta command */
389#define AWE_MISC_MODE(dev,mode,value) _AWE_CMD(dev, 0, _AWE_MISC_MODE, mode, value)
390/* exclusive sound off; 1=off */
391#define AWE_EXCLUSIVE_SOUND(dev,mode) AWE_MISC_MODE(dev,AWE_MD_EXCLUSIVE_SOUND,mode)
392/* default GUS bank number */
393#define AWE_SET_GUS_BANK(dev,bank) AWE_MISC_MODE(dev,AWE_MD_GUS_BANK,bank)
394/* change panning position in realtime; 0=don't 1=do */
395#define AWE_REALTIME_PAN(dev,mode) AWE_MISC_MODE(dev,AWE_MD_REALTIME_PAN,mode)
396
397/* extended pressure controls; not portable with other sound drivers */
398#define AWE_KEY_PRESSURE(dev,ch,note,vel) SEQ_START_NOTE(dev,ch,(note)+128,vel)
399#define AWE_CHN_PRESSURE(dev,ch,vel) _AWE_CMD(dev,ch,_AWE_CHN_PRESSURE,vel,0)
400
401/*----------------------------------------------------------------*/
402
403/* reverb mode parameters */
404#define AWE_REVERB_ROOM1 0
405#define AWE_REVERB_ROOM2 1
406#define AWE_REVERB_ROOM3 2
407#define AWE_REVERB_HALL1 3
408#define AWE_REVERB_HALL2 4
409#define AWE_REVERB_PLATE 5
410#define AWE_REVERB_DELAY 6
411#define AWE_REVERB_PANNINGDELAY 7
412#define AWE_REVERB_PREDEFINED 8
413/* user can define reverb modes up to 32 */
414#define AWE_REVERB_NUMBERS 32
415
416typedef struct awe_reverb_fx_rec {
417 unsigned short parms[28];
418} awe_reverb_fx_rec;
419
420/*----------------------------------------------------------------*/
421
422/* chorus mode parameters */
423#define AWE_CHORUS_1 0
424#define AWE_CHORUS_2 1
425#define AWE_CHORUS_3 2
426#define AWE_CHORUS_4 3
427#define AWE_CHORUS_FEEDBACK 4
428#define AWE_CHORUS_FLANGER 5
429#define AWE_CHORUS_SHORTDELAY 6
430#define AWE_CHORUS_SHORTDELAY2 7
431#define AWE_CHORUS_PREDEFINED 8
432/* user can define chorus modes up to 32 */
433#define AWE_CHORUS_NUMBERS 32
434
435typedef struct awe_chorus_fx_rec {
436 unsigned short feedback; /* feedback level (0xE600-0xE6FF) */
437 unsigned short delay_offset; /* delay (0-0x0DA3) [1/44100 sec] */
438 unsigned short lfo_depth; /* LFO depth (0xBC00-0xBCFF) */
439 unsigned int delay; /* right delay (0-0xFFFFFFFF) [1/256/44100 sec] */
440 unsigned int lfo_freq; /* LFO freq LFO freq (0-0xFFFFFFFF) */
441} awe_chorus_fx_rec;
442
443/*----------------------------------------------------------------*/
444
445/* misc mode types */
446enum {
447/* 0*/ AWE_MD_EXCLUSIVE_OFF, /* obsolete */
448/* 1*/ AWE_MD_EXCLUSIVE_ON, /* obsolete */
449/* 2*/ AWE_MD_VERSION, /* read only */
450/* 3*/ AWE_MD_EXCLUSIVE_SOUND, /* 0/1: exclusive note on (default=1) */
451/* 4*/ AWE_MD_REALTIME_PAN, /* 0/1: do realtime pan change (default=1) */
452/* 5*/ AWE_MD_GUS_BANK, /* bank number for GUS patches (default=0) */
453/* 6*/ AWE_MD_KEEP_EFFECT, /* 0/1: keep effect values, (default=0) */
454/* 7*/ AWE_MD_ZERO_ATTEN, /* attenuation of max volume (default=32) */
455/* 8*/ AWE_MD_CHN_PRIOR, /* 0/1: set MIDI channel priority mode (default=1) */
456/* 9*/ AWE_MD_MOD_SENSE, /* integer: modwheel sensitivity (def=18) */
457/*10*/ AWE_MD_DEF_PRESET, /* integer: default preset number (def=0) */
458/*11*/ AWE_MD_DEF_BANK, /* integer: default bank number (def=0) */
459/*12*/ AWE_MD_DEF_DRUM, /* integer: default drumset number (def=0) */
460/*13*/ AWE_MD_TOGGLE_DRUM_BANK, /* 0/1: toggle drum flag with bank# (def=0) */
461/*14*/ AWE_MD_NEW_VOLUME_CALC, /* 0/1: volume calculation mode (def=1) */
462/*15*/ AWE_MD_CHORUS_MODE, /* integer: chorus mode (def=2) */
463/*16*/ AWE_MD_REVERB_MODE, /* integer: chorus mode (def=4) */
464/*17*/ AWE_MD_BASS_LEVEL, /* integer: bass level (def=5) */
465/*18*/ AWE_MD_TREBLE_LEVEL, /* integer: treble level (def=9) */
466/*19*/ AWE_MD_DEBUG_MODE, /* integer: debug level (def=0) */
467/*20*/ AWE_MD_PAN_EXCHANGE, /* 0/1: exchange panning direction (def=0) */
468 AWE_MD_END,
469};
470
471/*----------------------------------------------------------------*/
472
473/* effect parameters */
474enum {
475
476/* modulation envelope parameters */
477/* 0*/ AWE_FX_ENV1_DELAY, /* WORD: ENVVAL */
478/* 1*/ AWE_FX_ENV1_ATTACK, /* BYTE: up ATKHLD */
479/* 2*/ AWE_FX_ENV1_HOLD, /* BYTE: lw ATKHLD */
480/* 3*/ AWE_FX_ENV1_DECAY, /* BYTE: lw DCYSUS */
481/* 4*/ AWE_FX_ENV1_RELEASE, /* BYTE: lw DCYSUS */
482/* 5*/ AWE_FX_ENV1_SUSTAIN, /* BYTE: up DCYSUS */
483/* 6*/ AWE_FX_ENV1_PITCH, /* BYTE: up PEFE */
484/* 7*/ AWE_FX_ENV1_CUTOFF, /* BYTE: lw PEFE */
485
486/* volume envelope parameters */
487/* 8*/ AWE_FX_ENV2_DELAY, /* WORD: ENVVOL */
488/* 9*/ AWE_FX_ENV2_ATTACK, /* BYTE: up ATKHLDV */
489/*10*/ AWE_FX_ENV2_HOLD, /* BYTE: lw ATKHLDV */
490/*11*/ AWE_FX_ENV2_DECAY, /* BYTE: lw DCYSUSV */
491/*12*/ AWE_FX_ENV2_RELEASE, /* BYTE: lw DCYSUSV */
492/*13*/ AWE_FX_ENV2_SUSTAIN, /* BYTE: up DCYSUSV */
493
494/* LFO1 (tremolo & vibrato) parameters */
495/*14*/ AWE_FX_LFO1_DELAY, /* WORD: LFO1VAL */
496/*15*/ AWE_FX_LFO1_FREQ, /* BYTE: lo TREMFRQ */
497/*16*/ AWE_FX_LFO1_VOLUME, /* BYTE: up TREMFRQ */
498/*17*/ AWE_FX_LFO1_PITCH, /* BYTE: up FMMOD */
499/*18*/ AWE_FX_LFO1_CUTOFF, /* BYTE: lo FMMOD */
500
501/* LFO2 (vibrato) parameters */
502/*19*/ AWE_FX_LFO2_DELAY, /* WORD: LFO2VAL */
503/*20*/ AWE_FX_LFO2_FREQ, /* BYTE: lo FM2FRQ2 */
504/*21*/ AWE_FX_LFO2_PITCH, /* BYTE: up FM2FRQ2 */
505
506/* Other overall effect parameters */
507/*22*/ AWE_FX_INIT_PITCH, /* SHORT: pitch offset */
508/*23*/ AWE_FX_CHORUS, /* BYTE: chorus effects send (0-255) */
509/*24*/ AWE_FX_REVERB, /* BYTE: reverb effects send (0-255) */
510/*25*/ AWE_FX_CUTOFF, /* BYTE: up IFATN */
511/*26*/ AWE_FX_FILTERQ, /* BYTE: up CCCA */
512
513/* Sample / loop offset changes */
514/*27*/ AWE_FX_SAMPLE_START, /* SHORT: offset */
515/*28*/ AWE_FX_LOOP_START, /* SHORT: offset */
516/*29*/ AWE_FX_LOOP_END, /* SHORT: offset */
517/*30*/ AWE_FX_COARSE_SAMPLE_START, /* SHORT: upper word offset */
518/*31*/ AWE_FX_COARSE_LOOP_START, /* SHORT: upper word offset */
519/*32*/ AWE_FX_COARSE_LOOP_END, /* SHORT: upper word offset */
520/*33*/ AWE_FX_ATTEN, /* BYTE: lo IFATN */
521
522 AWE_FX_END,
523};
524
525#endif /* AWE_VOICE_H */
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index e86e4a938373..3dc715b02500 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -124,19 +124,8 @@
124#define be32_to_cpus __be32_to_cpus 124#define be32_to_cpus __be32_to_cpus
125#define cpu_to_be16s __cpu_to_be16s 125#define cpu_to_be16s __cpu_to_be16s
126#define be16_to_cpus __be16_to_cpus 126#define be16_to_cpus __be16_to_cpus
127#endif
128 127
129
130#if defined(__KERNEL__)
131/* 128/*
132 * Handle ntohl and suches. These have various compatibility
133 * issues - like we want to give the prototype even though we
134 * also have a macro for them in case some strange program
135 * wants to take the address of the thing or something..
136 *
137 * Note that these used to return a "long" in libc5, even though
138 * long is often 64-bit these days.. Thus the casts.
139 *
140 * They have to be macros in order to do the constant folding 129 * They have to be macros in order to do the constant folding
141 * correctly - if the argument is passed into an inline function 130
142 * it is no longer constant according to gcc.. 131 * it is no longer constant according to gcc..
@@ -147,17 +136,6 @@
147#undef htonl 136#undef htonl
148#undef htons 137#undef htons
149 138
150/*
151 * Do the prototypes. Somebody might want to take the
152 * address or some such sick thing..
153 */
154extern __u32 ntohl(__be32);
155extern __be32 htonl(__u32);
156extern __u16 ntohs(__be16);
157extern __be16 htons(__u16);
158
159#if defined(__GNUC__) && defined(__OPTIMIZE__)
160
161#define ___htonl(x) __cpu_to_be32(x) 139#define ___htonl(x) __cpu_to_be32(x)
162#define ___htons(x) __cpu_to_be16(x) 140#define ___htons(x) __cpu_to_be16(x)
163#define ___ntohl(x) __be32_to_cpu(x) 141#define ___ntohl(x) __be32_to_cpu(x)
@@ -168,9 +146,6 @@ extern __be16 htons(__u16);
168#define htons(x) ___htons(x) 146#define htons(x) ___htons(x)
169#define ntohs(x) ___ntohs(x) 147#define ntohs(x) ___ntohs(x)
170 148
171#endif /* OPTIMIZE */
172
173#endif /* KERNEL */ 149#endif /* KERNEL */
174 150
175
176#endif /* _LINUX_BYTEORDER_GENERIC_H */ 151#endif /* _LINUX_BYTEORDER_GENERIC_H */
diff --git a/include/linux/byteorder/swab.h b/include/linux/byteorder/swab.h
index 25f7f32883ec..142134ff1645 100644
--- a/include/linux/byteorder/swab.h
+++ b/include/linux/byteorder/swab.h
@@ -10,6 +10,10 @@
10 * separated swab functions from cpu_to_XX, 10 * separated swab functions from cpu_to_XX,
11 * to clean up support for bizarre-endian architectures. 11 * to clean up support for bizarre-endian architectures.
12 * 12 *
13 * Trent Piepho <xyzzy@speakeasy.org> 2007114
14 * make constant-folding work, provide C versions that
15 * gcc can optimize better, explain different versions
16 *
13 * See asm-i386/byteorder.h and suches for examples of how to provide 17 * See asm-i386/byteorder.h and suches for examples of how to provide
14 * architecture-dependent optimized versions 18 * architecture-dependent optimized versions
15 * 19 *
@@ -17,40 +21,66 @@
17 21
18#include <linux/compiler.h> 22#include <linux/compiler.h>
19 23
24/* Functions/macros defined, there are a lot:
25 *
26 * ___swabXX
27 * Generic C versions of the swab functions.
28 *
29 * ___constant_swabXX
30 * C versions that gcc can fold into a compile-time constant when
31 * the argument is a compile-time constant.
32 *
33 * __arch__swabXX[sp]?
34 * Architecture optimized versions of all the swab functions
35 * (including the s and p versions). These can be defined in
36 * asm-arch/byteorder.h. Any which are not, are defined here.
37 * __arch__swabXXs() is defined in terms of __arch__swabXXp(), which
38 * is defined in terms of __arch__swabXX(), which is in turn defined
39 * in terms of ___swabXX(x).
40 * These must be macros. They may be unsafe for arguments with
41 * side-effects.
42 *
43 * __fswabXX
44 * Inline function versions of the __arch__ macros. These _are_ safe
45 * if the arguments have side-effects. Note there are no s and p
46 * versions of these.
47 *
 48 * __swabXX[sp]
 49 * These are the ones you should actually use. The __swabXX versions
50 * will be a constant given a constant argument and use the arch
51 * specific code (if any) for non-constant arguments. The s and p
52 * versions always use the arch specific code (constant folding
53 * doesn't apply). They are safe to use with arguments with
54 * side-effects.
55 *
 56 * swabXX[sp]
 57 * Nicknames for __swabXX[sp] to use in the kernel.
58 */
59
20/* casts are necessary for constants, because we never know for sure 60
21 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 61 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
22 */ 62 */
23#define ___swab16(x) \
24({ \
25 __u16 __x = (x); \
26 ((__u16)( \
27 (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
28 (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
29})
30 63
31#define ___swab32(x) \ 64static __inline__ __attribute_const__ __u16 ___swab16(__u16 x)
32({ \ 65{
33 __u32 __x = (x); \ 66 return x<<8 | x>>8;
34 ((__u32)( \ 67}
35 (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \ 68static __inline__ __attribute_const__ __u32 ___swab32(__u32 x)
36 (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \ 69{
37 (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \ 70 return x<<24 | x>>24 |
38 (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \ 71 (x & (__u32)0x0000ff00UL)<<8 |
39}) 72 (x & (__u32)0x00ff0000UL)>>8;
40 73}
41#define ___swab64(x) \ 74static __inline__ __attribute_const__ __u64 ___swab64(__u64 x)
42({ \ 75{
43 __u64 __x = (x); \ 76 return x<<56 | x>>56 |
44 ((__u64)( \ 77 (x & (__u64)0x000000000000ff00ULL)<<40 |
45 (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \ 78 (x & (__u64)0x0000000000ff0000ULL)<<24 |
46 (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \ 79 (x & (__u64)0x00000000ff000000ULL)<< 8 |
47 (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \ 80 (x & (__u64)0x000000ff00000000ULL)>> 8 |
48 (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \ 81 (x & (__u64)0x0000ff0000000000ULL)>>24 |
49 (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \ 82 (x & (__u64)0x00ff000000000000ULL)>>40;
50 (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ 83}
51 (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
52 (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
53})
54 84
55#define ___constant_swab16(x) \ 85#define ___constant_swab16(x) \
56 ((__u16)( \ 86 ((__u16)( \
@@ -77,13 +107,13 @@
77 * provide defaults when no architecture-specific optimization is detected 107 * provide defaults when no architecture-specific optimization is detected
78 */ 108 */
79#ifndef __arch__swab16 109#ifndef __arch__swab16
80# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); }) 110# define __arch__swab16(x) ___swab16(x)
81#endif 111#endif
82#ifndef __arch__swab32 112#ifndef __arch__swab32
83# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); }) 113# define __arch__swab32(x) ___swab32(x)
84#endif 114#endif
85#ifndef __arch__swab64 115#ifndef __arch__swab64
86# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); }) 116# define __arch__swab64(x) ___swab64(x)
87#endif 117#endif
88 118
89#ifndef __arch__swab16p 119#ifndef __arch__swab16p
@@ -97,13 +127,13 @@
97#endif 127#endif
98 128
99#ifndef __arch__swab16s 129#ifndef __arch__swab16s
100# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0) 130# define __arch__swab16s(x) ((void)(*(x) = __arch__swab16p(x)))
101#endif 131#endif
102#ifndef __arch__swab32s 132#ifndef __arch__swab32s
103# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0) 133# define __arch__swab32s(x) ((void)(*(x) = __arch__swab32p(x)))
104#endif 134#endif
105#ifndef __arch__swab64s 135#ifndef __arch__swab64s
106# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0) 136# define __arch__swab64s(x) ((void)(*(x) = __arch__swab64p(x)))
107#endif 137#endif
108 138
109 139
@@ -113,15 +143,15 @@
113#if defined(__GNUC__) && defined(__OPTIMIZE__) 143#if defined(__GNUC__) && defined(__OPTIMIZE__)
114# define __swab16(x) \ 144# define __swab16(x) \
115(__builtin_constant_p((__u16)(x)) ? \ 145(__builtin_constant_p((__u16)(x)) ? \
116 ___swab16((x)) : \ 146 ___constant_swab16((x)) : \
117 __fswab16((x))) 147 __fswab16((x)))
118# define __swab32(x) \ 148# define __swab32(x) \
119(__builtin_constant_p((__u32)(x)) ? \ 149(__builtin_constant_p((__u32)(x)) ? \
120 ___swab32((x)) : \ 150 ___constant_swab32((x)) : \
121 __fswab32((x))) 151 __fswab32((x)))
122# define __swab64(x) \ 152# define __swab64(x) \
123(__builtin_constant_p((__u64)(x)) ? \ 153(__builtin_constant_p((__u64)(x)) ? \
124 ___swab64((x)) : \ 154 ___constant_swab64((x)) : \
125 __fswab64((x))) 155 __fswab64((x)))
126#else 156#else
127# define __swab16(x) __fswab16(x) 157# define __swab16(x) __fswab16(x)
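
The comment block added above spells out the layering; the practical payoff is the __builtin_constant_p() dispatch at the bottom, which folds a literal byteswap at compile time while run-time values still reach the (possibly arch-optimized) function. A standalone sketch of the same dispatch, with made-up names MY_SWAB32/my_fswab32:

#include <stdio.h>
#include <stdint.h>

static inline uint32_t my_fswab32(uint32_t x)
{
	return x << 24 | x >> 24 |
	       (x & 0x0000ff00u) << 8 |
	       (x & 0x00ff0000u) >> 8;
}

/* Constant arguments are folded by the compiler; everything else calls
 * the inline function. */
#define MY_SWAB32(x)						\
	(__builtin_constant_p((uint32_t)(x)) ?			\
	 (uint32_t)((((uint32_t)(x) & 0x000000ffu) << 24) |	\
		    (((uint32_t)(x) & 0x0000ff00u) <<  8) |	\
		    (((uint32_t)(x) & 0x00ff0000u) >>  8) |	\
		    (((uint32_t)(x) & 0xff000000u) >> 24)) :	\
	 my_fswab32(x))

int main(void)
{
	uint32_t v = 0x12345678u;

	printf("%08x %08x\n", MY_SWAB32(0x12345678u), MY_SWAB32(v));
	/* prints: 78563412 78563412 */
	return 0;
}
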
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 4ea7e7bcfafe..8486e78f7335 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -54,17 +54,17 @@ enum clock_event_nofitiers {
54/** 54/**
55 * struct clock_event_device - clock event device descriptor 55 * struct clock_event_device - clock event device descriptor
56 * @name: ptr to clock event name 56 * @name: ptr to clock event name
57 * @hints: usage hints 57 * @features: features
58 * @max_delta_ns: maximum delta value in ns 58 * @max_delta_ns: maximum delta value in ns
59 * @min_delta_ns: minimum delta value in ns 59 * @min_delta_ns: minimum delta value in ns
60 * @mult: nanosecond to cycles multiplier 60 * @mult: nanosecond to cycles multiplier
61 * @shift: nanoseconds to cycles divisor (power of two) 61 * @shift: nanoseconds to cycles divisor (power of two)
62 * @rating: variable to rate clock event devices 62 * @rating: variable to rate clock event devices
63 * @irq: irq number (only for non cpu local devices) 63 * @irq: IRQ number (only for non CPU local devices)
64 * @cpumask: cpumask to indicate for which cpus this device works 64 * @cpumask: cpumask to indicate for which CPUs this device works
65 * @set_next_event: set next event 65 * @set_next_event: set next event function
66 * @set_mode: set mode function 66 * @set_mode: set mode function
67 * @evthandler: Assigned by the framework to be called by the low 67 * @event_handler: Assigned by the framework to be called by the low
68 * level handler of the event source 68 * level handler of the event source
69 * @broadcast: function to broadcast events 69 * @broadcast: function to broadcast events
70 * @list: list head for the management code 70 * @list: list head for the management code
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index daa4940cc0f1..2665ca04cf8f 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -12,6 +12,7 @@
12#include <linux/timex.h> 12#include <linux/timex.h>
13#include <linux/time.h> 13#include <linux/time.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/cache.h>
15#include <linux/timer.h> 16#include <linux/timer.h>
16#include <asm/div64.h> 17#include <asm/div64.h>
17#include <asm/io.h> 18#include <asm/io.h>
@@ -52,6 +53,9 @@ struct clocksource;
52 * @xtime_interval: Used internally by timekeeping core, please ignore. 53 * @xtime_interval: Used internally by timekeeping core, please ignore.
53 */ 54 */
54struct clocksource { 55struct clocksource {
56 /*
57 * First part of structure is read mostly
58 */
55 char *name; 59 char *name;
56 struct list_head list; 60 struct list_head list;
57 int rating; 61 int rating;
@@ -63,8 +67,15 @@ struct clocksource {
63 cycle_t (*vread)(void); 67 cycle_t (*vread)(void);
64 68
65 /* timekeeping specific data, ignore */ 69 /* timekeeping specific data, ignore */
66 cycle_t cycle_last, cycle_interval; 70 cycle_t cycle_interval;
67 u64 xtime_nsec, xtime_interval; 71 u64 xtime_interval;
72 /*
73 * Second part is written at each timer interrupt
74 * Keep it in a different cache line to dirty no
75 * more than one cache line.
76 */
77 cycle_t cycle_last ____cacheline_aligned_in_smp;
78 u64 xtime_nsec;
68 s64 error; 79 s64 error;
69 80
70#ifdef CONFIG_CLOCKSOURCE_WATCHDOG 81#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
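
The clocksource reshuffle is a cache-placement change rather than a behavioural one: cycle_last and xtime_nsec are rewritten on every tick, so moving them onto their own line keeps the read-mostly head of the structure clean in other CPUs' caches. The same idiom in a generic, made-up structure:

#include <linux/cache.h>
#include <linux/types.h>

struct sample_stats {
	const char	*name;		/* read mostly */
	u32		mult;		/* read mostly */
	u32		shift;		/* read mostly */

	/* Fields dirtied on every update start on their own cache line so
	 * the frequent writes do not bounce the line the readers keep hot. */
	u64		last_cycles ____cacheline_aligned_in_smp;
	u64		accumulated_ns;
};
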
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
deleted file mode 100644
index c26c3adcfacf..000000000000
--- a/include/linux/compat_ioctl.h
+++ /dev/null
@@ -1,830 +0,0 @@
1/* List here explicitly which ioctl's are known to have
2 * compatible types passed or none at all... Please include
3 * only stuff that is compatible on *all architectures*.
4 */
5
6COMPATIBLE_IOCTL(0x4B50) /* KDGHWCLK - not in the kernel, but don't complain */
7COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain */
8
9/* Big T */
10COMPATIBLE_IOCTL(TCGETA)
11COMPATIBLE_IOCTL(TCSETA)
12COMPATIBLE_IOCTL(TCSETAW)
13COMPATIBLE_IOCTL(TCSETAF)
14COMPATIBLE_IOCTL(TCSBRK)
15ULONG_IOCTL(TCSBRKP)
16COMPATIBLE_IOCTL(TCXONC)
17COMPATIBLE_IOCTL(TCFLSH)
18COMPATIBLE_IOCTL(TCGETS)
19COMPATIBLE_IOCTL(TCSETS)
20COMPATIBLE_IOCTL(TCSETSW)
21COMPATIBLE_IOCTL(TCSETSF)
22COMPATIBLE_IOCTL(TIOCLINUX)
23COMPATIBLE_IOCTL(TIOCSBRK)
24COMPATIBLE_IOCTL(TIOCCBRK)
25ULONG_IOCTL(TIOCMIWAIT)
26COMPATIBLE_IOCTL(TIOCGICOUNT)
27/* Little t */
28COMPATIBLE_IOCTL(TIOCGETD)
29COMPATIBLE_IOCTL(TIOCSETD)
30COMPATIBLE_IOCTL(TIOCEXCL)
31COMPATIBLE_IOCTL(TIOCNXCL)
32COMPATIBLE_IOCTL(TIOCCONS)
33COMPATIBLE_IOCTL(TIOCGSOFTCAR)
34COMPATIBLE_IOCTL(TIOCSSOFTCAR)
35COMPATIBLE_IOCTL(TIOCSWINSZ)
36COMPATIBLE_IOCTL(TIOCGWINSZ)
37COMPATIBLE_IOCTL(TIOCMGET)
38COMPATIBLE_IOCTL(TIOCMBIC)
39COMPATIBLE_IOCTL(TIOCMBIS)
40COMPATIBLE_IOCTL(TIOCMSET)
41COMPATIBLE_IOCTL(TIOCPKT)
42COMPATIBLE_IOCTL(TIOCNOTTY)
43COMPATIBLE_IOCTL(TIOCSTI)
44COMPATIBLE_IOCTL(TIOCOUTQ)
45COMPATIBLE_IOCTL(TIOCSPGRP)
46COMPATIBLE_IOCTL(TIOCGPGRP)
47ULONG_IOCTL(TIOCSCTTY)
48COMPATIBLE_IOCTL(TIOCGPTN)
49COMPATIBLE_IOCTL(TIOCSPTLCK)
50COMPATIBLE_IOCTL(TIOCSERGETLSR)
51/* Little f */
52COMPATIBLE_IOCTL(FIOCLEX)
53COMPATIBLE_IOCTL(FIONCLEX)
54COMPATIBLE_IOCTL(FIOASYNC)
55COMPATIBLE_IOCTL(FIONBIO)
56COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
57/* 0x00 */
58COMPATIBLE_IOCTL(FIBMAP)
59COMPATIBLE_IOCTL(FIGETBSZ)
60/* 0x03 -- HD/IDE ioctl's used by hdparm and friends.
61 * Some need translations, these do not.
62 */
63COMPATIBLE_IOCTL(HDIO_GET_IDENTITY)
64COMPATIBLE_IOCTL(HDIO_DRIVE_TASK)
65COMPATIBLE_IOCTL(HDIO_DRIVE_CMD)
66ULONG_IOCTL(HDIO_SET_MULTCOUNT)
67ULONG_IOCTL(HDIO_SET_UNMASKINTR)
68ULONG_IOCTL(HDIO_SET_KEEPSETTINGS)
69ULONG_IOCTL(HDIO_SET_32BIT)
70ULONG_IOCTL(HDIO_SET_NOWERR)
71ULONG_IOCTL(HDIO_SET_DMA)
72ULONG_IOCTL(HDIO_SET_PIO_MODE)
73ULONG_IOCTL(HDIO_SET_NICE)
74ULONG_IOCTL(HDIO_SET_WCACHE)
75ULONG_IOCTL(HDIO_SET_ACOUSTIC)
76ULONG_IOCTL(HDIO_SET_BUSSTATE)
77ULONG_IOCTL(HDIO_SET_ADDRESS)
78COMPATIBLE_IOCTL(HDIO_SCAN_HWIF)
79/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
80COMPATIBLE_IOCTL(0x330)
81/* 0x02 -- Floppy ioctls */
82COMPATIBLE_IOCTL(FDMSGON)
83COMPATIBLE_IOCTL(FDMSGOFF)
84COMPATIBLE_IOCTL(FDSETEMSGTRESH)
85COMPATIBLE_IOCTL(FDFLUSH)
86COMPATIBLE_IOCTL(FDWERRORCLR)
87COMPATIBLE_IOCTL(FDSETMAXERRS)
88COMPATIBLE_IOCTL(FDGETMAXERRS)
89COMPATIBLE_IOCTL(FDGETDRVTYP)
90COMPATIBLE_IOCTL(FDEJECT)
91COMPATIBLE_IOCTL(FDCLRPRM)
92COMPATIBLE_IOCTL(FDFMTBEG)
93COMPATIBLE_IOCTL(FDFMTEND)
94COMPATIBLE_IOCTL(FDRESET)
95COMPATIBLE_IOCTL(FDTWADDLE)
96COMPATIBLE_IOCTL(FDFMTTRK)
97COMPATIBLE_IOCTL(FDRAWCMD)
98/* 0x12 */
99#ifdef CONFIG_BLOCK
100COMPATIBLE_IOCTL(BLKRASET)
101COMPATIBLE_IOCTL(BLKROSET)
102COMPATIBLE_IOCTL(BLKROGET)
103COMPATIBLE_IOCTL(BLKRRPART)
104COMPATIBLE_IOCTL(BLKFLSBUF)
105COMPATIBLE_IOCTL(BLKSECTSET)
106COMPATIBLE_IOCTL(BLKSSZGET)
107COMPATIBLE_IOCTL(BLKTRACESTART)
108COMPATIBLE_IOCTL(BLKTRACESTOP)
109COMPATIBLE_IOCTL(BLKTRACESETUP)
110COMPATIBLE_IOCTL(BLKTRACETEARDOWN)
111ULONG_IOCTL(BLKRASET)
112ULONG_IOCTL(BLKFRASET)
113#endif
114/* RAID */
115COMPATIBLE_IOCTL(RAID_VERSION)
116COMPATIBLE_IOCTL(GET_ARRAY_INFO)
117COMPATIBLE_IOCTL(GET_DISK_INFO)
118COMPATIBLE_IOCTL(PRINT_RAID_DEBUG)
119COMPATIBLE_IOCTL(RAID_AUTORUN)
120COMPATIBLE_IOCTL(CLEAR_ARRAY)
121COMPATIBLE_IOCTL(ADD_NEW_DISK)
122ULONG_IOCTL(HOT_REMOVE_DISK)
123COMPATIBLE_IOCTL(SET_ARRAY_INFO)
124COMPATIBLE_IOCTL(SET_DISK_INFO)
125COMPATIBLE_IOCTL(WRITE_RAID_INFO)
126COMPATIBLE_IOCTL(UNPROTECT_ARRAY)
127COMPATIBLE_IOCTL(PROTECT_ARRAY)
128ULONG_IOCTL(HOT_ADD_DISK)
129ULONG_IOCTL(SET_DISK_FAULTY)
130COMPATIBLE_IOCTL(RUN_ARRAY)
131COMPATIBLE_IOCTL(STOP_ARRAY)
132COMPATIBLE_IOCTL(STOP_ARRAY_RO)
133COMPATIBLE_IOCTL(RESTART_ARRAY_RW)
134COMPATIBLE_IOCTL(GET_BITMAP_FILE)
135ULONG_IOCTL(SET_BITMAP_FILE)
136/* DM */
137COMPATIBLE_IOCTL(DM_VERSION_32)
138COMPATIBLE_IOCTL(DM_REMOVE_ALL_32)
139COMPATIBLE_IOCTL(DM_LIST_DEVICES_32)
140COMPATIBLE_IOCTL(DM_DEV_CREATE_32)
141COMPATIBLE_IOCTL(DM_DEV_REMOVE_32)
142COMPATIBLE_IOCTL(DM_DEV_RENAME_32)
143COMPATIBLE_IOCTL(DM_DEV_SUSPEND_32)
144COMPATIBLE_IOCTL(DM_DEV_STATUS_32)
145COMPATIBLE_IOCTL(DM_DEV_WAIT_32)
146COMPATIBLE_IOCTL(DM_TABLE_LOAD_32)
147COMPATIBLE_IOCTL(DM_TABLE_CLEAR_32)
148COMPATIBLE_IOCTL(DM_TABLE_DEPS_32)
149COMPATIBLE_IOCTL(DM_TABLE_STATUS_32)
150COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32)
151COMPATIBLE_IOCTL(DM_TARGET_MSG_32)
152COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32)
153COMPATIBLE_IOCTL(DM_VERSION)
154COMPATIBLE_IOCTL(DM_REMOVE_ALL)
155COMPATIBLE_IOCTL(DM_LIST_DEVICES)
156COMPATIBLE_IOCTL(DM_DEV_CREATE)
157COMPATIBLE_IOCTL(DM_DEV_REMOVE)
158COMPATIBLE_IOCTL(DM_DEV_RENAME)
159COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
160COMPATIBLE_IOCTL(DM_DEV_STATUS)
161COMPATIBLE_IOCTL(DM_DEV_WAIT)
162COMPATIBLE_IOCTL(DM_TABLE_LOAD)
163COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
164COMPATIBLE_IOCTL(DM_TABLE_DEPS)
165COMPATIBLE_IOCTL(DM_TABLE_STATUS)
166COMPATIBLE_IOCTL(DM_LIST_VERSIONS)
167COMPATIBLE_IOCTL(DM_TARGET_MSG)
168COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY)
169/* Big K */
170COMPATIBLE_IOCTL(PIO_FONT)
171COMPATIBLE_IOCTL(GIO_FONT)
172ULONG_IOCTL(KDSIGACCEPT)
173COMPATIBLE_IOCTL(KDGETKEYCODE)
174COMPATIBLE_IOCTL(KDSETKEYCODE)
175ULONG_IOCTL(KIOCSOUND)
176ULONG_IOCTL(KDMKTONE)
177COMPATIBLE_IOCTL(KDGKBTYPE)
178ULONG_IOCTL(KDSETMODE)
179COMPATIBLE_IOCTL(KDGETMODE)
180ULONG_IOCTL(KDSKBMODE)
181COMPATIBLE_IOCTL(KDGKBMODE)
182ULONG_IOCTL(KDSKBMETA)
183COMPATIBLE_IOCTL(KDGKBMETA)
184COMPATIBLE_IOCTL(KDGKBENT)
185COMPATIBLE_IOCTL(KDSKBENT)
186COMPATIBLE_IOCTL(KDGKBSENT)
187COMPATIBLE_IOCTL(KDSKBSENT)
188COMPATIBLE_IOCTL(KDGKBDIACR)
189COMPATIBLE_IOCTL(KDSKBDIACR)
190COMPATIBLE_IOCTL(KDKBDREP)
191COMPATIBLE_IOCTL(KDGKBLED)
192ULONG_IOCTL(KDSKBLED)
193COMPATIBLE_IOCTL(KDGETLED)
194ULONG_IOCTL(KDSETLED)
195COMPATIBLE_IOCTL(GIO_SCRNMAP)
196COMPATIBLE_IOCTL(PIO_SCRNMAP)
197COMPATIBLE_IOCTL(GIO_UNISCRNMAP)
198COMPATIBLE_IOCTL(PIO_UNISCRNMAP)
199COMPATIBLE_IOCTL(PIO_FONTRESET)
200COMPATIBLE_IOCTL(PIO_UNIMAPCLR)
201/* Big S */
202COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
203COMPATIBLE_IOCTL(SCSI_IOCTL_DOORLOCK)
204COMPATIBLE_IOCTL(SCSI_IOCTL_DOORUNLOCK)
205COMPATIBLE_IOCTL(SCSI_IOCTL_TEST_UNIT_READY)
206COMPATIBLE_IOCTL(SCSI_IOCTL_GET_BUS_NUMBER)
207COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
208COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
209COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
210/* Big T */
211COMPATIBLE_IOCTL(TUNSETNOCSUM)
212COMPATIBLE_IOCTL(TUNSETDEBUG)
213COMPATIBLE_IOCTL(TUNSETPERSIST)
214COMPATIBLE_IOCTL(TUNSETOWNER)
215/* Big V */
216COMPATIBLE_IOCTL(VT_SETMODE)
217COMPATIBLE_IOCTL(VT_GETMODE)
218COMPATIBLE_IOCTL(VT_GETSTATE)
219COMPATIBLE_IOCTL(VT_OPENQRY)
220ULONG_IOCTL(VT_ACTIVATE)
221ULONG_IOCTL(VT_WAITACTIVE)
222ULONG_IOCTL(VT_RELDISP)
223ULONG_IOCTL(VT_DISALLOCATE)
224COMPATIBLE_IOCTL(VT_RESIZE)
225COMPATIBLE_IOCTL(VT_RESIZEX)
226COMPATIBLE_IOCTL(VT_LOCKSWITCH)
227COMPATIBLE_IOCTL(VT_UNLOCKSWITCH)
228COMPATIBLE_IOCTL(VT_GETHIFONTMASK)
229/* Little p (/dev/rtc, /dev/envctrl, etc.) */
230COMPATIBLE_IOCTL(RTC_AIE_ON)
231COMPATIBLE_IOCTL(RTC_AIE_OFF)
232COMPATIBLE_IOCTL(RTC_UIE_ON)
233COMPATIBLE_IOCTL(RTC_UIE_OFF)
234COMPATIBLE_IOCTL(RTC_PIE_ON)
235COMPATIBLE_IOCTL(RTC_PIE_OFF)
236COMPATIBLE_IOCTL(RTC_WIE_ON)
237COMPATIBLE_IOCTL(RTC_WIE_OFF)
238COMPATIBLE_IOCTL(RTC_ALM_SET)
239COMPATIBLE_IOCTL(RTC_ALM_READ)
240COMPATIBLE_IOCTL(RTC_RD_TIME)
241COMPATIBLE_IOCTL(RTC_SET_TIME)
242COMPATIBLE_IOCTL(RTC_WKALM_SET)
243COMPATIBLE_IOCTL(RTC_WKALM_RD)
244/*
245 * These two are only for the sbus rtc driver, but
246 * hwclock tries them on every rtc device first when
247 * running on sparc. On other architectures the entries
248 * are useless but harmless.
249 */
250COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
251COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
252/* Little m */
253COMPATIBLE_IOCTL(MTIOCTOP)
254/* Socket level stuff */
255COMPATIBLE_IOCTL(FIOQSIZE)
256COMPATIBLE_IOCTL(FIOSETOWN)
257COMPATIBLE_IOCTL(SIOCSPGRP)
258COMPATIBLE_IOCTL(FIOGETOWN)
259COMPATIBLE_IOCTL(SIOCGPGRP)
260COMPATIBLE_IOCTL(SIOCATMARK)
261COMPATIBLE_IOCTL(SIOCSIFLINK)
262COMPATIBLE_IOCTL(SIOCSIFENCAP)
263COMPATIBLE_IOCTL(SIOCGIFENCAP)
264COMPATIBLE_IOCTL(SIOCSIFNAME)
265COMPATIBLE_IOCTL(SIOCSARP)
266COMPATIBLE_IOCTL(SIOCGARP)
267COMPATIBLE_IOCTL(SIOCDARP)
268COMPATIBLE_IOCTL(SIOCSRARP)
269COMPATIBLE_IOCTL(SIOCGRARP)
270COMPATIBLE_IOCTL(SIOCDRARP)
271COMPATIBLE_IOCTL(SIOCADDDLCI)
272COMPATIBLE_IOCTL(SIOCDELDLCI)
273COMPATIBLE_IOCTL(SIOCGMIIPHY)
274COMPATIBLE_IOCTL(SIOCGMIIREG)
275COMPATIBLE_IOCTL(SIOCSMIIREG)
276COMPATIBLE_IOCTL(SIOCGIFVLAN)
277COMPATIBLE_IOCTL(SIOCSIFVLAN)
278COMPATIBLE_IOCTL(SIOCBRADDBR)
279COMPATIBLE_IOCTL(SIOCBRDELBR)
280/* SG stuff */
281COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
282COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
283COMPATIBLE_IOCTL(SG_EMULATED_HOST)
284ULONG_IOCTL(SG_SET_TRANSFORM)
285COMPATIBLE_IOCTL(SG_GET_TRANSFORM)
286COMPATIBLE_IOCTL(SG_SET_RESERVED_SIZE)
287COMPATIBLE_IOCTL(SG_GET_RESERVED_SIZE)
288COMPATIBLE_IOCTL(SG_GET_SCSI_ID)
289COMPATIBLE_IOCTL(SG_SET_FORCE_LOW_DMA)
290COMPATIBLE_IOCTL(SG_GET_LOW_DMA)
291COMPATIBLE_IOCTL(SG_SET_FORCE_PACK_ID)
292COMPATIBLE_IOCTL(SG_GET_PACK_ID)
293COMPATIBLE_IOCTL(SG_GET_NUM_WAITING)
294COMPATIBLE_IOCTL(SG_SET_DEBUG)
295COMPATIBLE_IOCTL(SG_GET_SG_TABLESIZE)
296COMPATIBLE_IOCTL(SG_GET_COMMAND_Q)
297COMPATIBLE_IOCTL(SG_SET_COMMAND_Q)
298COMPATIBLE_IOCTL(SG_GET_VERSION_NUM)
299COMPATIBLE_IOCTL(SG_NEXT_CMD_LEN)
300COMPATIBLE_IOCTL(SG_SCSI_RESET)
301COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
302COMPATIBLE_IOCTL(SG_SET_KEEP_ORPHAN)
303COMPATIBLE_IOCTL(SG_GET_KEEP_ORPHAN)
304/* PPP stuff */
305COMPATIBLE_IOCTL(PPPIOCGFLAGS)
306COMPATIBLE_IOCTL(PPPIOCSFLAGS)
307COMPATIBLE_IOCTL(PPPIOCGASYNCMAP)
308COMPATIBLE_IOCTL(PPPIOCSASYNCMAP)
309COMPATIBLE_IOCTL(PPPIOCGUNIT)
310COMPATIBLE_IOCTL(PPPIOCGRASYNCMAP)
311COMPATIBLE_IOCTL(PPPIOCSRASYNCMAP)
312COMPATIBLE_IOCTL(PPPIOCGMRU)
313COMPATIBLE_IOCTL(PPPIOCSMRU)
314COMPATIBLE_IOCTL(PPPIOCSMAXCID)
315COMPATIBLE_IOCTL(PPPIOCGXASYNCMAP)
316COMPATIBLE_IOCTL(PPPIOCSXASYNCMAP)
317COMPATIBLE_IOCTL(PPPIOCXFERUNIT)
318/* PPPIOCSCOMPRESS is translated */
319COMPATIBLE_IOCTL(PPPIOCGNPMODE)
320COMPATIBLE_IOCTL(PPPIOCSNPMODE)
321COMPATIBLE_IOCTL(PPPIOCGDEBUG)
322COMPATIBLE_IOCTL(PPPIOCSDEBUG)
323/* PPPIOCSPASS is translated */
324/* PPPIOCSACTIVE is translated */
325/* PPPIOCGIDLE is translated */
326COMPATIBLE_IOCTL(PPPIOCNEWUNIT)
327COMPATIBLE_IOCTL(PPPIOCATTACH)
328COMPATIBLE_IOCTL(PPPIOCDETACH)
329COMPATIBLE_IOCTL(PPPIOCSMRRU)
330COMPATIBLE_IOCTL(PPPIOCCONNECT)
331COMPATIBLE_IOCTL(PPPIOCDISCONN)
332COMPATIBLE_IOCTL(PPPIOCATTCHAN)
333COMPATIBLE_IOCTL(PPPIOCGCHAN)
334/* PPPOX */
335COMPATIBLE_IOCTL(PPPOEIOCSFWD)
336COMPATIBLE_IOCTL(PPPOEIOCDFWD)
337/* LP */
338COMPATIBLE_IOCTL(LPGETSTATUS)
339/* ppdev */
340COMPATIBLE_IOCTL(PPSETMODE)
341COMPATIBLE_IOCTL(PPRSTATUS)
342COMPATIBLE_IOCTL(PPRCONTROL)
343COMPATIBLE_IOCTL(PPWCONTROL)
344COMPATIBLE_IOCTL(PPFCONTROL)
345COMPATIBLE_IOCTL(PPRDATA)
346COMPATIBLE_IOCTL(PPWDATA)
347COMPATIBLE_IOCTL(PPCLAIM)
348COMPATIBLE_IOCTL(PPRELEASE)
349COMPATIBLE_IOCTL(PPYIELD)
350COMPATIBLE_IOCTL(PPEXCL)
351COMPATIBLE_IOCTL(PPDATADIR)
352COMPATIBLE_IOCTL(PPNEGOT)
353COMPATIBLE_IOCTL(PPWCTLONIRQ)
354COMPATIBLE_IOCTL(PPCLRIRQ)
355COMPATIBLE_IOCTL(PPSETPHASE)
356COMPATIBLE_IOCTL(PPGETMODES)
357COMPATIBLE_IOCTL(PPGETMODE)
358COMPATIBLE_IOCTL(PPGETPHASE)
359COMPATIBLE_IOCTL(PPGETFLAGS)
360COMPATIBLE_IOCTL(PPSETFLAGS)
361/* CDROM stuff */
362COMPATIBLE_IOCTL(CDROMPAUSE)
363COMPATIBLE_IOCTL(CDROMRESUME)
364COMPATIBLE_IOCTL(CDROMPLAYMSF)
365COMPATIBLE_IOCTL(CDROMPLAYTRKIND)
366COMPATIBLE_IOCTL(CDROMREADTOCHDR)
367COMPATIBLE_IOCTL(CDROMREADTOCENTRY)
368COMPATIBLE_IOCTL(CDROMSTOP)
369COMPATIBLE_IOCTL(CDROMSTART)
370COMPATIBLE_IOCTL(CDROMEJECT)
371COMPATIBLE_IOCTL(CDROMVOLCTRL)
372COMPATIBLE_IOCTL(CDROMSUBCHNL)
373ULONG_IOCTL(CDROMEJECT_SW)
374COMPATIBLE_IOCTL(CDROMMULTISESSION)
375COMPATIBLE_IOCTL(CDROM_GET_MCN)
376COMPATIBLE_IOCTL(CDROMRESET)
377COMPATIBLE_IOCTL(CDROMVOLREAD)
378COMPATIBLE_IOCTL(CDROMSEEK)
379COMPATIBLE_IOCTL(CDROMPLAYBLK)
380COMPATIBLE_IOCTL(CDROMCLOSETRAY)
381ULONG_IOCTL(CDROM_SET_OPTIONS)
382ULONG_IOCTL(CDROM_CLEAR_OPTIONS)
383ULONG_IOCTL(CDROM_SELECT_SPEED)
384ULONG_IOCTL(CDROM_SELECT_DISC)
385ULONG_IOCTL(CDROM_MEDIA_CHANGED)
386ULONG_IOCTL(CDROM_DRIVE_STATUS)
387COMPATIBLE_IOCTL(CDROM_DISC_STATUS)
388COMPATIBLE_IOCTL(CDROM_CHANGER_NSLOTS)
389ULONG_IOCTL(CDROM_LOCKDOOR)
390ULONG_IOCTL(CDROM_DEBUG)
391COMPATIBLE_IOCTL(CDROM_GET_CAPABILITY)
392/* Ignore cdrom.h about these next 5 ioctls, they absolutely do
393 * not take a struct cdrom_read, instead they take a struct cdrom_msf
394 * which is compatible.
395 */
396COMPATIBLE_IOCTL(CDROMREADMODE2)
397COMPATIBLE_IOCTL(CDROMREADMODE1)
398COMPATIBLE_IOCTL(CDROMREADRAW)
399COMPATIBLE_IOCTL(CDROMREADCOOKED)
400COMPATIBLE_IOCTL(CDROMREADALL)
401/* DVD ioctls */
402COMPATIBLE_IOCTL(DVD_READ_STRUCT)
403COMPATIBLE_IOCTL(DVD_WRITE_STRUCT)
404COMPATIBLE_IOCTL(DVD_AUTH)
405/* pktcdvd */
406COMPATIBLE_IOCTL(PACKET_CTRL_CMD)
407/* Big A */
408/* sparc only */
409/* Big Q for sound/OSS */
410COMPATIBLE_IOCTL(SNDCTL_SEQ_RESET)
411COMPATIBLE_IOCTL(SNDCTL_SEQ_SYNC)
412COMPATIBLE_IOCTL(SNDCTL_SYNTH_INFO)
413COMPATIBLE_IOCTL(SNDCTL_SEQ_CTRLRATE)
414COMPATIBLE_IOCTL(SNDCTL_SEQ_GETOUTCOUNT)
415COMPATIBLE_IOCTL(SNDCTL_SEQ_GETINCOUNT)
416COMPATIBLE_IOCTL(SNDCTL_SEQ_PERCMODE)
417COMPATIBLE_IOCTL(SNDCTL_FM_LOAD_INSTR)
418COMPATIBLE_IOCTL(SNDCTL_SEQ_TESTMIDI)
419COMPATIBLE_IOCTL(SNDCTL_SEQ_RESETSAMPLES)
420COMPATIBLE_IOCTL(SNDCTL_SEQ_NRSYNTHS)
421COMPATIBLE_IOCTL(SNDCTL_SEQ_NRMIDIS)
422COMPATIBLE_IOCTL(SNDCTL_MIDI_INFO)
423COMPATIBLE_IOCTL(SNDCTL_SEQ_THRESHOLD)
424COMPATIBLE_IOCTL(SNDCTL_SYNTH_MEMAVL)
425COMPATIBLE_IOCTL(SNDCTL_FM_4OP_ENABLE)
426COMPATIBLE_IOCTL(SNDCTL_SEQ_PANIC)
427COMPATIBLE_IOCTL(SNDCTL_SEQ_OUTOFBAND)
428COMPATIBLE_IOCTL(SNDCTL_SEQ_GETTIME)
429COMPATIBLE_IOCTL(SNDCTL_SYNTH_ID)
430COMPATIBLE_IOCTL(SNDCTL_SYNTH_CONTROL)
431COMPATIBLE_IOCTL(SNDCTL_SYNTH_REMOVESAMPLE)
432/* Big T for sound/OSS */
433COMPATIBLE_IOCTL(SNDCTL_TMR_TIMEBASE)
434COMPATIBLE_IOCTL(SNDCTL_TMR_START)
435COMPATIBLE_IOCTL(SNDCTL_TMR_STOP)
436COMPATIBLE_IOCTL(SNDCTL_TMR_CONTINUE)
437COMPATIBLE_IOCTL(SNDCTL_TMR_TEMPO)
438COMPATIBLE_IOCTL(SNDCTL_TMR_SOURCE)
439COMPATIBLE_IOCTL(SNDCTL_TMR_METRONOME)
440COMPATIBLE_IOCTL(SNDCTL_TMR_SELECT)
441/* Little m for sound/OSS */
442COMPATIBLE_IOCTL(SNDCTL_MIDI_PRETIME)
443COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUMODE)
444COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUCMD)
445/* Big P for sound/OSS */
446COMPATIBLE_IOCTL(SNDCTL_DSP_RESET)
447COMPATIBLE_IOCTL(SNDCTL_DSP_SYNC)
448COMPATIBLE_IOCTL(SNDCTL_DSP_SPEED)
449COMPATIBLE_IOCTL(SNDCTL_DSP_STEREO)
450COMPATIBLE_IOCTL(SNDCTL_DSP_GETBLKSIZE)
451COMPATIBLE_IOCTL(SNDCTL_DSP_CHANNELS)
452COMPATIBLE_IOCTL(SOUND_PCM_WRITE_FILTER)
453COMPATIBLE_IOCTL(SNDCTL_DSP_POST)
454COMPATIBLE_IOCTL(SNDCTL_DSP_SUBDIVIDE)
455COMPATIBLE_IOCTL(SNDCTL_DSP_SETFRAGMENT)
456COMPATIBLE_IOCTL(SNDCTL_DSP_GETFMTS)
457COMPATIBLE_IOCTL(SNDCTL_DSP_SETFMT)
458COMPATIBLE_IOCTL(SNDCTL_DSP_GETOSPACE)
459COMPATIBLE_IOCTL(SNDCTL_DSP_GETISPACE)
460COMPATIBLE_IOCTL(SNDCTL_DSP_NONBLOCK)
461COMPATIBLE_IOCTL(SNDCTL_DSP_GETCAPS)
462COMPATIBLE_IOCTL(SNDCTL_DSP_GETTRIGGER)
463COMPATIBLE_IOCTL(SNDCTL_DSP_SETTRIGGER)
464COMPATIBLE_IOCTL(SNDCTL_DSP_GETIPTR)
465COMPATIBLE_IOCTL(SNDCTL_DSP_GETOPTR)
466/* SNDCTL_DSP_MAPINBUF, XXX needs translation */
467/* SNDCTL_DSP_MAPOUTBUF, XXX needs translation */
468COMPATIBLE_IOCTL(SNDCTL_DSP_SETSYNCRO)
469COMPATIBLE_IOCTL(SNDCTL_DSP_SETDUPLEX)
470COMPATIBLE_IOCTL(SNDCTL_DSP_GETODELAY)
471COMPATIBLE_IOCTL(SNDCTL_DSP_PROFILE)
472COMPATIBLE_IOCTL(SOUND_PCM_READ_RATE)
473COMPATIBLE_IOCTL(SOUND_PCM_READ_CHANNELS)
474COMPATIBLE_IOCTL(SOUND_PCM_READ_BITS)
475COMPATIBLE_IOCTL(SOUND_PCM_READ_FILTER)
476/* Big C for sound/OSS */
477COMPATIBLE_IOCTL(SNDCTL_COPR_RESET)
478COMPATIBLE_IOCTL(SNDCTL_COPR_LOAD)
479COMPATIBLE_IOCTL(SNDCTL_COPR_RDATA)
480COMPATIBLE_IOCTL(SNDCTL_COPR_RCODE)
481COMPATIBLE_IOCTL(SNDCTL_COPR_WDATA)
482COMPATIBLE_IOCTL(SNDCTL_COPR_WCODE)
483COMPATIBLE_IOCTL(SNDCTL_COPR_RUN)
484COMPATIBLE_IOCTL(SNDCTL_COPR_HALT)
485COMPATIBLE_IOCTL(SNDCTL_COPR_SENDMSG)
486COMPATIBLE_IOCTL(SNDCTL_COPR_RCVMSG)
487/* Big M for sound/OSS */
488COMPATIBLE_IOCTL(SOUND_MIXER_READ_VOLUME)
489COMPATIBLE_IOCTL(SOUND_MIXER_READ_BASS)
490COMPATIBLE_IOCTL(SOUND_MIXER_READ_TREBLE)
491COMPATIBLE_IOCTL(SOUND_MIXER_READ_SYNTH)
492COMPATIBLE_IOCTL(SOUND_MIXER_READ_PCM)
493COMPATIBLE_IOCTL(SOUND_MIXER_READ_SPEAKER)
494COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE)
495COMPATIBLE_IOCTL(SOUND_MIXER_READ_MIC)
496COMPATIBLE_IOCTL(SOUND_MIXER_READ_CD)
497COMPATIBLE_IOCTL(SOUND_MIXER_READ_IMIX)
498COMPATIBLE_IOCTL(SOUND_MIXER_READ_ALTPCM)
499COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECLEV)
500COMPATIBLE_IOCTL(SOUND_MIXER_READ_IGAIN)
501COMPATIBLE_IOCTL(SOUND_MIXER_READ_OGAIN)
502COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE1)
503COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE2)
504COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE3)
505COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL1))
506COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL2))
507COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL3))
508COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEIN))
509COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEOUT))
510COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_VIDEO))
511COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_RADIO))
512COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_MONITOR))
513COMPATIBLE_IOCTL(SOUND_MIXER_READ_MUTE)
514/* SOUND_MIXER_READ_ENHANCE, same value as READ_MUTE */
515/* SOUND_MIXER_READ_LOUD, same value as READ_MUTE */
516COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECSRC)
517COMPATIBLE_IOCTL(SOUND_MIXER_READ_DEVMASK)
518COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECMASK)
519COMPATIBLE_IOCTL(SOUND_MIXER_READ_STEREODEVS)
520COMPATIBLE_IOCTL(SOUND_MIXER_READ_CAPS)
521COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_VOLUME)
522COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_BASS)
523COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_TREBLE)
524COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SYNTH)
525COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_PCM)
526COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SPEAKER)
527COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE)
528COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MIC)
529COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_CD)
530COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IMIX)
531COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_ALTPCM)
532COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECLEV)
533COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IGAIN)
534COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_OGAIN)
535COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE1)
536COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE2)
537COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE3)
538COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL1))
539COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL2))
540COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL3))
541COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEIN))
542COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEOUT))
543COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_VIDEO))
544COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_RADIO))
545COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_MONITOR))
546COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MUTE)
547/* SOUND_MIXER_WRITE_ENHANCE, same value as WRITE_MUTE */
548/* SOUND_MIXER_WRITE_LOUD, same value as WRITE_MUTE */
549COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECSRC)
550COMPATIBLE_IOCTL(SOUND_MIXER_INFO)
551COMPATIBLE_IOCTL(SOUND_OLD_MIXER_INFO)
552COMPATIBLE_IOCTL(SOUND_MIXER_ACCESS)
553COMPATIBLE_IOCTL(SOUND_MIXER_AGC)
554COMPATIBLE_IOCTL(SOUND_MIXER_3DSE)
555COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE1)
556COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE2)
557COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE3)
558COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE4)
559COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5)
560COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS)
561COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS)
562COMPATIBLE_IOCTL(OSS_GETVERSION)
563/* AUTOFS */
564ULONG_IOCTL(AUTOFS_IOC_READY)
565ULONG_IOCTL(AUTOFS_IOC_FAIL)
566COMPATIBLE_IOCTL(AUTOFS_IOC_CATATONIC)
567COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER)
568COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE)
569COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
570COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOSUBVER)
571COMPATIBLE_IOCTL(AUTOFS_IOC_ASKREGHOST)
572COMPATIBLE_IOCTL(AUTOFS_IOC_TOGGLEREGHOST)
573COMPATIBLE_IOCTL(AUTOFS_IOC_ASKUMOUNT)
574/* Raw devices */
575COMPATIBLE_IOCTL(RAW_SETBIND)
576COMPATIBLE_IOCTL(RAW_GETBIND)
577/* SMB ioctls which do not need any translations */
578COMPATIBLE_IOCTL(SMB_IOC_NEWCONN)
579/* Little a */
580COMPATIBLE_IOCTL(ATMSIGD_CTRL)
581COMPATIBLE_IOCTL(ATMARPD_CTRL)
582COMPATIBLE_IOCTL(ATMLEC_CTRL)
583COMPATIBLE_IOCTL(ATMLEC_MCAST)
584COMPATIBLE_IOCTL(ATMLEC_DATA)
585COMPATIBLE_IOCTL(ATM_SETSC)
586COMPATIBLE_IOCTL(SIOCSIFATMTCP)
587COMPATIBLE_IOCTL(SIOCMKCLIP)
588COMPATIBLE_IOCTL(ATMARP_MKIP)
589COMPATIBLE_IOCTL(ATMARP_SETENTRY)
590COMPATIBLE_IOCTL(ATMARP_ENCAP)
591COMPATIBLE_IOCTL(ATMTCP_CREATE)
592COMPATIBLE_IOCTL(ATMTCP_REMOVE)
593COMPATIBLE_IOCTL(ATMMPC_CTRL)
594COMPATIBLE_IOCTL(ATMMPC_DATA)
595/* Watchdog */
596COMPATIBLE_IOCTL(WDIOC_GETSUPPORT)
597COMPATIBLE_IOCTL(WDIOC_GETSTATUS)
598COMPATIBLE_IOCTL(WDIOC_GETBOOTSTATUS)
599COMPATIBLE_IOCTL(WDIOC_GETTEMP)
600COMPATIBLE_IOCTL(WDIOC_SETOPTIONS)
601COMPATIBLE_IOCTL(WDIOC_KEEPALIVE)
602COMPATIBLE_IOCTL(WDIOC_SETTIMEOUT)
603COMPATIBLE_IOCTL(WDIOC_GETTIMEOUT)
604/* Big R */
605COMPATIBLE_IOCTL(RNDGETENTCNT)
606COMPATIBLE_IOCTL(RNDADDTOENTCNT)
607COMPATIBLE_IOCTL(RNDGETPOOL)
608COMPATIBLE_IOCTL(RNDADDENTROPY)
609COMPATIBLE_IOCTL(RNDZAPENTCNT)
610COMPATIBLE_IOCTL(RNDCLEARPOOL)
611/* Bluetooth */
612COMPATIBLE_IOCTL(HCIDEVUP)
613COMPATIBLE_IOCTL(HCIDEVDOWN)
614COMPATIBLE_IOCTL(HCIDEVRESET)
615COMPATIBLE_IOCTL(HCIDEVRESTAT)
616COMPATIBLE_IOCTL(HCIGETDEVLIST)
617COMPATIBLE_IOCTL(HCIGETDEVINFO)
618COMPATIBLE_IOCTL(HCIGETCONNLIST)
619COMPATIBLE_IOCTL(HCIGETCONNINFO)
620COMPATIBLE_IOCTL(HCISETRAW)
621COMPATIBLE_IOCTL(HCISETSCAN)
622COMPATIBLE_IOCTL(HCISETAUTH)
623COMPATIBLE_IOCTL(HCISETENCRYPT)
624COMPATIBLE_IOCTL(HCISETPTYPE)
625COMPATIBLE_IOCTL(HCISETLINKPOL)
626COMPATIBLE_IOCTL(HCISETLINKMODE)
627COMPATIBLE_IOCTL(HCISETACLMTU)
628COMPATIBLE_IOCTL(HCISETSCOMTU)
629COMPATIBLE_IOCTL(HCIINQUIRY)
630COMPATIBLE_IOCTL(HCIUARTSETPROTO)
631COMPATIBLE_IOCTL(HCIUARTGETPROTO)
632COMPATIBLE_IOCTL(RFCOMMCREATEDEV)
633COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
634COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)
635COMPATIBLE_IOCTL(RFCOMMGETDEVINFO)
636COMPATIBLE_IOCTL(RFCOMMSTEALDLC)
637COMPATIBLE_IOCTL(BNEPCONNADD)
638COMPATIBLE_IOCTL(BNEPCONNDEL)
639COMPATIBLE_IOCTL(BNEPGETCONNLIST)
640COMPATIBLE_IOCTL(BNEPGETCONNINFO)
641COMPATIBLE_IOCTL(CMTPCONNADD)
642COMPATIBLE_IOCTL(CMTPCONNDEL)
643COMPATIBLE_IOCTL(CMTPGETCONNLIST)
644COMPATIBLE_IOCTL(CMTPGETCONNINFO)
645COMPATIBLE_IOCTL(HIDPCONNADD)
646COMPATIBLE_IOCTL(HIDPCONNDEL)
647COMPATIBLE_IOCTL(HIDPGETCONNLIST)
648COMPATIBLE_IOCTL(HIDPGETCONNINFO)
649/* CAPI */
650COMPATIBLE_IOCTL(CAPI_REGISTER)
651COMPATIBLE_IOCTL(CAPI_GET_MANUFACTURER)
652COMPATIBLE_IOCTL(CAPI_GET_VERSION)
653COMPATIBLE_IOCTL(CAPI_GET_SERIAL)
654COMPATIBLE_IOCTL(CAPI_GET_PROFILE)
655COMPATIBLE_IOCTL(CAPI_MANUFACTURER_CMD)
656COMPATIBLE_IOCTL(CAPI_GET_ERRCODE)
657COMPATIBLE_IOCTL(CAPI_INSTALLED)
658COMPATIBLE_IOCTL(CAPI_GET_FLAGS)
659COMPATIBLE_IOCTL(CAPI_SET_FLAGS)
660COMPATIBLE_IOCTL(CAPI_CLR_FLAGS)
661COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT)
662COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT)
663/* Siemens Gigaset */
664COMPATIBLE_IOCTL(GIGASET_REDIR)
665COMPATIBLE_IOCTL(GIGASET_CONFIG)
666COMPATIBLE_IOCTL(GIGASET_BRKCHARS)
667COMPATIBLE_IOCTL(GIGASET_VERSION)
668/* Misc. */
669COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */
670COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */
671COMPATIBLE_IOCTL(PCIIOC_CONTROLLER)
672COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_IO)
673COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_MEM)
674COMPATIBLE_IOCTL(PCIIOC_WRITE_COMBINE)
675/* USB */
676COMPATIBLE_IOCTL(USBDEVFS_RESETEP)
677COMPATIBLE_IOCTL(USBDEVFS_SETINTERFACE)
678COMPATIBLE_IOCTL(USBDEVFS_SETCONFIGURATION)
679COMPATIBLE_IOCTL(USBDEVFS_GETDRIVER)
680COMPATIBLE_IOCTL(USBDEVFS_DISCARDURB)
681COMPATIBLE_IOCTL(USBDEVFS_CLAIMINTERFACE)
682COMPATIBLE_IOCTL(USBDEVFS_RELEASEINTERFACE)
683COMPATIBLE_IOCTL(USBDEVFS_CONNECTINFO)
684COMPATIBLE_IOCTL(USBDEVFS_HUB_PORTINFO)
685COMPATIBLE_IOCTL(USBDEVFS_RESET)
686COMPATIBLE_IOCTL(USBDEVFS_SUBMITURB32)
687COMPATIBLE_IOCTL(USBDEVFS_REAPURB32)
688COMPATIBLE_IOCTL(USBDEVFS_REAPURBNDELAY32)
689COMPATIBLE_IOCTL(USBDEVFS_CLEAR_HALT)
690/* MTD */
691COMPATIBLE_IOCTL(MEMGETINFO)
692COMPATIBLE_IOCTL(MEMERASE)
693COMPATIBLE_IOCTL(MEMLOCK)
694COMPATIBLE_IOCTL(MEMUNLOCK)
695COMPATIBLE_IOCTL(MEMGETREGIONCOUNT)
696COMPATIBLE_IOCTL(MEMGETREGIONINFO)
697COMPATIBLE_IOCTL(MEMGETBADBLOCK)
698COMPATIBLE_IOCTL(MEMSETBADBLOCK)
699/* NBD */
700ULONG_IOCTL(NBD_SET_SOCK)
701ULONG_IOCTL(NBD_SET_BLKSIZE)
702ULONG_IOCTL(NBD_SET_SIZE)
703COMPATIBLE_IOCTL(NBD_DO_IT)
704COMPATIBLE_IOCTL(NBD_CLEAR_SOCK)
705COMPATIBLE_IOCTL(NBD_CLEAR_QUE)
706COMPATIBLE_IOCTL(NBD_PRINT_DEBUG)
707ULONG_IOCTL(NBD_SET_SIZE_BLOCKS)
708COMPATIBLE_IOCTL(NBD_DISCONNECT)
709/* i2c */
710COMPATIBLE_IOCTL(I2C_SLAVE)
711COMPATIBLE_IOCTL(I2C_SLAVE_FORCE)
712COMPATIBLE_IOCTL(I2C_TENBIT)
713COMPATIBLE_IOCTL(I2C_PEC)
714COMPATIBLE_IOCTL(I2C_RETRIES)
715COMPATIBLE_IOCTL(I2C_TIMEOUT)
716/* wireless */
717COMPATIBLE_IOCTL(SIOCSIWCOMMIT)
718COMPATIBLE_IOCTL(SIOCGIWNAME)
719COMPATIBLE_IOCTL(SIOCSIWNWID)
720COMPATIBLE_IOCTL(SIOCGIWNWID)
721COMPATIBLE_IOCTL(SIOCSIWFREQ)
722COMPATIBLE_IOCTL(SIOCGIWFREQ)
723COMPATIBLE_IOCTL(SIOCSIWMODE)
724COMPATIBLE_IOCTL(SIOCGIWMODE)
725COMPATIBLE_IOCTL(SIOCSIWSENS)
726COMPATIBLE_IOCTL(SIOCGIWSENS)
727COMPATIBLE_IOCTL(SIOCSIWRANGE)
728COMPATIBLE_IOCTL(SIOCSIWPRIV)
729COMPATIBLE_IOCTL(SIOCGIWPRIV)
730COMPATIBLE_IOCTL(SIOCSIWSTATS)
731COMPATIBLE_IOCTL(SIOCGIWSTATS)
732COMPATIBLE_IOCTL(SIOCSIWAP)
733COMPATIBLE_IOCTL(SIOCGIWAP)
734COMPATIBLE_IOCTL(SIOCSIWSCAN)
735COMPATIBLE_IOCTL(SIOCSIWRATE)
736COMPATIBLE_IOCTL(SIOCGIWRATE)
737COMPATIBLE_IOCTL(SIOCSIWRTS)
738COMPATIBLE_IOCTL(SIOCGIWRTS)
739COMPATIBLE_IOCTL(SIOCSIWFRAG)
740COMPATIBLE_IOCTL(SIOCGIWFRAG)
741COMPATIBLE_IOCTL(SIOCSIWTXPOW)
742COMPATIBLE_IOCTL(SIOCGIWTXPOW)
743COMPATIBLE_IOCTL(SIOCSIWRETRY)
744COMPATIBLE_IOCTL(SIOCGIWRETRY)
745COMPATIBLE_IOCTL(SIOCSIWPOWER)
746COMPATIBLE_IOCTL(SIOCGIWPOWER)
747/* hiddev */
748COMPATIBLE_IOCTL(HIDIOCGVERSION)
749COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
750COMPATIBLE_IOCTL(HIDIOCGDEVINFO)
751COMPATIBLE_IOCTL(HIDIOCGSTRING)
752COMPATIBLE_IOCTL(HIDIOCINITREPORT)
753COMPATIBLE_IOCTL(HIDIOCGREPORT)
754COMPATIBLE_IOCTL(HIDIOCSREPORT)
755COMPATIBLE_IOCTL(HIDIOCGREPORTINFO)
756COMPATIBLE_IOCTL(HIDIOCGFIELDINFO)
757COMPATIBLE_IOCTL(HIDIOCGUSAGE)
758COMPATIBLE_IOCTL(HIDIOCSUSAGE)
759COMPATIBLE_IOCTL(HIDIOCGUCODE)
760COMPATIBLE_IOCTL(HIDIOCGFLAG)
761COMPATIBLE_IOCTL(HIDIOCSFLAG)
762COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINDEX)
763COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINFO)
764/* dvb */
765COMPATIBLE_IOCTL(AUDIO_STOP)
766COMPATIBLE_IOCTL(AUDIO_PLAY)
767COMPATIBLE_IOCTL(AUDIO_PAUSE)
768COMPATIBLE_IOCTL(AUDIO_CONTINUE)
769COMPATIBLE_IOCTL(AUDIO_SELECT_SOURCE)
770COMPATIBLE_IOCTL(AUDIO_SET_MUTE)
771COMPATIBLE_IOCTL(AUDIO_SET_AV_SYNC)
772COMPATIBLE_IOCTL(AUDIO_SET_BYPASS_MODE)
773COMPATIBLE_IOCTL(AUDIO_CHANNEL_SELECT)
774COMPATIBLE_IOCTL(AUDIO_GET_STATUS)
775COMPATIBLE_IOCTL(AUDIO_GET_CAPABILITIES)
776COMPATIBLE_IOCTL(AUDIO_CLEAR_BUFFER)
777COMPATIBLE_IOCTL(AUDIO_SET_ID)
778COMPATIBLE_IOCTL(AUDIO_SET_MIXER)
779COMPATIBLE_IOCTL(AUDIO_SET_STREAMTYPE)
780COMPATIBLE_IOCTL(AUDIO_SET_EXT_ID)
781COMPATIBLE_IOCTL(AUDIO_SET_ATTRIBUTES)
782COMPATIBLE_IOCTL(AUDIO_SET_KARAOKE)
783COMPATIBLE_IOCTL(DMX_START)
784COMPATIBLE_IOCTL(DMX_STOP)
785COMPATIBLE_IOCTL(DMX_SET_FILTER)
786COMPATIBLE_IOCTL(DMX_SET_PES_FILTER)
787COMPATIBLE_IOCTL(DMX_SET_BUFFER_SIZE)
788COMPATIBLE_IOCTL(DMX_GET_PES_PIDS)
789COMPATIBLE_IOCTL(DMX_GET_CAPS)
790COMPATIBLE_IOCTL(DMX_SET_SOURCE)
791COMPATIBLE_IOCTL(DMX_GET_STC)
792COMPATIBLE_IOCTL(FE_GET_INFO)
793COMPATIBLE_IOCTL(FE_DISEQC_RESET_OVERLOAD)
794COMPATIBLE_IOCTL(FE_DISEQC_SEND_MASTER_CMD)
795COMPATIBLE_IOCTL(FE_DISEQC_RECV_SLAVE_REPLY)
796COMPATIBLE_IOCTL(FE_DISEQC_SEND_BURST)
797COMPATIBLE_IOCTL(FE_SET_TONE)
798COMPATIBLE_IOCTL(FE_SET_VOLTAGE)
799COMPATIBLE_IOCTL(FE_ENABLE_HIGH_LNB_VOLTAGE)
800COMPATIBLE_IOCTL(FE_READ_STATUS)
801COMPATIBLE_IOCTL(FE_READ_BER)
802COMPATIBLE_IOCTL(FE_READ_SIGNAL_STRENGTH)
803COMPATIBLE_IOCTL(FE_READ_SNR)
804COMPATIBLE_IOCTL(FE_READ_UNCORRECTED_BLOCKS)
805COMPATIBLE_IOCTL(FE_SET_FRONTEND)
806COMPATIBLE_IOCTL(FE_GET_FRONTEND)
807COMPATIBLE_IOCTL(FE_GET_EVENT)
808COMPATIBLE_IOCTL(FE_DISHNETWORK_SEND_LEGACY_CMD)
809COMPATIBLE_IOCTL(VIDEO_STOP)
810COMPATIBLE_IOCTL(VIDEO_PLAY)
811COMPATIBLE_IOCTL(VIDEO_FREEZE)
812COMPATIBLE_IOCTL(VIDEO_CONTINUE)
813COMPATIBLE_IOCTL(VIDEO_SELECT_SOURCE)
814COMPATIBLE_IOCTL(VIDEO_SET_BLANK)
815COMPATIBLE_IOCTL(VIDEO_GET_STATUS)
816COMPATIBLE_IOCTL(VIDEO_SET_DISPLAY_FORMAT)
817COMPATIBLE_IOCTL(VIDEO_FAST_FORWARD)
818COMPATIBLE_IOCTL(VIDEO_SLOWMOTION)
819COMPATIBLE_IOCTL(VIDEO_GET_CAPABILITIES)
820COMPATIBLE_IOCTL(VIDEO_CLEAR_BUFFER)
821COMPATIBLE_IOCTL(VIDEO_SET_ID)
822COMPATIBLE_IOCTL(VIDEO_SET_STREAMTYPE)
823COMPATIBLE_IOCTL(VIDEO_SET_FORMAT)
824COMPATIBLE_IOCTL(VIDEO_SET_SYSTEM)
825COMPATIBLE_IOCTL(VIDEO_SET_HIGHLIGHT)
826COMPATIBLE_IOCTL(VIDEO_SET_SPU)
827COMPATIBLE_IOCTL(VIDEO_GET_NAVI)
828COMPATIBLE_IOCTL(VIDEO_SET_ATTRIBUTES)
829COMPATIBLE_IOCTL(VIDEO_GET_SIZE)
830COMPATIBLE_IOCTL(VIDEO_GET_FRAME_RATE)
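The file removed above is not standalone C: it is a list of COMPATIBLE_IOCTL()/ULONG_IOCTL() invocations meant to be #included by code that defines those macros first, recording which ioctl commands 32-bit userspace may issue without any argument translation. The sketch below shows that idiom with invented names only; it is not the kernel's actual table definition.

/* Hypothetical illustration of how such a list can be consumed. */
struct compat_ioctl_entry {
	unsigned int	cmd;				/* ioctl command number */
	int		(*handler)(unsigned int fd, unsigned int cmd,
				   unsigned long arg);	/* NULL: pass arg through untouched */
};

static int forward_ulong_arg(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	/* the argument is a plain integer: zero-extend it and forward */
	return 0;
}

#define COMPATIBLE_IOCTL(cmd)	{ (cmd), NULL },
#define ULONG_IOCTL(cmd)	{ (cmd), forward_ulong_arg },

static const struct compat_ioctl_entry compat_ioctl_table[] = {
#include "compat_ioctl_list.h"	/* a list shaped like the file deleted above */
};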
diff --git a/include/linux/console.h b/include/linux/console.h
index de25ee3b7919..62ef6e11d0d2 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -51,7 +51,7 @@ struct consw {
51 int (*con_scrolldelta)(struct vc_data *, int); 51 int (*con_scrolldelta)(struct vc_data *, int);
52 int (*con_set_origin)(struct vc_data *); 52 int (*con_set_origin)(struct vc_data *);
53 void (*con_save_screen)(struct vc_data *); 53 void (*con_save_screen)(struct vc_data *);
54 u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8); 54 u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
55 void (*con_invert_region)(struct vc_data *, u16 *, int); 55 void (*con_invert_region)(struct vc_data *, u16 *, int);
56 u16 *(*con_screen_pos)(struct vc_data *, int); 56 u16 *(*con_screen_pos)(struct vc_data *, int);
57 unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *); 57 unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *);
@@ -92,9 +92,8 @@ void give_up_console(const struct consw *sw);
92#define CON_BOOT (8) 92#define CON_BOOT (8)
93#define CON_ANYTIME (16) /* Safe to call when cpu is offline */ 93#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
94 94
95struct console 95struct console {
96{ 96 char name[16];
97 char name[8];
98 void (*write)(struct console *, const char *, unsigned); 97 void (*write)(struct console *, const char *, unsigned);
99 int (*read)(struct console *, char *, unsigned); 98 int (*read)(struct console *, char *, unsigned);
100 struct tty_driver *(*device)(struct console *, int *); 99 struct tty_driver *(*device)(struct console *, int *);
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index a86162b26c0d..a461f76fb004 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -37,6 +37,7 @@ struct vc_data {
37 unsigned char vc_color; /* Foreground & background */ 37 unsigned char vc_color; /* Foreground & background */
38 unsigned char vc_s_color; /* Saved foreground & background */ 38 unsigned char vc_s_color; /* Saved foreground & background */
39 unsigned char vc_ulcolor; /* Color for underline mode */ 39 unsigned char vc_ulcolor; /* Color for underline mode */
40 unsigned char vc_itcolor;
40 unsigned char vc_halfcolor; /* Color for half intensity mode */ 41 unsigned char vc_halfcolor; /* Color for half intensity mode */
41 /* cursor */ 42 /* cursor */
42 unsigned int vc_cursor_type; 43 unsigned int vc_cursor_type;
@@ -71,10 +72,12 @@ struct vc_data {
71 unsigned int vc_deccolm : 1; /* 80/132 Column Mode */ 72 unsigned int vc_deccolm : 1; /* 80/132 Column Mode */
72 /* attribute flags */ 73 /* attribute flags */
73 unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ 74 unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */
75 unsigned int vc_italic:1;
74 unsigned int vc_underline : 1; 76 unsigned int vc_underline : 1;
75 unsigned int vc_blink : 1; 77 unsigned int vc_blink : 1;
76 unsigned int vc_reverse : 1; 78 unsigned int vc_reverse : 1;
77 unsigned int vc_s_intensity : 2; /* saved rendition */ 79 unsigned int vc_s_intensity : 2; /* saved rendition */
80 unsigned int vc_s_italic:1;
78 unsigned int vc_s_underline : 1; 81 unsigned int vc_s_underline : 1;
79 unsigned int vc_s_blink : 1; 82 unsigned int vc_s_blink : 1;
80 unsigned int vc_s_reverse : 1; 83 unsigned int vc_s_reverse : 1;
diff --git a/include/asm-x86_64/const.h b/include/linux/const.h
index 54fb08f3db9b..07b300bfe34b 100644
--- a/include/asm-x86_64/const.h
+++ b/include/linux/const.h
@@ -1,11 +1,11 @@
1/* const.h: Macros for dealing with constants. */ 1/* const.h: Macros for dealing with constants. */
2 2
3#ifndef _X86_64_CONST_H 3#ifndef _LINUX_CONST_H
4#define _X86_64_CONST_H 4#define _LINUX_CONST_H
5 5
6/* Some constant macros are used in both assembler and 6/* Some constant macros are used in both assembler and
7 * C code. Therefore we cannot annotate them always with 7 * C code. Therefore we cannot annotate them always with
8 * 'UL' and other type specificers unilaterally. We 8 * 'UL' and other type specifiers unilaterally. We
9 * use the following macros to deal with this. 9 * use the following macros to deal with this.
10 */ 10 */
11 11
@@ -16,5 +16,4 @@
16#define _AC(X,Y) __AC(X,Y) 16#define _AC(X,Y) __AC(X,Y)
17#endif 17#endif
18 18
19 19#endif /* !(_LINUX_CONST_H) */
20#endif /* !(_X86_64_CONST_H) */
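The comment carried over into linux/const.h explains the problem _AC() solves: a constant shared between C and assembler cannot always carry a C-only suffix such as UL, because the assembler will not parse it, so the suffix is pasted on only when the header is compiled as C. A typical use (the EXAMPLE_* names are illustrative, not from this patch):

#include <linux/const.h>

#define EXAMPLE_SHIFT	12
/* Seen as (1UL << 12) by the C compiler, as plain (1 << 12) in assembler. */
#define EXAMPLE_SIZE	(_AC(1, UL) << EXAMPLE_SHIFT)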
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c22b0dfcbcd2..3b2df2523f1d 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -41,6 +41,9 @@ extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
41extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs); 41extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs);
42extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs); 42extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs);
43 43
44extern struct sysdev_attribute attr_sched_mc_power_savings;
45extern struct sysdev_attribute attr_sched_smt_power_savings;
46extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
44 47
45#ifdef CONFIG_HOTPLUG_CPU 48#ifdef CONFIG_HOTPLUG_CPU
46extern void unregister_cpu(struct cpu *cpu); 49extern void unregister_cpu(struct cpu *cpu);
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 46d8254c1a79..72aa00cc4b2d 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -67,6 +67,8 @@
67#ifndef _LINUX_CYCLADES_H 67#ifndef _LINUX_CYCLADES_H
68#define _LINUX_CYCLADES_H 68#define _LINUX_CYCLADES_H
69 69
70#include <linux/types.h>
71
70struct cyclades_monitor { 72struct cyclades_monitor {
71 unsigned long int_count; 73 unsigned long int_count;
72 unsigned long char_count; 74 unsigned long char_count;
@@ -108,7 +110,6 @@ struct cyclades_idle_stats {
108#define CYZSETPOLLCYCLE 0x43590e 110#define CYZSETPOLLCYCLE 0x43590e
109#define CYZGETPOLLCYCLE 0x43590f 111#define CYZGETPOLLCYCLE 0x43590f
110#define CYGETCD1400VER 0x435910 112#define CYGETCD1400VER 0x435910
111#define CYGETCARDINFO 0x435911
112#define CYSETWAIT 0x435912 113#define CYSETWAIT 0x435912
113#define CYGETWAIT 0x435913 114#define CYGETWAIT 0x435913
114 115
@@ -149,14 +150,12 @@ struct CYZ_BOOT_CTRL {
149 * architectures and compilers. 150 * architectures and compilers.
150 */ 151 */
151 152
152#if defined(__alpha__) 153#include <asm/types.h>
153typedef unsigned long ucdouble; /* 64 bits, unsigned */ 154
154typedef unsigned int uclong; /* 32 bits, unsigned */ 155typedef __u64 ucdouble; /* 64 bits, unsigned */
155#else 156typedef __u32 uclong; /* 32 bits, unsigned */
156typedef unsigned long uclong; /* 32 bits, unsigned */ 157typedef __u16 ucshort; /* 16 bits, unsigned */
157#endif 158typedef __u8 ucchar; /* 8 bits, unsigned */
158typedef unsigned short ucshort; /* 16 bits, unsigned */
159typedef unsigned char ucchar; /* 8 bits, unsigned */
160 159
161/* 160/*
162 * Memory Window Sizes 161 * Memory Window Sizes
@@ -174,24 +173,24 @@ typedef unsigned char ucchar; /* 8 bits, unsigned */
174 */ 173 */
175 174
176struct CUSTOM_REG { 175struct CUSTOM_REG {
177 uclong fpga_id; /* FPGA Identification Register */ 176 __u32 fpga_id; /* FPGA Identification Register */
178 uclong fpga_version; /* FPGA Version Number Register */ 177 __u32 fpga_version; /* FPGA Version Number Register */
179 uclong cpu_start; /* CPU start Register (write) */ 178 __u32 cpu_start; /* CPU start Register (write) */
180 uclong cpu_stop; /* CPU stop Register (write) */ 179 __u32 cpu_stop; /* CPU stop Register (write) */
181 uclong misc_reg; /* Miscellaneous Register */ 180 __u32 misc_reg; /* Miscellaneous Register */
182 uclong idt_mode; /* IDT mode Register */ 181 __u32 idt_mode; /* IDT mode Register */
183 uclong uart_irq_status; /* UART IRQ status Register */ 182 __u32 uart_irq_status; /* UART IRQ status Register */
184 uclong clear_timer0_irq; /* Clear timer interrupt Register */ 183 __u32 clear_timer0_irq; /* Clear timer interrupt Register */
185 uclong clear_timer1_irq; /* Clear timer interrupt Register */ 184 __u32 clear_timer1_irq; /* Clear timer interrupt Register */
186 uclong clear_timer2_irq; /* Clear timer interrupt Register */ 185 __u32 clear_timer2_irq; /* Clear timer interrupt Register */
187 uclong test_register; /* Test Register */ 186 __u32 test_register; /* Test Register */
188 uclong test_count; /* Test Count Register */ 187 __u32 test_count; /* Test Count Register */
189 uclong timer_select; /* Timer select register */ 188 __u32 timer_select; /* Timer select register */
190 uclong pr_uart_irq_status; /* Prioritized UART IRQ stat Reg */ 189 __u32 pr_uart_irq_status; /* Prioritized UART IRQ stat Reg */
191 uclong ram_wait_state; /* RAM wait-state Register */ 190 __u32 ram_wait_state; /* RAM wait-state Register */
192 uclong uart_wait_state; /* UART wait-state Register */ 191 __u32 uart_wait_state; /* UART wait-state Register */
193 uclong timer_wait_state; /* timer wait-state Register */ 192 __u32 timer_wait_state; /* timer wait-state Register */
194 uclong ack_wait_state; /* ACK wait State Register */ 193 __u32 ack_wait_state; /* ACK wait State Register */
195}; 194};
196 195
197/* 196/*
@@ -201,34 +200,34 @@ struct CUSTOM_REG {
201 */ 200 */
202 201
203struct RUNTIME_9060 { 202struct RUNTIME_9060 {
204 uclong loc_addr_range; /* 00h - Local Address Range */ 203 __u32 loc_addr_range; /* 00h - Local Address Range */
205 uclong loc_addr_base; /* 04h - Local Address Base */ 204 __u32 loc_addr_base; /* 04h - Local Address Base */
206 uclong loc_arbitr; /* 08h - Local Arbitration */ 205 __u32 loc_arbitr; /* 08h - Local Arbitration */
207 uclong endian_descr; /* 0Ch - Big/Little Endian Descriptor */ 206 __u32 endian_descr; /* 0Ch - Big/Little Endian Descriptor */
208 uclong loc_rom_range; /* 10h - Local ROM Range */ 207 __u32 loc_rom_range; /* 10h - Local ROM Range */
209 uclong loc_rom_base; /* 14h - Local ROM Base */ 208 __u32 loc_rom_base; /* 14h - Local ROM Base */
210 uclong loc_bus_descr; /* 18h - Local Bus descriptor */ 209 __u32 loc_bus_descr; /* 18h - Local Bus descriptor */
211 uclong loc_range_mst; /* 1Ch - Local Range for Master to PCI */ 210 __u32 loc_range_mst; /* 1Ch - Local Range for Master to PCI */
212 uclong loc_base_mst; /* 20h - Local Base for Master PCI */ 211 __u32 loc_base_mst; /* 20h - Local Base for Master PCI */
213 uclong loc_range_io; /* 24h - Local Range for Master IO */ 212 __u32 loc_range_io; /* 24h - Local Range for Master IO */
214 uclong pci_base_mst; /* 28h - PCI Base for Master PCI */ 213 __u32 pci_base_mst; /* 28h - PCI Base for Master PCI */
215 uclong pci_conf_io; /* 2Ch - PCI configuration for Master IO */ 214 __u32 pci_conf_io; /* 2Ch - PCI configuration for Master IO */
216 uclong filler1; /* 30h */ 215 __u32 filler1; /* 30h */
217 uclong filler2; /* 34h */ 216 __u32 filler2; /* 34h */
218 uclong filler3; /* 38h */ 217 __u32 filler3; /* 38h */
219 uclong filler4; /* 3Ch */ 218 __u32 filler4; /* 3Ch */
220 uclong mail_box_0; /* 40h - Mail Box 0 */ 219 __u32 mail_box_0; /* 40h - Mail Box 0 */
221 uclong mail_box_1; /* 44h - Mail Box 1 */ 220 __u32 mail_box_1; /* 44h - Mail Box 1 */
222 uclong mail_box_2; /* 48h - Mail Box 2 */ 221 __u32 mail_box_2; /* 48h - Mail Box 2 */
223 uclong mail_box_3; /* 4Ch - Mail Box 3 */ 222 __u32 mail_box_3; /* 4Ch - Mail Box 3 */
224 uclong filler5; /* 50h */ 223 __u32 filler5; /* 50h */
225 uclong filler6; /* 54h */ 224 __u32 filler6; /* 54h */
226 uclong filler7; /* 58h */ 225 __u32 filler7; /* 58h */
227 uclong filler8; /* 5Ch */ 226 __u32 filler8; /* 5Ch */
228 uclong pci_doorbell; /* 60h - PCI to Local Doorbell */ 227 __u32 pci_doorbell; /* 60h - PCI to Local Doorbell */
229 uclong loc_doorbell; /* 64h - Local to PCI Doorbell */ 228 __u32 loc_doorbell; /* 64h - Local to PCI Doorbell */
230 uclong intr_ctrl_stat; /* 68h - Interrupt Control/Status */ 229 __u32 intr_ctrl_stat; /* 68h - Interrupt Control/Status */
231 uclong init_ctrl; /* 6Ch - EEPROM control, Init Control, etc */ 230 __u32 init_ctrl; /* 6Ch - EEPROM control, Init Control, etc */
232}; 231};
233 232
234/* Values for the Local Base Address re-map register */ 233/* Values for the Local Base Address re-map register */
@@ -270,8 +269,8 @@ struct RUNTIME_9060 {
270#define ZF_TINACT ZF_TINACT_DEF 269#define ZF_TINACT ZF_TINACT_DEF
271 270
272struct FIRM_ID { 271struct FIRM_ID {
273 uclong signature; /* ZFIRM/U signature */ 272 __u32 signature; /* ZFIRM/U signature */
274 uclong zfwctrl_addr; /* pointer to ZFW_CTRL structure */ 273 __u32 zfwctrl_addr; /* pointer to ZFW_CTRL structure */
275}; 274};
276 275
277/* Op. System id */ 276/* Op. System id */
@@ -408,24 +407,24 @@ struct FIRM_ID {
408 */ 407 */
409 408
410struct CH_CTRL { 409struct CH_CTRL {
411 uclong op_mode; /* operation mode */ 410 __u32 op_mode; /* operation mode */
412 uclong intr_enable; /* interrupt masking */ 411 __u32 intr_enable; /* interrupt masking */
413 uclong sw_flow; /* SW flow control */ 412 __u32 sw_flow; /* SW flow control */
414 uclong flow_status; /* output flow status */ 413 __u32 flow_status; /* output flow status */
415 uclong comm_baud; /* baud rate - numerically specified */ 414 __u32 comm_baud; /* baud rate - numerically specified */
416 uclong comm_parity; /* parity */ 415 __u32 comm_parity; /* parity */
417 uclong comm_data_l; /* data length/stop */ 416 __u32 comm_data_l; /* data length/stop */
418 uclong comm_flags; /* other flags */ 417 __u32 comm_flags; /* other flags */
419 uclong hw_flow; /* HW flow control */ 418 __u32 hw_flow; /* HW flow control */
420 uclong rs_control; /* RS-232 outputs */ 419 __u32 rs_control; /* RS-232 outputs */
421 uclong rs_status; /* RS-232 inputs */ 420 __u32 rs_status; /* RS-232 inputs */
422 uclong flow_xon; /* xon char */ 421 __u32 flow_xon; /* xon char */
423 uclong flow_xoff; /* xoff char */ 422 __u32 flow_xoff; /* xoff char */
424 uclong hw_overflow; /* hw overflow counter */ 423 __u32 hw_overflow; /* hw overflow counter */
425 uclong sw_overflow; /* sw overflow counter */ 424 __u32 sw_overflow; /* sw overflow counter */
426 uclong comm_error; /* frame/parity error counter */ 425 __u32 comm_error; /* frame/parity error counter */
427 uclong ichar; 426 __u32 ichar;
428 uclong filler[7]; 427 __u32 filler[7];
429}; 428};
430 429
431 430
@@ -435,18 +434,18 @@ struct CH_CTRL {
435 */ 434 */
436 435
437struct BUF_CTRL { 436struct BUF_CTRL {
438 uclong flag_dma; /* buffers are in Host memory */ 437 __u32 flag_dma; /* buffers are in Host memory */
439 uclong tx_bufaddr; /* address of the tx buffer */ 438 __u32 tx_bufaddr; /* address of the tx buffer */
440 uclong tx_bufsize; /* tx buffer size */ 439 __u32 tx_bufsize; /* tx buffer size */
441 uclong tx_threshold; /* tx low water mark */ 440 __u32 tx_threshold; /* tx low water mark */
442 uclong tx_get; /* tail index tx buf */ 441 __u32 tx_get; /* tail index tx buf */
443 uclong tx_put; /* head index tx buf */ 442 __u32 tx_put; /* head index tx buf */
444 uclong rx_bufaddr; /* address of the rx buffer */ 443 __u32 rx_bufaddr; /* address of the rx buffer */
445 uclong rx_bufsize; /* rx buffer size */ 444 __u32 rx_bufsize; /* rx buffer size */
446 uclong rx_threshold; /* rx high water mark */ 445 __u32 rx_threshold; /* rx high water mark */
447 uclong rx_get; /* tail index rx buf */ 446 __u32 rx_get; /* tail index rx buf */
448 uclong rx_put; /* head index rx buf */ 447 __u32 rx_put; /* head index rx buf */
449 uclong filler[5]; /* filler to align structures */ 448 __u32 filler[5]; /* filler to align structures */
450}; 449};
451 450
452/* 451/*
@@ -457,27 +456,27 @@ struct BUF_CTRL {
457struct BOARD_CTRL { 456struct BOARD_CTRL {
458 457
459 /* static info provided by the on-board CPU */ 458 /* static info provided by the on-board CPU */
460 uclong n_channel; /* number of channels */ 459 __u32 n_channel; /* number of channels */
461 uclong fw_version; /* firmware version */ 460 __u32 fw_version; /* firmware version */
462 461
463 /* static info provided by the driver */ 462 /* static info provided by the driver */
464 uclong op_system; /* op_system id */ 463 __u32 op_system; /* op_system id */
465 uclong dr_version; /* driver version */ 464 __u32 dr_version; /* driver version */
466 465
467 /* board control area */ 466 /* board control area */
468 uclong inactivity; /* inactivity control */ 467 __u32 inactivity; /* inactivity control */
469 468
470 /* host to FW commands */ 469 /* host to FW commands */
471 uclong hcmd_channel; /* channel number */ 470 __u32 hcmd_channel; /* channel number */
472 uclong hcmd_param; /* pointer to parameters */ 471 __u32 hcmd_param; /* pointer to parameters */
473 472
474 /* FW to Host commands */ 473 /* FW to Host commands */
475 uclong fwcmd_channel; /* channel number */ 474 __u32 fwcmd_channel; /* channel number */
476 uclong fwcmd_param; /* pointer to parameters */ 475 __u32 fwcmd_param; /* pointer to parameters */
477 uclong zf_int_queue_addr; /* offset for INT_QUEUE structure */ 476 __u32 zf_int_queue_addr; /* offset for INT_QUEUE structure */
478 477
479 /* filler so the structures are aligned */ 478 /* filler so the structures are aligned */
480 uclong filler[6]; 479 __u32 filler[6];
481}; 480};
482 481
483/* Host Interrupt Queue */ 482/* Host Interrupt Queue */
@@ -506,11 +505,10 @@ struct ZFW_CTRL {
506/****************** ****************** *******************/ 505/****************** ****************** *******************/
507#endif 506#endif
508 507
508#ifdef __KERNEL__
509
509/* Per card data structure */ 510/* Per card data structure */
510struct resource;
511struct cyclades_card { 511struct cyclades_card {
512 unsigned long base_phys;
513 unsigned long ctl_phys;
514 void __iomem *base_addr; 512 void __iomem *base_addr;
515 void __iomem *ctl_addr; 513 void __iomem *ctl_addr;
516 int irq; 514 int irq;
@@ -519,33 +517,18 @@ struct cyclades_card {
519 int nports; /* Number of ports in the card */ 517 int nports; /* Number of ports in the card */
520 int bus_index; /* address shift - 0 for ISA, 1 for PCI */ 518 int bus_index; /* address shift - 0 for ISA, 1 for PCI */
521 int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ 519 int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
522 struct pci_dev *pdev;
523#ifdef __KERNEL__
524 spinlock_t card_lock; 520 spinlock_t card_lock;
525#else 521 struct cyclades_port *ports;
526 unsigned long filler;
527#endif
528}; 522};
529 523
530struct cyclades_chip {
531 int filler;
532};
533
534
535#ifdef __KERNEL__
536
537/*************************************** 524/***************************************
538 * Memory access functions/macros * 525 * Memory access functions/macros *
539 * (required to support Alpha systems) * 526 * (required to support Alpha systems) *
540 ***************************************/ 527 ***************************************/
541 528
542#define cy_writeb(port,val) {writeb((val),(port)); mb();} 529#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0)
543#define cy_writew(port,val) {writew((val),(port)); mb();} 530#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0)
544#define cy_writel(port,val) {writel((val),(port)); mb();} 531#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0)
545
546#define cy_readb(port) readb(port)
547#define cy_readw(port) readw(port)
548#define cy_readl(port) readl(port)
549 532
550/* 533/*
551 * Statistics counters 534 * Statistics counters
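The cy_write* hunk above also changes the macro style from a bare { ... } block to do { ... } while (0). The difference shows up when such a macro is the body of an if that has an else: with the brace form, the caller's trailing semicolon terminates the if and the else has nothing to bind to, while the do/while form expands to a single statement. A small self-contained illustration with hypothetical names:

#include <asm/io.h>
#include <asm/system.h>

#define WRITE_BAD(port, val)	{ writeb((val), (port)); mb(); }
#define WRITE_OK(port, val)	do { writeb((val), (port)); mb(); } while (0)

static void example(void __iomem *port, int enabled)
{
	if (enabled)
		WRITE_OK(port, 1);	/* one statement after expansion ...   */
	else
		WRITE_OK(port, 0);	/* ... so the else still binds to the if */

	/*
	 * Writing WRITE_BAD(port, 1); in the same position would not
	 * compile: the ';' after the expanded '}' ends the if early and
	 * orphans the else.
	 */
}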
@@ -567,7 +550,7 @@ struct cyclades_icount {
567 550
568struct cyclades_port { 551struct cyclades_port {
569 int magic; 552 int magic;
570 int card; 553 struct cyclades_card *card;
571 int line; 554 int line;
572 int flags; /* defined in tty.h */ 555 int flags; /* defined in tty.h */
573 int type; /* UART type */ 556 int type; /* UART type */
@@ -587,7 +570,6 @@ struct cyclades_port {
587 int close_delay; 570 int close_delay;
588 unsigned short closing_wait; 571 unsigned short closing_wait;
589 unsigned long event; 572 unsigned long event;
590 unsigned long last_active;
591 int count; /* # of fd on device */ 573 int count; /* # of fd on device */
592 int breakon; 574 int breakon;
593 int breakoff; 575 int breakoff;
@@ -598,7 +580,6 @@ struct cyclades_port {
598 int xmit_cnt; 580 int xmit_cnt;
599 int default_threshold; 581 int default_threshold;
600 int default_timeout; 582 int default_timeout;
601 unsigned long jiffies[3];
602 unsigned long rflush_count; 583 unsigned long rflush_count;
603 struct cyclades_monitor mon; 584 struct cyclades_monitor mon;
604 struct cyclades_idle_stats idle_stats; 585 struct cyclades_idle_stats idle_stats;
@@ -606,7 +587,7 @@ struct cyclades_port {
606 struct work_struct tqueue; 587 struct work_struct tqueue;
607 wait_queue_head_t open_wait; 588 wait_queue_head_t open_wait;
608 wait_queue_head_t close_wait; 589 wait_queue_head_t close_wait;
609 wait_queue_head_t shutdown_wait; 590 struct completion shutdown_wait;
610 wait_queue_head_t delta_msr_wait; 591 wait_queue_head_t delta_msr_wait;
611 int throttle; 592 int throttle;
612}; 593};
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 63f64a9a5bf7..aab53df4fafa 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -133,6 +133,7 @@ struct dentry_operations {
133 int (*d_delete)(struct dentry *); 133 int (*d_delete)(struct dentry *);
134 void (*d_release)(struct dentry *); 134 void (*d_release)(struct dentry *);
135 void (*d_iput)(struct dentry *, struct inode *); 135 void (*d_iput)(struct dentry *, struct inode *);
136 char *(*d_dname)(struct dentry *, char *, int);
136}; 137};
137 138
138/* the dentry parameter passed to d_hash and d_compare is the parent 139/* the dentry parameter passed to d_hash and d_compare is the parent
@@ -293,6 +294,11 @@ extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *);
293/* validate "insecure" dentry pointer */ 294/* validate "insecure" dentry pointer */
294extern int d_validate(struct dentry *, struct dentry *); 295extern int d_validate(struct dentry *, struct dentry *);
295 296
297/*
298 * helper function for dentry_operations.d_dname() members
299 */
300extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
301
296extern char * d_path(struct dentry *, struct vfsmount *, char *, int); 302extern char * d_path(struct dentry *, struct vfsmount *, char *, int);
297 303
298/* Allocation counts.. */ 304/* Allocation counts.. */
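d_dname, added above, lets a filesystem synthesize a dentry's name on demand instead of storing it, and dynamic_dname() is the printf-style helper declared for writing such hooks. A minimal sketch (the "example:[%lu]" name format is made up for illustration):

#include <linux/dcache.h>
#include <linux/fs.h>

static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	/* render "example:[<inode number>]" into the caller's buffer */
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
			     dentry->d_inode->i_ino);
}

static struct dentry_operations example_dentry_operations = {
	.d_dname	= example_dname,
};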
diff --git a/include/linux/device.h b/include/linux/device.h
index 6579068134d1..2e1a2988b7e1 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -412,12 +412,13 @@ struct device {
412 struct klist_node knode_parent; /* node in sibling list */ 412 struct klist_node knode_parent; /* node in sibling list */
413 struct klist_node knode_driver; 413 struct klist_node knode_driver;
414 struct klist_node knode_bus; 414 struct klist_node knode_bus;
415 struct device * parent; 415 struct device *parent;
416 416
417 struct kobject kobj; 417 struct kobject kobj;
418 char bus_id[BUS_ID_SIZE]; /* position on parent bus */ 418 char bus_id[BUS_ID_SIZE]; /* position on parent bus */
419 struct device_type *type; 419 struct device_type *type;
420 unsigned is_registered:1; 420 unsigned is_registered:1;
421 unsigned uevent_suppress:1;
421 struct device_attribute uevent_attr; 422 struct device_attribute uevent_attr;
422 struct device_attribute *devt_attr; 423 struct device_attribute *devt_attr;
423 424
@@ -458,7 +459,6 @@ struct device {
458 struct class *class; 459 struct class *class;
459 dev_t devt; /* dev_t, creates the sysfs "dev" */ 460 dev_t devt; /* dev_t, creates the sysfs "dev" */
460 struct attribute_group **groups; /* optional groups */ 461 struct attribute_group **groups; /* optional groups */
461 int uevent_suppress;
462 462
463 void (*release)(struct device * dev); 463 void (*release)(struct device * dev);
464}; 464};
diff --git a/include/linux/display.h b/include/linux/display.h
new file mode 100644
index 000000000000..3bf70d639728
--- /dev/null
+++ b/include/linux/display.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright (C) 2006 James Simmons <jsimmons@infradead.org>
3 *
4 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
19 *
20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21 */
22
23#ifndef _LINUX_DISPLAY_H
24#define _LINUX_DISPLAY_H
25
26#include <linux/device.h>
27
28struct display_device;
29
30/* This structure defines all the properties of a Display. */
31struct display_driver {
32 int (*set_contrast)(struct display_device *, unsigned int);
33 int (*get_contrast)(struct display_device *);
34 void (*suspend)(struct display_device *, pm_message_t state);
35 void (*resume)(struct display_device *);
36 int (*probe)(struct display_device *, void *);
37 int (*remove)(struct display_device *);
38 int max_contrast;
39};
40
41struct display_device {
42 struct module *owner; /* Owner module */
43 struct display_driver *driver;
44 struct device *parent; /* This is the parent */
45 struct device *dev; /* This is this display device */
46 struct mutex lock;
47 void *priv_data;
48 char type[16];
49 char *name;
50 int idx;
51};
52
53extern struct display_device *display_device_register(struct display_driver *driver,
54 struct device *dev, void *devdata);
55extern void display_device_unregister(struct display_device *dev);
56
57extern int probe_edid(struct display_device *dev, void *devdata);
58
59#define to_display_device(obj) container_of(obj, struct display_device, class_dev)
60
61#endif
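The new header is a small registration API: a panel-style driver fills in struct display_driver, typically the contrast accessors and max_contrast, and binds it to its parent device with display_device_register(). A hedged sketch of a client; the stubbed accessors and the 0..255 contrast range are assumptions, not part of this header:

#include <linux/display.h>

static int example_get_contrast(struct display_device *ddev)
{
	/* read the panel's current contrast from ddev->priv_data (stub) */
	return 0;
}

static int example_set_contrast(struct display_device *ddev, unsigned int level)
{
	/* write 'level' to the panel's contrast control (stub) */
	return 0;
}

static struct display_driver example_display_driver = {
	.set_contrast	= example_set_contrast,
	.get_contrast	= example_get_contrast,
	.max_contrast	= 255,
};

/* Typically called from the panel driver's probe routine. */
static struct display_device *example_attach_display(struct device *parent,
						     void *panel_data)
{
	return display_device_register(&example_display_driver, parent, panel_data);
}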
diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h
new file mode 100644
index 000000000000..31f6e3c427fb
--- /dev/null
+++ b/include/linux/ds1wm.h
@@ -0,0 +1,11 @@
1/* platform data for the DS1WM driver */
2
3struct ds1wm_platform_data {
4 int bus_shift; /* number of shifts needed to calculate the
5 * offset between DS1WM registers;
6 * e.g. on h5xxx and h2200 this is 2
7 * (registers aligned to 4-byte boundaries),
8 * while on hx4700 this is 1 */
9 void (*enable)(struct platform_device *pdev);
10 void (*disable)(struct platform_device *pdev);
11};
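bus_shift tells the DS1WM driver how far apart the chip's registers sit on a given board, so a register index can be turned into a bus offset with a single shift. An illustrative use; the read helper below shows the intent and is not taken from the driver itself:

#include <linux/platform_device.h>
#include <linux/ds1wm.h>
#include <asm/io.h>

/* h2200-style wiring: registers on 4-byte boundaries, hence a shift of 2. */
static struct ds1wm_platform_data example_ds1wm_pdata = {
	.bus_shift	= 2,
	/* .enable/.disable would switch the block's clock or power on and off */
};

static u8 example_read_reg(void __iomem *base, int reg)
{
	/* register index 3 ends up at byte offset 3 << 2 == 12 */
	return readb(base + (reg << example_ds1wm_pdata.bus_shift));
}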
diff --git a/include/linux/efi.h b/include/linux/efi.h
index f8ebd7c1ddb3..0b9579a4cd42 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -213,7 +213,6 @@ typedef struct {
213} efi_config_table_t; 213} efi_config_table_t;
214 214
215#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL) 215#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
216#define EFI_SYSTEM_TABLE_REVISION ((1 << 16) | 00)
217 216
218typedef struct { 217typedef struct {
219 efi_table_hdr_t hdr; 218 efi_table_hdr_t hdr;
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 4eb18ac510ae..ece49a804fe1 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -824,6 +824,7 @@ extern int ext3_change_inode_journal_flag(struct inode *, int);
824extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *); 824extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
825extern void ext3_truncate (struct inode *); 825extern void ext3_truncate (struct inode *);
826extern void ext3_set_inode_flags(struct inode *); 826extern void ext3_set_inode_flags(struct inode *);
827extern void ext3_get_inode_flags(struct ext3_inode_info *);
827extern void ext3_set_aops(struct inode *inode); 828extern void ext3_set_aops(struct inode *inode);
828 829
829/* ioctl.c */ 830/* ioctl.c */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index be913ec87169..dff7a728948c 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -4,6 +4,8 @@
4#include <asm/types.h> 4#include <asm/types.h>
5#include <linux/i2c.h> 5#include <linux/i2c.h>
6 6
7struct dentry;
8
7/* Definitions of frame buffers */ 9/* Definitions of frame buffers */
8 10
9#define FB_MAJOR 29 11#define FB_MAJOR 29
@@ -525,12 +527,20 @@ struct fb_cursor_user {
525#define FB_EVENT_MODE_CHANGE_ALL 0x0B 527#define FB_EVENT_MODE_CHANGE_ALL 0x0B
526/* A software display blank change occurred */ 528/* A software display blank change occurred */
527#define FB_EVENT_CONBLANK 0x0C 529#define FB_EVENT_CONBLANK 0x0C
530/* Get drawing requirements */
531#define FB_EVENT_GET_REQ 0x0D
528 532
529struct fb_event { 533struct fb_event {
530 struct fb_info *info; 534 struct fb_info *info;
531 void *data; 535 void *data;
532}; 536};
533 537
538struct fb_blit_caps {
539 u32 x;
540 u32 y;
541 u32 len;
542 u32 flags;
543};
534 544
535extern int fb_register_client(struct notifier_block *nb); 545extern int fb_register_client(struct notifier_block *nb);
536extern int fb_unregister_client(struct notifier_block *nb); 546extern int fb_unregister_client(struct notifier_block *nb);
@@ -556,11 +566,25 @@ struct fb_pixmap {
556 u32 scan_align; /* alignment per scanline */ 566 u32 scan_align; /* alignment per scanline */
557 u32 access_align; /* alignment per read/write (bits) */ 567 u32 access_align; /* alignment per read/write (bits) */
558 u32 flags; /* see FB_PIXMAP_* */ 568 u32 flags; /* see FB_PIXMAP_* */
569 u32 blit_x; /* supported bit block dimensions (1-32)*/
570 u32 blit_y; /* Format: blit_x = 1 << (width - 1) */
571 /* blit_y = 1 << (height - 1) */
572 /* if 0, will be set to 0xffffffff (all)*/
559 /* access methods */ 573 /* access methods */
560 void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size); 574 void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size);
561 void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size); 575 void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size);
562}; 576};
563 577
578#ifdef CONFIG_FB_DEFERRED_IO
579struct fb_deferred_io {
580 /* delay between mkwrite and deferred handler */
581 unsigned long delay;
582 struct mutex lock; /* mutex that protects the page list */
583 struct list_head pagelist; /* list of touched pages */
584 /* callback */
585 void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
586};
587#endif
564 588
565/* 589/*
566 * Frame buffer operations 590 * Frame buffer operations
@@ -579,8 +603,10 @@ struct fb_ops {
579 /* For framebuffers with strange non linear layouts or that do not 603 /* For framebuffers with strange non linear layouts or that do not
580 * work with normal memory mapped access 604 * work with normal memory mapped access
581 */ 605 */
582 ssize_t (*fb_read)(struct file *file, char __user *buf, size_t count, loff_t *ppos); 606 ssize_t (*fb_read)(struct fb_info *info, char __user *buf,
583 ssize_t (*fb_write)(struct file *file, const char __user *buf, size_t count, loff_t *ppos); 607 size_t count, loff_t *ppos);
608 ssize_t (*fb_write)(struct fb_info *info, const char __user *buf,
609 size_t count, loff_t *ppos);
584 610
585 /* checks var and eventually tweaks it to something supported, 611 /* checks var and eventually tweaks it to something supported,
586 * DO NOT MODIFY PAR */ 612 * DO NOT MODIFY PAR */
@@ -634,10 +660,13 @@ struct fb_ops {
634 660
635 /* restore saved state */ 661 /* restore saved state */
636 void (*fb_restore_state)(struct fb_info *info); 662 void (*fb_restore_state)(struct fb_info *info);
663
664 /* get capability given var */
665 void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps,
666 struct fb_var_screeninfo *var);
637}; 667};
638 668
639#ifdef CONFIG_FB_TILEBLITTING 669#ifdef CONFIG_FB_TILEBLITTING
640
641#define FB_TILE_CURSOR_NONE 0 670#define FB_TILE_CURSOR_NONE 0
642#define FB_TILE_CURSOR_UNDERLINE 1 671#define FB_TILE_CURSOR_UNDERLINE 1
643#define FB_TILE_CURSOR_LOWER_THIRD 2 672#define FB_TILE_CURSOR_LOWER_THIRD 2
@@ -709,6 +738,8 @@ struct fb_tile_ops {
709 /* cursor */ 738 /* cursor */
710 void (*fb_tilecursor)(struct fb_info *info, 739 void (*fb_tilecursor)(struct fb_info *info,
711 struct fb_tilecursor *cursor); 740 struct fb_tilecursor *cursor);
741 /* get maximum length of the tile map */
742 int (*fb_get_tilemax)(struct fb_info *info);
712}; 743};
713#endif /* CONFIG_FB_TILEBLITTING */ 744#endif /* CONFIG_FB_TILEBLITTING */
714 745
@@ -778,6 +809,10 @@ struct fb_info {
778 struct mutex bl_curve_mutex; 809 struct mutex bl_curve_mutex;
779 u8 bl_curve[FB_BACKLIGHT_LEVELS]; 810 u8 bl_curve[FB_BACKLIGHT_LEVELS];
780#endif 811#endif
812#ifdef CONFIG_FB_DEFERRED_IO
813 struct delayed_work deferred_work;
814 struct fb_deferred_io *fbdefio;
815#endif
781 816
782 struct fb_ops *fbops; 817 struct fb_ops *fbops;
783 struct device *device; /* This is the parent */ 818 struct device *device; /* This is the parent */
@@ -879,6 +914,16 @@ extern int fb_blank(struct fb_info *info, int blank);
879extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); 914extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
880extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); 915extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
881extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image); 916extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image);
917/*
918 * Drawing operations where framebuffer is in system RAM
919 */
920extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
921extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area);
922extern void sys_imageblit(struct fb_info *info, const struct fb_image *image);
923extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf,
924 size_t count, loff_t *ppos);
925extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
926 size_t count, loff_t *ppos);
882 927
883/* drivers/video/fbmem.c */ 928/* drivers/video/fbmem.c */
884extern int register_framebuffer(struct fb_info *fb_info); 929extern int register_framebuffer(struct fb_info *fb_info);
@@ -913,6 +958,12 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
913 } 958 }
914} 959}
915 960
961/* drivers/video/fb_defio.c */
962extern void fb_deferred_io_init(struct fb_info *info);
963extern void fb_deferred_io_cleanup(struct fb_info *info);
964extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry,
965 int datasync);
966
916/* drivers/video/fbsysfs.c */ 967/* drivers/video/fbsysfs.c */
917extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); 968extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
918extern void framebuffer_release(struct fb_info *info); 969extern void framebuffer_release(struct fb_info *info);
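
The new deferred-I/O support is opt-in: a driver whose framebuffer lives in system RAM fills in a struct fb_deferred_io, points info->fbdefio at it and calls fb_deferred_io_init(), presumably before register_framebuffer(); the sys_* and fb_sys_* helpers declared above provide matching drawing and read/write routines. A rough sketch of the setup, assuming the core hands the flush callback the touched pages linked through page->lru (an fb_defio implementation detail, not visible in this header); the delay and names are made up.

#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mm.h>

/* flush callback: push the pages touched since the last run to the device */
static void mydrv_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct page *page;

	list_for_each_entry(page, pagelist, lru)
		/* page->index is the page offset into the framebuffer;
		 * a real driver would transfer that region to the hardware */
		pr_debug("fb page %lu touched\n", page->index);
}

static struct fb_deferred_io mydrv_defio = {
	.delay		= HZ / 10,		/* batch writes for ~100 ms */
	.deferred_io	= mydrv_deferred_io,
};

static void mydrv_setup_defio(struct fb_info *info)
{
	info->fbdefio = &mydrv_defio;
	fb_deferred_io_init(info);
	/* pair with fb_deferred_io_cleanup(info) on teardown */
}
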
diff --git a/include/linux/font.h b/include/linux/font.h
index 53b129f07f6f..40a24ab41b36 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -49,7 +49,8 @@ extern const struct font_desc *find_font(const char *name);
49 49
50/* Get the default font for a specific screen size */ 50/* Get the default font for a specific screen size */
51 51
52extern const struct font_desc *get_default_font(int xres, int yres); 52extern const struct font_desc *get_default_font(int xres, int yres,
53 u32 font_w, u32 font_h);
53 54
54/* Max. length for the name of a predefined font */ 55/* Max. length for the name of a predefined font */
55#define MAX_FONT_NAME 32 56#define MAX_FONT_NAME 32
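
The two extra parameters let a caller restrict the default-font search to glyph sizes its blitter supports, apparently using the same bitmask encoding described for fb_pixmap.blit_x/blit_y above (bit n set means a width or height of n+1 pixels is acceptable). A small usage sketch; the resolution is arbitrary and "accept everything" masks are passed.

#include <linux/types.h>
#include <linux/font.h>

static const struct font_desc *pick_console_font(void)
{
	u32 any_width  = 0xffffffff;	/* all glyph widths 1..32 acceptable */
	u32 any_height = 0xffffffff;	/* all glyph heights 1..32 acceptable */

	return get_default_font(1024, 768, any_width, any_height);
}
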
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bc6d27cecaac..7cf0c54a46a7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -30,6 +30,7 @@
30#define SEEK_SET 0 /* seek relative to beginning of file */ 30#define SEEK_SET 0 /* seek relative to beginning of file */
31#define SEEK_CUR 1 /* seek relative to current file position */ 31#define SEEK_CUR 1 /* seek relative to current file position */
32#define SEEK_END 2 /* seek relative to end of file */ 32#define SEEK_END 2 /* seek relative to end of file */
33#define SEEK_MAX SEEK_END
33 34
34/* And dynamically-tunable limits and defaults: */ 35/* And dynamically-tunable limits and defaults: */
35struct files_stat_struct { 36struct files_stat_struct {
@@ -91,6 +92,7 @@ extern int dir_notify_enable;
91/* public flags for file_system_type */ 92/* public flags for file_system_type */
92#define FS_REQUIRES_DEV 1 93#define FS_REQUIRES_DEV 1
93#define FS_BINARY_MOUNTDATA 2 94#define FS_BINARY_MOUNTDATA 2
95#define FS_HAS_SUBTYPE 4
94#define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ 96#define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */
95#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() 97#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move()
96 * during rename() internally. 98 * during rename() internally.
@@ -847,11 +849,6 @@ extern int fcntl_getlease(struct file *filp);
847/* fs/sync.c */ 849/* fs/sync.c */
848extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset, 850extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
849 loff_t endbyte, unsigned int flags); 851 loff_t endbyte, unsigned int flags);
850static inline int do_sync_file_range(struct file *file, loff_t offset,
851 loff_t endbyte, unsigned int flags)
852{
853 return do_sync_mapping_range(file->f_mapping, offset, endbyte, flags);
854}
855 852
856/* fs/locks.c */ 853/* fs/locks.c */
857extern void locks_init_lock(struct file_lock *); 854extern void locks_init_lock(struct file_lock *);
@@ -960,6 +957,12 @@ struct super_block {
960 /* Granularity of c/m/atime in ns. 957 /* Granularity of c/m/atime in ns.
961 Cannot be worse than a second */ 958 Cannot be worse than a second */
962 u32 s_time_gran; 959 u32 s_time_gran;
960
961 /*
962 * Filesystem subtype. If non-empty the filesystem type field
963 * in /proc/mounts will be "type.subtype"
964 */
965 char *s_subtype;
963}; 966};
964 967
965extern struct timespec current_fs_time(struct super_block *sb); 968extern struct timespec current_fs_time(struct super_block *sb);
@@ -1735,6 +1738,8 @@ extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor
1735extern void do_generic_mapping_read(struct address_space *mapping, 1738extern void do_generic_mapping_read(struct address_space *mapping,
1736 struct file_ra_state *, struct file *, 1739 struct file_ra_state *, struct file *,
1737 loff_t *, read_descriptor_t *, read_actor_t); 1740 loff_t *, read_descriptor_t *, read_actor_t);
1741extern int generic_segment_checks(const struct iovec *iov,
1742 unsigned long *nr_segs, size_t *count, int access_flags);
1738 1743
1739/* fs/splice.c */ 1744/* fs/splice.c */
1740extern ssize_t generic_file_splice_read(struct file *, loff_t *, 1745extern ssize_t generic_file_splice_read(struct file *, loff_t *,
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 3f153b4e156c..820125c628c1 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -100,6 +100,35 @@ long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
100extern int 100extern int
101handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); 101handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
102 102
103/*
104 * Futexes are matched on equal values of this key.
105 * The key type depends on whether it's a shared or private mapping.
106 * Don't rearrange members without looking at hash_futex().
107 *
108 * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
109 * We set bit 0 to indicate if it's an inode-based key.
110 */
111union futex_key {
112 struct {
113 unsigned long pgoff;
114 struct inode *inode;
115 int offset;
116 } shared;
117 struct {
118 unsigned long address;
119 struct mm_struct *mm;
120 int offset;
121 } private;
122 struct {
123 unsigned long word;
124 void *ptr;
125 int offset;
126 } both;
127};
128int get_futex_key(u32 __user *uaddr, union futex_key *key);
129void get_futex_key_refs(union futex_key *key);
130void drop_futex_key_refs(union futex_key *key);
131
103#ifdef CONFIG_FUTEX 132#ifdef CONFIG_FUTEX
104extern void exit_robust_list(struct task_struct *curr); 133extern void exit_robust_list(struct task_struct *curr);
105extern void exit_pi_state_list(struct task_struct *curr); 134extern void exit_pi_state_list(struct task_struct *curr);
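
Exposing union futex_key and its helpers lets other futex-aware code resolve a user address into the canonical key the futex hash matches on. A rough sketch of the sequence only; the real callers in kernel/futex.c take additional locks around get_futex_key() (the mm semaphore in this era) and proper hash-bucket locking, none of which is shown here.

#include <linux/futex.h>
#include <linux/types.h>

static int example_futex_lookup(u32 __user *uaddr)
{
	union futex_key key;
	int ret;

	ret = get_futex_key(uaddr, &key);	/* fills the shared or private variant */
	if (ret)
		return ret;

	get_futex_key_refs(&key);		/* pin the inode or mm behind the key */

	/* ... hash 'key' and walk the matching futex bucket here ... */

	drop_futex_key_refs(&key);
	return 0;
}
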
diff --git a/include/linux/init.h b/include/linux/init.h
index dbbdbd1bec77..8bc32bb2fce2 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -77,7 +77,8 @@ extern char *saved_command_line;
77extern unsigned int reset_devices; 77extern unsigned int reset_devices;
78 78
79/* used by init/main.c */ 79/* used by init/main.c */
80extern void setup_arch(char **); 80void setup_arch(char **);
81void prepare_namespace(void);
81 82
82#endif 83#endif
83 84
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a2d95ff50e9b..795102309bf1 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -138,7 +138,7 @@ extern struct group_info init_groups;
138 .journal_info = NULL, \ 138 .journal_info = NULL, \
139 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 139 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
140 .fs_excl = ATOMIC_INIT(0), \ 140 .fs_excl = ATOMIC_INIT(0), \
141 .pi_lock = SPIN_LOCK_UNLOCKED, \ 141 .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
142 INIT_TRACE_IRQFLAGS \ 142 INIT_TRACE_IRQFLAGS \
143 INIT_LOCKDEP \ 143 INIT_LOCKDEP \
144} 144}
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0319f665dd3f..f7b01b9a35b3 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -44,6 +44,9 @@
44 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt 44 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
45 * IRQF_PERCPU - Interrupt is per cpu 45 * IRQF_PERCPU - Interrupt is per cpu
46 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing 46 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
47 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
48 * registered first in a shared interrupt is considered for
49 * performance reasons)
47 */ 50 */
48#define IRQF_DISABLED 0x00000020 51#define IRQF_DISABLED 0x00000020
49#define IRQF_SAMPLE_RANDOM 0x00000040 52#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -52,22 +55,29 @@
52#define IRQF_TIMER 0x00000200 55#define IRQF_TIMER 0x00000200
53#define IRQF_PERCPU 0x00000400 56#define IRQF_PERCPU 0x00000400
54#define IRQF_NOBALANCING 0x00000800 57#define IRQF_NOBALANCING 0x00000800
58#define IRQF_IRQPOLL 0x00001000
55 59
56/* 60/*
57 * Migration helpers. Scheduled for removal in 1/2007 61 * Migration helpers. Scheduled for removal in 9/2007
58 * Do not use for new code ! 62 * Do not use for new code !
59 */ 63 */
60#define SA_INTERRUPT IRQF_DISABLED 64static inline
61#define SA_SAMPLE_RANDOM IRQF_SAMPLE_RANDOM 65unsigned long __deprecated deprecated_irq_flag(unsigned long flag)
62#define SA_SHIRQ IRQF_SHARED 66{
63#define SA_PROBEIRQ IRQF_PROBE_SHARED 67 return flag;
64#define SA_PERCPU IRQF_PERCPU 68}
65 69
66#define SA_TRIGGER_LOW IRQF_TRIGGER_LOW 70#define SA_INTERRUPT deprecated_irq_flag(IRQF_DISABLED)
67#define SA_TRIGGER_HIGH IRQF_TRIGGER_HIGH 71#define SA_SAMPLE_RANDOM deprecated_irq_flag(IRQF_SAMPLE_RANDOM)
68#define SA_TRIGGER_FALLING IRQF_TRIGGER_FALLING 72#define SA_SHIRQ deprecated_irq_flag(IRQF_SHARED)
69#define SA_TRIGGER_RISING IRQF_TRIGGER_RISING 73#define SA_PROBEIRQ deprecated_irq_flag(IRQF_PROBE_SHARED)
70#define SA_TRIGGER_MASK IRQF_TRIGGER_MASK 74#define SA_PERCPU deprecated_irq_flag(IRQF_PERCPU)
75
76#define SA_TRIGGER_LOW deprecated_irq_flag(IRQF_TRIGGER_LOW)
77#define SA_TRIGGER_HIGH deprecated_irq_flag(IRQF_TRIGGER_HIGH)
78#define SA_TRIGGER_FALLING deprecated_irq_flag(IRQF_TRIGGER_FALLING)
79#define SA_TRIGGER_RISING deprecated_irq_flag(IRQF_TRIGGER_RISING)
80#define SA_TRIGGER_MASK deprecated_irq_flag(IRQF_TRIGGER_MASK)
71 81
72typedef irqreturn_t (*irq_handler_t)(int, void *); 82typedef irqreturn_t (*irq_handler_t)(int, void *);
73 83
@@ -83,11 +93,11 @@ struct irqaction {
83}; 93};
84 94
85extern irqreturn_t no_action(int cpl, void *dev_id); 95extern irqreturn_t no_action(int cpl, void *dev_id);
86extern int request_irq(unsigned int, irq_handler_t handler, 96extern int __must_check request_irq(unsigned int, irq_handler_t handler,
87 unsigned long, const char *, void *); 97 unsigned long, const char *, void *);
88extern void free_irq(unsigned int, void *); 98extern void free_irq(unsigned int, void *);
89 99
90extern int devm_request_irq(struct device *dev, unsigned int irq, 100extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
91 irq_handler_t handler, unsigned long irqflags, 101 irq_handler_t handler, unsigned long irqflags,
92 const char *devname, void *dev_id); 102 const char *devname, void *dev_id);
93extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); 103extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
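
The SA_* aliases now expand through a __deprecated helper, so existing users still build but get a warning pointing them at the IRQF_* names, and request_irq()/devm_request_irq() are now __must_check. A minimal sketch of the preferred usage for a shared interrupt; the IRQ number, device name and dev_id are placeholders.

#include <linux/interrupt.h>
#include <linux/errno.h>

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
	/* with IRQF_SHARED, first check whether our device raised the line */
	return IRQ_HANDLED;
}

static int mydev_setup_irq(unsigned int irq, void *mydev)
{
	int ret;

	ret = request_irq(irq, mydev_interrupt,
			  IRQF_SHARED | IRQF_TRIGGER_LOW,	/* not SA_SHIRQ / SA_TRIGGER_LOW */
			  "mydev", mydev);
	if (ret)		/* ignoring this is now a __must_check warning */
		return ret;

	return 0;
}

/* teardown: free_irq(irq, mydev) with the same dev_id */
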
diff --git a/include/linux/ioctl32.h b/include/linux/ioctl32.h
deleted file mode 100644
index 948809d99917..000000000000
--- a/include/linux/ioctl32.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef IOCTL32_H
2#define IOCTL32_H 1
3
4#include <linux/compiler.h> /* for __deprecated */
5
6struct file;
7
8typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int,
9 unsigned long, struct file *);
10
11struct ioctl_trans {
12 unsigned long cmd;
13 ioctl_trans_handler_t handler;
14 struct ioctl_trans *next;
15};
16
17#endif
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 6da6772c19ff..1980867a64a4 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -92,16 +92,19 @@ extern struct ipc_namespace init_ipc_ns;
92 92
93#ifdef CONFIG_SYSVIPC 93#ifdef CONFIG_SYSVIPC
94#define INIT_IPC_NS(ns) .ns = &init_ipc_ns, 94#define INIT_IPC_NS(ns) .ns = &init_ipc_ns,
95extern int copy_ipcs(unsigned long flags, struct task_struct *tsk); 95extern struct ipc_namespace *copy_ipcs(unsigned long flags,
96 struct ipc_namespace *ns);
96#else 97#else
97#define INIT_IPC_NS(ns) 98#define INIT_IPC_NS(ns)
98static inline int copy_ipcs(unsigned long flags, struct task_struct *tsk) 99static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
99{ return 0; } 100 struct ipc_namespace *ns)
101{
102 return ns;
103}
100#endif 104#endif
101 105
102#ifdef CONFIG_IPC_NS 106#ifdef CONFIG_IPC_NS
103extern void free_ipc_ns(struct kref *kref); 107extern void free_ipc_ns(struct kref *kref);
104extern int unshare_ipcs(unsigned long flags, struct ipc_namespace **ns);
105#endif 108#endif
106 109
107static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) 110static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a6899402b522..1695054e8c63 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -147,8 +147,6 @@ struct irq_chip {
147 * @dir: /proc/irq/ procfs entry 147 * @dir: /proc/irq/ procfs entry
148 * @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP 148 * @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP
149 * @name: flow handler name for /proc/interrupts output 149 * @name: flow handler name for /proc/interrupts output
150 *
151 * Pad this out to 32 bytes for cache and indexing reasons.
152 */ 150 */
153struct irq_desc { 151struct irq_desc {
154 irq_flow_handler_t handle_irq; 152 irq_flow_handler_t handle_irq;
@@ -175,7 +173,7 @@ struct irq_desc {
175 struct proc_dir_entry *dir; 173 struct proc_dir_entry *dir;
176#endif 174#endif
177 const char *name; 175 const char *name;
178} ____cacheline_aligned; 176} ____cacheline_internodealigned_in_smp;
179 177
180extern struct irq_desc irq_desc[NR_IRQS]; 178extern struct irq_desc irq_desc[NR_IRQS];
181 179
diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h
index 63bd9cf821a7..5a52f2c94f3f 100644
--- a/include/linux/isdn/capiutil.h
+++ b/include/linux/isdn/capiutil.h
@@ -187,7 +187,6 @@ typedef struct {
187#define CDEBUG_SIZE 1024 187#define CDEBUG_SIZE 1024
188#define CDEBUG_GSIZE 4096 188#define CDEBUG_GSIZE 4096
189 189
190_cdebbuf *cdebbuf_alloc(void);
191void cdebbuf_free(_cdebbuf *cdb); 190void cdebbuf_free(_cdebbuf *cdb);
192int cdebug_init(void); 191int cdebug_init(void);
193void cdebug_exit(void); 192void cdebug_exit(void);
diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h
index 0e7e44ce8301..07821ca5955f 100644
--- a/include/linux/isdn_divertif.h
+++ b/include/linux/isdn_divertif.h
@@ -24,6 +24,10 @@
24#define DIVERT_REL_ERR 0x04 /* module not registered */ 24#define DIVERT_REL_ERR 0x04 /* module not registered */
25#define DIVERT_REG_NAME isdn_register_divert 25#define DIVERT_REG_NAME isdn_register_divert
26 26
27#ifdef __KERNEL__
28#include <linux/isdnif.h>
29#include <linux/types.h>
30
27/***************************************************************/ 31/***************************************************************/
28/* structure exchanging data between isdn hl and divert module */ 32/* structure exchanging data between isdn hl and divert module */
29/***************************************************************/ 33/***************************************************************/
@@ -40,3 +44,4 @@ typedef struct
40/* function register */ 44/* function register */
41/*********************/ 45/*********************/
42extern int DIVERT_REG_NAME(isdn_divert_if *); 46extern int DIVERT_REG_NAME(isdn_divert_if *);
47#endif
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 3e3b92dabe3b..12178d2c882b 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -30,6 +30,9 @@ extern int sprint_symbol(char *buffer, unsigned long address);
30/* Look up a kernel symbol and print it to the kernel messages. */ 30/* Look up a kernel symbol and print it to the kernel messages. */
31extern void __print_symbol(const char *fmt, unsigned long address); 31extern void __print_symbol(const char *fmt, unsigned long address);
32 32
33int lookup_symbol_name(unsigned long addr, char *symname);
34int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
35
33#else /* !CONFIG_KALLSYMS */ 36#else /* !CONFIG_KALLSYMS */
34 37
35static inline unsigned long kallsyms_lookup_name(const char *name) 38static inline unsigned long kallsyms_lookup_name(const char *name)
@@ -58,6 +61,16 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
58 return 0; 61 return 0;
59} 62}
60 63
64static inline int lookup_symbol_name(unsigned long addr, char *symname)
65{
66 return -ERANGE;
67}
68
69static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
70{
71 return -ERANGE;
72}
73
61/* Stupid that this does nothing, but I didn't create this mess. */ 74/* Stupid that this does nothing, but I didn't create this mess. */
62#define __print_symbol(fmt, addr) 75#define __print_symbol(fmt, addr)
63#endif /*CONFIG_KALLSYMS*/ 76#endif /*CONFIG_KALLSYMS*/
diff --git a/include/linux/kdebug.h b/include/linux/kdebug.h
new file mode 100644
index 000000000000..5db38d6d8b92
--- /dev/null
+++ b/include/linux/kdebug.h
@@ -0,0 +1,20 @@
1#ifndef _LINUX_KDEBUG_H
2#define _LINUX_KDEBUG_H
3
4#include <asm/kdebug.h>
5
6struct die_args {
7 struct pt_regs *regs;
8 const char *str;
9 long err;
10 int trapnr;
11 int signr;
12};
13
14int register_die_notifier(struct notifier_block *nb);
15int unregister_die_notifier(struct notifier_block *nb);
16
17int notify_die(enum die_val val, const char *str,
18 struct pt_regs *regs, long err, int trap, int sig);
19
20#endif /* _LINUX_KDEBUG_H */
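
Moving the die-notifier declarations into a common linux/kdebug.h means a client only needs this header plus the architecture's DIE_* values from asm/kdebug.h. A minimal sketch of a module that logs oops events; DIE_OOPS is an architecture-provided constant and is assumed to exist on the target platform.

#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int mymod_die_handler(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS)	/* DIE_* values come from <asm/kdebug.h> */
		printk(KERN_ERR "mymod: die '%s', trap %d, err %ld\n",
		       args->str, args->trapnr, args->err);

	return NOTIFY_DONE;
}

static struct notifier_block mymod_die_nb = {
	.notifier_call = mymod_die_handler,
};

static int __init mymod_init(void)
{
	return register_die_notifier(&mymod_die_nb);
}

static void __exit mymod_exit(void)
{
	unregister_die_notifier(&mymod_die_nb);
}

module_init(mymod_init);
module_exit(mymod_exit);
MODULE_LICENSE("GPL");
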
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 696e5ec63f77..8c2c7fcd58ce 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -7,6 +7,8 @@
7#include <linux/linkage.h> 7#include <linux/linkage.h>
8#include <linux/compat.h> 8#include <linux/compat.h>
9#include <linux/ioport.h> 9#include <linux/ioport.h>
10#include <linux/elfcore.h>
11#include <linux/elf.h>
10#include <asm/kexec.h> 12#include <asm/kexec.h>
11 13
12/* Verify architecture specific macros are defined */ 14/* Verify architecture specific macros are defined */
@@ -31,6 +33,19 @@
31#error KEXEC_ARCH not defined 33#error KEXEC_ARCH not defined
32#endif 34#endif
33 35
36#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
37#define KEXEC_CORE_NOTE_NAME "CORE"
38#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4)
39#define KEXEC_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
40/*
41 * The per-cpu notes area is a list of notes terminated by a "NULL"
42 * note header. For kdump, the code in vmcore.c runs in the context
43 * of the second kernel to combine them into one note.
44 */
45#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \
46 KEXEC_CORE_NOTE_NAME_BYTES + \
47 KEXEC_CORE_NOTE_DESC_BYTES )
48
34/* 49/*
35 * This structure is used to hold the arguments that are used when loading 50 * This structure is used to hold the arguments that are used when loading
36 * kernel binaries. 51 * kernel binaries.
@@ -136,7 +151,7 @@ extern struct kimage *kexec_crash_image;
136/* Location of a reserved region to hold the crash kernel. 151/* Location of a reserved region to hold the crash kernel.
137 */ 152 */
138extern struct resource crashk_res; 153extern struct resource crashk_res;
139typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; 154typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
140extern note_buf_t *crash_notes; 155extern note_buf_t *crash_notes;
141 156
142 157
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 769be39b9681..23adf6075ae4 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -78,7 +78,7 @@ struct kprobe {
78 kprobe_opcode_t *addr; 78 kprobe_opcode_t *addr;
79 79
80 /* Allow user to indicate symbol name of the probe point */ 80 /* Allow user to indicate symbol name of the probe point */
81 char *symbol_name; 81 const char *symbol_name;
82 82
83 /* Offset into the symbol */ 83 /* Offset into the symbol */
84 unsigned int offset; 84 unsigned int offset;
@@ -123,12 +123,18 @@ DECLARE_PER_CPU(struct kprobe *, current_kprobe);
123DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 123DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
124 124
125#ifdef ARCH_SUPPORTS_KRETPROBES 125#ifdef ARCH_SUPPORTS_KRETPROBES
126extern void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs); 126extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
127 struct pt_regs *regs);
128extern int arch_trampoline_kprobe(struct kprobe *p);
127#else /* ARCH_SUPPORTS_KRETPROBES */ 129#else /* ARCH_SUPPORTS_KRETPROBES */
128static inline void arch_prepare_kretprobe(struct kretprobe *rp, 130static inline void arch_prepare_kretprobe(struct kretprobe *rp,
129 struct pt_regs *regs) 131 struct pt_regs *regs)
130{ 132{
131} 133}
134static inline int arch_trampoline_kprobe(struct kprobe *p)
135{
136 return 0;
137}
132#endif /* ARCH_SUPPORTS_KRETPROBES */ 138#endif /* ARCH_SUPPORTS_KRETPROBES */
133/* 139/*
134 * Function-return probe - 140 * Function-return probe -
@@ -157,6 +163,16 @@ struct kretprobe_instance {
157 struct task_struct *task; 163 struct task_struct *task;
158}; 164};
159 165
166static inline void kretprobe_assert(struct kretprobe_instance *ri,
167 unsigned long orig_ret_address, unsigned long trampoline_address)
168{
169 if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
170 printk("kretprobe BUG!: Processing kretprobe %p @ %p\n",
171 ri->rp, ri->rp->kp.addr);
172 BUG();
173 }
174}
175
160extern spinlock_t kretprobe_lock; 176extern spinlock_t kretprobe_lock;
161extern struct mutex kprobe_mutex; 177extern struct mutex kprobe_mutex;
162extern int arch_prepare_kprobe(struct kprobe *p); 178extern int arch_prepare_kprobe(struct kprobe *p);
@@ -199,8 +215,6 @@ void jprobe_return(void);
199int register_kretprobe(struct kretprobe *rp); 215int register_kretprobe(struct kretprobe *rp);
200void unregister_kretprobe(struct kretprobe *rp); 216void unregister_kretprobe(struct kretprobe *rp);
201 217
202struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp);
203void add_rp_inst(struct kretprobe_instance *ri);
204void kprobe_flush_task(struct task_struct *tk); 218void kprobe_flush_task(struct task_struct *tk);
205void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); 219void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
206#else /* CONFIG_KPROBES */ 220#else /* CONFIG_KPROBES */
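
With symbol_name now const char *, a probe point can be named with a string literal and no cast. A minimal sketch of a module registering a kprobe by symbol name; the probed function (do_fork) and the handler body are purely illustrative.

#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe my_probe = {
	.symbol_name	= "do_fork",	/* const char * now accepted directly */
	.pre_handler	= my_pre_handler,
};

static int __init my_probe_init(void)
{
	return register_kprobe(&my_probe);
}

static void __exit my_probe_exit(void)
{
	unregister_kprobe(&my_probe);
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");
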
diff --git a/include/linux/libata.h b/include/linux/libata.h
index d8cfc72ea9c1..7906d750aa77 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -296,18 +296,8 @@ enum {
296 296
297 /* how hard are we gonna try to probe/recover devices */ 297 /* how hard are we gonna try to probe/recover devices */
298 ATA_PROBE_MAX_TRIES = 3, 298 ATA_PROBE_MAX_TRIES = 3,
299 ATA_EH_RESET_TRIES = 3,
300 ATA_EH_DEV_TRIES = 3, 299 ATA_EH_DEV_TRIES = 3,
301 300
302 /* Drive spinup time (time from power-on to the first D2H FIS)
303 * in msecs - 8s currently. Failing to get ready in this time
304 * isn't critical. It will result in reset failure for
305 * controllers which can't wait for the first D2H FIS. libata
306 * will retry, so it just has to be long enough to spin up
307 * most devices.
308 */
309 ATA_SPINUP_WAIT = 8000,
310
311 /* Horkage types. May be set by libata or controller on drives 301 /* Horkage types. May be set by libata or controller on drives
312 (some horkage may be drive/controller pair dependant */ 302 (some horkage may be drive/controller pair dependant */
313 303
@@ -348,8 +338,9 @@ struct ata_queued_cmd;
348 338
349/* typedefs */ 339/* typedefs */
350typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 340typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
351typedef int (*ata_prereset_fn_t)(struct ata_port *ap); 341typedef int (*ata_prereset_fn_t)(struct ata_port *ap, unsigned long deadline);
352typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes); 342typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes,
343 unsigned long deadline);
353typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes); 344typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);
354 345
355struct ata_ioports { 346struct ata_ioports {
@@ -494,7 +485,6 @@ struct ata_eh_info {
494 unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */ 485 unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */
495 unsigned int flags; /* ATA_EHI_* flags */ 486 unsigned int flags; /* ATA_EHI_* flags */
496 487
497 unsigned long hotplug_timestamp;
498 unsigned int probe_mask; 488 unsigned int probe_mask;
499 489
500 char desc[ATA_EH_DESC_LEN]; 490 char desc[ATA_EH_DESC_LEN];
@@ -688,13 +678,17 @@ extern void __sata_phy_reset(struct ata_port *ap);
688extern void sata_phy_reset(struct ata_port *ap); 678extern void sata_phy_reset(struct ata_port *ap);
689extern void ata_bus_reset(struct ata_port *ap); 679extern void ata_bus_reset(struct ata_port *ap);
690extern int sata_set_spd(struct ata_port *ap); 680extern int sata_set_spd(struct ata_port *ap);
691extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param); 681extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param,
692extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param); 682 unsigned long deadline);
693extern int ata_std_prereset(struct ata_port *ap); 683extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param,
694extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes); 684 unsigned long deadline);
695extern int sata_port_hardreset(struct ata_port *ap, 685extern int ata_std_prereset(struct ata_port *ap, unsigned long deadline);
696 const unsigned long *timing); 686extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
697extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class); 687 unsigned long deadline);
688extern int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
689 unsigned long deadline);
690extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
691 unsigned long deadline);
698extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes); 692extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
699extern void ata_port_disable(struct ata_port *); 693extern void ata_port_disable(struct ata_port *);
700extern void ata_std_ports(struct ata_ioports *ioaddr); 694extern void ata_std_ports(struct ata_ioports *ioaddr);
@@ -750,6 +744,7 @@ extern void ata_host_resume(struct ata_host *host);
750extern int ata_ratelimit(void); 744extern int ata_ratelimit(void);
751extern int ata_busy_sleep(struct ata_port *ap, 745extern int ata_busy_sleep(struct ata_port *ap,
752 unsigned long timeout_pat, unsigned long timeout); 746 unsigned long timeout_pat, unsigned long timeout);
747extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline);
753extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn, 748extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
754 void *data, unsigned long delay); 749 void *data, unsigned long delay);
755extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 750extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
@@ -919,12 +914,7 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
919 914
920static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi) 915static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
921{ 916{
922 if (ehi->flags & ATA_EHI_HOTPLUGGED)
923 return;
924
925 ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK; 917 ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
926 ehi->hotplug_timestamp = jiffies;
927
928 ehi->action |= ATA_EH_SOFTRESET; 918 ehi->action |= ATA_EH_SOFTRESET;
929 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; 919 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
930} 920}
diff --git a/include/linux/list.h b/include/linux/list.h
index f9d71eab05ee..9202703be2a4 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -426,6 +426,17 @@ static inline void list_splice_init_rcu(struct list_head *list,
426 container_of(ptr, type, member) 426 container_of(ptr, type, member)
427 427
428/** 428/**
429 * list_first_entry - get the first element from a list
430 * @ptr: the list head to take the element from.
431 * @type: the type of the struct this is embedded in.
432 * @member: the name of the list_struct within the struct.
433 *
434 * Note that the list is expected to be non-empty.
435 */
436#define list_first_entry(ptr, type, member) \
437 list_entry((ptr)->next, type, member)
438
439/**
429 * list_for_each - iterate over a list 440 * list_for_each - iterate over a list
430 * @pos: the &struct list_head to use as a loop cursor. 441 * @pos: the &struct list_head to use as a loop cursor.
431 * @head: the head for your list. 442 * @head: the head for your list.
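
list_first_entry() is shorthand for list_entry() on head->next, so it is only valid on a list known to be non-empty. A small self-contained sketch of the intended pattern; the request type is made up.

#include <linux/list.h>
#include <linux/kernel.h>

struct pending_req {
	int id;
	struct list_head node;
};

static LIST_HEAD(pending_list);

static struct pending_req *pop_pending(void)
{
	struct pending_req *req;

	if (list_empty(&pending_list))
		return NULL;		/* list_first_entry() on an empty list is a bug */

	req = list_first_entry(&pending_list, struct pending_req, node);
	list_del(&req->node);
	return req;
}
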
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 191a595055f0..0b99b31f017b 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -64,6 +64,8 @@ struct loop_device {
64 wait_queue_head_t lo_event; 64 wait_queue_head_t lo_event;
65 65
66 request_queue_t *lo_queue; 66 request_queue_t *lo_queue;
67 struct gendisk *lo_disk;
68 struct list_head lo_list;
67}; 69};
68 70
69#endif /* __KERNEL__ */ 71#endif /* __KERNEL__ */
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index bdc01127dced..580b3f4956ee 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -22,8 +22,15 @@ extern spinlock_t rtc_lock; /* serialize CMOS RAM access */
22/* Some RTCs extend the mc146818 register set to support alarms of more 22/* Some RTCs extend the mc146818 register set to support alarms of more
23 * than 24 hours in the future; or dates that include a century code. 23 * than 24 hours in the future; or dates that include a century code.
24 * This platform_data structure can pass this information to the driver. 24 * This platform_data structure can pass this information to the driver.
25 *
26 * Also, some platforms need suspend()/resume() hooks to kick in special
27 * handling of wake alarms, e.g. activating ACPI BIOS hooks or setting up
28 * a separate wakeup alarm used by some almost-clone chips.
25 */ 29 */
26struct cmos_rtc_board_info { 30struct cmos_rtc_board_info {
31 void (*wake_on)(struct device *dev);
32 void (*wake_off)(struct device *dev);
33
27 u8 rtc_day_alarm; /* zero, or register index */ 34 u8 rtc_day_alarm; /* zero, or register index */
28 u8 rtc_mon_alarm; /* zero, or register index */ 35 u8 rtc_mon_alarm; /* zero, or register index */
29 u8 rtc_century; /* zero, or register index */ 36 u8 rtc_century; /* zero, or register index */
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 4af0b1fc282a..1fa4d9813b31 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -14,10 +14,9 @@ struct mnt_namespace {
14 int event; 14 int event;
15}; 15};
16 16
17extern int copy_mnt_ns(int, struct task_struct *); 17extern struct mnt_namespace *copy_mnt_ns(int, struct mnt_namespace *,
18extern void __put_mnt_ns(struct mnt_namespace *ns);
19extern struct mnt_namespace *dup_mnt_ns(struct task_struct *,
20 struct fs_struct *); 18 struct fs_struct *);
19extern void __put_mnt_ns(struct mnt_namespace *ns);
21 20
22static inline void put_mnt_ns(struct mnt_namespace *ns) 21static inline void put_mnt_ns(struct mnt_namespace *ns)
23{ 22{
diff --git a/include/linux/module.h b/include/linux/module.h
index f0b0faf42d5d..6d3dc9c4ff96 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -370,16 +370,14 @@ struct module *module_text_address(unsigned long addr);
370struct module *__module_text_address(unsigned long addr); 370struct module *__module_text_address(unsigned long addr);
371int is_module_address(unsigned long addr); 371int is_module_address(unsigned long addr);
372 372
373/* Returns module and fills in value, defined and namebuf, or NULL if 373/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
374 symnum out of range. */ 374 symnum out of range. */
375struct module *module_get_kallsym(unsigned int symnum, unsigned long *value, 375int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
376 char *type, char *name, size_t namelen); 376 char *name, char *module_name, int *exported);
377 377
378/* Look for this name: can be of form module:name. */ 378/* Look for this name: can be of form module:name. */
379unsigned long module_kallsyms_lookup_name(const char *name); 379unsigned long module_kallsyms_lookup_name(const char *name);
380 380
381int is_exported(const char *name, const struct module *mod);
382
383extern void __module_put_and_exit(struct module *mod, long code) 381extern void __module_put_and_exit(struct module *mod, long code)
384 __attribute__((noreturn)); 382 __attribute__((noreturn));
385#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); 383#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code);
@@ -456,6 +454,8 @@ const char *module_address_lookup(unsigned long addr,
456 unsigned long *symbolsize, 454 unsigned long *symbolsize,
457 unsigned long *offset, 455 unsigned long *offset,
458 char **modname); 456 char **modname);
457int lookup_module_symbol_name(unsigned long addr, char *symname);
458int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
459 459
460/* For extable.c to search modules' exception tables. */ 460/* For extable.c to search modules' exception tables. */
461const struct exception_table_entry *search_module_extables(unsigned long addr); 461const struct exception_table_entry *search_module_extables(unsigned long addr);
@@ -527,20 +527,24 @@ static inline const char *module_address_lookup(unsigned long addr,
527 return NULL; 527 return NULL;
528} 528}
529 529
530static inline struct module *module_get_kallsym(unsigned int symnum, 530static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
531 unsigned long *value,
532 char *type, char *name,
533 size_t namelen)
534{ 531{
535 return NULL; 532 return -ERANGE;
536} 533}
537 534
538static inline unsigned long module_kallsyms_lookup_name(const char *name) 535static inline int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
539{ 536{
540 return 0; 537 return -ERANGE;
541} 538}
542 539
543static inline int is_exported(const char *name, const struct module *mod) 540static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
541 char *type, char *name,
542 char *module_name, int *exported)
543{
544 return -ERANGE;
545}
546
547static inline unsigned long module_kallsyms_lookup_name(const char *name)
544{ 548{
545 return 0; 549 return 0;
546} 550}
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index fa253fa73aa3..0e09c005dda8 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -205,7 +205,8 @@ struct fat_mount_options {
205 numtail:1, /* Does first alias have a numeric '~1' type tail? */ 205 numtail:1, /* Does first alias have a numeric '~1' type tail? */
206 atari:1, /* Use Atari GEMDOS variation of MS-DOS fs */ 206 atari:1, /* Use Atari GEMDOS variation of MS-DOS fs */
207 flush:1, /* write things quickly */ 207 flush:1, /* write things quickly */
208 nocase:1; /* Does this need case conversion? 0=need case conversion*/ 208 nocase:1, /* Does this need case conversion? 0=need case conversion*/
209 usefree:1; /* Use free_clusters for FAT32 */
209}; 210};
210 211
211#define FAT_HASH_BITS 8 212#define FAT_HASH_BITS 8
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index c95d5e642548..52b4378311c8 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -82,7 +82,7 @@ struct nfs_server {
82 struct rpc_clnt * client_acl; /* ACL RPC client handle */ 82 struct rpc_clnt * client_acl; /* ACL RPC client handle */
83 struct nfs_iostats * io_stats; /* I/O statistics */ 83 struct nfs_iostats * io_stats; /* I/O statistics */
84 struct backing_dev_info backing_dev_info; 84 struct backing_dev_info backing_dev_info;
85 atomic_t writeback; /* number of writeback pages */ 85 atomic_long_t writeback; /* number of writeback pages */
86 int flags; /* various flags */ 86 int flags; /* various flags */
87 unsigned int caps; /* server capabilities */ 87 unsigned int caps; /* server capabilities */
88 unsigned int rsize; /* read size */ 88 unsigned int rsize; /* read size */
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 0b9f0dc30d61..189e0dc993ab 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -31,10 +31,11 @@ struct nsproxy {
31}; 31};
32extern struct nsproxy init_nsproxy; 32extern struct nsproxy init_nsproxy;
33 33
34struct nsproxy *dup_namespaces(struct nsproxy *orig);
35int copy_namespaces(int flags, struct task_struct *tsk); 34int copy_namespaces(int flags, struct task_struct *tsk);
36void get_task_namespaces(struct task_struct *tsk); 35void get_task_namespaces(struct task_struct *tsk);
37void free_nsproxy(struct nsproxy *ns); 36void free_nsproxy(struct nsproxy *ns);
37int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
38 struct fs_struct *);
38 39
39static inline void put_nsproxy(struct nsproxy *ns) 40static inline void put_nsproxy(struct nsproxy *ns)
40{ 41{
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b4def5e083ed..8a83537d6978 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -11,6 +11,7 @@
11#include <linux/compiler.h> 11#include <linux/compiler.h>
12#include <asm/uaccess.h> 12#include <asm/uaccess.h>
13#include <linux/gfp.h> 13#include <linux/gfp.h>
14#include <linux/bitops.h>
14 15
15/* 16/*
16 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page 17 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
@@ -19,6 +20,16 @@
19#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ 20#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
20#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ 21#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
21 22
23static inline void mapping_set_error(struct address_space *mapping, int error)
24{
25 if (error) {
26 if (error == -ENOSPC)
27 set_bit(AS_ENOSPC, &mapping->flags);
28 else
29 set_bit(AS_EIO, &mapping->flags);
30 }
31}
32
22static inline gfp_t mapping_gfp_mask(struct address_space * mapping) 33static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
23{ 34{
24 return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; 35 return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
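
mapping_set_error() folds the common "record an asynchronous write error on the mapping" pattern into one helper, mapping -ENOSPC to AS_ENOSPC and everything else to AS_EIO so the failure is reported at fsync() time. A sketch of how the write-completion path of a hypothetical driver or filesystem might use it; only the error bookkeeping is shown.

#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

/* completion of an asynchronous writepage; err is 0 or -errno */
static void mydev_end_page_write(struct page *page, int err)
{
	if (err) {
		SetPageError(page);
		mapping_set_error(page->mapping, err);	/* latches AS_EIO or AS_ENOSPC */
	}
	end_page_writeback(page);
}
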
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 80682aaa8f18..9cdd6943e01b 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -279,6 +279,10 @@ struct parport {
279 int dma; 279 int dma;
280 int muxport; /* which muxport (if any) this is */ 280 int muxport; /* which muxport (if any) this is */
281 int portnum; /* which physical parallel port (not mux) */ 281 int portnum; /* which physical parallel port (not mux) */
282 struct device *dev; /* Physical device associated with IO/DMA.
283 * This may unfortunately be null if the
284 * port has a legacy driver.
285 */
282 286
283 struct parport *physport; 287 struct parport *physport;
284 /* If this is a non-default mux 288 /* If this is a non-default mux
@@ -289,7 +293,7 @@ struct parport {
289 following structure members are 293 following structure members are
290 meaningless: devices, cad, muxsel, 294 meaningless: devices, cad, muxsel,
291 waithead, waittail, flags, pdir, 295 waithead, waittail, flags, pdir,
292 ieee1284, *_lock. 296 dev, ieee1284, *_lock.
293 297
294 If this is a default mux parport, or 298 If this is a default mux parport, or
295 there is no mux involved, this points to 299 there is no mux involved, this points to
@@ -302,7 +306,7 @@ struct parport {
302 306
303 struct pardevice *waithead; 307 struct pardevice *waithead;
304 struct pardevice *waittail; 308 struct pardevice *waittail;
305 309
306 struct list_head list; 310 struct list_head list;
307 unsigned int flags; 311 unsigned int flags;
308 312
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index 1cc0f6b1a49a..ea8c6d84996d 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -38,7 +38,6 @@ struct parport_pc_private {
38 /* buffer suitable for DMA, if DMA enabled */ 38 /* buffer suitable for DMA, if DMA enabled */
39 char *dma_buf; 39 char *dma_buf;
40 dma_addr_t dma_handle; 40 dma_addr_t dma_handle;
41 struct pci_dev *dev;
42 struct list_head list; 41 struct list_head list;
43 struct parport *port; 42 struct parport *port;
44}; 43};
@@ -232,7 +231,7 @@ extern int parport_pc_claim_resources(struct parport *p);
232extern struct parport *parport_pc_probe_port (unsigned long base, 231extern struct parport *parport_pc_probe_port (unsigned long base,
233 unsigned long base_hi, 232 unsigned long base_hi,
234 int irq, int dma, 233 int irq, int dma,
235 struct pci_dev *dev); 234 struct device *dev);
236extern void parport_pc_unregister_port (struct parport *p); 235extern void parport_pc_unregister_port (struct parport *p);
237 236
238#endif 237#endif
diff --git a/include/linux/phantom.h b/include/linux/phantom.h
new file mode 100644
index 000000000000..d3ebbfae6903
--- /dev/null
+++ b/include/linux/phantom.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright (C) 2005-2007 Jiri Slaby <jirislaby@gmail.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __PHANTOM_H
11#define __PHANTOM_H
12
13#include <asm/types.h>
14
15/* PHN_(G/S)ET_REG param */
16struct phm_reg {
17 __u32 reg;
18 __u32 value;
19};
20
21/* PHN_(G/S)ET_REGS param */
22struct phm_regs {
23 __u32 count;
24 __u32 mask;
25 __u32 values[8];
26};
27
28#define PH_IOC_MAGIC 'p'
29#define PHN_GET_REG _IOWR(PH_IOC_MAGIC, 0, struct phm_reg *)
30#define PHN_SET_REG _IOW (PH_IOC_MAGIC, 1, struct phm_reg *)
31#define PHN_GET_REGS _IOWR(PH_IOC_MAGIC, 2, struct phm_regs *)
32#define PHN_SET_REGS _IOW (PH_IOC_MAGIC, 3, struct phm_regs *)
33#define PH_IOC_MAXNR 3
34
35#define PHN_CONTROL 0x6 /* control byte in iaddr space */
36#define PHN_CTL_AMP 0x1 /* switch after torques change */
37#define PHN_CTL_BUT 0x2 /* is button switched */
38#define PHN_CTL_IRQ 0x10 /* is irq enabled */
39
40#define PHN_ZERO_FORCE 2048 /* zero torque on motor */
41
42#endif
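
Since the header above is the complete userspace ABI for the Phantom haptic controller, a user program needs nothing beyond it and the usual ioctl() plumbing. A minimal sketch from the user side; the /dev/phantom node name and the choice of register are assumptions, not defined by this header.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/phantom.h>

int main(void)
{
	struct phm_reg reg = { .reg = PHN_CONTROL, .value = 0 };
	int fd = open("/dev/phantom", O_RDWR);	/* node name is illustrative */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, PHN_GET_REG, &reg) == 0)
		printf("control register 0x%x = 0x%x\n", reg.reg, reg.value);
	else
		perror("PHN_GET_REG");

	close(fd);
	return 0;
}
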
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 2833806d42c6..169c6c24209b 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -29,7 +29,7 @@ static inline void get_pid_ns(struct pid_namespace *ns)
29 kref_get(&ns->kref); 29 kref_get(&ns->kref);
30} 30}
31 31
32extern int copy_pid_ns(int flags, struct task_struct *tsk); 32extern struct pid_namespace *copy_pid_ns(int flags, struct pid_namespace *ns);
33extern void free_pid_ns(struct kref *kref); 33extern void free_pid_ns(struct kref *kref);
34 34
35static inline void put_pid_ns(struct pid_namespace *ns) 35static inline void put_pid_ns(struct pid_namespace *ns)
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index b0952e532ed5..37ca57392add 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -225,4 +225,12 @@ extern unsigned int pmu_power_flags;
225/* Backlight */ 225/* Backlight */
226extern void pmu_backlight_init(void); 226extern void pmu_backlight_init(void);
227 227
228/* some code needs to know if the PMU was suspended for hibernation */
229#ifdef CONFIG_PM
230extern int pmu_sys_suspended;
231#else
232/* if power management is not configured it can't be suspended */
233#define pmu_sys_suspended 0
234#endif
235
228#endif /* __KERNEL__ */ 236#endif /* __KERNEL__ */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 9a5226f0f169..2a1897e6f937 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -177,6 +177,7 @@ static inline void pnp_set_card_drvdata (struct pnp_card_link *pcard, void *data
177 177
178struct pnp_dev { 178struct pnp_dev {
179 struct device dev; /* Driver Model device interface */ 179 struct device dev; /* Driver Model device interface */
180 u64 dma_mask;
180 unsigned char number; /* used as an index, must be unique */ 181 unsigned char number; /* used as an index, must be unique */
181 int status; 182 int status;
182 183
@@ -363,6 +364,7 @@ int pnp_add_device(struct pnp_dev *dev);
363int pnp_device_attach(struct pnp_dev *pnp_dev); 364int pnp_device_attach(struct pnp_dev *pnp_dev);
364void pnp_device_detach(struct pnp_dev *pnp_dev); 365void pnp_device_detach(struct pnp_dev *pnp_dev);
365extern struct list_head pnp_global; 366extern struct list_head pnp_global;
367extern int pnp_platform_devices;
366 368
367/* multidevice card support */ 369/* multidevice card support */
368int pnp_add_card(struct pnp_card *card); 370int pnp_add_card(struct pnp_card *card);
@@ -410,6 +412,7 @@ static inline int pnp_init_device(struct pnp_dev *dev) { return -ENODEV; }
410static inline int pnp_add_device(struct pnp_dev *dev) { return -ENODEV; } 412static inline int pnp_add_device(struct pnp_dev *dev) { return -ENODEV; }
411static inline int pnp_device_attach(struct pnp_dev *pnp_dev) { return -ENODEV; } 413static inline int pnp_device_attach(struct pnp_dev *pnp_dev) { return -ENODEV; }
412static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { ; } 414static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { ; }
415#define pnp_platform_devices 0
413 416
414/* multidevice card support */ 417/* multidevice card support */
415static inline int pnp_add_card(struct pnp_card *card) { return -ENODEV; } 418static inline int pnp_add_card(struct pnp_card *card) { return -ENODEV; }
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 95f518b17684..d93c300a3449 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -15,8 +15,8 @@
15 * Magic nums for obj red zoning. 15 * Magic nums for obj red zoning.
16 * Placed in the first word before and the first word after an obj. 16 * Placed in the first word before and the first word after an obj.
17 */ 17 */
18#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */ 18#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
19#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */ 19#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
20 20
21#define SLUB_RED_INACTIVE 0xbb 21#define SLUB_RED_INACTIVE 0xbb
22#define SLUB_RED_ACTIVE 0xcc 22#define SLUB_RED_ACTIVE 0xcc
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index f4f7a63cae1f..3469f96bc8b2 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -106,6 +106,9 @@ int task_statm(struct mm_struct *, int *, int *, int *, int *);
106char *task_mem(struct mm_struct *, char *); 106char *task_mem(struct mm_struct *, char *);
107void clear_refs_smap(struct mm_struct *mm); 107void clear_refs_smap(struct mm_struct *mm);
108 108
109struct proc_dir_entry *de_get(struct proc_dir_entry *de);
110void de_put(struct proc_dir_entry *de);
111
109extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, 112extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
110 struct proc_dir_entry *parent); 113 struct proc_dir_entry *parent);
111extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); 114extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 77db80a953d6..62439828395e 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -44,8 +44,6 @@
44typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ 44typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
45typedef __u64 qsize_t; /* Type in which we store sizes */ 45typedef __u64 qsize_t; /* Type in which we store sizes */
46 46
47extern spinlock_t dq_data_lock;
48
49/* Size of blocks in which are counted size limits */ 47/* Size of blocks in which are counted size limits */
50#define QUOTABLOCK_BITS 10 48#define QUOTABLOCK_BITS 10
51#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) 49#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
@@ -139,6 +137,8 @@ struct if_dqinfo {
139#include <linux/dqblk_v1.h> 137#include <linux/dqblk_v1.h>
140#include <linux/dqblk_v2.h> 138#include <linux/dqblk_v2.h>
141 139
140extern spinlock_t dq_data_lock;
141
142/* Maximal numbers of writes for quota operation (insert/delete/update) 142/* Maximal numbers of writes for quota operation (insert/delete/update)
143 * (over VFS all formats) */ 143 * (over VFS all formats) */
144#define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC) 144#define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC)
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 90c23f690c0d..5110201a4159 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -37,9 +37,6 @@ extern int dquot_release(struct dquot *dquot);
37extern int dquot_commit_info(struct super_block *sb, int type); 37extern int dquot_commit_info(struct super_block *sb, int type);
38extern int dquot_mark_dquot_dirty(struct dquot *dquot); 38extern int dquot_mark_dquot_dirty(struct dquot *dquot);
39 39
40int remove_inode_dquot_ref(struct inode *inode, int type,
41 struct list_head *tofree_head);
42
43extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path); 40extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path);
44extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name, 41extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
45 int format_id, int type); 42 int format_id, int type);
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 3a28742d86f9..1e5488ede037 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -401,9 +401,10 @@ struct reiserfs_sb_info {
401 int reserved_blocks; /* amount of blocks reserved for further allocations */ 401 int reserved_blocks; /* amount of blocks reserved for further allocations */
402 spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */ 402 spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */
403 struct dentry *priv_root; /* root of /.reiserfs_priv */ 403 struct dentry *priv_root; /* root of /.reiserfs_priv */
404#ifdef CONFIG_REISERFS_FS_XATTR
404 struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */ 405 struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */
405 struct rw_semaphore xattr_dir_sem; 406 struct rw_semaphore xattr_dir_sem;
406 407#endif
407 int j_errno; 408 int j_errno;
408#ifdef CONFIG_QUOTA 409#ifdef CONFIG_QUOTA
409 char *s_qf_names[MAXQUOTAS]; 410 char *s_qf_names[MAXQUOTAS];
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 5e22d4510d11..6d5e4a46781e 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -4,7 +4,7 @@
4 * service. It is used with both the legacy mc146818 and also EFI 4 * service. It is used with both the legacy mc146818 and also EFI
5 * Struct rtc_time and first 12 ioctl by Paul Gortmaker, 1996 - separated out 5 * Struct rtc_time and first 12 ioctl by Paul Gortmaker, 1996 - separated out
6 * from <linux/mc146818rtc.h> to this file for 2.4 kernels. 6 * from <linux/mc146818rtc.h> to this file for 2.4 kernels.
7 * 7 *
8 * Copyright (C) 1999 Hewlett-Packard Co. 8 * Copyright (C) 1999 Hewlett-Packard Co.
9 * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com> 9 * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
10 */ 10 */
@@ -13,7 +13,7 @@
13 13
14/* 14/*
15 * The struct used to pass data via the following ioctl. Similar to the 15 * The struct used to pass data via the following ioctl. Similar to the
16 * struct tm in <time.h>, but it needs to be here so that the kernel 16 * struct tm in <time.h>, but it needs to be here so that the kernel
17 * source is self contained, allowing cross-compiles, etc. etc. 17 * source is self contained, allowing cross-compiles, etc. etc.
18 */ 18 */
19 19
@@ -50,7 +50,7 @@ struct rtc_wkalrm {
50 * pll_value*pll_posmult/pll_clock 50 * pll_value*pll_posmult/pll_clock
51 * -ve pll_value means clock will run slower by 51 * -ve pll_value means clock will run slower by
52 * pll_value*pll_negmult/pll_clock 52 * pll_value*pll_negmult/pll_clock
53 */ 53 */
54 54
55struct rtc_pll_info { 55struct rtc_pll_info {
56 int pll_ctrl; /* placeholder for fancier control */ 56 int pll_ctrl; /* placeholder for fancier control */
@@ -106,7 +106,6 @@ extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year
106extern int rtc_valid_tm(struct rtc_time *tm); 106extern int rtc_valid_tm(struct rtc_time *tm);
107extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); 107extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
108extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); 108extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
109extern void rtc_merge_alarm(struct rtc_time *now, struct rtc_time *alarm);
110 109
111#include <linux/device.h> 110#include <linux/device.h>
112#include <linux/seq_file.h> 111#include <linux/seq_file.h>
@@ -136,7 +135,7 @@ struct rtc_task;
136 135
137struct rtc_device 136struct rtc_device
138{ 137{
139 struct class_device class_dev; 138 struct device dev;
140 struct module *owner; 139 struct module *owner;
141 140
142 int id; 141 int id;
@@ -145,7 +144,6 @@ struct rtc_device
145 const struct rtc_class_ops *ops; 144 const struct rtc_class_ops *ops;
146 struct mutex ops_lock; 145 struct mutex ops_lock;
147 146
148 struct class_device *rtc_dev;
149 struct cdev char_dev; 147 struct cdev char_dev;
150 struct mutex char_lock; 148 struct mutex char_lock;
151 149
@@ -169,35 +167,34 @@ struct rtc_device
169 unsigned int uie_timer_active:1; 167 unsigned int uie_timer_active:1;
170#endif 168#endif
171}; 169};
172#define to_rtc_device(d) container_of(d, struct rtc_device, class_dev) 170#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
173 171
174extern struct rtc_device *rtc_device_register(const char *name, 172extern struct rtc_device *rtc_device_register(const char *name,
175 struct device *dev, 173 struct device *dev,
176 const struct rtc_class_ops *ops, 174 const struct rtc_class_ops *ops,
177 struct module *owner); 175 struct module *owner);
178extern void rtc_device_unregister(struct rtc_device *rdev); 176extern void rtc_device_unregister(struct rtc_device *rtc);
179extern int rtc_interface_register(struct class_interface *intf);
180 177
181extern int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm); 178extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
182extern int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm); 179extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
183extern int rtc_set_mmss(struct class_device *class_dev, unsigned long secs); 180extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
184extern int rtc_read_alarm(struct class_device *class_dev, 181extern int rtc_read_alarm(struct rtc_device *rtc,
185 struct rtc_wkalrm *alrm); 182 struct rtc_wkalrm *alrm);
186extern int rtc_set_alarm(struct class_device *class_dev, 183extern int rtc_set_alarm(struct rtc_device *rtc,
187 struct rtc_wkalrm *alrm); 184 struct rtc_wkalrm *alrm);
188extern void rtc_update_irq(struct class_device *class_dev, 185extern void rtc_update_irq(struct rtc_device *rtc,
189 unsigned long num, unsigned long events); 186 unsigned long num, unsigned long events);
190 187
191extern struct class_device *rtc_class_open(char *name); 188extern struct rtc_device *rtc_class_open(char *name);
192extern void rtc_class_close(struct class_device *class_dev); 189extern void rtc_class_close(struct rtc_device *rtc);
193 190
194extern int rtc_irq_register(struct class_device *class_dev, 191extern int rtc_irq_register(struct rtc_device *rtc,
195 struct rtc_task *task); 192 struct rtc_task *task);
196extern void rtc_irq_unregister(struct class_device *class_dev, 193extern void rtc_irq_unregister(struct rtc_device *rtc,
197 struct rtc_task *task); 194 struct rtc_task *task);
198extern int rtc_irq_set_state(struct class_device *class_dev, 195extern int rtc_irq_set_state(struct rtc_device *rtc,
199 struct rtc_task *task, int enabled); 196 struct rtc_task *task, int enabled);
200extern int rtc_irq_set_freq(struct class_device *class_dev, 197extern int rtc_irq_set_freq(struct rtc_device *rtc,
201 struct rtc_task *task, int freq); 198 struct rtc_task *task, int freq);
202 199
203typedef struct rtc_task { 200typedef struct rtc_task {
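With struct rtc_device now embedding a plain struct device, the class interface takes and returns rtc_device pointers directly instead of class_device. A minimal sketch of a consumer reading the time through the reworked calls, assuming an RTC registered as "rtc0":

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rtc.h>

static int example_show_rtc(void)
{
	struct rtc_device *rtc;
	struct rtc_time tm;
	int err;

	rtc = rtc_class_open("rtc0");		/* was: struct class_device * */
	if (!rtc)
		return -ENODEV;

	err = rtc_read_time(rtc, &tm);		/* helpers now take the rtc_device */
	if (!err)
		printk(KERN_INFO "rtc: %04d-%02d-%02d %02d:%02d:%02d\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);

	rtc_class_close(rtc);
	return err;
}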
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a1707583de49..3d95c480f58d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -194,6 +194,14 @@ extern void sched_init_smp(void);
194extern void init_idle(struct task_struct *idle, int cpu); 194extern void init_idle(struct task_struct *idle, int cpu);
195 195
196extern cpumask_t nohz_cpu_mask; 196extern cpumask_t nohz_cpu_mask;
197#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
198extern int select_nohz_load_balancer(int cpu);
199#else
200static inline int select_nohz_load_balancer(int cpu)
201{
202 return 0;
203}
204#endif
197 205
198/* 206/*
199 * Only dump TASK_* tasks. (0 for all tasks) 207 * Only dump TASK_* tasks. (0 for all tasks)
@@ -226,6 +234,7 @@ extern void scheduler_tick(void);
226extern void softlockup_tick(void); 234extern void softlockup_tick(void);
227extern void spawn_softlockup_task(void); 235extern void spawn_softlockup_task(void);
228extern void touch_softlockup_watchdog(void); 236extern void touch_softlockup_watchdog(void);
237extern void touch_all_softlockup_watchdogs(void);
229#else 238#else
230static inline void softlockup_tick(void) 239static inline void softlockup_tick(void)
231{ 240{
@@ -236,6 +245,9 @@ static inline void spawn_softlockup_task(void)
236static inline void touch_softlockup_watchdog(void) 245static inline void touch_softlockup_watchdog(void)
237{ 246{
238} 247}
248static inline void touch_all_softlockup_watchdogs(void)
249{
250}
239#endif 251#endif
240 252
241 253
@@ -668,8 +680,14 @@ struct sched_group {
668 /* 680 /*
669 * CPU power of this group, SCHED_LOAD_SCALE being max power for a 681 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
670 * single CPU. This is read only (except for setup, hotplug CPU). 682 * single CPU. This is read only (except for setup, hotplug CPU).
683 * Note : Never change cpu_power without recompute its reciprocal
684 */
685 unsigned int __cpu_power;
686 /*
687 * reciprocal value of cpu_power to avoid expensive divides
688 * (see include/linux/reciprocal_div.h)
671 */ 689 */
672 unsigned long cpu_power; 690 u32 reciprocal_cpu_power;
673}; 691};
674 692
675struct sched_domain { 693struct sched_domain {
@@ -801,8 +819,8 @@ struct task_struct {
801 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 819 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
802 struct thread_info *thread_info; 820 struct thread_info *thread_info;
803 atomic_t usage; 821 atomic_t usage;
804 unsigned long flags; /* per process flags, defined below */ 822 unsigned int flags; /* per process flags, defined below */
805 unsigned long ptrace; 823 unsigned int ptrace;
806 824
807 int lock_depth; /* BKL lock depth */ 825 int lock_depth; /* BKL lock depth */
808 826
@@ -825,7 +843,7 @@ struct task_struct {
825 unsigned long long sched_time; /* sched_clock time spent running */ 843 unsigned long long sched_time; /* sched_clock time spent running */
826 enum sleep_type sleep_type; 844 enum sleep_type sleep_type;
827 845
828 unsigned long policy; 846 unsigned int policy;
829 cpumask_t cpus_allowed; 847 cpumask_t cpus_allowed;
830 unsigned int time_slice, first_time_slice; 848 unsigned int time_slice, first_time_slice;
831 849
@@ -845,11 +863,11 @@ struct task_struct {
845 863
846/* task state */ 864/* task state */
847 struct linux_binfmt *binfmt; 865 struct linux_binfmt *binfmt;
848 long exit_state; 866 int exit_state;
849 int exit_code, exit_signal; 867 int exit_code, exit_signal;
850 int pdeath_signal; /* The signal sent when the parent dies */ 868 int pdeath_signal; /* The signal sent when the parent dies */
851 /* ??? */ 869 /* ??? */
852 unsigned long personality; 870 unsigned int personality;
853 unsigned did_exec:1; 871 unsigned did_exec:1;
854 pid_t pid; 872 pid_t pid;
855 pid_t tgid; 873 pid_t tgid;
@@ -881,7 +899,7 @@ struct task_struct {
881 int __user *set_child_tid; /* CLONE_CHILD_SETTID */ 899 int __user *set_child_tid; /* CLONE_CHILD_SETTID */
882 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ 900 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
883 901
884 unsigned long rt_priority; 902 unsigned int rt_priority;
885 cputime_t utime, stime; 903 cputime_t utime, stime;
886 unsigned long nvcsw, nivcsw; /* context switch counts */ 904 unsigned long nvcsw, nivcsw; /* context switch counts */
887 struct timespec start_time; 905 struct timespec start_time;
@@ -1641,10 +1659,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
1641extern long sched_setaffinity(pid_t pid, cpumask_t new_mask); 1659extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
1642extern long sched_getaffinity(pid_t pid, cpumask_t *mask); 1660extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
1643 1661
1644#include <linux/sysdev.h>
1645extern int sched_mc_power_savings, sched_smt_power_savings; 1662extern int sched_mc_power_savings, sched_smt_power_savings;
1646extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
1647extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
1648 1663
1649extern void normalize_rt_tasks(void); 1664extern void normalize_rt_tasks(void);
1650 1665
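The comment added to struct sched_group explains the split into __cpu_power and reciprocal_cpu_power: dividing by a group's power in the balancing fast path becomes a multiply-and-shift via <linux/reciprocal_div.h>. An illustrative sketch of the pattern; the helper names here are hypothetical, not the scheduler's own:

#include <linux/sched.h>
#include <linux/reciprocal_div.h>

/* Keep both fields in sync, as the new comment requires. */
static void example_set_group_power(struct sched_group *sg, unsigned int power)
{
	sg->__cpu_power = power;
	sg->reciprocal_cpu_power = reciprocal_value(power);
}

/* Equivalent to load / sg->__cpu_power, without a hardware divide. */
static u32 example_div_by_power(u32 load, struct sched_group *sg)
{
	return reciprocal_divide(load, sg->reciprocal_cpu_power);
}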
diff --git a/include/linux/spi/Kbuild b/include/linux/spi/Kbuild
new file mode 100644
index 000000000000..d375a082986e
--- /dev/null
+++ b/include/linux/spi/Kbuild
@@ -0,0 +1 @@
header-y += spidev.h
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 4f0f8c2e58a5..b6bedc3ee95c 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -32,11 +32,12 @@ extern struct bus_type spi_bus_type;
32 * @max_speed_hz: Maximum clock rate to be used with this chip 32 * @max_speed_hz: Maximum clock rate to be used with this chip
33 * (on this board); may be changed by the device's driver. 33 * (on this board); may be changed by the device's driver.
34 * The spi_transfer.speed_hz can override this for each transfer. 34 * The spi_transfer.speed_hz can override this for each transfer.
35 * @chip-select: Chipselect, distinguishing chips handled by "master". 35 * @chip_select: Chipselect, distinguishing chips handled by @master.
36 * @mode: The spi mode defines how data is clocked out and in. 36 * @mode: The spi mode defines how data is clocked out and in.
37 * This may be changed by the device's driver. 37 * This may be changed by the device's driver.
38 * The "active low" default for chipselect mode can be overridden, 38 * The "active low" default for chipselect mode can be overridden
39 * as can the "MSB first" default for each word in a transfer. 39 * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
40 * each word in a transfer (by specifying SPI_LSB_FIRST).
40 * @bits_per_word: Data transfers involve one or more words; word sizes 41 * @bits_per_word: Data transfers involve one or more words; word sizes
41 * like eight or 12 bits are common. In-memory wordsizes are 42 * like eight or 12 bits are common. In-memory wordsizes are
42 * powers of two bytes (e.g. 20 bit samples use 32 bits). 43 * powers of two bytes (e.g. 20 bit samples use 32 bits).
@@ -48,14 +49,18 @@ extern struct bus_type spi_bus_type;
48 * @controller_state: Controller's runtime state 49 * @controller_state: Controller's runtime state
49 * @controller_data: Board-specific definitions for controller, such as 50 * @controller_data: Board-specific definitions for controller, such as
50 * FIFO initialization parameters; from board_info.controller_data 51 * FIFO initialization parameters; from board_info.controller_data
52 * @modalias: Name of the driver to use with this device, or an alias
53 * for that name. This appears in the sysfs "modalias" attribute
54 * for driver coldplugging, and in uevents used for hotplugging
51 * 55 *
52 * An spi_device is used to interchange data between an SPI slave 56 * A @spi_device is used to interchange data between an SPI slave
53 * (usually a discrete chip) and CPU memory. 57 * (usually a discrete chip) and CPU memory.
54 * 58 *
55 * In "dev", the platform_data is used to hold information about this 59 * In @dev, the platform_data is used to hold information about this
56 * device that's meaningful to the device's protocol driver, but not 60 * device that's meaningful to the device's protocol driver, but not
57 * to its controller. One example might be an identifier for a chip 61 * to its controller. One example might be an identifier for a chip
58 * variant with slightly different functionality. 62 * variant with slightly different functionality; another might be
63 * information about how this particular board wires the chip's pins.
59 */ 64 */
60struct spi_device { 65struct spi_device {
61 struct device dev; 66 struct device dev;
@@ -77,13 +82,15 @@ struct spi_device {
77 void *controller_data; 82 void *controller_data;
78 const char *modalias; 83 const char *modalias;
79 84
80 // likely need more hooks for more protocol options affecting how 85 /*
81 // the controller talks to each chip, like: 86 * likely need more hooks for more protocol options affecting how
82 // - memory packing (12 bit samples into low bits, others zeroed) 87 * the controller talks to each chip, like:
83 // - priority 88 * - memory packing (12 bit samples into low bits, others zeroed)
84 // - drop chipselect after each word 89 * - priority
85 // - chipselect delays 90 * - drop chipselect after each word
86 // - ... 91 * - chipselect delays
92 * - ...
93 */
87}; 94};
88 95
89static inline struct spi_device *to_spi_device(struct device *dev) 96static inline struct spi_device *to_spi_device(struct device *dev)
@@ -146,6 +153,11 @@ static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
146 153
147extern int spi_register_driver(struct spi_driver *sdrv); 154extern int spi_register_driver(struct spi_driver *sdrv);
148 155
156/**
157 * spi_unregister_driver - reverse effect of spi_register_driver
158 * @sdrv: the driver to unregister
159 * Context: can sleep
160 */
149static inline void spi_unregister_driver(struct spi_driver *sdrv) 161static inline void spi_unregister_driver(struct spi_driver *sdrv)
150{ 162{
151 if (sdrv) 163 if (sdrv)
@@ -165,18 +177,20 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
165 * @setup: updates the device mode and clocking records used by a 177 * @setup: updates the device mode and clocking records used by a
166 * device's SPI controller; protocol code may call this. This 178 * device's SPI controller; protocol code may call this. This
167 * must fail if an unrecognized or unsupported mode is requested. 179 * must fail if an unrecognized or unsupported mode is requested.
180 * It's always safe to call this unless transfers are pending on
181 * the device whose settings are being modified.
168 * @transfer: adds a message to the controller's transfer queue. 182 * @transfer: adds a message to the controller's transfer queue.
169 * @cleanup: frees controller-specific state 183 * @cleanup: frees controller-specific state
170 * 184 *
171 * Each SPI master controller can communicate with one or more spi_device 185 * Each SPI master controller can communicate with one or more @spi_device
172 * children. These make a small bus, sharing MOSI, MISO and SCK signals 186 * children. These make a small bus, sharing MOSI, MISO and SCK signals
173 * but not chip select signals. Each device may be configured to use a 187 * but not chip select signals. Each device may be configured to use a
174 * different clock rate, since those shared signals are ignored unless 188 * different clock rate, since those shared signals are ignored unless
175 * the chip is selected. 189 * the chip is selected.
176 * 190 *
177 * The driver for an SPI controller manages access to those devices through 191 * The driver for an SPI controller manages access to those devices through
178 * a queue of spi_message transactions, copyin data between CPU memory and 192 * a queue of spi_message transactions, copying data between CPU memory and
179 * an SPI slave device). For each such message it queues, it calls the 193 * an SPI slave device. For each such message it queues, it calls the
180 * message's completion function when the transaction completes. 194 * message's completion function when the transaction completes.
181 */ 195 */
182struct spi_master { 196struct spi_master {
@@ -280,27 +294,27 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
280 * struct spi_transfer - a read/write buffer pair 294 * struct spi_transfer - a read/write buffer pair
281 * @tx_buf: data to be written (dma-safe memory), or NULL 295 * @tx_buf: data to be written (dma-safe memory), or NULL
282 * @rx_buf: data to be read (dma-safe memory), or NULL 296 * @rx_buf: data to be read (dma-safe memory), or NULL
283 * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped 297 * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
284 * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped 298 * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
285 * @len: size of rx and tx buffers (in bytes) 299 * @len: size of rx and tx buffers (in bytes)
286 * @speed_hz: Select a speed other then the device default for this 300 * @speed_hz: Select a speed other then the device default for this
287 * transfer. If 0 the default (from spi_device) is used. 301 * transfer. If 0 the default (from @spi_device) is used.
288 * @bits_per_word: select a bits_per_word other then the device default 302 * @bits_per_word: select a bits_per_word other then the device default
289 * for this transfer. If 0 the default (from spi_device) is used. 303 * for this transfer. If 0 the default (from @spi_device) is used.
290 * @cs_change: affects chipselect after this transfer completes 304 * @cs_change: affects chipselect after this transfer completes
291 * @delay_usecs: microseconds to delay after this transfer before 305 * @delay_usecs: microseconds to delay after this transfer before
292 * (optionally) changing the chipselect status, then starting 306 * (optionally) changing the chipselect status, then starting
293 * the next transfer or completing this spi_message. 307 * the next transfer or completing this @spi_message.
294 * @transfer_list: transfers are sequenced through spi_message.transfers 308 * @transfer_list: transfers are sequenced through @spi_message.transfers
295 * 309 *
296 * SPI transfers always write the same number of bytes as they read. 310 * SPI transfers always write the same number of bytes as they read.
297 * Protocol drivers should always provide rx_buf and/or tx_buf. 311 * Protocol drivers should always provide @rx_buf and/or @tx_buf.
298 * In some cases, they may also want to provide DMA addresses for 312 * In some cases, they may also want to provide DMA addresses for
299 * the data being transferred; that may reduce overhead, when the 313 * the data being transferred; that may reduce overhead, when the
300 * underlying driver uses dma. 314 * underlying driver uses dma.
301 * 315 *
302 * If the transmit buffer is null, zeroes will be shifted out 316 * If the transmit buffer is null, zeroes will be shifted out
303 * while filling rx_buf. If the receive buffer is null, the data 317 * while filling @rx_buf. If the receive buffer is null, the data
304 * shifted in will be discarded. Only "len" bytes shift out (or in). 318 * shifted in will be discarded. Only "len" bytes shift out (or in).
305 * It's an error to try to shift out a partial word. (For example, by 319 * It's an error to try to shift out a partial word. (For example, by
306 * shifting out three bytes with word size of sixteen or twenty bits; 320 * shifting out three bytes with word size of sixteen or twenty bits;
@@ -309,7 +323,7 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
309 * In-memory data values are always in native CPU byte order, translated 323 * In-memory data values are always in native CPU byte order, translated
310 * from the wire byte order (big-endian except with SPI_LSB_FIRST). So 324 * from the wire byte order (big-endian except with SPI_LSB_FIRST). So
311 * for example when bits_per_word is sixteen, buffers are 2N bytes long 325 * for example when bits_per_word is sixteen, buffers are 2N bytes long
312 * and hold N sixteen bit words in CPU byte order. 326 * (@len = 2N) and hold N sixteen bit words in CPU byte order.
313 * 327 *
314 * When the word size of the SPI transfer is not a power-of-two multiple 328 * When the word size of the SPI transfer is not a power-of-two multiple
315 * of eight bits, those in-memory words include extra bits. In-memory 329 * of eight bits, those in-memory words include extra bits. In-memory
@@ -318,7 +332,7 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
318 * 332 *
319 * All SPI transfers start with the relevant chipselect active. Normally 333 * All SPI transfers start with the relevant chipselect active. Normally
320 * it stays selected until after the last transfer in a message. Drivers 334 * it stays selected until after the last transfer in a message. Drivers
321 * can affect the chipselect signal using cs_change: 335 * can affect the chipselect signal using cs_change.
322 * 336 *
323 * (i) If the transfer isn't the last one in the message, this flag is 337 * (i) If the transfer isn't the last one in the message, this flag is
324 * used to make the chipselect briefly go inactive in the middle of the 338 * used to make the chipselect briefly go inactive in the middle of the
@@ -372,7 +386,7 @@ struct spi_transfer {
372 * @queue: for use by whichever driver currently owns the message 386 * @queue: for use by whichever driver currently owns the message
373 * @state: for use by whichever driver currently owns the message 387 * @state: for use by whichever driver currently owns the message
374 * 388 *
375 * An spi_message is used to execute an atomic sequence of data transfers, 389 * A @spi_message is used to execute an atomic sequence of data transfers,
376 * each represented by a struct spi_transfer. The sequence is "atomic" 390 * each represented by a struct spi_transfer. The sequence is "atomic"
377 * in the sense that no other spi_message may use that SPI bus until that 391 * in the sense that no other spi_message may use that SPI bus until that
378 * sequence completes. On some systems, many such sequences can execute as 392 * sequence completes. On some systems, many such sequences can execute as
@@ -464,8 +478,9 @@ static inline void spi_message_free(struct spi_message *m)
464} 478}
465 479
466/** 480/**
467 * spi_setup -- setup SPI mode and clock rate 481 * spi_setup - setup SPI mode and clock rate
468 * @spi: the device whose settings are being modified 482 * @spi: the device whose settings are being modified
483 * Context: can sleep
469 * 484 *
470 * SPI protocol drivers may need to update the transfer mode if the 485 * SPI protocol drivers may need to update the transfer mode if the
471 * device doesn't work with the mode 0 default. They may likewise need 486 * device doesn't work with the mode 0 default. They may likewise need
@@ -474,7 +489,7 @@ static inline void spi_message_free(struct spi_message *m)
474 * The changes take effect the next time the device is selected and data 489 * The changes take effect the next time the device is selected and data
475 * is transferred to or from it. 490 * is transferred to or from it.
476 * 491 *
477 * Note that this call wil fail if the protocol driver specifies an option 492 * Note that this call will fail if the protocol driver specifies an option
478 * that the underlying controller or its driver does not support. For 493 * that the underlying controller or its driver does not support. For
479 * example, not all hardware supports wire transfers using nine bit words, 494 * example, not all hardware supports wire transfers using nine bit words,
480 * LSB-first wire encoding, or active-high chipselects. 495 * LSB-first wire encoding, or active-high chipselects.
@@ -487,9 +502,10 @@ spi_setup(struct spi_device *spi)
487 502
488 503
489/** 504/**
490 * spi_async -- asynchronous SPI transfer 505 * spi_async - asynchronous SPI transfer
491 * @spi: device with which data will be exchanged 506 * @spi: device with which data will be exchanged
492 * @message: describes the data transfers, including completion callback 507 * @message: describes the data transfers, including completion callback
508 * Context: any (irqs may be blocked, etc)
493 * 509 *
494 * This call may be used in_irq and other contexts which can't sleep, 510 * This call may be used in_irq and other contexts which can't sleep,
495 * as well as from task contexts which can sleep. 511 * as well as from task contexts which can sleep.
@@ -535,6 +551,7 @@ extern int spi_sync(struct spi_device *spi, struct spi_message *message);
535 * @spi: device to which data will be written 551 * @spi: device to which data will be written
536 * @buf: data buffer 552 * @buf: data buffer
537 * @len: data buffer size 553 * @len: data buffer size
554 * Context: can sleep
538 * 555 *
539 * This writes the buffer and returns zero or a negative error code. 556 * This writes the buffer and returns zero or a negative error code.
540 * Callable only from contexts that can sleep. 557 * Callable only from contexts that can sleep.
@@ -558,8 +575,9 @@ spi_write(struct spi_device *spi, const u8 *buf, size_t len)
558 * @spi: device from which data will be read 575 * @spi: device from which data will be read
559 * @buf: data buffer 576 * @buf: data buffer
560 * @len: data buffer size 577 * @len: data buffer size
578 * Context: can sleep
561 * 579 *
562 * This writes the buffer and returns zero or a negative error code. 580 * This reads the buffer and returns zero or a negative error code.
563 * Callable only from contexts that can sleep. 581 * Callable only from contexts that can sleep.
564 */ 582 */
565static inline int 583static inline int
@@ -585,6 +603,7 @@ extern int spi_write_then_read(struct spi_device *spi,
585 * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read 603 * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
586 * @spi: device with which data will be exchanged 604 * @spi: device with which data will be exchanged
587 * @cmd: command to be written before data is read back 605 * @cmd: command to be written before data is read back
606 * Context: can sleep
588 * 607 *
589 * This returns the (unsigned) eight bit number returned by the 608 * This returns the (unsigned) eight bit number returned by the
590 * device, or else a negative error code. Callable only from 609 * device, or else a negative error code. Callable only from
@@ -605,6 +624,7 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
605 * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read 624 * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read
606 * @spi: device with which data will be exchanged 625 * @spi: device with which data will be exchanged
607 * @cmd: command to be written before data is read back 626 * @cmd: command to be written before data is read back
627 * Context: can sleep
608 * 628 *
609 * This returns the (unsigned) sixteen bit number returned by the 629 * This returns the (unsigned) sixteen bit number returned by the
610 * device, or else a negative error code. Callable only from 630 * device, or else a negative error code. Callable only from
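The kerneldoc above stresses that spi_setup() and the spi_w8r8()/spi_w8r16() convenience wrappers are synchronous and may only be used from contexts that can sleep. A minimal protocol-driver sketch, assuming a hypothetical chip whose ID register is read back after an 0x0f command byte:

#include <linux/spi/spi.h>

#define EXAMPLE_CMD_READ_ID	0x0f	/* hypothetical command byte */

static int example_probe(struct spi_device *spi)
{
	ssize_t id;
	int err;

	spi->mode = SPI_MODE_0;		/* adjust mode and word size first ... */
	spi->bits_per_word = 8;
	err = spi_setup(spi);		/* ... then ask the controller to apply them */
	if (err)
		return err;

	id = spi_w8r8(spi, EXAMPLE_CMD_READ_ID);	/* write 8 bits, read 8 back */
	if (id < 0)
		return id;

	dev_info(&spi->dev, "chip id 0x%02x\n", (unsigned int)id);
	return 0;
}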
diff --git a/include/linux/spi/spidev.h b/include/linux/spi/spidev.h
new file mode 100644
index 000000000000..7d700be57490
--- /dev/null
+++ b/include/linux/spi/spidev.h
@@ -0,0 +1,124 @@
1/*
2 * include/linux/spi/spidev.h
3 *
4 * Copyright (C) 2006 SWAPP
5 * Andrea Paterniani <a.paterniani@swapp-eng.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef SPIDEV_H
23#define SPIDEV_H
24
25
26/* User space versions of kernel symbols for SPI clocking modes,
27 * matching <linux/spi/spi.h>
28 */
29
30#define SPI_CPHA 0x01
31#define SPI_CPOL 0x02
32
33#define SPI_MODE_0 (0|0)
34#define SPI_MODE_1 (0|SPI_CPHA)
35#define SPI_MODE_2 (SPI_CPOL|0)
36#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
37
38
39/*---------------------------------------------------------------------------*/
40
41/* IOCTL commands */
42
43#define SPI_IOC_MAGIC 'k'
44
45/**
46 * struct spi_ioc_transfer - describes a single SPI transfer
47 * @tx_buf: Holds pointer to userspace buffer with transmit data, or null.
48 * If no data is provided, zeroes are shifted out.
49 * @rx_buf: Holds pointer to userspace buffer for receive data, or null.
50 * @len: Length of tx and rx buffers, in bytes.
51 * @speed_hz: Temporary override of the device's bitrate.
52 * @bits_per_word: Temporary override of the device's wordsize.
53 * @delay_usecs: If nonzero, how long to delay after the last bit transfer
54 * before optionally deselecting the device before the next transfer.
55 * @cs_change: True to deselect device before starting the next transfer.
56 *
57 * This structure is mapped directly to the kernel spi_transfer structure;
58 * the fields have the same meanings, except of course that the pointers
59 * are in a different address space (and may be of different sizes in some
60 * cases, such as 32-bit i386 userspace over a 64-bit x86_64 kernel).
61 * Zero-initialize the structure, including currently unused fields, to
 62 * accommodate potential future updates.
63 *
64 * SPI_IOC_MESSAGE gives userspace the equivalent of kernel spi_sync().
65 * Pass it an array of related transfers, they'll execute together.
66 * Each transfer may be half duplex (either direction) or full duplex.
67 *
68 * struct spi_ioc_transfer mesg[4];
69 * ...
70 * status = ioctl(fd, SPI_IOC_MESSAGE(4), mesg);
71 *
72 * So for example one transfer might send a nine bit command (right aligned
73 * in a 16-bit word), the next could read a block of 8-bit data before
74 * terminating that command by temporarily deselecting the chip; the next
75 * could send a different nine bit command (re-selecting the chip), and the
76 * last transfer might write some register values.
77 */
78struct spi_ioc_transfer {
79 __u64 tx_buf;
80 __u64 rx_buf;
81
82 __u32 len;
83 __u32 speed_hz;
84
85 __u16 delay_usecs;
86 __u8 bits_per_word;
87 __u8 cs_change;
88 __u32 pad;
89
90 /* If the contents of 'struct spi_ioc_transfer' ever change
91 * incompatibly, then the ioctl number (currently 0) must change;
92 * ioctls with constant size fields get a bit more in the way of
93 * error checking than ones (like this) where that field varies.
94 *
95 * NOTE: struct layout is the same in 64bit and 32bit userspace.
96 */
97};
98
99/* not all platforms use <asm-generic/ioctl.h> or _IOC_TYPECHECK() ... */
100#define SPI_MSGSIZE(N) \
101 ((((N)*(sizeof (struct spi_ioc_transfer))) < (1 << _IOC_SIZEBITS)) \
102 ? ((N)*(sizeof (struct spi_ioc_transfer))) : 0)
103#define SPI_IOC_MESSAGE(N) _IOW(SPI_IOC_MAGIC, 0, char[SPI_MSGSIZE(N)])
104
105
106/* Read / Write of SPI mode (SPI_MODE_0..SPI_MODE_3) */
107#define SPI_IOC_RD_MODE _IOR(SPI_IOC_MAGIC, 1, __u8)
108#define SPI_IOC_WR_MODE _IOW(SPI_IOC_MAGIC, 1, __u8)
109
110/* Read / Write SPI bit justification */
111#define SPI_IOC_RD_LSB_FIRST _IOR(SPI_IOC_MAGIC, 2, __u8)
112#define SPI_IOC_WR_LSB_FIRST _IOW(SPI_IOC_MAGIC, 2, __u8)
113
114/* Read / Write SPI device word length (1..N) */
115#define SPI_IOC_RD_BITS_PER_WORD _IOR(SPI_IOC_MAGIC, 3, __u8)
116#define SPI_IOC_WR_BITS_PER_WORD _IOW(SPI_IOC_MAGIC, 3, __u8)
117
118/* Read / Write SPI device default max speed hz */
119#define SPI_IOC_RD_MAX_SPEED_HZ _IOR(SPI_IOC_MAGIC, 4, __u32)
120#define SPI_IOC_WR_MAX_SPEED_HZ _IOW(SPI_IOC_MAGIC, 4, __u32)
121
122
123
124#endif /* SPIDEV_H */
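The spi_ioc_transfer documentation above describes SPI_IOC_MESSAGE(N) as the userspace counterpart of kernel spi_sync(): an array of related transfers executed as one message. A userspace sketch along those lines; the device path argument and the 0x9f command byte are illustrative only:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

static int example_read_id(const char *dev_path)
{
	uint8_t cmd = 0x9f;			/* hypothetical "read id" opcode */
	uint8_t id[3];
	struct spi_ioc_transfer xfer[2];
	int fd, ret;

	fd = open(dev_path, O_RDWR);
	if (fd < 0)
		return -1;

	memset(xfer, 0, sizeof(xfer));		/* zero unused and pad fields */
	xfer[0].tx_buf = (uintptr_t)&cmd;	/* transfer 0: send the command */
	xfer[0].len = 1;
	xfer[1].rx_buf = (uintptr_t)id;		/* transfer 1: read the reply */
	xfer[1].len = sizeof(id);

	/* two related transfers; the chip stays selected between them */
	ret = ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
	close(fd);
	return ret;
}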
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index dc5fb69e4de9..210549ba4ef4 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -85,6 +85,12 @@ typedef struct {
85 RW_DEP_MAP_INIT(lockname) } 85 RW_DEP_MAP_INIT(lockname) }
86#endif 86#endif
87 87
88/*
89 * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
90 * are hence deprecated.
91 * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
92 * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
93 */
88#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) 94#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
89#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) 95#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
90 96
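The new comment spells out why the old static initializers are deprecated: every lock initialized with SPIN_LOCK_UNLOCKED ends up in the same lockdep class, defeating state tracking. A short sketch of the preferred forms, with hypothetical lock names:

#include <linux/spinlock.h>

/* Preferred for file-scope locks: each definition gets its own lockdep class. */
static DEFINE_SPINLOCK(example_lock);
static DEFINE_RWLOCK(example_rwlock);

/* For locks embedded in structures, initialize at runtime instead. */
struct example {
	spinlock_t lock;
};

static void example_init(struct example *e)
{
	spin_lock_init(&e->lock);
}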
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 50e2b01e517c..1d2b084c0185 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -6,15 +6,13 @@ struct stack_trace {
6 unsigned int nr_entries, max_entries; 6 unsigned int nr_entries, max_entries;
7 unsigned long *entries; 7 unsigned long *entries;
8 int skip; /* input argument: How many entries to skip */ 8 int skip; /* input argument: How many entries to skip */
9 int all_contexts; /* input argument: if true do than one stack */
10}; 9};
11 10
12extern void save_stack_trace(struct stack_trace *trace, 11extern void save_stack_trace(struct stack_trace *trace);
13 struct task_struct *task);
14 12
15extern void print_stack_trace(struct stack_trace *trace, int spaces); 13extern void print_stack_trace(struct stack_trace *trace, int spaces);
16#else 14#else
17# define save_stack_trace(trace, task) do { } while (0) 15# define save_stack_trace(trace) do { } while (0)
18# define print_stack_trace(trace) do { } while (0) 16# define print_stack_trace(trace) do { } while (0)
19#endif 17#endif
20 18
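With the task argument and the all_contexts field gone, save_stack_trace() records only the current context. A minimal sketch of capturing and printing the current stack under the new signature; the depth chosen is arbitrary:

#include <linux/stacktrace.h>

#define EXAMPLE_DEPTH	16

static void example_dump_stack(void)
{
	unsigned long entries[EXAMPLE_DEPTH];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= EXAMPLE_DEPTH,
		.entries	= entries,
		.skip		= 1,		/* skip this helper itself */
	};

	save_stack_trace(&trace);		/* single-argument form, current task */
	print_stack_trace(&trace, 0);
}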
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 679ef0d70b6b..611c398dab72 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -53,6 +53,9 @@
53#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH) 53#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH)
54#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH) 54#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
55 55
56#define UTIME_NOW ((1l << 30) - 1l)
57#define UTIME_OMIT ((1l << 30) - 2l)
58
56#include <linux/types.h> 59#include <linux/types.h>
57#include <linux/time.h> 60#include <linux/time.h>
58 61
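UTIME_NOW and UTIME_OMIT are special tv_nsec values for the nanosecond-resolution file-time interface: the first asks the kernel to substitute the current time, the second leaves that timestamp untouched. A userspace sketch, assuming a libc that already wraps the utimensat() syscall these constants are meant for:

#include <fcntl.h>
#include <sys/stat.h>
#include <time.h>

/* Bump mtime to "now" while leaving atime exactly as it was. */
static int example_touch_mtime(const char *path)
{
	struct timespec times[2];

	times[0].tv_nsec = UTIME_OMIT;	/* [0] = atime: do not change */
	times[1].tv_nsec = UTIME_NOW;	/* [1] = mtime: set to current time */
	/* tv_sec is ignored when tv_nsec holds one of the special values */

	return utimensat(AT_FDCWD, path, times, 0);
}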
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 96868be9c211..9d2aa1a12aa0 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -1,7 +1,7 @@
1#ifndef _LINUX_SWSUSP_H 1#ifndef _LINUX_SWSUSP_H
2#define _LINUX_SWSUSP_H 2#define _LINUX_SWSUSP_H
3 3
4#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) 4#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
5#include <asm/suspend.h> 5#include <asm/suspend.h>
6#endif 6#endif
7#include <linux/swap.h> 7#include <linux/swap.h>
diff --git a/include/linux/svga.h b/include/linux/svga.h
index eadb981bb37c..e1cc552e04fe 100644
--- a/include/linux/svga.h
+++ b/include/linux/svga.h
@@ -112,6 +112,7 @@ void svga_tilecopy(struct fb_info *info, struct fb_tilearea *area);
112void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect); 112void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect);
113void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit); 113void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit);
114void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor); 114void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor);
115int svga_get_tilemax(struct fb_info *info);
115 116
116int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node); 117int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node);
117int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node); 118int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node);
diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h
index 389ccf858d37..e699ab279c2c 100644
--- a/include/linux/sysdev.h
+++ b/include/linux/sysdev.h
@@ -22,6 +22,7 @@
22#define _SYSDEV_H_ 22#define _SYSDEV_H_
23 23
24#include <linux/kobject.h> 24#include <linux/kobject.h>
25#include <linux/module.h>
25#include <linux/pm.h> 26#include <linux/pm.h>
26 27
27 28
diff --git a/include/linux/time.h b/include/linux/time.h
index 8ea8dea713c7..dda9be685ab6 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -109,7 +109,7 @@ extern void do_gettimeofday(struct timeval *tv);
109extern int do_settimeofday(struct timespec *tv); 109extern int do_settimeofday(struct timespec *tv);
110extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); 110extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
111#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) 111#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
112extern long do_utimes(int dfd, char __user *filename, struct timeval *times); 112extern long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags);
113struct itimerval; 113struct itimerval;
114extern int do_setitimer(int which, struct itimerval *value, 114extern int do_setitimer(int which, struct itimerval *value,
115 struct itimerval *ovalue); 115 struct itimerval *ovalue);
@@ -119,6 +119,7 @@ extern void getnstimeofday(struct timespec *tv);
119 119
120extern struct timespec timespec_trunc(struct timespec t, unsigned gran); 120extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
121extern int timekeeping_is_continuous(void); 121extern int timekeeping_is_continuous(void);
122extern void update_wall_time(void);
122 123
123/** 124/**
124 * timespec_to_ns - Convert timespec to nanoseconds 125 * timespec_to_ns - Convert timespec to nanoseconds
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 719113b652dd..e0c5c16c992f 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -37,6 +37,7 @@ extern struct tvec_t_base_s boot_tvec_bases;
37 TIMER_INITIALIZER(_function, _expires, _data) 37 TIMER_INITIALIZER(_function, _expires, _data)
38 38
39void fastcall init_timer(struct timer_list * timer); 39void fastcall init_timer(struct timer_list * timer);
40void fastcall init_timer_deferrable(struct timer_list *timer);
40 41
41static inline void setup_timer(struct timer_list * timer, 42static inline void setup_timer(struct timer_list * timer,
42 void (*function)(unsigned long), 43 void (*function)(unsigned long),
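init_timer_deferrable() marks a timer whose expiry may slip while a CPU is idle, so a tickless CPU need not wake up just to run it. A sketch of a low-urgency periodic timer using the new initializer; the callback and the ten-second interval are illustrative:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list example_timer;

static void example_timer_fn(unsigned long data)
{
	/* ... non-urgent housekeeping ... */
	mod_timer(&example_timer, jiffies + 10 * HZ);	/* re-arm */
}

static void example_timer_start(void)
{
	init_timer_deferrable(&example_timer);	/* may be deferred on an idle CPU */
	example_timer.function = example_timer_fn;
	example_timer.data = 0;
	mod_timer(&example_timer, jiffies + 10 * HZ);
}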
diff --git a/include/linux/tty.h b/include/linux/tty.h
index dee72b9a20fb..bb4576085203 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -313,6 +313,7 @@ extern int tty_hung_up_p(struct file * filp);
313extern void do_SAK(struct tty_struct *tty); 313extern void do_SAK(struct tty_struct *tty);
314extern void __do_SAK(struct tty_struct *tty); 314extern void __do_SAK(struct tty_struct *tty);
315extern void disassociate_ctty(int priv); 315extern void disassociate_ctty(int priv);
316extern void no_tty(void);
316extern void tty_flip_buffer_push(struct tty_struct *tty); 317extern void tty_flip_buffer_push(struct tty_struct *tty);
317extern speed_t tty_get_baud_rate(struct tty_struct *tty); 318extern speed_t tty_get_baud_rate(struct tty_struct *tty);
318extern speed_t tty_termios_baud_rate(struct ktermios *termios); 319extern speed_t tty_termios_baud_rate(struct ktermios *termios);
@@ -333,7 +334,6 @@ extern int tty_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
333 334
334extern dev_t tty_devnum(struct tty_struct *tty); 335extern dev_t tty_devnum(struct tty_struct *tty);
335extern void proc_clear_tty(struct task_struct *p); 336extern void proc_clear_tty(struct task_struct *p);
336extern void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
337extern struct tty_struct *get_current_tty(void); 337extern struct tty_struct *get_current_tty(void);
338 338
339extern struct mutex tty_mutex; 339extern struct mutex tty_mutex;
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 1fd61eeed664..a6c1e8eed226 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -32,6 +32,8 @@
32 * - first public version 32 * - first public version
33 */ 33 */
34 34
35#include <linux/input.h>
36
35#define UINPUT_VERSION 3 37#define UINPUT_VERSION 3
36 38
37#ifdef __KERNEL__ 39#ifdef __KERNEL__
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index e10267d402c5..f8d3b326e93a 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -49,9 +49,7 @@ static inline void get_uts_ns(struct uts_namespace *ns)
49} 49}
50 50
51#ifdef CONFIG_UTS_NS 51#ifdef CONFIG_UTS_NS
52extern int unshare_utsname(unsigned long unshare_flags, 52extern struct uts_namespace *copy_utsname(int flags, struct uts_namespace *ns);
53 struct uts_namespace **new_uts);
54extern int copy_utsname(int flags, struct task_struct *tsk);
55extern void free_uts_ns(struct kref *kref); 53extern void free_uts_ns(struct kref *kref);
56 54
57static inline void put_uts_ns(struct uts_namespace *ns) 55static inline void put_uts_ns(struct uts_namespace *ns)
@@ -59,21 +57,12 @@ static inline void put_uts_ns(struct uts_namespace *ns)
59 kref_put(&ns->kref, free_uts_ns); 57 kref_put(&ns->kref, free_uts_ns);
60} 58}
61#else 59#else
62static inline int unshare_utsname(unsigned long unshare_flags, 60static inline struct uts_namespace *copy_utsname(int flags,
63 struct uts_namespace **new_uts) 61 struct uts_namespace *ns)
64{ 62{
65 if (unshare_flags & CLONE_NEWUTS) 63 return ns;
66 return -EINVAL;
67
68 return 0;
69} 64}
70 65
71static inline int copy_utsname(int flags, struct task_struct *tsk)
72{
73 if (flags & CLONE_NEWUTS)
74 return -EINVAL;
75 return 0;
76}
77static inline void put_uts_ns(struct uts_namespace *ns) 66static inline void put_uts_ns(struct uts_namespace *ns)
78{ 67{
79} 68}
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 924e502905d4..4b7ee83787c1 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -53,6 +53,7 @@ extern void vunmap(void *addr);
53 53
54extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 54extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
55 unsigned long pgoff); 55 unsigned long pgoff);
56void vmalloc_sync_all(void);
56 57
57/* 58/*
58 * Lowlevel-APIs (not for driver use!) 59 * Lowlevel-APIs (not for driver use!)
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index e0db669998f3..d961635d0e61 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -9,6 +9,7 @@
9#include <linux/vt.h> 9#include <linux/vt.h>
10#include <linux/kd.h> 10#include <linux/kd.h>
11#include <linux/tty.h> 11#include <linux/tty.h>
12#include <linux/mutex.h>
12#include <linux/console_struct.h> 13#include <linux/console_struct.h>
13#include <linux/mm.h> 14#include <linux/mm.h>
14 15
@@ -82,7 +83,7 @@ void reset_vc(struct vc_data *vc);
82 83
83#define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 256 : PAGE_SIZE) 84#define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 256 : PAGE_SIZE)
84extern char con_buf[CON_BUF_SIZE]; 85extern char con_buf[CON_BUF_SIZE];
85extern struct semaphore con_buf_sem; 86extern struct mutex con_buf_mtx;
86extern char vt_dont_switch; 87extern char vt_dont_switch;
87 88
88struct vt_spawn_console { 89struct vt_spawn_console {
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b8abfc74d038..f16ba1e0687d 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -121,6 +121,12 @@ struct execute_work {
121 init_timer(&(_work)->timer); \ 121 init_timer(&(_work)->timer); \
122 } while (0) 122 } while (0)
123 123
124#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \
125 do { \
126 INIT_WORK(&(_work)->work, (_func)); \
127 init_timer_deferrable(&(_work)->timer); \
128 } while (0)
129
124/** 130/**
125 * work_pending - Find out whether a work item is currently pending 131 * work_pending - Find out whether a work item is currently pending
126 * @work: The work item in question 132 * @work: The work item in question
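INIT_DELAYED_WORK_DEFERRABLE() builds on init_timer_deferrable() above: the delayed work's timer may slip past its expiry on an idle CPU instead of forcing a wakeup. A sketch with a hypothetical work function and a ten-second delay:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct delayed_work example_work;

static void example_work_fn(struct work_struct *work)
{
	/* ... background cleanup that can tolerate some extra delay ... */
}

static void example_start(void)
{
	/* deferrable: an idle, tickless CPU need not wake just for this work */
	INIT_DELAYED_WORK_DEFERRABLE(&example_work, example_work_fn);
	schedule_delayed_work(&example_work, 10 * HZ);
}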
diff --git a/include/math-emu/extended.h b/include/math-emu/extended.h
deleted file mode 100644
index 84770fceb53e..000000000000
--- a/include/math-emu/extended.h
+++ /dev/null
@@ -1,396 +0,0 @@
1/* Software floating-point emulation.
2 Definitions for IEEE Extended Precision.
3 Copyright (C) 1999 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5 Contributed by Jakub Jelinek (jj@ultra.linux.cz).
6
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Library General Public License as
9 published by the Free Software Foundation; either version 2 of the
10 License, or (at your option) any later version.
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Library General Public License for more details.
16
17 You should have received a copy of the GNU Library General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If
19 not, write to the Free Software Foundation, Inc.,
20 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
21
22
23#ifndef __MATH_EMU_EXTENDED_H__
24#define __MATH_EMU_EXTENDED_H__
25
26#if _FP_W_TYPE_SIZE < 32
27#error "Here's a nickel, kid. Go buy yourself a real computer."
28#endif
29
30#if _FP_W_TYPE_SIZE < 64
31#define _FP_FRACTBITS_E (4*_FP_W_TYPE_SIZE)
32#else
33#define _FP_FRACTBITS_E (2*_FP_W_TYPE_SIZE)
34#endif
35
36#define _FP_FRACBITS_E 64
37#define _FP_FRACXBITS_E (_FP_FRACTBITS_E - _FP_FRACBITS_E)
38#define _FP_WFRACBITS_E (_FP_WORKBITS + _FP_FRACBITS_E)
39#define _FP_WFRACXBITS_E (_FP_FRACTBITS_E - _FP_WFRACBITS_E)
40#define _FP_EXPBITS_E 15
41#define _FP_EXPBIAS_E 16383
42#define _FP_EXPMAX_E 32767
43
44#define _FP_QNANBIT_E \
45 ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-2) % _FP_W_TYPE_SIZE)
46#define _FP_IMPLBIT_E \
47 ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-1) % _FP_W_TYPE_SIZE)
48#define _FP_OVERFLOW_E \
49 ((_FP_W_TYPE)1 << (_FP_WFRACBITS_E % _FP_W_TYPE_SIZE))
50
51#if _FP_W_TYPE_SIZE < 64
52
53union _FP_UNION_E
54{
55 long double flt;
56 struct
57 {
58#if __BYTE_ORDER == __BIG_ENDIAN
59 unsigned long pad1 : _FP_W_TYPE_SIZE;
60 unsigned long pad2 : (_FP_W_TYPE_SIZE - 1 - _FP_EXPBITS_E);
61 unsigned long sign : 1;
62 unsigned long exp : _FP_EXPBITS_E;
63 unsigned long frac1 : _FP_W_TYPE_SIZE;
64 unsigned long frac0 : _FP_W_TYPE_SIZE;
65#else
66 unsigned long frac0 : _FP_W_TYPE_SIZE;
67 unsigned long frac1 : _FP_W_TYPE_SIZE;
68 unsigned exp : _FP_EXPBITS_E;
69 unsigned sign : 1;
70#endif /* not bigendian */
71 } bits __attribute__((packed));
72};
73
74
75#define FP_DECL_E(X) _FP_DECL(4,X)
76
77#define FP_UNPACK_RAW_E(X, val) \
78 do { \
79 union _FP_UNION_E _flo; _flo.flt = (val); \
80 \
81 X##_f[2] = 0; X##_f[3] = 0; \
82 X##_f[0] = _flo.bits.frac0; \
83 X##_f[1] = _flo.bits.frac1; \
84 X##_e = _flo.bits.exp; \
85 X##_s = _flo.bits.sign; \
86 if (!X##_e && (X##_f[1] || X##_f[0]) \
87 && !(X##_f[1] & _FP_IMPLBIT_E)) \
88 { \
89 X##_e++; \
90 FP_SET_EXCEPTION(FP_EX_DENORM); \
91 } \
92 } while (0)
93
94#define FP_UNPACK_RAW_EP(X, val) \
95 do { \
96 union _FP_UNION_E *_flo = \
97 (union _FP_UNION_E *)(val); \
98 \
99 X##_f[2] = 0; X##_f[3] = 0; \
100 X##_f[0] = _flo->bits.frac0; \
101 X##_f[1] = _flo->bits.frac1; \
102 X##_e = _flo->bits.exp; \
103 X##_s = _flo->bits.sign; \
104 if (!X##_e && (X##_f[1] || X##_f[0]) \
105 && !(X##_f[1] & _FP_IMPLBIT_E)) \
106 { \
107 X##_e++; \
108 FP_SET_EXCEPTION(FP_EX_DENORM); \
109 } \
110 } while (0)
111
112#define FP_PACK_RAW_E(val, X) \
113 do { \
114 union _FP_UNION_E _flo; \
115 \
116 if (X##_e) X##_f[1] |= _FP_IMPLBIT_E; \
117 else X##_f[1] &= ~(_FP_IMPLBIT_E); \
118 _flo.bits.frac0 = X##_f[0]; \
119 _flo.bits.frac1 = X##_f[1]; \
120 _flo.bits.exp = X##_e; \
121 _flo.bits.sign = X##_s; \
122 \
123 (val) = _flo.flt; \
124 } while (0)
125
126#define FP_PACK_RAW_EP(val, X) \
127 do { \
128 if (!FP_INHIBIT_RESULTS) \
129 { \
130 union _FP_UNION_E *_flo = \
131 (union _FP_UNION_E *)(val); \
132 \
133 if (X##_e) X##_f[1] |= _FP_IMPLBIT_E; \
134 else X##_f[1] &= ~(_FP_IMPLBIT_E); \
135 _flo->bits.frac0 = X##_f[0]; \
136 _flo->bits.frac1 = X##_f[1]; \
137 _flo->bits.exp = X##_e; \
138 _flo->bits.sign = X##_s; \
139 } \
140 } while (0)
141
142#define FP_UNPACK_E(X,val) \
143 do { \
144 FP_UNPACK_RAW_E(X,val); \
145 _FP_UNPACK_CANONICAL(E,4,X); \
146 } while (0)
147
148#define FP_UNPACK_EP(X,val) \
149 do { \
150 FP_UNPACK_RAW_2_P(X,val); \
151 _FP_UNPACK_CANONICAL(E,4,X); \
152 } while (0)
153
154#define FP_PACK_E(val,X) \
155 do { \
156 _FP_PACK_CANONICAL(E,4,X); \
157 FP_PACK_RAW_E(val,X); \
158 } while (0)
159
160#define FP_PACK_EP(val,X) \
161 do { \
162 _FP_PACK_CANONICAL(E,4,X); \
163 FP_PACK_RAW_EP(val,X); \
164 } while (0)
165
166#define FP_ISSIGNAN_E(X) _FP_ISSIGNAN(E,4,X)
167#define FP_NEG_E(R,X) _FP_NEG(E,4,R,X)
168#define FP_ADD_E(R,X,Y) _FP_ADD(E,4,R,X,Y)
169#define FP_SUB_E(R,X,Y) _FP_SUB(E,4,R,X,Y)
170#define FP_MUL_E(R,X,Y) _FP_MUL(E,4,R,X,Y)
171#define FP_DIV_E(R,X,Y) _FP_DIV(E,4,R,X,Y)
172#define FP_SQRT_E(R,X) _FP_SQRT(E,4,R,X)
173
174/*
175 * Square root algorithms:
176 * We have just one right now, maybe Newton approximation
177 * should be added for those machines where division is fast.
178 * This has special _E version because standard _4 square
179 * root would not work (it has to start normally with the
180 * second word and not the first), but as we have to do it
181 * anyway, we optimize it by doing most of the calculations
182 * in two UWtype registers instead of four.
183 */
184
185#define _FP_SQRT_MEAT_E(R, S, T, X, q) \
186 do { \
187 q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
188 _FP_FRAC_SRL_4(X, (_FP_WORKBITS)); \
189 while (q) \
190 { \
191 T##_f[1] = S##_f[1] + q; \
192 if (T##_f[1] <= X##_f[1]) \
193 { \
194 S##_f[1] = T##_f[1] + q; \
195 X##_f[1] -= T##_f[1]; \
196 R##_f[1] += q; \
197 } \
198 _FP_FRAC_SLL_2(X, 1); \
199 q >>= 1; \
200 } \
201 q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
202 while (q) \
203 { \
204 T##_f[0] = S##_f[0] + q; \
205 T##_f[1] = S##_f[1]; \
206 if (T##_f[1] < X##_f[1] || \
207 (T##_f[1] == X##_f[1] && \
208 T##_f[0] <= X##_f[0])) \
209 { \
210 S##_f[0] = T##_f[0] + q; \
211 S##_f[1] += (T##_f[0] > S##_f[0]); \
212 _FP_FRAC_DEC_2(X, T); \
213 R##_f[0] += q; \
214 } \
215 _FP_FRAC_SLL_2(X, 1); \
216 q >>= 1; \
217 } \
218 _FP_FRAC_SLL_4(R, (_FP_WORKBITS)); \
219 if (X##_f[0] | X##_f[1]) \
220 { \
221 if (S##_f[1] < X##_f[1] || \
222 (S##_f[1] == X##_f[1] && \
223 S##_f[0] < X##_f[0])) \
224 R##_f[0] |= _FP_WORK_ROUND; \
225 R##_f[0] |= _FP_WORK_STICKY; \
226 } \
227 } while (0)

#define FP_CMP_E(r,X,Y,un) _FP_CMP(E,4,r,X,Y,un)
#define FP_CMP_EQ_E(r,X,Y) _FP_CMP_EQ(E,4,r,X,Y)

#define FP_TO_INT_E(r,X,rsz,rsg) _FP_TO_INT(E,4,r,X,rsz,rsg)
#define FP_TO_INT_ROUND_E(r,X,rsz,rsg) _FP_TO_INT_ROUND(E,4,r,X,rsz,rsg)
#define FP_FROM_INT_E(X,r,rs,rt) _FP_FROM_INT(E,4,X,r,rs,rt)

#define _FP_FRAC_HIGH_E(X) (X##_f[2])
#define _FP_FRAC_HIGH_RAW_E(X) (X##_f[1])

#else /* not _FP_W_TYPE_SIZE < 64 */
union _FP_UNION_E
{
  long double flt /* __attribute__((mode(TF))) */ ;
  struct {
#if __BYTE_ORDER == __BIG_ENDIAN
    unsigned long pad : (_FP_W_TYPE_SIZE - 1 - _FP_EXPBITS_E);
    unsigned sign : 1;
    unsigned exp : _FP_EXPBITS_E;
    unsigned long frac : _FP_W_TYPE_SIZE;
#else
    unsigned long frac : _FP_W_TYPE_SIZE;
    unsigned exp : _FP_EXPBITS_E;
    unsigned sign : 1;
#endif
  } bits;
};

#define FP_DECL_E(X) _FP_DECL(2,X)

#define FP_UNPACK_RAW_E(X, val) \
  do { \
    union _FP_UNION_E _flo; _flo.flt = (val); \
 \
    X##_f0 = _flo.bits.frac; \
    X##_f1 = 0; \
    X##_e = _flo.bits.exp; \
    X##_s = _flo.bits.sign; \
    if (!X##_e && X##_f0 && !(X##_f0 & _FP_IMPLBIT_E)) \
      { \
        X##_e++; \
        FP_SET_EXCEPTION(FP_EX_DENORM); \
      } \
  } while (0)

#define FP_UNPACK_RAW_EP(X, val) \
  do { \
    union _FP_UNION_E *_flo = \
      (union _FP_UNION_E *)(val); \
 \
    X##_f0 = _flo->bits.frac; \
    X##_f1 = 0; \
    X##_e = _flo->bits.exp; \
    X##_s = _flo->bits.sign; \
    if (!X##_e && X##_f0 && !(X##_f0 & _FP_IMPLBIT_E)) \
      { \
        X##_e++; \
        FP_SET_EXCEPTION(FP_EX_DENORM); \
      } \
  } while (0)

#define FP_PACK_RAW_E(val, X) \
  do { \
    union _FP_UNION_E _flo; \
 \
    if (X##_e) X##_f0 |= _FP_IMPLBIT_E; \
    else X##_f0 &= ~(_FP_IMPLBIT_E); \
    _flo.bits.frac = X##_f0; \
    _flo.bits.exp = X##_e; \
    _flo.bits.sign = X##_s; \
 \
    (val) = _flo.flt; \
  } while (0)

#define FP_PACK_RAW_EP(fs, val, X) \
  do { \
    if (!FP_INHIBIT_RESULTS) \
      { \
        union _FP_UNION_E *_flo = \
          (union _FP_UNION_E *)(val); \
 \
        if (X##_e) X##_f0 |= _FP_IMPLBIT_E; \
        else X##_f0 &= ~(_FP_IMPLBIT_E); \
        _flo->bits.frac = X##_f0; \
        _flo->bits.exp = X##_e; \
        _flo->bits.sign = X##_s; \
      } \
  } while (0)


#define FP_UNPACK_E(X,val) \
  do { \
    FP_UNPACK_RAW_E(X,val); \
    _FP_UNPACK_CANONICAL(E,2,X); \
  } while (0)

#define FP_UNPACK_EP(X,val) \
  do { \
    FP_UNPACK_RAW_EP(X,val); \
    _FP_UNPACK_CANONICAL(E,2,X); \
  } while (0)

#define FP_PACK_E(val,X) \
  do { \
    _FP_PACK_CANONICAL(E,2,X); \
    FP_PACK_RAW_E(val,X); \
  } while (0)

#define FP_PACK_EP(val,X) \
  do { \
    _FP_PACK_CANONICAL(E,2,X); \
    FP_PACK_RAW_EP(val,X); \
  } while (0)

#define FP_ISSIGNAN_E(X) _FP_ISSIGNAN(E,2,X)
#define FP_NEG_E(R,X) _FP_NEG(E,2,R,X)
#define FP_ADD_E(R,X,Y) _FP_ADD(E,2,R,X,Y)
#define FP_SUB_E(R,X,Y) _FP_SUB(E,2,R,X,Y)
#define FP_MUL_E(R,X,Y) _FP_MUL(E,2,R,X,Y)
#define FP_DIV_E(R,X,Y) _FP_DIV(E,2,R,X,Y)
#define FP_SQRT_E(R,X) _FP_SQRT(E,2,R,X)

/*
 * Square root algorithms:
 * We have just one right now, maybe Newton approximation
 * should be added for those machines where division is fast.
 * We optimize it by doing most of the calculations
 * in one UWtype register instead of two, although we don't
 * have to.
 */
#define _FP_SQRT_MEAT_E(R, S, T, X, q) \
  do { \
    q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
    _FP_FRAC_SRL_2(X, (_FP_WORKBITS)); \
    while (q) \
      { \
        T##_f0 = S##_f0 + q; \
        if (T##_f0 <= X##_f0) \
          { \
            S##_f0 = T##_f0 + q; \
            X##_f0 -= T##_f0; \
            R##_f0 += q; \
          } \
        _FP_FRAC_SLL_1(X, 1); \
        q >>= 1; \
      } \
    _FP_FRAC_SLL_2(R, (_FP_WORKBITS)); \
    if (X##_f0) \
      { \
        if (S##_f0 < X##_f0) \
          R##_f0 |= _FP_WORK_ROUND; \
        R##_f0 |= _FP_WORK_STICKY; \
      } \
  } while (0)
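
Purely as a sketch of how an architecture's emulation code might string these entry points together (it assumes the FP_DECL_EX and FP_HANDLE_EXCEPTIONS helpers provided by the companion math-emu/soft-fp.h; the function name is invented for illustration):

static long double emulated_sqrtl(long double a)
{
	FP_DECL_EX;		/* exception accumulator, assumed from math-emu/soft-fp.h */
	FP_DECL_E(A);
	FP_DECL_E(R);
	long double r;

	FP_UNPACK_E(A, a);	/* raw bits -> canonical unpacked form */
	FP_SQRT_E(R, A);	/* ends up in _FP_SQRT_MEAT_E above */
	FP_PACK_E(r, R);	/* canonical form -> raw bits */
	FP_HANDLE_EXCEPTIONS;	/* deliver any accumulated IEEE exceptions */
	return r;
}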

#define FP_CMP_E(r,X,Y,un) _FP_CMP(E,2,r,X,Y,un)
#define FP_CMP_EQ_E(r,X,Y) _FP_CMP_EQ(E,2,r,X,Y)

#define FP_TO_INT_E(r,X,rsz,rsg) _FP_TO_INT(E,2,r,X,rsz,rsg)
#define FP_TO_INT_ROUND_E(r,X,rsz,rsg) _FP_TO_INT_ROUND(E,2,r,X,rsz,rsg)
#define FP_FROM_INT_E(X,r,rs,rt) _FP_FROM_INT(E,2,X,r,rs,rt)

#define _FP_FRAC_HIGH_E(X) (X##_f1)
#define _FP_FRAC_HIGH_RAW_E(X) (X##_f0)

#endif /* not _FP_W_TYPE_SIZE < 64 */

#endif /* __MATH_EMU_EXTENDED_H__ */
diff --git a/include/net/sock.h b/include/net/sock.h
index 25c37e34bfdc..689b886038da 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1361,15 +1361,6 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
 extern __u32 sysctl_wmem_max;
 extern __u32 sysctl_rmem_max;
 
-#ifdef CONFIG_NET
-int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
-#else
-static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	return -ENODEV;
-}
-#endif
-
 extern void sk_init(void);
 
 #ifdef CONFIG_SYSCTL
diff --git a/include/video/mach64.h b/include/video/mach64.h
index 09a7f4a7289f..a8332e528ec1 100644
--- a/include/video/mach64.h
+++ b/include/video/mach64.h
@@ -885,6 +885,7 @@
 #define SDRAM 4
 #define SGRAM 5
 #define WRAM 6
+#define SDRAM32 6
 
 #define DAC_INTERNAL 0x00
 #define DAC_IBMRGB514 0x01
diff --git a/include/video/permedia2.h b/include/video/permedia2.h
index b95d36289336..9e49c9571ec3 100644
--- a/include/video/permedia2.h
+++ b/include/video/permedia2.h
@@ -154,6 +154,10 @@
 #define PM2VI_RD_CLK1_PRESCALE 0x204
 #define PM2VI_RD_CLK1_FEEDBACK 0x205
 #define PM2VI_RD_CLK1_POSTSCALE 0x206
+#define PM2VI_RD_MCLK_CONTROL 0x20D
+#define PM2VI_RD_MCLK_PRESCALE 0x20E
+#define PM2VI_RD_MCLK_FEEDBACK 0x20F
+#define PM2VI_RD_MCLK_POSTSCALE 0x210
 #define PM2VI_RD_CURSOR_PALETTE 0x303
 #define PM2VI_RD_CURSOR_PATTERN 0x400
 
diff --git a/include/video/tgafb.h b/include/video/tgafb.h
index be2b3e94e251..03d0dbe293a8 100644
--- a/include/video/tgafb.h
+++ b/include/video/tgafb.h
@@ -39,6 +39,7 @@
 #define TGA_RASTEROP_REG 0x0034
 #define TGA_PIXELSHIFT_REG 0x0038
 #define TGA_DEEP_REG 0x0050
+#define TGA_START_REG 0x0054
 #define TGA_PIXELMASK_REG 0x005c
 #define TGA_CURSOR_BASE_REG 0x0060
 #define TGA_HORIZ_REG 0x0064
@@ -140,7 +141,7 @@
 
 
 /*
- * Useful defines for managing the BT463 on the 24-plane TGAs
+ * Useful defines for managing the BT463 on the 24-plane TGAs/SFB+s
  */
 
 #define BT463_ADDR_LO 0x0
@@ -168,12 +169,35 @@
 #define BT463_WINDOW_TYPE_BASE 0x0300
 
 /*
+ * Useful defines for managing the BT459 on the 8-plane SFB+s
+ */
+
+#define BT459_ADDR_LO 0x0
+#define BT459_ADDR_HI 0x1
+#define BT459_REG_ACC 0x2
+#define BT459_PALETTE 0x3
+
+#define BT459_CUR_CLR_1 0x0181
+#define BT459_CUR_CLR_2 0x0182
+#define BT459_CUR_CLR_3 0x0183
+
+#define BT459_CMD_REG_0 0x0201
+#define BT459_CMD_REG_1 0x0202
+#define BT459_CMD_REG_2 0x0203
+
+#define BT459_READ_MASK 0x0204
+
+#define BT459_BLINK_MASK 0x0206
+
+#define BT459_CUR_CMD_REG 0x0300
+
+/*
  * The framebuffer driver private data.
  */
 
 struct tga_par {
-	/* PCI device. */
-	struct pci_dev *pdev;
+	/* PCI/TC device. */
+	struct device *dev;
 
 	/* Device dependent information. */
 	void __iomem *tga_mem_base;
@@ -235,4 +259,21 @@ BT463_WRITE(struct tga_par *par, u32 m, u16 a, u8 v)
 	TGA_WRITE_REG(par, m << 10 | v, TGA_RAMDAC_REG);
 }
 
+static inline void
+BT459_LOAD_ADDR(struct tga_par *par, u16 a)
+{
+	TGA_WRITE_REG(par, BT459_ADDR_LO << 2, TGA_RAMDAC_SETUP_REG);
+	TGA_WRITE_REG(par, a & 0xff, TGA_RAMDAC_REG);
+	TGA_WRITE_REG(par, BT459_ADDR_HI << 2, TGA_RAMDAC_SETUP_REG);
+	TGA_WRITE_REG(par, a >> 8, TGA_RAMDAC_REG);
+}
+
+static inline void
+BT459_WRITE(struct tga_par *par, u32 m, u16 a, u8 v)
+{
+	BT459_LOAD_ADDR(par, a);
+	TGA_WRITE_REG(par, m << 2, TGA_RAMDAC_SETUP_REG);
+	TGA_WRITE_REG(par, v, TGA_RAMDAC_REG);
+}
+
 #endif /* TGAFB_H */