path: root/include
author    Jeff Garzik <jgarzik@pobox.com>    2005-11-15 04:51:40 -0500
committer Jeff Garzik <jgarzik@pobox.com>    2005-11-15 04:51:40 -0500
commit    f055408957750cf759162c364c2a4dfe19765844 (patch)
tree      aecc0a13c582d310902e6fa95d8853c627828fcc /include
parent    83cbd33aae2c3cd14f80a8abf733033a57aa4923 (diff)
parent    4060994c3e337b40e0f6fa8ce2cc178e021baf3d (diff)
Merge branch 'master'
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/atomic.h | 12
-rw-r--r--  include/asm-arm/arch-pxa/akita.h | 2
-rw-r--r--  include/asm-arm/atomic.h | 42
-rw-r--r--  include/asm-arm26/atomic.h | 29
-rw-r--r--  include/asm-cris/atomic.h | 27
-rw-r--r--  include/asm-frv/atomic.h | 12
-rw-r--r--  include/asm-generic/sections.h | 1
-rw-r--r--  include/asm-h8300/atomic.h | 27
-rw-r--r--  include/asm-i386/atomic.h | 21
-rw-r--r--  include/asm-i386/mach-default/mach_reboot.h | 2
-rw-r--r--  include/asm-i386/processor.h | 4
-rw-r--r--  include/asm-i386/system.h | 42
-rw-r--r--  include/asm-ia64/atomic.h | 12
-rw-r--r--  include/asm-m68k/atomic.h | 12
-rw-r--r--  include/asm-m68k/processor.h | 14
-rw-r--r--  include/asm-m68k/thread_info.h | 91
-rw-r--r--  include/asm-m68knommu/atomic.h | 12
-rw-r--r--  include/asm-mips/atomic.h | 21
-rw-r--r--  include/asm-parisc/atomic.h | 20
-rw-r--r--  include/asm-powerpc/atomic.h | 27
-rw-r--r--  include/asm-powerpc/btext.h (renamed from include/asm-ppc64/btext.h) | 0
-rw-r--r--  include/asm-powerpc/delay.h (renamed from include/asm-ppc64/delay.h) | 19
-rw-r--r--  include/asm-powerpc/eeh.h (renamed from include/asm-ppc64/eeh.h) | 0
-rw-r--r--  include/asm-powerpc/floppy.h (renamed from include/asm-ppc64/floppy.h) | 25
-rw-r--r--  include/asm-powerpc/hvconsole.h (renamed from include/asm-ppc64/hvconsole.h) | 0
-rw-r--r--  include/asm-powerpc/hvcserver.h (renamed from include/asm-ppc64/hvcserver.h) | 0
-rw-r--r--  include/asm-powerpc/kexec.h | 1
-rw-r--r--  include/asm-powerpc/machdep.h | 4
-rw-r--r--  include/asm-powerpc/nvram.h (renamed from include/asm-ppc64/nvram.h) | 17
-rw-r--r--  include/asm-powerpc/page.h | 179
-rw-r--r--  include/asm-powerpc/page_32.h | 40
-rw-r--r--  include/asm-powerpc/page_64.h | 174
-rw-r--r--  include/asm-powerpc/serial.h (renamed from include/asm-ppc64/serial.h) | 19
-rw-r--r--  include/asm-powerpc/vdso_datapage.h | 2
-rw-r--r--  include/asm-ppc/immap_85xx.h | 2
-rw-r--r--  include/asm-ppc/ipic.h | 2
-rw-r--r--  include/asm-ppc/mpc83xx.h | 2
-rw-r--r--  include/asm-ppc/mpc85xx.h | 2
-rw-r--r--  include/asm-ppc/nvram.h | 73
-rw-r--r--  include/asm-ppc/ppc_sys.h | 2
-rw-r--r--  include/asm-ppc64/page.h | 328
-rw-r--r--  include/asm-ppc64/prom.h | 220
-rw-r--r--  include/asm-ppc64/system.h | 310
-rw-r--r--  include/asm-s390/atomic.h | 12
-rw-r--r--  include/asm-sh/atomic.h | 29
-rw-r--r--  include/asm-sh64/atomic.h | 29
-rw-r--r--  include/asm-sparc/atomic.h | 4
-rw-r--r--  include/asm-sparc64/atomic.h | 12
-rw-r--r--  include/asm-v850/atomic.h | 30
-rw-r--r--  include/asm-x86_64/apic.h | 2
-rw-r--r--  include/asm-x86_64/atomic.h | 21
-rw-r--r--  include/asm-x86_64/cache.h | 2
-rw-r--r--  include/asm-x86_64/desc.h | 16
-rw-r--r--  include/asm-x86_64/dma.h | 11
-rw-r--r--  include/asm-x86_64/hpet.h | 35
-rw-r--r--  include/asm-x86_64/hw_irq.h | 2
-rw-r--r--  include/asm-x86_64/ia32.h | 5
-rw-r--r--  include/asm-x86_64/mce.h | 10
-rw-r--r--  include/asm-x86_64/mmzone.h | 9
-rw-r--r--  include/asm-x86_64/mpspec.h | 7
-rw-r--r--  include/asm-x86_64/msr.h | 2
-rw-r--r--  include/asm-x86_64/numa.h | 2
-rw-r--r--  include/asm-x86_64/page.h | 2
-rw-r--r--  include/asm-x86_64/pda.h | 1
-rw-r--r--  include/asm-x86_64/pgtable.h | 5
-rw-r--r--  include/asm-x86_64/processor.h | 4
-rw-r--r--  include/asm-x86_64/proto.h | 4
-rw-r--r--  include/asm-x86_64/rwsem.h | 283
-rw-r--r--  include/asm-x86_64/smp.h | 3
-rw-r--r--  include/asm-x86_64/spinlock.h | 12
-rw-r--r--  include/asm-x86_64/topology.h | 2
-rw-r--r--  include/asm-x86_64/unistd.h | 3
-rw-r--r--  include/asm-xtensa/atomic.h | 20
-rw-r--r--  include/linux/acct.h | 2
-rw-r--r--  include/linux/aio.h | 13
-rw-r--r--  include/linux/bitops.h | 10
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/cm4000_cs.h | 66
-rw-r--r--  include/linux/compat_ioctl.h | 8
-rw-r--r--  include/linux/file.h | 10
-rw-r--r--  include/linux/font.h | 2
-rw-r--r--  include/linux/fsl_devices.h | 2
-rw-r--r--  include/linux/genhd.h | 4
-rw-r--r--  include/linux/gfp.h | 16
-rw-r--r--  include/linux/hardirq.h | 2
-rw-r--r--  include/linux/hugetlb.h | 4
-rw-r--r--  include/linux/i2c-id.h | 1
-rw-r--r--  include/linux/init_task.h | 1
-rw-r--r--  include/linux/interrupt.h | 1
-rw-r--r--  include/linux/mm.h | 10
-rw-r--r--  include/linux/mmzone.h | 22
-rw-r--r--  include/linux/netfilter/nfnetlink.h | 6
-rw-r--r--  include/linux/pagemap.h | 4
-rw-r--r--  include/linux/pci_ids.h | 4
-rw-r--r--  include/linux/percpu.h | 2
-rw-r--r--  include/linux/pm.h | 49
-rw-r--r--  include/linux/pm_legacy.h | 56
-rw-r--r--  include/linux/preempt.h | 1
-rw-r--r--  include/linux/sched.h | 32
-rw-r--r--  include/linux/smp_lock.h | 3
-rw-r--r--  include/linux/thread_info.h | 47
-rw-r--r--  include/linux/time.h | 2
-rw-r--r--  include/linux/usb.h | 6
-rw-r--r--  include/linux/videodev2.h | 1
-rw-r--r--  include/media/ir-common.h | 1
-rw-r--r--  include/media/ir-kbd-i2c.h | 2
-rw-r--r--  include/media/tuner.h | 1
-rw-r--r--  include/media/v4l2-common.h | 110
108 files changed, 1402 insertions, 1531 deletions
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 20ac3d95ecd9..36505bb4e8cb 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -177,6 +177,18 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
177 return result; 177 return result;
178} 178}
179 179
180#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
181
182#define atomic_add_unless(v, a, u) \
183({ \
184 int c, old; \
185 c = atomic_read(v); \
186 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
187 c = old; \
188 c != (u); \
189})
190#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
191
180#define atomic_dec_return(v) atomic_sub_return(1,(v)) 192#define atomic_dec_return(v) atomic_sub_return(1,(v))
181#define atomic64_dec_return(v) atomic64_sub_return(1,(v)) 193#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
182 194
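For context, a minimal usage sketch of the atomic_add_unless()/atomic_inc_not_zero() primitive that this merge adds across architectures; it is not part of the patch, and the obj/refcount names are purely illustrative:

static struct obj *obj_get(struct obj *p)
{
        /* Take a reference only if the count has not already dropped to
         * zero, i.e. never resurrect an object whose teardown has begun. */
        if (p && !atomic_inc_not_zero(&p->refcount))
                return NULL;
        return p;
}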
diff --git a/include/asm-arm/arch-pxa/akita.h b/include/asm-arm/arch-pxa/akita.h
index 4a1fbcfccc39..5d8cc1d9cb10 100644
--- a/include/asm-arm/arch-pxa/akita.h
+++ b/include/asm-arm/arch-pxa/akita.h
@@ -25,6 +25,8 @@
25/* Default Values */ 25/* Default Values */
26#define AKITA_IOEXP_IO_OUT (AKITA_IOEXP_IR_ON | AKITA_IOEXP_AKIN_PULLUP) 26#define AKITA_IOEXP_IO_OUT (AKITA_IOEXP_IR_ON | AKITA_IOEXP_AKIN_PULLUP)
27 27
28extern struct platform_device akitaioexp_device;
29
28void akita_set_ioexp(struct device *dev, unsigned char bitmask); 30void akita_set_ioexp(struct device *dev, unsigned char bitmask);
29void akita_reset_ioexp(struct device *dev, unsigned char bitmask); 31void akita_reset_ioexp(struct device *dev, unsigned char bitmask);
30 32
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index 2885972b0855..75b802719723 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -80,6 +80,23 @@ static inline int atomic_sub_return(int i, atomic_t *v)
80 return result; 80 return result;
81} 81}
82 82
83static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
84{
85 u32 oldval, res;
86
87 do {
88 __asm__ __volatile__("@ atomic_cmpxchg\n"
89 "ldrex %1, [%2]\n"
90 "teq %1, %3\n"
91 "strexeq %0, %4, [%2]\n"
92 : "=&r" (res), "=&r" (oldval)
93 : "r" (&ptr->counter), "Ir" (old), "r" (new)
94 : "cc");
95 } while (res);
96
97 return oldval;
98}
99
83static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) 100static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
84{ 101{
85 unsigned long tmp, tmp2; 102 unsigned long tmp, tmp2;
@@ -131,6 +148,20 @@ static inline int atomic_sub_return(int i, atomic_t *v)
131 return val; 148 return val;
132} 149}
133 150
151static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
152{
153 int ret;
154 unsigned long flags;
155
156 local_irq_save(flags);
157 ret = v->counter;
158 if (likely(ret == old))
159 v->counter = new;
160 local_irq_restore(flags);
161
162 return ret;
163}
164
134static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) 165static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
135{ 166{
136 unsigned long flags; 167 unsigned long flags;
@@ -142,6 +173,17 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
142 173
143#endif /* __LINUX_ARM_ARCH__ */ 174#endif /* __LINUX_ARM_ARCH__ */
144 175
176static inline int atomic_add_unless(atomic_t *v, int a, int u)
177{
178 int c, old;
179
180 c = atomic_read(v);
181 while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
182 c = old;
183 return c != u;
184}
185#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
186
145#define atomic_add(i, v) (void) atomic_add_return(i, v) 187#define atomic_add(i, v) (void) atomic_add_return(i, v)
146#define atomic_inc(v) (void) atomic_add_return(1, v) 188#define atomic_inc(v) (void) atomic_add_return(1, v)
147#define atomic_sub(i, v) (void) atomic_sub_return(i, v) 189#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 4a88235c0e76..a47cadc59686 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -62,6 +62,35 @@ static inline int atomic_sub_return(int i, atomic_t *v)
62 return val; 62 return val;
63} 63}
64 64
65static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
66{
67 int ret;
68 unsigned long flags;
69
70 local_irq_save(flags);
71 ret = v->counter;
72 if (likely(ret == old))
73 v->counter = new;
74 local_irq_restore(flags);
75
76 return ret;
77}
78
79static inline int atomic_add_unless(atomic_t *v, int a, int u)
80{
81 int ret;
82 unsigned long flags;
83
84 local_irq_save(flags);
85 ret = v->counter;
86 if (ret != u)
87 v->counter += a;
88 local_irq_restore(flags);
89
90 return ret != u;
91}
92#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
93
65static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) 94static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
66{ 95{
67 unsigned long flags; 96 unsigned long flags;
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index 8c2e78304523..683b05a57d88 100644
--- a/include/asm-cris/atomic.h
+++ b/include/asm-cris/atomic.h
@@ -123,6 +123,33 @@ static inline int atomic_inc_and_test(volatile atomic_t *v)
123 return retval; 123 return retval;
124} 124}
125 125
126static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
127{
128 int ret;
129 unsigned long flags;
130
131 cris_atomic_save(v, flags);
132 ret = v->counter;
133 if (likely(ret == old))
134 v->counter = new;
135 cris_atomic_restore(v, flags);
136 return ret;
137}
138
139static inline int atomic_add_unless(atomic_t *v, int a, int u)
140{
141 int ret;
142 unsigned long flags;
143
144 cris_atomic_save(v, flags);
145 ret = v->counter;
146 if (ret != u)
147 v->counter += a;
148 cris_atomic_restore(v, flags);
149 return ret != u;
150}
151#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
152
126/* Atomic operations are already serializing */ 153/* Atomic operations are already serializing */
127#define smp_mb__before_atomic_dec() barrier() 154#define smp_mb__before_atomic_dec() barrier()
128#define smp_mb__after_atomic_dec() barrier() 155#define smp_mb__after_atomic_dec() barrier()
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index e75968463428..f6539ff569c5 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -414,4 +414,16 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
414 414
415#endif 415#endif
416 416
417#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
418
419#define atomic_add_unless(v, a, u) \
420({ \
421 int c, old; \
422 c = atomic_read(v); \
423 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
424 c = old; \
425 c != (u); \
426})
427#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
428
417#endif /* _ASM_ATOMIC_H */ 429#endif /* _ASM_ATOMIC_H */
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 886dbd116899..0b49f9e070f1 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -13,5 +13,6 @@ extern char _eextratext[] __attribute__((weak));
13extern char _end[]; 13extern char _end[];
14extern char __per_cpu_start[], __per_cpu_end[]; 14extern char __per_cpu_start[], __per_cpu_end[];
15extern char __kprobes_text_start[], __kprobes_text_end[]; 15extern char __kprobes_text_start[], __kprobes_text_end[];
16extern char __initdata_begin[], __initdata_end[];
16 17
17#endif /* _ASM_GENERIC_SECTIONS_H_ */ 18#endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index 7230f6507995..f23d86819ea8 100644
--- a/include/asm-h8300/atomic.h
+++ b/include/asm-h8300/atomic.h
@@ -82,6 +82,33 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
82 return ret == 0; 82 return ret == 0;
83} 83}
84 84
85static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
86{
87 int ret;
88 unsigned long flags;
89
90 local_irq_save(flags);
91 ret = v->counter;
92 if (likely(ret == old))
93 v->counter = new;
94 local_irq_restore(flags);
95 return ret;
96}
97
98static inline int atomic_add_unless(atomic_t *v, int a, int u)
99{
100 int ret;
101 unsigned long flags;
102
103 local_irq_save(flags);
104 ret = v->counter;
105 if (ret != u)
106 v->counter += a;
107 local_irq_restore(flags);
108 return ret != u;
109}
110#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
111
85static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) 112static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
86{ 113{
87 __asm__ __volatile__("stc ccr,r1l\n\t" 114 __asm__ __volatile__("stc ccr,r1l\n\t"
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 509720be772a..c68557aa04b2 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -215,6 +215,27 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
215 return atomic_add_return(-i,v); 215 return atomic_add_return(-i,v);
216} 216}
217 217
218#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
219
220/**
221 * atomic_add_unless - add unless the number is a given value
222 * @v: pointer of type atomic_t
223 * @a: the amount to add to v...
224 * @u: ...unless v is equal to u.
225 *
226 * Atomically adds @a to @v, so long as it was not @u.
227 * Returns non-zero if @v was not @u, and zero otherwise.
228 */
229#define atomic_add_unless(v, a, u) \
230({ \
231 int c, old; \
232 c = atomic_read(v); \
233 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
234 c = old; \
235 c != (u); \
236})
237#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
238
218#define atomic_inc_return(v) (atomic_add_return(1,v)) 239#define atomic_inc_return(v) (atomic_add_return(1,v))
219#define atomic_dec_return(v) (atomic_sub_return(1,v)) 240#define atomic_dec_return(v) (atomic_sub_return(1,v))
220 241
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h
index 06ae4d81ba6a..a955e57ad016 100644
--- a/include/asm-i386/mach-default/mach_reboot.h
+++ b/include/asm-i386/mach-default/mach_reboot.h
@@ -19,7 +19,7 @@ static inline void kb_wait(void)
19static inline void mach_reboot(void) 19static inline void mach_reboot(void)
20{ 20{
21 int i; 21 int i;
22 for (i = 0; i < 100; i++) { 22 for (i = 0; i < 10; i++) {
23 kb_wait(); 23 kb_wait();
24 udelay(50); 24 udelay(50);
25 outb(0x60, 0x64); /* write Controller Command Byte */ 25 outb(0x60, 0x64); /* write Controller Command Byte */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 8c02b0318703..5c96cf6dcb39 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -65,7 +65,9 @@ struct cpuinfo_x86 {
65 int f00f_bug; 65 int f00f_bug;
66 int coma_bug; 66 int coma_bug;
67 unsigned long loops_per_jiffy; 67 unsigned long loops_per_jiffy;
68 unsigned char x86_num_cores; 68 unsigned char x86_max_cores; /* cpuid returned max cores value */
69 unsigned char booted_cores; /* number of cores as seen by OS */
70 unsigned char apicid;
69} __attribute__((__aligned__(SMP_CACHE_BYTES))); 71} __attribute__((__aligned__(SMP_CACHE_BYTES)));
70 72
71#define X86_VENDOR_INTEL 0 73#define X86_VENDOR_INTEL 0
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 97d52ac49e46..772f85da1206 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -263,6 +263,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
263 263
264#ifdef CONFIG_X86_CMPXCHG 264#ifdef CONFIG_X86_CMPXCHG
265#define __HAVE_ARCH_CMPXCHG 1 265#define __HAVE_ARCH_CMPXCHG 1
266#define cmpxchg(ptr,o,n)\
267 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
268 (unsigned long)(n),sizeof(*(ptr))))
269#endif
266 270
267static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, 271static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
268 unsigned long new, int size) 272 unsigned long new, int size)
@@ -291,10 +295,42 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
291 return old; 295 return old;
292} 296}
293 297
294#define cmpxchg(ptr,o,n)\ 298#ifndef CONFIG_X86_CMPXCHG
295 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ 299/*
296 (unsigned long)(n),sizeof(*(ptr)))) 300 * Building a kernel capable running on 80386. It may be necessary to
301 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
302 * a function for each of the sizes we support.
303 */
297 304
305extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
306extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
307extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
308
309static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
310 unsigned long new, int size)
311{
312 switch (size) {
313 case 1:
314 return cmpxchg_386_u8(ptr, old, new);
315 case 2:
316 return cmpxchg_386_u16(ptr, old, new);
317 case 4:
318 return cmpxchg_386_u32(ptr, old, new);
319 }
320 return old;
321}
322
323#define cmpxchg(ptr,o,n) \
324({ \
325 __typeof__(*(ptr)) __ret; \
326 if (likely(boot_cpu_data.x86 > 3)) \
327 __ret = __cmpxchg((ptr), (unsigned long)(o), \
328 (unsigned long)(n), sizeof(*(ptr))); \
329 else \
330 __ret = cmpxchg_386((ptr), (unsigned long)(o), \
331 (unsigned long)(n), sizeof(*(ptr))); \
332 __ret; \
333})
298#endif 334#endif
299 335
300#ifdef CONFIG_X86_CMPXCHG64 336#ifdef CONFIG_X86_CMPXCHG64
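Not part of the patch, but for illustration, the classic retry loop that the unconditional cmpxchg() macro above enables; the atomic_max helper name is hypothetical:

static inline void atomic_max(unsigned long *max, unsigned long val)
{
        unsigned long old;

        do {
                old = *max;
                if (val <= old)
                        return;         /* current maximum already larger */
        } while (cmpxchg(max, old, val) != old);
}

On a real 80386 the macro falls back to the cmpxchg_386_u*() emulation routines shown above, so the same caller works on every x86 family.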
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 874a6f890e75..2fbebf85c31d 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -88,6 +88,18 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
88 return new; 88 return new;
89} 89}
90 90
91#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
92
93#define atomic_add_unless(v, a, u) \
94({ \
95 int c, old; \
96 c = atomic_read(v); \
97 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
98 c = old; \
99 c != (u); \
100})
101#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
102
91#define atomic_add_return(i,v) \ 103#define atomic_add_return(i,v) \
92({ \ 104({ \
93 int __ia64_aar_i = (i); \ 105 int __ia64_aar_i = (i); \
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index 38f3043e7fe1..e3c962eeabf3 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -139,6 +139,18 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
139 __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask)); 139 __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
140} 140}
141 141
142#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
143
144#define atomic_add_unless(v, a, u) \
145({ \
146 int c, old; \
147 c = atomic_read(v); \
148 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
149 c = old; \
150 c != (u); \
151})
152#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
153
142/* Atomic operations are already serializing */ 154/* Atomic operations are already serializing */
143#define smp_mb__before_atomic_dec() barrier() 155#define smp_mb__before_atomic_dec() barrier()
144#define smp_mb__after_atomic_dec() barrier() 156#define smp_mb__after_atomic_dec() barrier()
diff --git a/include/asm-m68k/processor.h b/include/asm-m68k/processor.h
index df1575db32af..7982285e84ed 100644
--- a/include/asm-m68k/processor.h
+++ b/include/asm-m68k/processor.h
@@ -14,6 +14,7 @@
14#define current_text_addr() ({ __label__ _l; _l: &&_l;}) 14#define current_text_addr() ({ __label__ _l; _l: &&_l;})
15 15
16#include <linux/config.h> 16#include <linux/config.h>
17#include <linux/thread_info.h>
17#include <asm/segment.h> 18#include <asm/segment.h>
18#include <asm/fpu.h> 19#include <asm/fpu.h>
19#include <asm/ptrace.h> 20#include <asm/ptrace.h>
@@ -55,17 +56,6 @@ static inline void wrusp(unsigned long usp)
55#endif 56#endif
56#define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr) 57#define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
57 58
58struct task_work {
59 unsigned char sigpending;
60 unsigned char notify_resume; /* request for notification on
61 userspace execution resumption */
62 char need_resched;
63 unsigned char delayed_trace; /* single step a syscall */
64 unsigned char syscall_trace; /* count of syscall interceptors */
65 unsigned char memdie; /* task was selected to be killed */
66 unsigned char pad[2];
67};
68
69struct thread_struct { 59struct thread_struct {
70 unsigned long ksp; /* kernel stack pointer */ 60 unsigned long ksp; /* kernel stack pointer */
71 unsigned long usp; /* user stack pointer */ 61 unsigned long usp; /* user stack pointer */
@@ -78,7 +68,7 @@ struct thread_struct {
78 unsigned long fp[8*3]; 68 unsigned long fp[8*3];
79 unsigned long fpcntl[3]; /* fp control regs */ 69 unsigned long fpcntl[3]; /* fp control regs */
80 unsigned char fpstate[FPSTATESIZE]; /* floating point state */ 70 unsigned char fpstate[FPSTATESIZE]; /* floating point state */
81 struct task_work work; 71 struct thread_info info;
82}; 72};
83 73
84#define INIT_THREAD { \ 74#define INIT_THREAD { \
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index 2aed24f6fd2e..9532ca3c45cb 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -2,17 +2,15 @@
2#define _ASM_M68K_THREAD_INFO_H 2#define _ASM_M68K_THREAD_INFO_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5#include <asm/processor.h>
6#include <asm/page.h> 5#include <asm/page.h>
7 6
8struct thread_info { 7struct thread_info {
9 struct task_struct *task; /* main task structure */ 8 struct task_struct *task; /* main task structure */
9 unsigned long flags;
10 struct exec_domain *exec_domain; /* execution domain */ 10 struct exec_domain *exec_domain; /* execution domain */
11 int preempt_count; /* 0 => preemptable, <0 => BUG */ 11 int preempt_count; /* 0 => preemptable, <0 => BUG */
12 __u32 cpu; /* should always be 0 on m68k */ 12 __u32 cpu; /* should always be 0 on m68k */
13 struct restart_block restart_block; 13 struct restart_block restart_block;
14
15 __u8 supervisor_stack[0];
16}; 14};
17 15
18#define PREEMPT_ACTIVE 0x4000000 16#define PREEMPT_ACTIVE 0x4000000
@@ -35,84 +33,29 @@ struct thread_info {
35#define free_thread_info(ti) free_pages((unsigned long)(ti),1) 33#define free_thread_info(ti) free_pages((unsigned long)(ti),1)
36#endif /* PAGE_SHIFT == 13 */ 34#endif /* PAGE_SHIFT == 13 */
37 35
38//#define init_thread_info (init_task.thread.info) 36#define init_thread_info (init_task.thread.info)
39#define init_stack (init_thread_union.stack) 37#define init_stack (init_thread_union.stack)
40 38
41#define current_thread_info() (current->thread_info) 39#define task_thread_info(tsk) (&(tsk)->thread.info)
42 40#define current_thread_info() task_thread_info(current)
43 41
44#define __HAVE_THREAD_FUNCTIONS 42#define __HAVE_THREAD_FUNCTIONS
45 43
46#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 44#define setup_thread_stack(p, org) ({ \
47#define TIF_DELAYED_TRACE 1 /* single step a syscall */ 45 *(struct task_struct **)(p)->thread_info = (p); \
48#define TIF_NOTIFY_RESUME 2 /* resumption notification requested */ 46 task_thread_info(p)->task = (p); \
49#define TIF_SIGPENDING 3 /* signal pending */
50#define TIF_NEED_RESCHED 4 /* rescheduling necessary */
51#define TIF_MEMDIE 5
52
53extern int thread_flag_fixme(void);
54
55/*
56 * flag set/clear/test wrappers
57 * - pass TIF_xxxx constants to these functions
58 */
59
60#define __set_tsk_thread_flag(tsk, flag, val) ({ \
61 switch (flag) { \
62 case TIF_SIGPENDING: \
63 tsk->thread.work.sigpending = val; \
64 break; \
65 case TIF_NEED_RESCHED: \
66 tsk->thread.work.need_resched = val; \
67 break; \
68 case TIF_SYSCALL_TRACE: \
69 tsk->thread.work.syscall_trace = val; \
70 break; \
71 case TIF_MEMDIE: \
72 tsk->thread.work.memdie = val; \
73 break; \
74 default: \
75 thread_flag_fixme(); \
76 } \
77}) 47})
78 48
79#define __get_tsk_thread_flag(tsk, flag) ({ \ 49#define end_of_stack(p) ((unsigned long *)(p)->thread_info + 1)
80 int ___res; \
81 switch (flag) { \
82 case TIF_SIGPENDING: \
83 ___res = tsk->thread.work.sigpending; \
84 break; \
85 case TIF_NEED_RESCHED: \
86 ___res = tsk->thread.work.need_resched; \
87 break; \
88 case TIF_SYSCALL_TRACE: \
89 ___res = tsk->thread.work.syscall_trace;\
90 break; \
91 case TIF_MEMDIE: \
92 ___res = tsk->thread.work.memdie;\
93 break; \
94 default: \
95 ___res = thread_flag_fixme(); \
96 } \
97 ___res; \
98})
99
100#define __get_set_tsk_thread_flag(tsk, flag, val) ({ \
101 int __res = __get_tsk_thread_flag(tsk, flag); \
102 __set_tsk_thread_flag(tsk, flag, val); \
103 __res; \
104})
105 50
106#define set_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, ~0) 51/* entry.S relies on these definitions!
107#define clear_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, 0) 52 * bits 0-7 are tested at every exception exit
108#define test_and_set_tsk_thread_flag(tsk, flag) __get_set_tsk_thread_flag(tsk, flag, ~0) 53 * bits 8-15 are also tested at syscall exit
109#define test_tsk_thread_flag(tsk, flag) __get_tsk_thread_flag(tsk, flag) 54 */
110 55#define TIF_SIGPENDING 6 /* signal pending */
111#define set_thread_flag(flag) set_tsk_thread_flag(current, flag) 56#define TIF_NEED_RESCHED 7 /* rescheduling necessary */
112#define clear_thread_flag(flag) clear_tsk_thread_flag(current, flag) 57#define TIF_DELAYED_TRACE 14 /* single step a syscall */
113#define test_thread_flag(flag) test_tsk_thread_flag(current, flag) 58#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
114 59#define TIF_MEMDIE 16
115#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
116#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
117 60
118#endif /* _ASM_M68K_THREAD_INFO_H */ 61#endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index a83631ed8c8f..3c1cc153c415 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -128,6 +128,18 @@ static inline int atomic_sub_return(int i, atomic_t * v)
128 return temp; 128 return temp;
129} 129}
130 130
131#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
132
133#define atomic_add_unless(v, a, u) \
134({ \
135 int c, old; \
136 c = atomic_read(v); \
137 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
138 c = old; \
139 c != (u); \
140})
141#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
142
131#define atomic_dec_return(v) atomic_sub_return(1,(v)) 143#define atomic_dec_return(v) atomic_sub_return(1,(v))
132#define atomic_inc_return(v) atomic_add_return(1,(v)) 144#define atomic_inc_return(v) atomic_add_return(1,(v))
133 145
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 6202eb8a14b7..2c87b41e69ba 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -287,6 +287,27 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
287 return result; 287 return result;
288} 288}
289 289
290#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
291
292/**
293 * atomic_add_unless - add unless the number is a given value
294 * @v: pointer of type atomic_t
295 * @a: the amount to add to v...
296 * @u: ...unless v is equal to u.
297 *
298 * Atomically adds @a to @v, so long as it was not @u.
299 * Returns non-zero if @v was not @u, and zero otherwise.
300 */
301#define atomic_add_unless(v, a, u) \
302({ \
303 int c, old; \
304 c = atomic_read(v); \
305 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
306 c = old; \
307 c != (u); \
308})
309#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
310
290#define atomic_dec_return(v) atomic_sub_return(1,(v)) 311#define atomic_dec_return(v) atomic_sub_return(1,(v))
291#define atomic_inc_return(v) atomic_add_return(1,(v)) 312#define atomic_inc_return(v) atomic_add_return(1,(v))
292 313
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 048a2c7fd0c0..983e9a2b6042 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -164,6 +164,26 @@ static __inline__ int atomic_read(const atomic_t *v)
164} 164}
165 165
166/* exported interface */ 166/* exported interface */
167#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
168
169/**
170 * atomic_add_unless - add unless the number is a given value
171 * @v: pointer of type atomic_t
172 * @a: the amount to add to v...
173 * @u: ...unless v is equal to u.
174 *
175 * Atomically adds @a to @v, so long as it was not @u.
176 * Returns non-zero if @v was not @u, and zero otherwise.
177 */
178#define atomic_add_unless(v, a, u) \
179({ \
180 int c, old; \
181 c = atomic_read(v); \
182 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
183 c = old; \
184 c != (u); \
185})
186#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
167 187
168#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v)))) 188#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v))))
169#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v)))) 189#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v))))
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 9c0b372a46e1..ec4b14468959 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -164,6 +164,33 @@ static __inline__ int atomic_dec_return(atomic_t *v)
164 return t; 164 return t;
165} 165}
166 166
167#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
168
169/**
170 * atomic_add_unless - add unless the number is a given value
171 * @v: pointer of type atomic_t
172 * @a: the amount to add to v...
173 * @u: ...unless v is equal to u.
174 *
175 * Atomically adds @a to @v, so long as it was not @u.
176 * Returns non-zero if @v was not @u, and zero otherwise.
177 */
178#define atomic_add_unless(v, a, u) \
179({ \
180 int c, old; \
181 c = atomic_read(v); \
182 for (;;) { \
183 if (unlikely(c == (u))) \
184 break; \
185 old = atomic_cmpxchg((v), c, c + (a)); \
186 if (likely(old == c)) \
187 break; \
188 c = old; \
189 } \
190 c != (u); \
191})
192#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
193
167#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) 194#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
168#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) 195#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
169 196
diff --git a/include/asm-ppc64/btext.h b/include/asm-powerpc/btext.h
index 71cce36bc630..71cce36bc630 100644
--- a/include/asm-ppc64/btext.h
+++ b/include/asm-powerpc/btext.h
diff --git a/include/asm-ppc64/delay.h b/include/asm-powerpc/delay.h
index 05f198cf73d9..1492aa9ab716 100644
--- a/include/asm-ppc64/delay.h
+++ b/include/asm-powerpc/delay.h
@@ -1,5 +1,5 @@
1#ifndef _PPC64_DELAY_H 1#ifndef _ASM_POWERPC_DELAY_H
2#define _PPC64_DELAY_H 2#define _ASM_POWERPC_DELAY_H
3 3
4/* 4/*
5 * Copyright 1996, Paul Mackerras. 5 * Copyright 1996, Paul Mackerras.
@@ -15,10 +15,17 @@
15 15
16extern unsigned long tb_ticks_per_usec; 16extern unsigned long tb_ticks_per_usec;
17 17
18/* define these here to prevent circular dependencies */ 18#ifdef CONFIG_PPC64
19/* define these here to prevent circular dependencies */
20/* these instructions control the thread priority on multi-threaded cpus */
19#define __HMT_low() asm volatile("or 1,1,1") 21#define __HMT_low() asm volatile("or 1,1,1")
20#define __HMT_medium() asm volatile("or 2,2,2") 22#define __HMT_medium() asm volatile("or 2,2,2")
21#define __barrier() asm volatile("":::"memory") 23#else
24#define __HMT_low()
25#define __HMT_medium()
26#endif
27
28#define __barrier() asm volatile("" ::: "memory")
22 29
23static inline unsigned long __get_tb(void) 30static inline unsigned long __get_tb(void)
24{ 31{
@@ -32,7 +39,7 @@ static inline void __delay(unsigned long loops)
32{ 39{
33 unsigned long start = __get_tb(); 40 unsigned long start = __get_tb();
34 41
35 while((__get_tb()-start) < loops) 42 while((__get_tb() - start) < loops)
36 __HMT_low(); 43 __HMT_low();
37 __HMT_medium(); 44 __HMT_medium();
38 __barrier(); 45 __barrier();
@@ -45,4 +52,4 @@ static inline void udelay(unsigned long usecs)
45 __delay(loops); 52 __delay(loops);
46} 53}
47 54
48#endif /* _PPC64_DELAY_H */ 55#endif /* _ASM_POWERPC_DELAY_H */
diff --git a/include/asm-ppc64/eeh.h b/include/asm-powerpc/eeh.h
index 89f26ab31908..89f26ab31908 100644
--- a/include/asm-ppc64/eeh.h
+++ b/include/asm-powerpc/eeh.h
diff --git a/include/asm-ppc64/floppy.h b/include/asm-powerpc/floppy.h
index 5c497b588e54..64276a3f6153 100644
--- a/include/asm-ppc64/floppy.h
+++ b/include/asm-powerpc/floppy.h
@@ -7,22 +7,22 @@
7 * 7 *
8 * Copyright (C) 1995 8 * Copyright (C) 1995
9 */ 9 */
10#ifndef __ASM_PPC64_FLOPPY_H 10#ifndef __ASM_POWERPC_FLOPPY_H
11#define __ASM_PPC64_FLOPPY_H 11#define __ASM_POWERPC_FLOPPY_H
12 12
13#include <linux/config.h> 13#include <linux/config.h>
14#include <asm/machdep.h> 14#include <asm/machdep.h>
15 15
16#define fd_inb(port) inb_p(port) 16#define fd_inb(port) inb_p(port)
17#define fd_outb(value,port) outb_p(value,port) 17#define fd_outb(value,port) outb_p(value,port)
18 18
19#define fd_enable_dma() enable_dma(FLOPPY_DMA) 19#define fd_enable_dma() enable_dma(FLOPPY_DMA)
20#define fd_disable_dma() disable_dma(FLOPPY_DMA) 20#define fd_disable_dma() disable_dma(FLOPPY_DMA)
21#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy") 21#define fd_request_dma() request_dma(FLOPPY_DMA, "floppy")
22#define fd_free_dma() free_dma(FLOPPY_DMA) 22#define fd_free_dma() free_dma(FLOPPY_DMA)
23#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA) 23#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
24#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode) 24#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA, mode)
25#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count) 25#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA, count)
26#define fd_enable_irq() enable_irq(FLOPPY_IRQ) 26#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
27#define fd_disable_irq() disable_irq(FLOPPY_IRQ) 27#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
28#define fd_cacheflush(addr,size) /* nothing */ 28#define fd_cacheflush(addr,size) /* nothing */
@@ -35,10 +35,10 @@
35 35
36#include <linux/pci.h> 36#include <linux/pci.h>
37 37
38#define fd_dma_setup(addr,size,mode,io) ppc64_fd_dma_setup(addr,size,mode,io) 38#define fd_dma_setup(addr,size,mode,io) powerpc_fd_dma_setup(addr,size,mode,io)
39 39
40static __inline__ int 40static __inline__ int powerpc_fd_dma_setup(char *addr, unsigned long size,
41ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io) 41 int mode, int io)
42{ 42{
43 static unsigned long prev_size; 43 static unsigned long prev_size;
44 static dma_addr_t bus_addr = 0; 44 static dma_addr_t bus_addr = 0;
@@ -55,9 +55,8 @@ ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
55 bus_addr = 0; 55 bus_addr = 0;
56 } 56 }
57 57
58 if (!bus_addr) /* need to map it */ { 58 if (!bus_addr) /* need to map it */
59 bus_addr = pci_map_single(NULL, addr, size, dir); 59 bus_addr = pci_map_single(NULL, addr, size, dir);
60 }
61 60
62 /* remember this one as prev */ 61 /* remember this one as prev */
63 prev_addr = addr; 62 prev_addr = addr;
@@ -103,4 +102,4 @@ static int FDC2 = -1;
103 102
104#define EXTRA_FLOPPY_PARAMS 103#define EXTRA_FLOPPY_PARAMS
105 104
106#endif /* __ASM_PPC64_FLOPPY_H */ 105#endif /* __ASM_POWERPC_FLOPPY_H */
diff --git a/include/asm-ppc64/hvconsole.h b/include/asm-powerpc/hvconsole.h
index 6da93ce74dc0..6da93ce74dc0 100644
--- a/include/asm-ppc64/hvconsole.h
+++ b/include/asm-powerpc/hvconsole.h
diff --git a/include/asm-ppc64/hvcserver.h b/include/asm-powerpc/hvcserver.h
index aecba9665796..aecba9665796 100644
--- a/include/asm-ppc64/hvcserver.h
+++ b/include/asm-powerpc/hvcserver.h
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
index 062ab9ba68eb..c72ffc709ea8 100644
--- a/include/asm-powerpc/kexec.h
+++ b/include/asm-powerpc/kexec.h
@@ -40,6 +40,7 @@ extern note_buf_t crash_notes[];
40#ifdef __powerpc64__ 40#ifdef __powerpc64__
41extern void kexec_smp_wait(void); /* get and clear naca physid, wait for 41extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
42 master to copy new code to 0 */ 42 master to copy new code to 0 */
43extern void __init kexec_setup(void);
43#else 44#else
44struct kimage; 45struct kimage;
45extern void machine_kexec_simple(struct kimage *image); 46extern void machine_kexec_simple(struct kimage *image);
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 5670f0cd6143..c011abb8b600 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -93,7 +93,9 @@ struct machdep_calls {
93 93
94 void (*init_IRQ)(void); 94 void (*init_IRQ)(void);
95 int (*get_irq)(struct pt_regs *); 95 int (*get_irq)(struct pt_regs *);
96 void (*cpu_irq_down)(int secondary); 96#ifdef CONFIG_KEXEC
97 void (*kexec_cpu_down)(int crash_shutdown, int secondary);
98#endif
97 99
98 /* PCI stuff */ 100 /* PCI stuff */
99 /* Called after scanning the bus, before allocating resources */ 101 /* Called after scanning the bus, before allocating resources */
diff --git a/include/asm-ppc64/nvram.h b/include/asm-powerpc/nvram.h
index def47d720d3d..24bd8c2388ea 100644
--- a/include/asm-ppc64/nvram.h
+++ b/include/asm-powerpc/nvram.h
@@ -1,6 +1,5 @@
1/* 1/*
2 * PreP compliant NVRAM access 2 * NVRAM definitions and access functions.
3 * This needs to be updated for PPC64
4 * 3 *
5 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -8,8 +7,8 @@
8 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
9 */ 8 */
10 9
11#ifndef _PPC64_NVRAM_H 10#ifndef _ASM_POWERPC_NVRAM_H
12#define _PPC64_NVRAM_H 11#define _ASM_POWERPC_NVRAM_H
13 12
14#define NVRW_CNT 0x20 13#define NVRW_CNT 0x20
15#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */ 14#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
@@ -69,7 +68,6 @@ extern int nvram_clear_error_log(void);
69extern struct nvram_partition *nvram_find_partition(int sig, const char *name); 68extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
70 69
71extern int pSeries_nvram_init(void); 70extern int pSeries_nvram_init(void);
72extern int pmac_nvram_init(void);
73extern int mmio_nvram_init(void); 71extern int mmio_nvram_init(void);
74 72
75/* PowerMac specific nvram stuffs */ 73/* PowerMac specific nvram stuffs */
@@ -88,7 +86,11 @@ extern u8 pmac_xpram_read(int xpaddr);
88extern void pmac_xpram_write(int xpaddr, u8 data); 86extern void pmac_xpram_write(int xpaddr, u8 data);
89 87
90/* Synchronize NVRAM */ 88/* Synchronize NVRAM */
91extern int nvram_sync(void); 89extern void nvram_sync(void);
90
91/* Normal access to NVRAM */
92extern unsigned char nvram_read_byte(int i);
93extern void nvram_write_byte(unsigned char c, int i);
92 94
93/* Some offsets in XPRAM */ 95/* Some offsets in XPRAM */
94#define PMAC_XPRAM_MACHINE_LOC 0xe4 96#define PMAC_XPRAM_MACHINE_LOC 0xe4
@@ -112,5 +114,6 @@ struct pmac_machine_location {
112 _IOWR('p', 0x40, int) 114 _IOWR('p', 0x40, int)
113 115
114#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */ 116#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */
117#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */
115 118
116#endif /* _PPC64_NVRAM_H */ 119#endif /* _ASM_POWERPC_NVRAM_H */
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
new file mode 100644
index 000000000000..18c1e5ee81a3
--- /dev/null
+++ b/include/asm-powerpc/page.h
@@ -0,0 +1,179 @@
1#ifndef _ASM_POWERPC_PAGE_H
2#define _ASM_POWERPC_PAGE_H
3
4/*
5 * Copyright (C) 2001,2005 IBM Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifdef __KERNEL__
14#include <linux/config.h>
15#include <asm/asm-compat.h>
16
17/*
18 * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
19 * page size. When using 64K pages however, whether we are really supporting
20 * 64K pages in HW or not is irrelevant to those definitions.
21 */
22#ifdef CONFIG_PPC_64K_PAGES
23#define PAGE_SHIFT 16
24#else
25#define PAGE_SHIFT 12
26#endif
27
28#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
29
30/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
31#define __HAVE_ARCH_GATE_AREA 1
32
33/*
34 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
35 * assign PAGE_MASK to a larger type it gets extended the way we want
36 * (i.e. with 1s in the high bits)
37 */
38#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
39
40#define PAGE_OFFSET ASM_CONST(CONFIG_KERNEL_START)
41#define KERNELBASE PAGE_OFFSET
42
43#ifdef CONFIG_DISCONTIGMEM
44#define page_to_pfn(page) discontigmem_page_to_pfn(page)
45#define pfn_to_page(pfn) discontigmem_pfn_to_page(pfn)
46#define pfn_valid(pfn) discontigmem_pfn_valid(pfn)
47#endif
48
49#ifdef CONFIG_FLATMEM
50#define pfn_to_page(pfn) (mem_map + (pfn))
51#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
52#define pfn_valid(pfn) ((pfn) < max_mapnr)
53#endif
54
55#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
56#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
57#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
58
59#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
60#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
61
62/*
63 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
64 * and needs to be executable. This means the whole heap ends
65 * up being executable.
66 */
67#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
68 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
69
70#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
71 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
72
73#ifdef __powerpc64__
74#include <asm/page_64.h>
75#else
76#include <asm/page_32.h>
77#endif
78
79/* align addr on a size boundary - adjust address up/down if needed */
80#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
81#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
82
83/* align addr on a size boundary - adjust address up if needed */
84#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
85
86/* to align the pointer to the (next) page boundary */
87#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
88
89#ifndef __ASSEMBLY__
90
91#undef STRICT_MM_TYPECHECKS
92
93#ifdef STRICT_MM_TYPECHECKS
94/* These are used to make use of C type-checking. */
95
96/* PTE level */
97typedef struct { pte_basic_t pte; } pte_t;
98#define pte_val(x) ((x).pte)
99#define __pte(x) ((pte_t) { (x) })
100
101/* 64k pages additionally define a bigger "real PTE" type that gathers
102 * the "second half" part of the PTE for pseudo 64k pages
103 */
104#ifdef CONFIG_PPC_64K_PAGES
105typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
106#else
107typedef struct { pte_t pte; } real_pte_t;
108#endif
109
110/* PMD level */
111typedef struct { unsigned long pmd; } pmd_t;
112#define pmd_val(x) ((x).pmd)
113#define __pmd(x) ((pmd_t) { (x) })
114
115/* PUD level exusts only on 4k pages */
116#ifndef CONFIG_PPC_64K_PAGES
117typedef struct { unsigned long pud; } pud_t;
118#define pud_val(x) ((x).pud)
119#define __pud(x) ((pud_t) { (x) })
120#endif
121
122/* PGD level */
123typedef struct { unsigned long pgd; } pgd_t;
124#define pgd_val(x) ((x).pgd)
125#define __pgd(x) ((pgd_t) { (x) })
126
127/* Page protection bits */
128typedef struct { unsigned long pgprot; } pgprot_t;
129#define pgprot_val(x) ((x).pgprot)
130#define __pgprot(x) ((pgprot_t) { (x) })
131
132#else
133
134/*
135 * .. while these make it easier on the compiler
136 */
137
138typedef pte_basic_t pte_t;
139#define pte_val(x) (x)
140#define __pte(x) (x)
141
142#ifdef CONFIG_PPC_64K_PAGES
143typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
144#else
145typedef unsigned long real_pte_t;
146#endif
147
148
149typedef unsigned long pmd_t;
150#define pmd_val(x) (x)
151#define __pmd(x) (x)
152
153#ifndef CONFIG_PPC_64K_PAGES
154typedef unsigned long pud_t;
155#define pud_val(x) (x)
156#define __pud(x) (x)
157#endif
158
159typedef unsigned long pgd_t;
160#define pgd_val(x) (x)
161#define pgprot_val(x) (x)
162
163typedef unsigned long pgprot_t;
164#define __pgd(x) (x)
165#define __pgprot(x) (x)
166
167#endif
168
169struct page;
170extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
171extern void copy_user_page(void *to, void *from, unsigned long vaddr,
172 struct page *p);
173extern int page_is_ram(unsigned long pfn);
174
175#endif /* __ASSEMBLY__ */
176
177#endif /* __KERNEL__ */
178
179#endif /* _ASM_POWERPC_PAGE_H */
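As a side note (not in the patch), the _ALIGN_UP()/PAGE_ALIGN() arithmetic defined above can be checked in isolation; ALIGN_UP below is a local copy for illustration and assumes a power-of-two size:

#include <assert.h>

#define ALIGN_UP(addr, size) (((addr) + ((size) - 1)) & ~((size) - 1))

int main(void)
{
        assert(ALIGN_UP(0x1234UL, 0x1000UL) == 0x2000UL); /* rounded up to next 4K */
        assert(ALIGN_UP(0x2000UL, 0x1000UL) == 0x2000UL); /* already aligned, unchanged */
        return 0;
}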
diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h
new file mode 100644
index 000000000000..7259cfd85da9
--- /dev/null
+++ b/include/asm-powerpc/page_32.h
@@ -0,0 +1,40 @@
1#ifndef _ASM_POWERPC_PAGE_32_H
2#define _ASM_POWERPC_PAGE_32_H
3
4#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
5
6#define PPC_MEMSTART 0
7
8#ifndef __ASSEMBLY__
9/*
10 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
11 * physical addressing. For now this just the IBM PPC440.
12 */
13#ifdef CONFIG_PTE_64BIT
14typedef unsigned long long pte_basic_t;
15#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
16#define PTE_FMT "%16Lx"
17#else
18typedef unsigned long pte_basic_t;
19#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
20#define PTE_FMT "%.8lx"
21#endif
22
23struct page;
24extern void clear_pages(void *page, int order);
25static inline void clear_page(void *page) { clear_pages(page, 0); }
26extern void copy_page(void *to, void *from);
27
28/* Pure 2^n version of get_order */
29extern __inline__ int get_order(unsigned long size)
30{
31 int lz;
32
33 size = (size-1) >> PAGE_SHIFT;
34 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
35 return 32 - lz;
36}
37
38#endif /* __ASSEMBLY__ */
39
40#endif /* _ASM_POWERPC_PAGE_32_H */
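Not part of the patch: a portable sketch of what the cntlzw-based get_order() above computes, namely the smallest order such that (PAGE_SIZE << order) >= size, assuming PAGE_SHIFT == 12:

static int get_order_sketch(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> 12;        /* PAGE_SHIFT */
        while (size) {                  /* same result as 32 - cntlzw(size) */
                order++;
                size >>= 1;
        }
        return order;
}

/* get_order_sketch(4096) == 0 (one page); get_order_sketch(20000) == 3 (eight pages). */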
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
new file mode 100644
index 000000000000..c16f106b5373
--- /dev/null
+++ b/include/asm-powerpc/page_64.h
@@ -0,0 +1,174 @@
1#ifndef _ASM_POWERPC_PAGE_64_H
2#define _ASM_POWERPC_PAGE_64_H
3
4/*
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13/*
14 * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
15 * specific, every notion of page number shared with the firmware, TCEs,
16 * iommu, etc... still uses a page size of 4K.
17 */
18#define HW_PAGE_SHIFT 12
19#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
20#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
21
22/*
23 * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
24 * HW_PAGE_SHIFT, that is 4K pages.
25 */
26#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
27
28#define REGION_SIZE 4UL
29#define REGION_SHIFT 60UL
30#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
31
32#define VMALLOCBASE ASM_CONST(0xD000000000000000)
33#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
34#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT)
35#define USER_REGION_ID (0UL)
36#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
37
38/* Segment size */
39#define SID_SHIFT 28
40#define SID_MASK 0xfffffffffUL
41#define ESID_MASK 0xfffffffff0000000UL
42#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
43
44#ifndef __ASSEMBLY__
45#include <asm/cache.h>
46
47typedef unsigned long pte_basic_t;
48
49static __inline__ void clear_page(void *addr)
50{
51 unsigned long lines, line_size;
52
53 line_size = ppc64_caches.dline_size;
54 lines = ppc64_caches.dlines_per_page;
55
56 __asm__ __volatile__(
57 "mtctr %1 # clear_page\n\
581: dcbz 0,%0\n\
59 add %0,%0,%3\n\
60 bdnz+ 1b"
61 : "=r" (addr)
62 : "r" (lines), "0" (addr), "r" (line_size)
63 : "ctr", "memory");
64}
65
66extern void copy_4K_page(void *to, void *from);
67
68#ifdef CONFIG_PPC_64K_PAGES
69static inline void copy_page(void *to, void *from)
70{
71 unsigned int i;
72 for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
73 copy_4K_page(to, from);
74 to += 4096;
75 from += 4096;
76 }
77}
78#else /* CONFIG_PPC_64K_PAGES */
79static inline void copy_page(void *to, void *from)
80{
81 copy_4K_page(to, from);
82}
83#endif /* CONFIG_PPC_64K_PAGES */
84
85/* Log 2 of page table size */
86extern u64 ppc64_pft_size;
87
88/* Large pages size */
89extern unsigned int HPAGE_SHIFT;
90#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
91#define HPAGE_MASK (~(HPAGE_SIZE - 1))
92#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
93
94#endif /* __ASSEMBLY__ */
95
96#ifdef CONFIG_HUGETLB_PAGE
97
98#define HTLB_AREA_SHIFT 40
99#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
100#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
101
102#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
103 - (1U << GET_ESID(addr))) & 0xffff)
104#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
105 - (1U << GET_HTLB_AREA(addr))) & 0xffff)
106
107#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
108#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
109#define ARCH_HAS_SETCLEAR_HUGE_PTE
110
111#define touches_hugepage_low_range(mm, addr, len) \
112 (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
113#define touches_hugepage_high_range(mm, addr, len) \
114 (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
115
116#define __within_hugepage_low_range(addr, len, segmask) \
117 ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
118#define within_hugepage_low_range(addr, len) \
119 __within_hugepage_low_range((addr), (len), \
120 current->mm->context.low_htlb_areas)
121#define __within_hugepage_high_range(addr, len, zonemask) \
122 ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
123#define within_hugepage_high_range(addr, len) \
124 __within_hugepage_high_range((addr), (len), \
125 current->mm->context.high_htlb_areas)
126
127#define is_hugepage_only_range(mm, addr, len) \
128 (touches_hugepage_high_range((mm), (addr), (len)) || \
129 touches_hugepage_low_range((mm), (addr), (len)))
130#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
131
132#define in_hugepage_area(context, addr) \
133 (cpu_has_feature(CPU_FTR_16M_PAGE) && \
134 ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
135 ( ((addr) < 0x100000000L) && \
136 ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
137
138#else /* !CONFIG_HUGETLB_PAGE */
139
140#define in_hugepage_area(mm, addr) 0
141
142#endif /* !CONFIG_HUGETLB_PAGE */
143
144#ifdef MODULE
145#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
146#else
147#define __page_aligned \
148 __attribute__((__aligned__(PAGE_SIZE), \
149 __section__(".data.page_aligned")))
150#endif
151
152#define VM_DATA_DEFAULT_FLAGS \
153 (test_thread_flag(TIF_32BIT) ? \
154 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
155
156/*
157 * This is the default if a program doesn't have a PT_GNU_STACK
158 * program header entry. The PPC64 ELF ABI has a non executable stack
159 * stack by default, so in the absense of a PT_GNU_STACK program header
160 * we turn execute permission off.
161 */
162#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
163 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
164
165#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
166 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
167
168#define VM_STACK_DEFAULT_FLAGS \
169 (test_thread_flag(TIF_32BIT) ? \
170 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
171
172#include <asm-generic/page.h>
173
174#endif /* _ASM_POWERPC_PAGE_64_H */
diff --git a/include/asm-ppc64/serial.h b/include/asm-powerpc/serial.h
index d6bcb79b7d7b..b273d630b32f 100644
--- a/include/asm-ppc64/serial.h
+++ b/include/asm-powerpc/serial.h
@@ -1,21 +1,16 @@
1/* 1/*
2 * include/asm-ppc64/serial.h
3 */
4#ifndef _PPC64_SERIAL_H
5#define _PPC64_SERIAL_H
6
7/*
8 * This assumes you have a 1.8432 MHz clock for your UART.
9 *
10 * It'd be nice if someone built a serial card with a 24.576 MHz
11 * clock, since the 16550A is capable of handling a top speed of 1.5
12 * megabits/second; but this requires the faster clock.
13 *
14 * This program is free software; you can redistribute it and/or 2 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License 3 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 4 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version. 5 * 2 of the License, or (at your option) any later version.
18 */ 6 */
7#ifndef _ASM_POWERPC_SERIAL_H
8#define _ASM_POWERPC_SERIAL_H
9
10/*
11 * Serial ports are not listed here, because they are discovered
12 * through the device tree.
13 */
19 14
20/* Default baud base if not found in device-tree */ 15/* Default baud base if not found in device-tree */
21#define BASE_BAUD ( 1843200 / 16 ) 16#define BASE_BAUD ( 1843200 / 16 )
diff --git a/include/asm-powerpc/vdso_datapage.h b/include/asm-powerpc/vdso_datapage.h
index fc323b51366b..411832d5bbdb 100644
--- a/include/asm-powerpc/vdso_datapage.h
+++ b/include/asm-powerpc/vdso_datapage.h
@@ -73,7 +73,7 @@ struct vdso_data {
73 /* those additional ones don't have to be located anywhere 73 /* those additional ones don't have to be located anywhere
74 * special as they were not part of the original systemcfg 74 * special as they were not part of the original systemcfg
75 */ 75 */
76 __s64 wtom_clock_sec; /* Wall to monotonic clock */ 76 __s32 wtom_clock_sec; /* Wall to monotonic clock */
77 __s32 wtom_clock_nsec; 77 __s32 wtom_clock_nsec;
78 __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ 78 __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
79 __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ 79 __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
diff --git a/include/asm-ppc/immap_85xx.h b/include/asm-ppc/immap_85xx.h
index 50fb5e47094a..9383d0c13ff8 100644
--- a/include/asm-ppc/immap_85xx.h
+++ b/include/asm-ppc/immap_85xx.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * MPC85xx Internal Memory Map 4 * MPC85xx Internal Memory Map
5 * 5 *
6 * Maintainer: Kumar Gala <kumar.gala@freescale.com> 6 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
7 * 7 *
8 * Copyright 2004 Freescale Semiconductor, Inc 8 * Copyright 2004 Freescale Semiconductor, Inc
9 * 9 *
diff --git a/include/asm-ppc/ipic.h b/include/asm-ppc/ipic.h
index 9092b920997a..0fe396a2b666 100644
--- a/include/asm-ppc/ipic.h
+++ b/include/asm-ppc/ipic.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * IPIC external definitions and structure. 4 * IPIC external definitions and structure.
5 * 5 *
6 * Maintainer: Kumar Gala <kumar.gala@freescale.com> 6 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
7 * 7 *
8 * Copyright 2005 Freescale Semiconductor, Inc 8 * Copyright 2005 Freescale Semiconductor, Inc
9 * 9 *
diff --git a/include/asm-ppc/mpc83xx.h b/include/asm-ppc/mpc83xx.h
index ce212201db2a..7cdf60fa69b6 100644
--- a/include/asm-ppc/mpc83xx.h
+++ b/include/asm-ppc/mpc83xx.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * MPC83xx definitions 4 * MPC83xx definitions
5 * 5 *
6 * Maintainer: Kumar Gala <kumar.gala@freescale.com> 6 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
7 * 7 *
8 * Copyright 2005 Freescale Semiconductor, Inc 8 * Copyright 2005 Freescale Semiconductor, Inc
9 * 9 *
diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h
index d98db980cd49..9d14baea3d71 100644
--- a/include/asm-ppc/mpc85xx.h
+++ b/include/asm-ppc/mpc85xx.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * MPC85xx definitions 4 * MPC85xx definitions
5 * 5 *
6 * Maintainer: Kumar Gala <kumar.gala@freescale.com> 6 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
7 * 7 *
8 * Copyright 2004 Freescale Semiconductor, Inc 8 * Copyright 2004 Freescale Semiconductor, Inc
9 * 9 *
diff --git a/include/asm-ppc/nvram.h b/include/asm-ppc/nvram.h
deleted file mode 100644
index 31ef16e3fc4f..000000000000
--- a/include/asm-ppc/nvram.h
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * PreP compliant NVRAM access
3 */
4
5#ifdef __KERNEL__
6#ifndef _PPC_NVRAM_H
7#define _PPC_NVRAM_H
8
9#define NVRAM_AS0 0x74
10#define NVRAM_AS1 0x75
11#define NVRAM_DATA 0x77
12
13
14/* RTC Offsets */
15
16#define MOTO_RTC_SECONDS 0x1FF9
17#define MOTO_RTC_MINUTES 0x1FFA
18#define MOTO_RTC_HOURS 0x1FFB
19#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
20#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
21#define MOTO_RTC_MONTH 0x1FFE
22#define MOTO_RTC_YEAR 0x1FFF
23#define MOTO_RTC_CONTROLA 0x1FF8
24#define MOTO_RTC_CONTROLB 0x1FF9
25
 26/* PowerMac specific nvram stuff */
27
28enum {
29 pmac_nvram_OF, /* Open Firmware partition */
30 pmac_nvram_XPRAM, /* MacOS XPRAM partition */
31 pmac_nvram_NR /* MacOS Name Registry partition */
32};
33
34/* Return partition offset in nvram */
35extern int pmac_get_partition(int partition);
36
37/* Direct access to XPRAM on PowerMacs */
38extern u8 pmac_xpram_read(int xpaddr);
39extern void pmac_xpram_write(int xpaddr, u8 data);
40
41/* Synchronize NVRAM */
42extern void nvram_sync(void);
43
44/* Normal access to NVRAM */
45extern unsigned char nvram_read_byte(int i);
46extern void nvram_write_byte(unsigned char c, int i);
47
48/* Some offsets in XPRAM */
49#define PMAC_XPRAM_MACHINE_LOC 0xe4
50#define PMAC_XPRAM_SOUND_VOLUME 0x08
51
52/* Machine location structure in PowerMac XPRAM */
53struct pmac_machine_location {
54 unsigned int latitude; /* 2+30 bit Fractional number */
55 unsigned int longitude; /* 2+30 bit Fractional number */
56 unsigned int delta; /* mix of GMT delta and DLS */
57};
58
59/*
60 * /dev/nvram ioctls
61 *
62 * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is
63 * definitely obsolete. Do not use it if you can avoid it
64 */
65
66#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \
67 _IOWR('p', 0x40, int)
68
69#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */
70#define IOC_NVRAM_SYNC _IO('p', 0x43) /* Sync NVRAM image */
71
72#endif
73#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/ppc_sys.h b/include/asm-ppc/ppc_sys.h
index bba5305c29ed..83d8c77c124d 100644
--- a/include/asm-ppc/ppc_sys.h
+++ b/include/asm-ppc/ppc_sys.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * PPC system definitions and library functions 4 * PPC system definitions and library functions
5 * 5 *
6 * Maintainer: Kumar Gala <kumar.gala@freescale.com> 6 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
7 * 7 *
8 * Copyright 2005 Freescale Semiconductor, Inc 8 * Copyright 2005 Freescale Semiconductor, Inc
9 * 9 *
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
deleted file mode 100644
index 3efc3288f7e9..000000000000
--- a/include/asm-ppc64/page.h
+++ /dev/null
@@ -1,328 +0,0 @@
1#ifndef _PPC64_PAGE_H
2#define _PPC64_PAGE_H
3
4/*
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <asm/asm-compat.h>
15
16/*
17 * We support either 4k or 64k software page size. When using 64k pages
 18 * however, whether we are really supporting 64k pages in HW or not is
19 * irrelevant to those definitions. We always define HW_PAGE_SHIFT to 12
 20 * as use of 64k pages remains Linux-kernel specific; every notion of
21 * page number shared with the firmware, TCEs, iommu, etc... still assumes
22 * a page size of 4096.
23 */
24#ifdef CONFIG_PPC_64K_PAGES
25#define PAGE_SHIFT 16
26#else
27#define PAGE_SHIFT 12
28#endif
29
30#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
31#define PAGE_MASK (~(PAGE_SIZE-1))
32
33/* HW_PAGE_SHIFT is always 4k pages */
34#define HW_PAGE_SHIFT 12
35#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
36#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
37
 38/* PAGE_FACTOR is the shift difference between PAGE_SHIFT and
 39 * HW_PAGE_SHIFT, i.e. relative to 4k hardware pages
40 */
41#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
42
43/* Segment size */
44#define SID_SHIFT 28
45#define SID_MASK 0xfffffffffUL
46#define ESID_MASK 0xfffffffff0000000UL
47#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
48
49/* Large pages size */
50
51#ifndef __ASSEMBLY__
52extern unsigned int HPAGE_SHIFT;
53#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
54#define HPAGE_MASK (~(HPAGE_SIZE - 1))
55#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
56#endif /* __ASSEMBLY__ */
57
58#ifdef CONFIG_HUGETLB_PAGE
59
60
61#define HTLB_AREA_SHIFT 40
62#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
63#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
64
65#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
66 - (1U << GET_ESID(addr))) & 0xffff)
67#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
68 - (1U << GET_HTLB_AREA(addr))) & 0xffff)
69
70#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
71#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
72#define ARCH_HAS_SETCLEAR_HUGE_PTE
73
74#define touches_hugepage_low_range(mm, addr, len) \
75 (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
76#define touches_hugepage_high_range(mm, addr, len) \
77 (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
78
79#define __within_hugepage_low_range(addr, len, segmask) \
80 ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
81#define within_hugepage_low_range(addr, len) \
82 __within_hugepage_low_range((addr), (len), \
83 current->mm->context.low_htlb_areas)
84#define __within_hugepage_high_range(addr, len, zonemask) \
85 ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
86#define within_hugepage_high_range(addr, len) \
87 __within_hugepage_high_range((addr), (len), \
88 current->mm->context.high_htlb_areas)
89
90#define is_hugepage_only_range(mm, addr, len) \
91 (touches_hugepage_high_range((mm), (addr), (len)) || \
92 touches_hugepage_low_range((mm), (addr), (len)))
93#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
94
95#define in_hugepage_area(context, addr) \
96 (cpu_has_feature(CPU_FTR_16M_PAGE) && \
97 ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
98 ( ((addr) < 0x100000000L) && \
99 ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
100
101#else /* !CONFIG_HUGETLB_PAGE */
102
103#define in_hugepage_area(mm, addr) 0
104
105#endif /* !CONFIG_HUGETLB_PAGE */
106
107/* align addr on a size boundary - adjust address up/down if needed */
108#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
109#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
110
111/* align addr on a size boundary - adjust address up if needed */
112#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
113
114/* to align the pointer to the (next) page boundary */
115#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
116
117#ifdef __KERNEL__
118#ifndef __ASSEMBLY__
119#include <asm/cache.h>
120
121#undef STRICT_MM_TYPECHECKS
122
123#define REGION_SIZE 4UL
124#define REGION_SHIFT 60UL
125#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
126
127static __inline__ void clear_page(void *addr)
128{
129 unsigned long lines, line_size;
130
131 line_size = ppc64_caches.dline_size;
132 lines = ppc64_caches.dlines_per_page;
133
134 __asm__ __volatile__(
135 "mtctr %1 # clear_page\n\
1361: dcbz 0,%0\n\
137 add %0,%0,%3\n\
138 bdnz+ 1b"
139 : "=r" (addr)
140 : "r" (lines), "0" (addr), "r" (line_size)
141 : "ctr", "memory");
142}
143
144extern void copy_4K_page(void *to, void *from);
145
146#ifdef CONFIG_PPC_64K_PAGES
147static inline void copy_page(void *to, void *from)
148{
149 unsigned int i;
150 for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
151 copy_4K_page(to, from);
152 to += 4096;
153 from += 4096;
154 }
155}
156#else /* CONFIG_PPC_64K_PAGES */
157static inline void copy_page(void *to, void *from)
158{
159 copy_4K_page(to, from);
160}
161#endif /* CONFIG_PPC_64K_PAGES */
162
163struct page;
164extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
165extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);
166
167#ifdef STRICT_MM_TYPECHECKS
168/*
169 * These are used to make use of C type-checking.
170 * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
171 */
172
173/* PTE level */
174typedef struct { unsigned long pte; } pte_t;
175#define pte_val(x) ((x).pte)
176#define __pte(x) ((pte_t) { (x) })
177
178/* 64k pages additionally define a bigger "real PTE" type that gathers
179 * the "second half" part of the PTE for pseudo 64k pages
180 */
181#ifdef CONFIG_PPC_64K_PAGES
182typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
183#else
184typedef struct { pte_t pte; } real_pte_t;
185#endif
186
187/* PMD level */
188typedef struct { unsigned long pmd; } pmd_t;
189#define pmd_val(x) ((x).pmd)
190#define __pmd(x) ((pmd_t) { (x) })
191
 192/* PUD level exists only on 4k pages */
193#ifndef CONFIG_PPC_64K_PAGES
194typedef struct { unsigned long pud; } pud_t;
195#define pud_val(x) ((x).pud)
196#define __pud(x) ((pud_t) { (x) })
197#endif
198
199/* PGD level */
200typedef struct { unsigned long pgd; } pgd_t;
201#define pgd_val(x) ((x).pgd)
202#define __pgd(x) ((pgd_t) { (x) })
203
204/* Page protection bits */
205typedef struct { unsigned long pgprot; } pgprot_t;
206#define pgprot_val(x) ((x).pgprot)
207#define __pgprot(x) ((pgprot_t) { (x) })
208
209#else
210
211/*
212 * .. while these make it easier on the compiler
213 */
214
215typedef unsigned long pte_t;
216#define pte_val(x) (x)
217#define __pte(x) (x)
218
219#ifdef CONFIG_PPC_64K_PAGES
220typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
221#else
222typedef unsigned long real_pte_t;
223#endif
224
225
226typedef unsigned long pmd_t;
227#define pmd_val(x) (x)
228#define __pmd(x) (x)
229
230#ifndef CONFIG_PPC_64K_PAGES
231typedef unsigned long pud_t;
232#define pud_val(x) (x)
233#define __pud(x) (x)
234#endif
235
236typedef unsigned long pgd_t;
237#define pgd_val(x) (x)
238#define pgprot_val(x) (x)
239
240typedef unsigned long pgprot_t;
241#define __pgd(x) (x)
242#define __pgprot(x) (x)
243
244#endif
245
246#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
247
248extern int page_is_ram(unsigned long pfn);
249
250extern u64 ppc64_pft_size; /* Log 2 of page table size */
251
 252/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
253#define __HAVE_ARCH_GATE_AREA 1
254
255#endif /* __ASSEMBLY__ */
256
257#ifdef MODULE
258#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
259#else
260#define __page_aligned \
261 __attribute__((__aligned__(PAGE_SIZE), \
262 __section__(".data.page_aligned")))
263#endif
264
265
266/* This must match the -Ttext linker address */
267/* Note: tophys & tovirt make assumptions about how */
268/* KERNELBASE is defined for performance reasons. */
269/* When KERNELBASE moves, those macros may have */
270/* to change! */
271#define PAGE_OFFSET ASM_CONST(0xC000000000000000)
272#define KERNELBASE PAGE_OFFSET
273#define VMALLOCBASE ASM_CONST(0xD000000000000000)
274
275#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
276#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT)
277#define USER_REGION_ID (0UL)
278#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
279
280#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
281
282#ifdef CONFIG_FLATMEM
283#define pfn_to_page(pfn) (mem_map + (pfn))
284#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
285#define pfn_valid(pfn) ((pfn) < max_mapnr)
286#endif
287
288#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
289#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
290
291#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
292
293/*
294 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
295 * and needs to be executable. This means the whole heap ends
296 * up being executable.
297 */
298#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
299 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
300
301#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
302 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
303
304#define VM_DATA_DEFAULT_FLAGS \
305 (test_thread_flag(TIF_32BIT) ? \
306 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
307
308/*
309 * This is the default if a program doesn't have a PT_GNU_STACK
 310 * program header entry. The PPC64 ELF ABI has a non-executable
 311 * stack by default, so in the absence of a PT_GNU_STACK program header
312 * we turn execute permission off.
313 */
314#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
315 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
316
317#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
318 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
319
320#define VM_STACK_DEFAULT_FLAGS \
321 (test_thread_flag(TIF_32BIT) ? \
322 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
323
324#endif /* __KERNEL__ */
325
326#include <asm-generic/page.h>
327
328#endif /* _PPC64_PAGE_H */
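
The deleted page.h above carries most of its reasoning in comments: the software page size (4k or 64k) is independent of the 4k hardware/firmware page size, and the _ALIGN helpers round addresses to power-of-two boundaries. A minimal standalone sketch (not kernel code; constants copied for the CONFIG_PPC_64K_PAGES case) that re-derives the arithmetic:

#include <stdio.h>

#define PAGE_SHIFT      16                      /* 64k software pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define HW_PAGE_SHIFT   12                      /* firmware/TCE view stays 4k */
#define HW_PAGE_SIZE    (1UL << HW_PAGE_SHIFT)
#define PAGE_FACTOR     (PAGE_SHIFT - HW_PAGE_SHIFT)

/* align addr up/down to a power-of-two size, as in the header */
#define _ALIGN_UP(addr, size)   (((addr) + ((size) - 1)) & ~((size) - 1))
#define _ALIGN_DOWN(addr, size) ((addr) & ~((size) - 1))

int main(void)
{
        unsigned long addr = 0x12345;

        /* one 64k software page covers 1 << PAGE_FACTOR = 16 hardware pages */
        printf("PAGE_SIZE=%lu HW_PAGE_SIZE=%lu hw pages per sw page=%lu\n",
               PAGE_SIZE, HW_PAGE_SIZE, 1UL << PAGE_FACTOR);
        printf("align up:   0x%lx\n", _ALIGN_UP(addr, PAGE_SIZE));   /* 0x20000 */
        printf("align down: 0x%lx\n", _ALIGN_DOWN(addr, PAGE_SIZE)); /* 0x10000 */
        return 0;
}
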
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
deleted file mode 100644
index ddfe186589fa..000000000000
--- a/include/asm-ppc64/prom.h
+++ /dev/null
@@ -1,220 +0,0 @@
1#ifndef _PPC64_PROM_H
2#define _PPC64_PROM_H
3
4/*
5 * Definitions for talking to the Open Firmware PROM on
6 * Power Macintosh computers.
7 *
8 * Copyright (C) 1996 Paul Mackerras.
9 *
10 * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17#include <linux/config.h>
18#include <linux/proc_fs.h>
19#include <asm/atomic.h>
20
21#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) - offset))
22#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
23#define RELOC(x) (*PTRRELOC(&(x)))
24
25/* Definitions used by the flattened device tree */
26#define OF_DT_HEADER 0xd00dfeed /* marker */
27#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
28#define OF_DT_END_NODE 0x2 /* End node */
29#define OF_DT_PROP 0x3 /* Property: name off, size,
30 * content */
31#define OF_DT_NOP 0x4 /* nop */
32#define OF_DT_END 0x9
33
34#define OF_DT_VERSION 0x10
35
36/*
37 * This is what gets passed to the kernel by prom_init or kexec
38 *
 39 * The dt struct contains the device tree structure, full paths and
 40 * property contents. The dt strings contain a separate block with just
 41 * the strings for the property names; it is fully page aligned and
 42 * self-contained in a page, so that it can be kept around by the kernel.
 43 * Each property name appears only once in this page (cheap compression).
 44 *
 45 * The mem_rsvmap contains a map of reserved ranges of physical memory;
 46 * passing it here instead of in the device-tree itself greatly simplifies
 47 * everybody's job. It's just a list of u64 pairs (base/size) that
 48 * ends when size is 0.
49 */
50struct boot_param_header
51{
52 u32 magic; /* magic word OF_DT_HEADER */
53 u32 totalsize; /* total size of DT block */
54 u32 off_dt_struct; /* offset to structure */
55 u32 off_dt_strings; /* offset to strings */
56 u32 off_mem_rsvmap; /* offset to memory reserve map */
57 u32 version; /* format version */
58 u32 last_comp_version; /* last compatible version */
59 /* version 2 fields below */
60 u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
61 /* version 3 fields below */
62 u32 dt_strings_size; /* size of the DT strings block */
63};
64
65
66
67typedef u32 phandle;
68typedef u32 ihandle;
69
70struct address_range {
71 unsigned long space;
72 unsigned long address;
73 unsigned long size;
74};
75
76struct interrupt_info {
77 int line;
78 int sense; /* +ve/-ve logic, edge or level, etc. */
79};
80
81struct pci_address {
82 u32 a_hi;
83 u32 a_mid;
84 u32 a_lo;
85};
86
87struct isa_address {
88 u32 a_hi;
89 u32 a_lo;
90};
91
92struct isa_range {
93 struct isa_address isa_addr;
94 struct pci_address pci_addr;
95 unsigned int size;
96};
97
98struct reg_property {
99 unsigned long address;
100 unsigned long size;
101};
102
103struct reg_property32 {
104 unsigned int address;
105 unsigned int size;
106};
107
108struct reg_property64 {
109 unsigned long address;
110 unsigned long size;
111};
112
113struct property {
114 char *name;
115 int length;
116 unsigned char *value;
117 struct property *next;
118};
119
120struct device_node {
121 char *name;
122 char *type;
123 phandle node;
124 phandle linux_phandle;
125 int n_addrs;
126 struct address_range *addrs;
127 int n_intrs;
128 struct interrupt_info *intrs;
129 char *full_name;
130
131 struct property *properties;
132 struct device_node *parent;
133 struct device_node *child;
134 struct device_node *sibling;
135 struct device_node *next; /* next device of same type */
136 struct device_node *allnext; /* next in list of all nodes */
137 struct proc_dir_entry *pde; /* this node's proc directory */
138 struct kref kref;
139 unsigned long _flags;
140 void *data;
141#ifdef CONFIG_PPC_ISERIES
142 struct list_head Device_List;
143#endif
144};
145
146extern struct device_node *of_chosen;
147
148/* flag descriptions */
149#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
150
151#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
152#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
153
154/*
155 * Until 32-bit ppc can add proc_dir_entries to its device_node
156 * definition, we cannot refer to pde, name_link, and addr_link
157 * in arch-independent code.
158 */
159#define HAVE_ARCH_DEVTREE_FIXUPS
160
161static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
162{
163 dn->pde = de;
164}
165
166
 167/* OBSOLETE: Old style node lookup */
168extern struct device_node *find_devices(const char *name);
169extern struct device_node *find_type_devices(const char *type);
170extern struct device_node *find_path_device(const char *path);
171extern struct device_node *find_compatible_devices(const char *type,
172 const char *compat);
173extern struct device_node *find_all_nodes(void);
174
175/* New style node lookup */
176extern struct device_node *of_find_node_by_name(struct device_node *from,
177 const char *name);
178extern struct device_node *of_find_node_by_type(struct device_node *from,
179 const char *type);
180extern struct device_node *of_find_compatible_node(struct device_node *from,
181 const char *type, const char *compat);
182extern struct device_node *of_find_node_by_path(const char *path);
183extern struct device_node *of_find_node_by_phandle(phandle handle);
184extern struct device_node *of_find_all_nodes(struct device_node *prev);
185extern struct device_node *of_get_parent(const struct device_node *node);
186extern struct device_node *of_get_next_child(const struct device_node *node,
187 struct device_node *prev);
188extern struct device_node *of_node_get(struct device_node *node);
189extern void of_node_put(struct device_node *node);
190
191/* For scanning the flat device-tree at boot time */
192int __init of_scan_flat_dt(int (*it)(unsigned long node,
193 const char *uname, int depth,
194 void *data),
195 void *data);
196void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
197 unsigned long *size);
198
199/* For updating the device tree at runtime */
200extern void of_attach_node(struct device_node *);
201extern void of_detach_node(const struct device_node *);
202
203/* Other Prototypes */
204extern unsigned long prom_init(unsigned long, unsigned long, unsigned long,
205 unsigned long, unsigned long);
206extern void finish_device_tree(void);
207extern void unflatten_device_tree(void);
208extern void early_init_devtree(void *);
209extern int device_is_compatible(struct device_node *device, const char *);
210extern int machine_is_compatible(const char *compat);
211extern unsigned char *get_property(struct device_node *node, const char *name,
212 int *lenp);
213extern void print_properties(struct device_node *node);
214extern int prom_n_addr_cells(struct device_node* np);
215extern int prom_n_size_cells(struct device_node* np);
216extern int prom_n_intr_cells(struct device_node* np);
217extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
218extern int prom_add_property(struct device_node* np, struct property* prop);
219
220#endif /* _PPC64_PROM_H */
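
The boot_param_header above describes the flattened device tree blob handed to the kernel by prom_init or kexec. A minimal userspace sketch (not the kernel's parser; the blob stores the fields big-endian, hence be32toh(), and error handling is kept to a minimum) that validates the OF_DT_HEADER magic and prints a few header fields:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define OF_DT_HEADER 0xd00dfeed

struct boot_param_header {
        uint32_t magic, totalsize;
        uint32_t off_dt_struct, off_dt_strings, off_mem_rsvmap;
        uint32_t version, last_comp_version;
        uint32_t boot_cpuid_phys;       /* version 2+ */
        uint32_t dt_strings_size;       /* version 3+ */
};

int main(int argc, char **argv)
{
        struct boot_param_header hdr;
        FILE *f;

        if (argc < 2 || !(f = fopen(argv[1], "rb")))
                return 1;
        if (fread(&hdr, sizeof(hdr), 1, f) != 1)
                return 1;
        fclose(f);

        if (be32toh(hdr.magic) != OF_DT_HEADER) {
                fprintf(stderr, "not a flattened device tree\n");
                return 1;
        }
        printf("dtb version %u, total size %u bytes, strings at +0x%x\n",
               be32toh(hdr.version), be32toh(hdr.totalsize),
               be32toh(hdr.off_dt_strings));
        return 0;
}
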
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
deleted file mode 100644
index bf9a6aba19c9..000000000000
--- a/include/asm-ppc64/system.h
+++ /dev/null
@@ -1,310 +0,0 @@
1#ifndef __PPC64_SYSTEM_H
2#define __PPC64_SYSTEM_H
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <linux/config.h>
12#include <linux/compiler.h>
13#include <asm/page.h>
14#include <asm/processor.h>
15#include <asm/hw_irq.h>
16#include <asm/synch.h>
17
18/*
19 * Memory barrier.
20 * The sync instruction guarantees that all memory accesses initiated
21 * by this processor have been performed (with respect to all other
22 * mechanisms that access memory). The eieio instruction is a barrier
23 * providing an ordering (separately) for (a) cacheable stores and (b)
24 * loads and stores to non-cacheable memory (e.g. I/O devices).
25 *
26 * mb() prevents loads and stores being reordered across this point.
27 * rmb() prevents loads being reordered across this point.
28 * wmb() prevents stores being reordered across this point.
29 * read_barrier_depends() prevents data-dependent loads being reordered
30 * across this point (nop on PPC).
31 *
32 * We have to use the sync instructions for mb(), since lwsync doesn't
33 * order loads with respect to previous stores. Lwsync is fine for
34 * rmb(), though.
35 * For wmb(), we use sync since wmb is used in drivers to order
36 * stores to system memory with respect to writes to the device.
37 * However, smp_wmb() can be a lighter-weight eieio barrier on
38 * SMP since it is only used to order updates to system memory.
39 */
40#define mb() __asm__ __volatile__ ("sync" : : : "memory")
41#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
42#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
43#define read_barrier_depends() do { } while(0)
44
45#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
46#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
47
48#ifdef CONFIG_SMP
49#define smp_mb() mb()
50#define smp_rmb() rmb()
51#define smp_wmb() eieio()
52#define smp_read_barrier_depends() read_barrier_depends()
53#else
54#define smp_mb() __asm__ __volatile__("": : :"memory")
55#define smp_rmb() __asm__ __volatile__("": : :"memory")
56#define smp_wmb() __asm__ __volatile__("": : :"memory")
57#define smp_read_barrier_depends() do { } while(0)
58#endif /* CONFIG_SMP */
59
60#ifdef __KERNEL__
61struct task_struct;
62struct pt_regs;
63
64#ifdef CONFIG_DEBUGGER
65
66extern int (*__debugger)(struct pt_regs *regs);
67extern int (*__debugger_ipi)(struct pt_regs *regs);
68extern int (*__debugger_bpt)(struct pt_regs *regs);
69extern int (*__debugger_sstep)(struct pt_regs *regs);
70extern int (*__debugger_iabr_match)(struct pt_regs *regs);
71extern int (*__debugger_dabr_match)(struct pt_regs *regs);
72extern int (*__debugger_fault_handler)(struct pt_regs *regs);
73
74#define DEBUGGER_BOILERPLATE(__NAME) \
75static inline int __NAME(struct pt_regs *regs) \
76{ \
77 if (unlikely(__ ## __NAME)) \
78 return __ ## __NAME(regs); \
79 return 0; \
80}
81
82DEBUGGER_BOILERPLATE(debugger)
83DEBUGGER_BOILERPLATE(debugger_ipi)
84DEBUGGER_BOILERPLATE(debugger_bpt)
85DEBUGGER_BOILERPLATE(debugger_sstep)
86DEBUGGER_BOILERPLATE(debugger_iabr_match)
87DEBUGGER_BOILERPLATE(debugger_dabr_match)
88DEBUGGER_BOILERPLATE(debugger_fault_handler)
89
90#ifdef CONFIG_XMON
91extern void xmon_init(int enable);
92#endif
93
94#else
95static inline int debugger(struct pt_regs *regs) { return 0; }
96static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
97static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
98static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
99static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
100static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
101static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
102#endif
103
104extern int set_dabr(unsigned long dabr);
105extern void _exception(int signr, struct pt_regs *regs, int code,
106 unsigned long addr);
107extern int fix_alignment(struct pt_regs *regs);
108extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
109 int sig);
110extern void show_regs(struct pt_regs * regs);
111extern void low_hash_fault(struct pt_regs *regs, unsigned long address);
112extern int die(const char *str, struct pt_regs *regs, long err);
113
114extern int _get_PVR(void);
115extern void giveup_fpu(struct task_struct *);
116extern void disable_kernel_fp(void);
117extern void flush_fp_to_thread(struct task_struct *);
118extern void enable_kernel_fp(void);
119extern void giveup_altivec(struct task_struct *);
120extern void disable_kernel_altivec(void);
121extern void enable_kernel_altivec(void);
122extern int emulate_altivec(struct pt_regs *);
123extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
124extern void cvt_df(double *from, float *to, struct thread_struct *thread);
125
126#ifdef CONFIG_ALTIVEC
127extern void flush_altivec_to_thread(struct task_struct *);
128#else
129static inline void flush_altivec_to_thread(struct task_struct *t)
130{
131}
132#endif
133
134static inline void flush_spe_to_thread(struct task_struct *t)
135{
136}
137
138extern int mem_init_done; /* set on boot once kmalloc can be called */
139extern unsigned long memory_limit;
140
141/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
142extern unsigned char e2a(unsigned char);
143
144extern struct task_struct *__switch_to(struct task_struct *,
145 struct task_struct *);
146#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
147
148struct thread_struct;
149extern struct task_struct * _switch(struct thread_struct *prev,
150 struct thread_struct *next);
151
152extern unsigned long klimit;
153
154extern int powersave_nap; /* set if nap mode can be used in idle loop */
155
156/*
157 * Atomic exchange
158 *
159 * Changes the memory location '*ptr' to be val and returns
160 * the previous value stored there.
161 *
162 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
163 * is more like most of the other architectures.
164 */
165static __inline__ unsigned long
166__xchg_u32(volatile unsigned int *m, unsigned long val)
167{
168 unsigned long dummy;
169
170 __asm__ __volatile__(
171 EIEIO_ON_SMP
172"1: lwarx %0,0,%3 # __xchg_u32\n\
173 stwcx. %2,0,%3\n\
1742: bne- 1b"
175 ISYNC_ON_SMP
176 : "=&r" (dummy), "=m" (*m)
177 : "r" (val), "r" (m)
178 : "cc", "memory");
179
180 return (dummy);
181}
182
183static __inline__ unsigned long
184__xchg_u64(volatile long *m, unsigned long val)
185{
186 unsigned long dummy;
187
188 __asm__ __volatile__(
189 EIEIO_ON_SMP
190"1: ldarx %0,0,%3 # __xchg_u64\n\
191 stdcx. %2,0,%3\n\
1922: bne- 1b"
193 ISYNC_ON_SMP
194 : "=&r" (dummy), "=m" (*m)
195 : "r" (val), "r" (m)
196 : "cc", "memory");
197
198 return (dummy);
199}
200
201/*
202 * This function doesn't exist, so you'll get a linker error
203 * if something tries to do an invalid xchg().
204 */
205extern void __xchg_called_with_bad_pointer(void);
206
207static __inline__ unsigned long
208__xchg(volatile void *ptr, unsigned long x, unsigned int size)
209{
210 switch (size) {
211 case 4:
212 return __xchg_u32(ptr, x);
213 case 8:
214 return __xchg_u64(ptr, x);
215 }
216 __xchg_called_with_bad_pointer();
217 return x;
218}
219
220#define xchg(ptr,x) \
221 ({ \
222 __typeof__(*(ptr)) _x_ = (x); \
223 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
224 })
225
226#define tas(ptr) (xchg((ptr),1))
227
228#define __HAVE_ARCH_CMPXCHG 1
229
230static __inline__ unsigned long
231__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
232{
233 unsigned int prev;
234
235 __asm__ __volatile__ (
236 EIEIO_ON_SMP
237"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
238 cmpw 0,%0,%3\n\
239 bne- 2f\n\
240 stwcx. %4,0,%2\n\
241 bne- 1b"
242 ISYNC_ON_SMP
243 "\n\
2442:"
245 : "=&r" (prev), "=m" (*p)
246 : "r" (p), "r" (old), "r" (new), "m" (*p)
247 : "cc", "memory");
248
249 return prev;
250}
251
252static __inline__ unsigned long
253__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
254{
255 unsigned long prev;
256
257 __asm__ __volatile__ (
258 EIEIO_ON_SMP
259"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
260 cmpd 0,%0,%3\n\
261 bne- 2f\n\
262 stdcx. %4,0,%2\n\
263 bne- 1b"
264 ISYNC_ON_SMP
265 "\n\
2662:"
267 : "=&r" (prev), "=m" (*p)
268 : "r" (p), "r" (old), "r" (new), "m" (*p)
269 : "cc", "memory");
270
271 return prev;
272}
273
274/* This function doesn't exist, so you'll get a linker error
275 if something tries to do an invalid cmpxchg(). */
276extern void __cmpxchg_called_with_bad_pointer(void);
277
278static __inline__ unsigned long
279__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
280 unsigned int size)
281{
282 switch (size) {
283 case 4:
284 return __cmpxchg_u32(ptr, old, new);
285 case 8:
286 return __cmpxchg_u64(ptr, old, new);
287 }
288 __cmpxchg_called_with_bad_pointer();
289 return old;
290}
291
292#define cmpxchg(ptr,o,n)\
293 ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
294 (unsigned long)(n),sizeof(*(ptr))))
295
296/*
297 * We handle most unaligned accesses in hardware. On the other hand
298 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
299 * powers of 2 writes until it reaches sufficient alignment).
300 *
301 * Based on this we disable the IP header alignment in network drivers.
302 */
303#define NET_IP_ALIGN 0
304
305#define arch_align_stack(x) (x)
306
307extern unsigned long reloc_offset(void);
308
309#endif /* __KERNEL__ */
310#endif
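
The xchg()/tas() machinery removed above is the usual building block for a test-and-set lock. A portable sketch of that pattern, using GCC/Clang __atomic builtins rather than the ppc64 lwarx/stwcx. sequences (assumes only a compiler providing those builtins):

#include <stdio.h>

static volatile unsigned int lock;

static void tas_lock(volatile unsigned int *l)
{
        /* xchg(l, 1): atomically store 1 and return the previous value */
        while (__atomic_exchange_n(l, 1u, __ATOMIC_ACQUIRE))
                ;       /* previous value was 1: someone holds it, spin */
}

static void tas_unlock(volatile unsigned int *l)
{
        __atomic_store_n(l, 0u, __ATOMIC_RELEASE);
}

int main(void)
{
        tas_lock(&lock);
        puts("critical section");
        tas_unlock(&lock);
        return 0;
}
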
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index 9d86ba6f12d0..b3bd4f679f72 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -198,6 +198,18 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
198 return retval; 198 return retval;
199} 199}
200 200
201#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
202
203#define atomic_add_unless(v, a, u) \
204({ \
205 int c, old; \
206 c = atomic_read(v); \
207 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
208 c = old; \
209 c != (u); \
210})
211#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
212
201#define smp_mb__before_atomic_dec() smp_mb() 213#define smp_mb__before_atomic_dec() smp_mb()
202#define smp_mb__after_atomic_dec() smp_mb() 214#define smp_mb__after_atomic_dec() smp_mb()
203#define smp_mb__before_atomic_inc() smp_mb() 215#define smp_mb__before_atomic_inc() smp_mb()
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 3c4f805da1ac..aabfd334462c 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -87,6 +87,35 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
87#define atomic_inc(v) atomic_add(1,(v)) 87#define atomic_inc(v) atomic_add(1,(v))
88#define atomic_dec(v) atomic_sub(1,(v)) 88#define atomic_dec(v) atomic_sub(1,(v))
89 89
90static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
91{
92 int ret;
93 unsigned long flags;
94
95 local_irq_save(flags);
96 ret = v->counter;
97 if (likely(ret == old))
98 v->counter = new;
99 local_irq_restore(flags);
100
101 return ret;
102}
103
104static inline int atomic_add_unless(atomic_t *v, int a, int u)
105{
106 int ret;
107 unsigned long flags;
108
109 local_irq_save(flags);
110 ret = v->counter;
111 if (ret != u)
112 v->counter += a;
113 local_irq_restore(flags);
114
115 return ret != u;
116}
117#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
118
90static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v) 119static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
91{ 120{
92 unsigned long flags; 121 unsigned long flags;
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
index 8c3872d3e65f..927a2bc27b30 100644
--- a/include/asm-sh64/atomic.h
+++ b/include/asm-sh64/atomic.h
@@ -99,6 +99,35 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
99#define atomic_inc(v) atomic_add(1,(v)) 99#define atomic_inc(v) atomic_add(1,(v))
100#define atomic_dec(v) atomic_sub(1,(v)) 100#define atomic_dec(v) atomic_sub(1,(v))
101 101
102static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
103{
104 int ret;
105 unsigned long flags;
106
107 local_irq_save(flags);
108 ret = v->counter;
109 if (likely(ret == old))
110 v->counter = new;
111 local_irq_restore(flags);
112
113 return ret;
114}
115
116static inline int atomic_add_unless(atomic_t *v, int a, int u)
117{
118 int ret;
119 unsigned long flags;
120
121 local_irq_save(flags);
122 ret = v->counter;
123 if (ret != u)
124 v->counter += a;
125 local_irq_restore(flags);
126
127 return ret != u;
128}
129#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
130
102static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v) 131static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
103{ 132{
104 unsigned long flags; 133 unsigned long flags;
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 37f6ab601c3d..62bec7ad271c 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -19,6 +19,8 @@ typedef struct { volatile int counter; } atomic_t;
19#define ATOMIC_INIT(i) { (i) } 19#define ATOMIC_INIT(i) { (i) }
20 20
21extern int __atomic_add_return(int, atomic_t *); 21extern int __atomic_add_return(int, atomic_t *);
22extern int atomic_cmpxchg(atomic_t *, int, int);
23extern int atomic_add_unless(atomic_t *, int, int);
22extern void atomic_set(atomic_t *, int); 24extern void atomic_set(atomic_t *, int);
23 25
24#define atomic_read(v) ((v)->counter) 26#define atomic_read(v) ((v)->counter)
@@ -48,6 +50,8 @@ extern void atomic_set(atomic_t *, int);
48#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) 50#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
49#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) 51#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
50 52
53#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
54
51/* This is the old 24-bit implementation. It's still used internally 55/* This is the old 24-bit implementation. It's still used internally
52 * by some sparc-specific code, notably the semaphore implementation. 56 * by some sparc-specific code, notably the semaphore implementation.
53 */ 57 */
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index e175afcf2cde..8198c3d0d007 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -70,6 +70,18 @@ extern int atomic64_sub_ret(int, atomic64_t *);
70#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) 70#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
71#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) 71#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
72 72
73#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
74
75#define atomic_add_unless(v, a, u) \
76({ \
77 int c, old; \
78 c = atomic_read(v); \
79 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
80 c = old; \
81 c != (u); \
82})
83#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
84
73/* Atomic operations are already serializing */ 85/* Atomic operations are already serializing */
74#ifdef CONFIG_SMP 86#ifdef CONFIG_SMP
75#define smp_mb__before_atomic_dec() membar_storeload_loadload(); 87#define smp_mb__before_atomic_dec() membar_storeload_loadload();
diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h
index 395268a8c0de..bede3172ce7f 100644
--- a/include/asm-v850/atomic.h
+++ b/include/asm-v850/atomic.h
@@ -90,6 +90,36 @@ static __inline__ void atomic_clear_mask (unsigned long mask, unsigned long *add
90#define atomic_dec_and_test(v) (atomic_sub_return (1, (v)) == 0) 90#define atomic_dec_and_test(v) (atomic_sub_return (1, (v)) == 0)
91#define atomic_add_negative(i,v) (atomic_add_return ((i), (v)) < 0) 91#define atomic_add_negative(i,v) (atomic_add_return ((i), (v)) < 0)
92 92
93static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
94{
95 int ret;
96 unsigned long flags;
97
98 local_irq_save(flags);
99 ret = v->counter;
100 if (likely(ret == old))
101 v->counter = new;
102 local_irq_restore(flags);
103
104 return ret;
105}
106
107static inline int atomic_add_unless(atomic_t *v, int a, int u)
108{
109 int ret;
110 unsigned long flags;
111
112 local_irq_save(flags);
113 ret = v->counter;
114 if (ret != u)
115 v->counter += a;
116 local_irq_restore(flags);
117
118 return ret != u;
119}
120
121#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
122
93/* Atomic operations are already serializing on ARM */ 123/* Atomic operations are already serializing on ARM */
94#define smp_mb__before_atomic_dec() barrier() 124#define smp_mb__before_atomic_dec() barrier()
95#define smp_mb__after_atomic_dec() barrier() 125#define smp_mb__after_atomic_dec() barrier()
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 6c5d5ca8383a..5647b7de1749 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -111,6 +111,8 @@ extern unsigned int nmi_watchdog;
111 111
112extern int disable_timer_pin_1; 112extern int disable_timer_pin_1;
113 113
114extern void setup_threshold_lvt(unsigned long lvt_off);
115
114#endif /* CONFIG_X86_LOCAL_APIC */ 116#endif /* CONFIG_X86_LOCAL_APIC */
115 117
116extern unsigned boot_cpu_id; 118extern unsigned boot_cpu_id;
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index fc4c5956e1ea..0866ef67f198 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -360,6 +360,27 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
360 return atomic_add_return(-i,v); 360 return atomic_add_return(-i,v);
361} 361}
362 362
363#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
364
365/**
366 * atomic_add_unless - add unless the number is a given value
367 * @v: pointer of type atomic_t
368 * @a: the amount to add to v...
369 * @u: ...unless v is equal to u.
370 *
371 * Atomically adds @a to @v, so long as it was not @u.
372 * Returns non-zero if @v was not @u, and zero otherwise.
373 */
374#define atomic_add_unless(v, a, u) \
375({ \
376 int c, old; \
377 c = atomic_read(v); \
378 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
379 c = old; \
380 c != (u); \
381})
382#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
383
363#define atomic_inc_return(v) (atomic_add_return(1,v)) 384#define atomic_inc_return(v) (atomic_add_return(1,v))
364#define atomic_dec_return(v) (atomic_sub_return(1,v)) 385#define atomic_dec_return(v) (atomic_sub_return(1,v))
365 386
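
The atomic_add_unless()/atomic_inc_not_zero() pattern added across these headers retries a cmpxchg until it either observes the forbidden value u (and returns 0) or successfully adds a (and returns non-zero); the typical use is refusing to take a reference on an object whose refcount has already dropped to zero. A standalone sketch of those semantics using GCC __atomic builtins instead of the kernel's atomic_t:

#include <assert.h>

static int add_unless(int *v, int a, int u)
{
        int c = __atomic_load_n(v, __ATOMIC_RELAXED);

        while (c != u) {
                /* cmpxchg: on failure 'c' is refreshed with the current value */
                if (__atomic_compare_exchange_n(v, &c, c + a, 0,
                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                        return 1;       /* added 'a' */
        }
        return 0;                       /* value was 'u'; nothing done */
}

int main(void)
{
        int refcount = 0;

        /* inc_not_zero(): refuse to resurrect a dropped reference */
        assert(add_unless(&refcount, 1, 0) == 0 && refcount == 0);
        refcount = 2;
        assert(add_unless(&refcount, 1, 0) == 1 && refcount == 3);
        return 0;
}
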
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index eda62bae1240..33e53424128b 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -9,6 +9,6 @@
9/* L1 cache line size */ 9/* L1 cache line size */
10#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) 10#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 11#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12#define L1_CACHE_SHIFT_MAX 6 /* largest L1 which this arch supports */ 12#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
13 13
14#endif 14#endif
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index 68ac3c62fe3d..33764869387b 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -98,16 +98,19 @@ static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsig
98 98
99static inline void set_intr_gate(int nr, void *func) 99static inline void set_intr_gate(int nr, void *func)
100{ 100{
101 BUG_ON((unsigned)nr > 0xFF);
101 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0); 102 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
102} 103}
103 104
104static inline void set_intr_gate_ist(int nr, void *func, unsigned ist) 105static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
105{ 106{
107 BUG_ON((unsigned)nr > 0xFF);
106 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist); 108 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
107} 109}
108 110
109static inline void set_system_gate(int nr, void *func) 111static inline void set_system_gate(int nr, void *func)
110{ 112{
113 BUG_ON((unsigned)nr > 0xFF);
111 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0); 114 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
112} 115}
113 116
@@ -129,9 +132,16 @@ static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned
129 132
130static inline void set_tss_desc(unsigned cpu, void *addr) 133static inline void set_tss_desc(unsigned cpu, void *addr)
131{ 134{
132 set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS], (unsigned long)addr, 135 /*
133 DESC_TSS, 136 * sizeof(unsigned long) coming from an extra "long" at the end
134 sizeof(struct tss_struct) - 1); 137 * of the iobitmap. See tss_struct definition in processor.h
138 *
139 * -1? seg base+limit should be pointing to the address of the
140 * last valid byte
141 */
142 set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS],
143 (unsigned long)addr, DESC_TSS,
144 IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
135} 145}
136 146
137static inline void set_ldt_desc(unsigned cpu, void *addr, int size) 147static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h
index 16fa3a064d0c..6f2a817b6a7c 100644
--- a/include/asm-x86_64/dma.h
+++ b/include/asm-x86_64/dma.h
@@ -72,8 +72,15 @@
72 72
73#define MAX_DMA_CHANNELS 8 73#define MAX_DMA_CHANNELS 8
74 74
75/* The maximum address that we can perform a DMA transfer to on this platform */ 75
76#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) 76/* 16MB ISA DMA zone */
77#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT)
78
79/* 4GB broken PCI/AGP hardware bus master zone */
80#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
81
82/* Compat define for old dma zone */
83#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
77 84
78/* 8237 DMA controllers */ 85/* 8237 DMA controllers */
79#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ 86#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
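
For the usual 4k pages (PAGE_SHIFT == 12), the new zone limits work out to 4096 pfns for the 16MB ISA DMA zone and 1048576 pfns for the 4GB DMA32 zone. A quick standalone check (not kernel code):

#include <stdio.h>

#define PAGE_SHIFT      12
#define MAX_DMA_PFN     ((16ULL * 1024 * 1024) >> PAGE_SHIFT)
#define MAX_DMA32_PFN   ((4ULL * 1024 * 1024 * 1024) >> PAGE_SHIFT)

int main(void)
{
        /* 4096 pfns == 16MB ISA zone, 1048576 pfns == 4GB DMA32 zone */
        printf("MAX_DMA_PFN   = %llu\n", MAX_DMA_PFN);
        printf("MAX_DMA32_PFN = %llu\n", MAX_DMA32_PFN);
        return 0;
}
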
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index a3877f570998..c20c28f5c7a0 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -14,18 +14,18 @@
14#define HPET_CFG 0x010 14#define HPET_CFG 0x010
15#define HPET_STATUS 0x020 15#define HPET_STATUS 0x020
16#define HPET_COUNTER 0x0f0 16#define HPET_COUNTER 0x0f0
17#define HPET_T0_CFG 0x100 17#define HPET_Tn_OFFSET 0x20
18#define HPET_T0_CMP 0x108 18#define HPET_Tn_CFG(n) (0x100 + (n) * HPET_Tn_OFFSET)
19#define HPET_T0_ROUTE 0x110 19#define HPET_Tn_ROUTE(n) (0x104 + (n) * HPET_Tn_OFFSET)
20#define HPET_T1_CFG 0x120 20#define HPET_Tn_CMP(n) (0x108 + (n) * HPET_Tn_OFFSET)
21#define HPET_T1_CMP 0x128 21#define HPET_T0_CFG HPET_Tn_CFG(0)
22#define HPET_T1_ROUTE 0x130 22#define HPET_T0_CMP HPET_Tn_CMP(0)
23#define HPET_T2_CFG 0x140 23#define HPET_T1_CFG HPET_Tn_CFG(1)
24#define HPET_T2_CMP 0x148 24#define HPET_T1_CMP HPET_Tn_CMP(1)
25#define HPET_T2_ROUTE 0x150
26 25
27#define HPET_ID_VENDOR 0xffff0000 26#define HPET_ID_VENDOR 0xffff0000
28#define HPET_ID_LEGSUP 0x00008000 27#define HPET_ID_LEGSUP 0x00008000
28#define HPET_ID_64BIT 0x00002000
29#define HPET_ID_NUMBER 0x00001f00 29#define HPET_ID_NUMBER 0x00001f00
30#define HPET_ID_REV 0x000000ff 30#define HPET_ID_REV 0x000000ff
31#define HPET_ID_NUMBER_SHIFT 8 31#define HPET_ID_NUMBER_SHIFT 8
@@ -38,11 +38,18 @@
38#define HPET_LEGACY_8254 2 38#define HPET_LEGACY_8254 2
39#define HPET_LEGACY_RTC 8 39#define HPET_LEGACY_RTC 8
40 40
41#define HPET_TN_ENABLE 0x004 41#define HPET_TN_LEVEL 0x0002
42#define HPET_TN_PERIODIC 0x008 42#define HPET_TN_ENABLE 0x0004
43#define HPET_TN_PERIODIC_CAP 0x010 43#define HPET_TN_PERIODIC 0x0008
44#define HPET_TN_SETVAL 0x040 44#define HPET_TN_PERIODIC_CAP 0x0010
45#define HPET_TN_32BIT 0x100 45#define HPET_TN_64BIT_CAP 0x0020
46#define HPET_TN_SETVAL 0x0040
47#define HPET_TN_32BIT 0x0100
48#define HPET_TN_ROUTE 0x3e00
49#define HPET_TN_FSB 0x4000
50#define HPET_TN_FSB_CAP 0x8000
51
52#define HPET_TN_ROUTE_SHIFT 9
46 53
47extern int is_hpet_enabled(void); 54extern int is_hpet_enabled(void);
48extern int hpet_rtc_timer_init(void); 55extern int hpet_rtc_timer_init(void);
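
The per-timer constants are replaced by parameterised macros; for timers 0 and 1 the new HPET_Tn_CFG(n)/HPET_Tn_CMP(n) expansions must land on the same offsets as the old defines. A standalone sanity-check sketch (not kernel code):

#include <assert.h>

#define HPET_Tn_OFFSET  0x20
#define HPET_Tn_CFG(n)  (0x100 + (n) * HPET_Tn_OFFSET)
#define HPET_Tn_CMP(n)  (0x108 + (n) * HPET_Tn_OFFSET)

int main(void)
{
        /* must match the old fixed per-timer offsets */
        assert(HPET_Tn_CFG(0) == 0x100 && HPET_Tn_CMP(0) == 0x108);
        assert(HPET_Tn_CFG(1) == 0x120 && HPET_Tn_CMP(1) == 0x128);
        assert(HPET_Tn_CFG(2) == 0x140 && HPET_Tn_CMP(2) == 0x148);
        return 0;
}
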
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index dc97668ea0f9..c14a8c7267a6 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -55,7 +55,7 @@ struct hw_interrupt_type;
55#define CALL_FUNCTION_VECTOR 0xfc 55#define CALL_FUNCTION_VECTOR 0xfc
56#define KDB_VECTOR 0xfb /* reserved for KDB */ 56#define KDB_VECTOR 0xfb /* reserved for KDB */
57#define THERMAL_APIC_VECTOR 0xfa 57#define THERMAL_APIC_VECTOR 0xfa
58/* 0xf9 free */ 58#define THRESHOLD_APIC_VECTOR 0xf9
59#define INVALIDATE_TLB_VECTOR_END 0xf8 59#define INVALIDATE_TLB_VECTOR_END 0xf8
60#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f8 used for TLB flush */ 60#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f8 used for TLB flush */
61 61
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
index 6efa00fe4e7b..c7bc9c0525ba 100644
--- a/include/asm-x86_64/ia32.h
+++ b/include/asm-x86_64/ia32.h
@@ -165,6 +165,11 @@ struct siginfo_t;
165int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info); 165int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info);
166int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info); 166int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info);
167int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs); 167int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
168
169struct linux_binprm;
170extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
171 unsigned long stack_top, int exec_stack);
172
168#endif 173#endif
169 174
170#endif /* !CONFIG_IA32_SUPPORT */ 175#endif /* !CONFIG_IA32_SUPPORT */
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 869249db6795..5d298b799a9f 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -67,6 +67,8 @@ struct mce_log {
67/* Software defined banks */ 67/* Software defined banks */
68#define MCE_EXTENDED_BANK 128 68#define MCE_EXTENDED_BANK 128
69#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 69#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
70#define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */
71#define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4
70 72
71void mce_log(struct mce *m); 73void mce_log(struct mce *m);
72#ifdef CONFIG_X86_MCE_INTEL 74#ifdef CONFIG_X86_MCE_INTEL
@@ -77,4 +79,12 @@ static inline void mce_intel_feature_init(struct cpuinfo_x86 *c)
77} 79}
78#endif 80#endif
79 81
82#ifdef CONFIG_X86_MCE_AMD
83void mce_amd_feature_init(struct cpuinfo_x86 *c);
84#else
85static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
86{
87}
88#endif
89
80#endif 90#endif
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index b40c661f111e..69baaa8a3ce0 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -17,16 +17,15 @@
17/* Simple perfect hash to map physical addresses to node numbers */ 17/* Simple perfect hash to map physical addresses to node numbers */
18extern int memnode_shift; 18extern int memnode_shift;
19extern u8 memnodemap[NODEMAPSIZE]; 19extern u8 memnodemap[NODEMAPSIZE];
20extern int maxnode;
21 20
22extern struct pglist_data *node_data[]; 21extern struct pglist_data *node_data[];
23 22
24static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) 23static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
25{ 24{
26 int nid; 25 unsigned nid;
27 VIRTUAL_BUG_ON((addr >> memnode_shift) >= NODEMAPSIZE); 26 VIRTUAL_BUG_ON((addr >> memnode_shift) >= NODEMAPSIZE);
28 nid = memnodemap[addr >> memnode_shift]; 27 nid = memnodemap[addr >> memnode_shift];
29 VIRTUAL_BUG_ON(nid > maxnode); 28 VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
30 return nid; 29 return nid;
31} 30}
32 31
@@ -41,9 +40,7 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
41#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) 40#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
42#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) 41#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
43 42
44/* AK: this currently doesn't deal with invalid addresses. We'll see 43/* Requires pfn_valid(pfn) to be true */
45 if the 2.5 kernel doesn't pass them
46 (2.4 used to). */
47#define pfn_to_page(pfn) ({ \ 44#define pfn_to_page(pfn) ({ \
48 int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); \ 45 int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); \
49 ((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; \ 46 ((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; \
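
phys_to_nid() above is just a table lookup: shift the physical address right by memnode_shift and index memnodemap[]. A toy standalone sketch (the shift and map contents here are made up purely for illustration):

#include <assert.h>

#define NODEMAPSIZE 256
static int memnode_shift = 33;                  /* 8GB of address space per map slot */
static unsigned char memnodemap[NODEMAPSIZE] = { 0, 1, 0, 1 };

static int phys_to_nid(unsigned long long addr)
{
        return memnodemap[addr >> memnode_shift];
}

int main(void)
{
        assert(phys_to_nid(0x000000000ULL) == 0);       /* first 8GB  -> node 0 */
        assert(phys_to_nid(0x200000000ULL) == 1);       /* second 8GB -> node 1 */
        return 0;
}
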
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index f267e10c023d..6f8a17d105ab 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -16,7 +16,7 @@
16/* 16/*
17 * A maximum of 255 APICs with the current APIC ID architecture. 17 * A maximum of 255 APICs with the current APIC ID architecture.
18 */ 18 */
19#define MAX_APICS 128 19#define MAX_APICS 255
20 20
21struct intel_mp_floating 21struct intel_mp_floating
22{ 22{
@@ -157,7 +157,8 @@ struct mpc_config_lintsrc
157 */ 157 */
158 158
159#define MAX_MP_BUSSES 256 159#define MAX_MP_BUSSES 256
160#define MAX_IRQ_SOURCES 256 160/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
161#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
161enum mp_bustype { 162enum mp_bustype {
162 MP_BUS_ISA = 1, 163 MP_BUS_ISA = 1,
163 MP_BUS_EISA, 164 MP_BUS_EISA,
@@ -172,7 +173,7 @@ extern int smp_found_config;
172extern void find_smp_config (void); 173extern void find_smp_config (void);
173extern void get_smp_config (void); 174extern void get_smp_config (void);
174extern int nr_ioapics; 175extern int nr_ioapics;
175extern int apic_version [MAX_APICS]; 176extern unsigned char apic_version [MAX_APICS];
176extern int mp_irq_entries; 177extern int mp_irq_entries;
177extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; 178extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
178extern int mpc_default_type; 179extern int mpc_default_type;
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 5a7fe3c6c3d8..24dc39651bc4 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -19,7 +19,7 @@
19 : "=a" (a__), "=d" (b__) \ 19 : "=a" (a__), "=d" (b__) \
20 : "c" (msr)); \ 20 : "c" (msr)); \
21 val = a__ | (b__<<32); \ 21 val = a__ | (b__<<32); \
22} while(0); 22} while(0)
23 23
24#define wrmsr(msr,val1,val2) \ 24#define wrmsr(msr,val1,val2) \
25 __asm__ __volatile__("wrmsr" \ 25 __asm__ __volatile__("wrmsr" \
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index bcf55c3f7f7f..d51e56fdc3da 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -17,6 +17,8 @@ extern void numa_add_cpu(int cpu);
17extern void numa_init_array(void); 17extern void numa_init_array(void);
18extern int numa_off; 18extern int numa_off;
19 19
20extern void numa_set_node(int cpu, int node);
21
20extern unsigned char apicid_to_node[256]; 22extern unsigned char apicid_to_node[256];
21 23
22#define NUMA_NO_NODE 0xff 24#define NUMA_NO_NODE 0xff
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index e5ab4d231f2c..06e489f32472 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -11,7 +11,7 @@
11#define PAGE_SIZE (1UL << PAGE_SHIFT) 11#define PAGE_SIZE (1UL << PAGE_SHIFT)
12#endif 12#endif
13#define PAGE_MASK (~(PAGE_SIZE-1)) 13#define PAGE_MASK (~(PAGE_SIZE-1))
14#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT)) 14#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
15 15
16#define THREAD_ORDER 1 16#define THREAD_ORDER 1
17#ifdef __ASSEMBLY__ 17#ifdef __ASSEMBLY__
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index bbf89aa8a1af..8733ccfa442e 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -15,6 +15,7 @@ struct x8664_pda {
15 int irqcount; /* Irq nesting counter. Starts with -1 */ 15 int irqcount; /* Irq nesting counter. Starts with -1 */
16 int cpunumber; /* Logical CPU number */ 16 int cpunumber; /* Logical CPU number */
17 char *irqstackptr; /* top of irqstack */ 17 char *irqstackptr; /* top of irqstack */
18 int nodenumber; /* number of current node */
18 unsigned int __softirq_pending; 19 unsigned int __softirq_pending;
19 unsigned int __nmi_count; /* number of NMI on this CPUs */ 20 unsigned int __nmi_count; /* number of NMI on this CPUs */
20 struct mm_struct *active_mm; 21 struct mm_struct *active_mm;
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 7309fffeec9a..ecf58c7c1650 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -16,6 +16,7 @@ extern pud_t level3_physmem_pgt[512];
16extern pud_t level3_ident_pgt[512]; 16extern pud_t level3_ident_pgt[512];
17extern pmd_t level2_kernel_pgt[512]; 17extern pmd_t level2_kernel_pgt[512];
18extern pgd_t init_level4_pgt[]; 18extern pgd_t init_level4_pgt[];
19extern pgd_t boot_level4_pgt[];
19extern unsigned long __supported_pte_mask; 20extern unsigned long __supported_pte_mask;
20 21
21#define swapper_pg_dir init_level4_pgt 22#define swapper_pg_dir init_level4_pgt
@@ -247,7 +248,7 @@ static inline unsigned long pud_bad(pud_t pud)
247#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this 248#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this
248 right? */ 249 right? */
249#define pte_page(x) pfn_to_page(pte_pfn(x)) 250#define pte_page(x) pfn_to_page(pte_pfn(x))
250#define pte_pfn(x) ((pte_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK) 251#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
251 252
252static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) 253static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
253{ 254{
@@ -354,7 +355,7 @@ static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
354#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) 355#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
355#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE ) 356#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
356#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) 357#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
357#define pmd_pfn(x) ((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK) 358#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
358 359
359#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) 360#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
360#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE }) 361#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
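The pte_pfn()/pmd_pfn() hunks above now mask with __PHYSICAL_MASK before shifting, matching the asm-x86_64/page.h hunk earlier that drops the extra shift from PHYSICAL_PAGE_MASK: __PHYSICAL_MASK is treated as a mask over physical address bits, so anything above that range (software-defined PTE bits, for instance) is stripped before the PFN is formed. A minimal user-space sketch of the arithmetic, assuming a 64-bit build, PAGE_SHIFT = 12 and an illustrative 40-bit __PHYSICAL_MASK (the real value comes from the x86-64 headers):

/* Illustrative only: why masking before shifting matters when
 * __PHYSICAL_MASK covers address bits rather than PFN bits. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define __PHYSICAL_MASK ((1UL << 40) - 1)       /* assumed 40 physical address bits */

int main(void)
{
        unsigned long pte = (1UL << 41) | 0x1000;       /* stray bit above the physical range, PFN 1 */
        unsigned long old_pfn = (pte >> PAGE_SHIFT) & __PHYSICAL_MASK; /* old order: stray bit survives */
        unsigned long new_pfn = (pte & __PHYSICAL_MASK) >> PAGE_SHIFT; /* new order: stray bit is dropped */

        printf("old pfn %#lx, new pfn %#lx\n", old_pfn, new_pfn);      /* 0x20000001 vs 0x1 */
        return 0;
}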
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 03837d34fba0..4861246548f7 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -61,10 +61,12 @@ struct cpuinfo_x86 {
61 int x86_cache_alignment; 61 int x86_cache_alignment;
62 int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/ 62 int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/
63 __u8 x86_virt_bits, x86_phys_bits; 63 __u8 x86_virt_bits, x86_phys_bits;
64 __u8 x86_num_cores; 64 __u8 x86_max_cores; /* cpuid returned max cores value */
65 __u32 x86_power; 65 __u32 x86_power;
66 __u32 extended_cpuid_level; /* Max extended CPUID function supported */ 66 __u32 extended_cpuid_level; /* Max extended CPUID function supported */
67 unsigned long loops_per_jiffy; 67 unsigned long loops_per_jiffy;
68 __u8 apicid;
69 __u8 booted_cores; /* number of cores as seen by OS */
68} ____cacheline_aligned; 70} ____cacheline_aligned;
69 71
70#define X86_VENDOR_INTEL 0 72#define X86_VENDOR_INTEL 0
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index dbb37b0adb43..34501086afef 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -11,6 +11,8 @@ struct pt_regs;
11extern void start_kernel(void); 11extern void start_kernel(void);
12extern void pda_init(int); 12extern void pda_init(int);
13 13
14extern void zap_low_mappings(int cpu);
15
14extern void early_idt_handler(void); 16extern void early_idt_handler(void);
15 17
16extern void mcheck_init(struct cpuinfo_x86 *c); 18extern void mcheck_init(struct cpuinfo_x86 *c);
@@ -22,6 +24,8 @@ extern void mtrr_bp_init(void);
22#define mtrr_bp_init() do {} while (0) 24#define mtrr_bp_init() do {} while (0)
23#endif 25#endif
24extern void init_memory_mapping(unsigned long start, unsigned long end); 26extern void init_memory_mapping(unsigned long start, unsigned long end);
27extern void size_zones(unsigned long *z, unsigned long *h,
28 unsigned long start_pfn, unsigned long end_pfn);
25 29
26extern void system_call(void); 30extern void system_call(void);
27extern int kernel_syscall(void); 31extern int kernel_syscall(void);
diff --git a/include/asm-x86_64/rwsem.h b/include/asm-x86_64/rwsem.h
deleted file mode 100644
index 46077e9c1910..000000000000
--- a/include/asm-x86_64/rwsem.h
+++ /dev/null
@@ -1,283 +0,0 @@
1/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for x86_64+
2 *
3 * Written by David Howells (dhowells@redhat.com).
4 * Ported by Andi Kleen <ak@suse.de> to x86-64.
5 *
6 * Derived from asm-i386/semaphore.h and asm-i386/rwsem.h
7 *
8 *
9 * The MSW of the count is the negated number of active writers and waiting
10 * lockers, and the LSW is the total number of active locks
11 *
12 * The lock count is initialized to 0 (no active and no waiting lockers).
13 *
14 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
15 * uncontended lock. This can be determined because XADD returns the old value.
16 * Readers increment by 1 and see a positive value when uncontended, negative
17 * if there are writers (and maybe) readers waiting (in which case it goes to
18 * sleep).
19 *
20 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
21 * be extended to 65534 by manually checking the whole MSW rather than relying
22 * on the S flag.
23 *
24 * The value of ACTIVE_BIAS supports up to 65535 active processes.
25 *
26 * This should be totally fair - if anything is waiting, a process that wants a
27 * lock will go to the back of the queue. When the currently active lock is
28 * released, if there's a writer at the front of the queue, then that and only
29 * that will be woken up; if there's a bunch of consecutive readers at the
30 * front, then they'll all be woken up, but no other readers will be.
31 */
32
33#ifndef _X8664_RWSEM_H
34#define _X8664_RWSEM_H
35
36#ifndef _LINUX_RWSEM_H
37#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
38#endif
39
40#ifdef __KERNEL__
41
42#include <linux/list.h>
43#include <linux/spinlock.h>
44
45struct rwsem_waiter;
46
47extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
48extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
49extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
50extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
51
52/*
53 * the semaphore definition
54 */
55struct rw_semaphore {
56 signed int count;
57#define RWSEM_UNLOCKED_VALUE 0x00000000
58#define RWSEM_ACTIVE_BIAS 0x00000001
59#define RWSEM_ACTIVE_MASK 0x0000ffff
60#define RWSEM_WAITING_BIAS (-0x00010000)
61#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
62#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
63 spinlock_t wait_lock;
64 struct list_head wait_list;
65#if RWSEM_DEBUG
66 int debug;
67#endif
68};
69
70/*
71 * initialisation
72 */
73#if RWSEM_DEBUG
74#define __RWSEM_DEBUG_INIT , 0
75#else
76#define __RWSEM_DEBUG_INIT /* */
77#endif
78
79#define __RWSEM_INITIALIZER(name) \
80{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
81 __RWSEM_DEBUG_INIT }
82
83#define DECLARE_RWSEM(name) \
84 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
85
86static inline void init_rwsem(struct rw_semaphore *sem)
87{
88 sem->count = RWSEM_UNLOCKED_VALUE;
89 spin_lock_init(&sem->wait_lock);
90 INIT_LIST_HEAD(&sem->wait_list);
91#if RWSEM_DEBUG
92 sem->debug = 0;
93#endif
94}
95
96/*
97 * lock for reading
98 */
99static inline void __down_read(struct rw_semaphore *sem)
100{
101 __asm__ __volatile__(
102 "# beginning down_read\n\t"
103LOCK_PREFIX " incl (%%rdi)\n\t" /* adds 0x00000001, returns the old value */
104 " js 2f\n\t" /* jump if we weren't granted the lock */
105 "1:\n\t"
106 LOCK_SECTION_START("") \
107 "2:\n\t"
108 " call rwsem_down_read_failed_thunk\n\t"
109 " jmp 1b\n"
110 LOCK_SECTION_END \
111 "# ending down_read\n\t"
112 : "+m"(sem->count)
113 : "D"(sem)
114 : "memory", "cc");
115}
116
117
118/*
119 * trylock for reading -- returns 1 if successful, 0 if contention
120 */
121static inline int __down_read_trylock(struct rw_semaphore *sem)
122{
123 __s32 result, tmp;
124 __asm__ __volatile__(
125 "# beginning __down_read_trylock\n\t"
126 " movl %0,%1\n\t"
127 "1:\n\t"
128 " movl %1,%2\n\t"
129 " addl %3,%2\n\t"
130 " jle 2f\n\t"
131LOCK_PREFIX " cmpxchgl %2,%0\n\t"
132 " jnz 1b\n\t"
133 "2:\n\t"
134 "# ending __down_read_trylock\n\t"
135 : "+m"(sem->count), "=&a"(result), "=&r"(tmp)
136 : "i"(RWSEM_ACTIVE_READ_BIAS)
137 : "memory", "cc");
138 return result>=0 ? 1 : 0;
139}
140
141
142/*
143 * lock for writing
144 */
145static inline void __down_write(struct rw_semaphore *sem)
146{
147 int tmp;
148
149 tmp = RWSEM_ACTIVE_WRITE_BIAS;
150 __asm__ __volatile__(
151 "# beginning down_write\n\t"
152LOCK_PREFIX " xaddl %0,(%%rdi)\n\t" /* subtract 0x0000ffff, returns the old value */
153 " testl %0,%0\n\t" /* was the count 0 before? */
154 " jnz 2f\n\t" /* jump if we weren't granted the lock */
155 "1:\n\t"
156 LOCK_SECTION_START("")
157 "2:\n\t"
158 " call rwsem_down_write_failed_thunk\n\t"
159 " jmp 1b\n"
160 LOCK_SECTION_END
161 "# ending down_write"
162 : "=&r" (tmp)
163 : "0"(tmp), "D"(sem)
164 : "memory", "cc");
165}
166
167/*
168 * trylock for writing -- returns 1 if successful, 0 if contention
169 */
170static inline int __down_write_trylock(struct rw_semaphore *sem)
171{
172 signed long ret = cmpxchg(&sem->count,
173 RWSEM_UNLOCKED_VALUE,
174 RWSEM_ACTIVE_WRITE_BIAS);
175 if (ret == RWSEM_UNLOCKED_VALUE)
176 return 1;
177 return 0;
178}
179
180/*
181 * unlock after reading
182 */
183static inline void __up_read(struct rw_semaphore *sem)
184{
185 __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
186 __asm__ __volatile__(
187 "# beginning __up_read\n\t"
188LOCK_PREFIX " xaddl %[tmp],(%%rdi)\n\t" /* subtracts 1, returns the old value */
189 " js 2f\n\t" /* jump if the lock is being waited upon */
190 "1:\n\t"
191 LOCK_SECTION_START("")
192 "2:\n\t"
193 " decw %w[tmp]\n\t" /* do nothing if still outstanding active readers */
194 " jnz 1b\n\t"
195 " call rwsem_wake_thunk\n\t"
196 " jmp 1b\n"
197 LOCK_SECTION_END
198 "# ending __up_read\n"
199 : "+m"(sem->count), [tmp] "+r" (tmp)
200 : "D"(sem)
201 : "memory", "cc");
202}
203
204/*
205 * unlock after writing
206 */
207static inline void __up_write(struct rw_semaphore *sem)
208{
209 unsigned tmp;
210 __asm__ __volatile__(
211 "# beginning __up_write\n\t"
212 " movl %[bias],%[tmp]\n\t"
213LOCK_PREFIX " xaddl %[tmp],(%%rdi)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
214 " jnz 2f\n\t" /* jump if the lock is being waited upon */
215 "1:\n\t"
216 LOCK_SECTION_START("")
217 "2:\n\t"
218 " decw %w[tmp]\n\t" /* did the active count reduce to 0? */
219 " jnz 1b\n\t" /* jump back if not */
220 " call rwsem_wake_thunk\n\t"
221 " jmp 1b\n"
222 LOCK_SECTION_END
223 "# ending __up_write\n"
224 : "+m"(sem->count), [tmp] "=r" (tmp)
225 : "D"(sem), [bias] "i"(-RWSEM_ACTIVE_WRITE_BIAS)
226 : "memory", "cc");
227}
228
229/*
230 * downgrade write lock to read lock
231 */
232static inline void __downgrade_write(struct rw_semaphore *sem)
233{
234 __asm__ __volatile__(
235 "# beginning __downgrade_write\n\t"
236LOCK_PREFIX " addl %[bias],(%%rdi)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
237 " js 2f\n\t" /* jump if the lock is being waited upon */
238 "1:\n\t"
239 LOCK_SECTION_START("")
240 "2:\n\t"
241 " call rwsem_downgrade_thunk\n"
242 " jmp 1b\n"
243 LOCK_SECTION_END
244 "# ending __downgrade_write\n"
245 : "=m"(sem->count)
246 : "D"(sem), [bias] "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
247 : "memory", "cc");
248}
249
250/*
251 * implement atomic add functionality
252 */
253static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
254{
255 __asm__ __volatile__(
256LOCK_PREFIX "addl %1,%0"
257 :"=m"(sem->count)
258 :"ir"(delta), "m"(sem->count));
259}
260
261/*
262 * implement exchange and add functionality
263 */
264static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
265{
266 int tmp = delta;
267
268 __asm__ __volatile__(
269LOCK_PREFIX "xaddl %0,(%2)"
270 : "=r"(tmp), "=m"(sem->count)
271 : "r"(sem), "m"(sem->count), "0" (tmp)
272 : "memory");
273
274 return tmp+delta;
275}
276
277static inline int rwsem_is_locked(struct rw_semaphore *sem)
278{
279 return (sem->count != 0);
280}
281
282#endif /* __KERNEL__ */
283#endif /* _X8664_RWSEM_H */
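The header comment of the deleted rwsem.h explains the count encoding (MSW: negated count of active writers plus waiters, LSW: count of active locks). A small sketch of that 32-bit arithmetic, using only the bias constants quoted above and none of the real locking paths:

/* Sketch of the rwsem count arithmetic described in the deleted header. */
#include <stdio.h>

#define RWSEM_ACTIVE_BIAS        0x00000001
#define RWSEM_WAITING_BIAS      (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        int count = 0;                                  /* RWSEM_UNLOCKED_VALUE */

        count += RWSEM_ACTIVE_WRITE_BIAS;               /* uncontended down_write */
        printf("after down_write: %#x\n", (unsigned)count);    /* 0xffff0001, as the comment says */

        count -= RWSEM_ACTIVE_WRITE_BIAS;               /* up_write */
        count += RWSEM_ACTIVE_READ_BIAS;                /* uncontended down_read */
        printf("after down_read:  %#x\n", (unsigned)count);    /* 0x00000001 */
        return 0;
}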
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index b9fb2173ef99..d030409a8fb5 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -47,7 +47,6 @@ extern void lock_ipi_call_lock(void);
47extern void unlock_ipi_call_lock(void); 47extern void unlock_ipi_call_lock(void);
48extern int smp_num_siblings; 48extern int smp_num_siblings;
49extern void smp_send_reschedule(int cpu); 49extern void smp_send_reschedule(int cpu);
50extern void zap_low_mappings(void);
51void smp_stop_cpu(void); 50void smp_stop_cpu(void);
52extern int smp_call_function_single(int cpuid, void (*func) (void *info), 51extern int smp_call_function_single(int cpuid, void (*func) (void *info),
53 void *info, int retry, int wait); 52 void *info, int retry, int wait);
@@ -82,6 +81,8 @@ extern int safe_smp_processor_id(void);
82extern int __cpu_disable(void); 81extern int __cpu_disable(void);
83extern void __cpu_die(unsigned int cpu); 82extern void __cpu_die(unsigned int cpu);
84extern void prefill_possible_map(void); 83extern void prefill_possible_map(void);
84extern unsigned num_processors;
85extern unsigned disabled_cpus;
85 86
86#endif /* !ASSEMBLY */ 87#endif /* !ASSEMBLY */
87 88
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 69636831ad2f..fe484a699cc3 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -18,22 +18,22 @@
18 */ 18 */
19 19
20#define __raw_spin_is_locked(x) \ 20#define __raw_spin_is_locked(x) \
21 (*(volatile signed char *)(&(x)->slock) <= 0) 21 (*(volatile signed int *)(&(x)->slock) <= 0)
22 22
23#define __raw_spin_lock_string \ 23#define __raw_spin_lock_string \
24 "\n1:\t" \ 24 "\n1:\t" \
25 "lock ; decb %0\n\t" \ 25 "lock ; decl %0\n\t" \
26 "js 2f\n" \ 26 "js 2f\n" \
27 LOCK_SECTION_START("") \ 27 LOCK_SECTION_START("") \
28 "2:\t" \ 28 "2:\t" \
29 "rep;nop\n\t" \ 29 "rep;nop\n\t" \
30 "cmpb $0,%0\n\t" \ 30 "cmpl $0,%0\n\t" \
31 "jle 2b\n\t" \ 31 "jle 2b\n\t" \
32 "jmp 1b\n" \ 32 "jmp 1b\n" \
33 LOCK_SECTION_END 33 LOCK_SECTION_END
34 34
35#define __raw_spin_unlock_string \ 35#define __raw_spin_unlock_string \
36 "movb $1,%0" \ 36 "movl $1,%0" \
37 :"=m" (lock->slock) : : "memory" 37 :"=m" (lock->slock) : : "memory"
38 38
39static inline void __raw_spin_lock(raw_spinlock_t *lock) 39static inline void __raw_spin_lock(raw_spinlock_t *lock)
@@ -47,10 +47,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
47 47
48static inline int __raw_spin_trylock(raw_spinlock_t *lock) 48static inline int __raw_spin_trylock(raw_spinlock_t *lock)
49{ 49{
50 char oldval; 50 int oldval;
51 51
52 __asm__ __volatile__( 52 __asm__ __volatile__(
53 "xchgb %b0,%1" 53 "xchgl %0,%1"
54 :"=q" (oldval), "=m" (lock->slock) 54 :"=q" (oldval), "=m" (lock->slock)
55 :"0" (0) : "memory"); 55 :"0" (0) : "memory");
56 56
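The spinlock hunk widens the lock word from a byte to an int, so decb/cmpb/movb/xchgb become their 32-bit forms. In plain C the trylock path behaves roughly like the sketch below (illustrative only: the kernel uses the inline assembly shown, and unlock simply stores 1 back into the lock word):

/* Rough user-space approximation of the xchgl-based trylock above. */
static inline int raw_spin_trylock_sketch(volatile int *slock)
{
        int oldval = __sync_lock_test_and_set(slock, 0);  /* atomic exchange: store 0, return old value */

        return oldval > 0;      /* the lock holds 1 when free, 0 or negative when taken */
}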
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 1c603cd7e4d0..d39ebd5263ed 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -28,6 +28,8 @@ extern int __node_distance(int, int);
28#define pcibus_to_node(bus) ((long)(bus->sysdata)) 28#define pcibus_to_node(bus) ((long)(bus->sysdata))
29#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); 29#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus));
30 30
31#define numa_node_id() read_pda(nodenumber)
32
31/* sched_domains SD_NODE_INIT for x86_64 machines */ 33/* sched_domains SD_NODE_INIT for x86_64 machines */
32#define SD_NODE_INIT (struct sched_domain) { \ 34#define SD_NODE_INIT (struct sched_domain) { \
33 .span = CPU_MASK_NONE, \ 35 .span = CPU_MASK_NONE, \
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 3c494b65d33a..2c42150bce0c 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -462,7 +462,7 @@ __SYSCALL(__NR_fremovexattr, sys_fremovexattr)
462#define __NR_tkill 200 462#define __NR_tkill 200
463__SYSCALL(__NR_tkill, sys_tkill) 463__SYSCALL(__NR_tkill, sys_tkill)
464#define __NR_time 201 464#define __NR_time 201
465__SYSCALL(__NR_time, sys_time64) 465__SYSCALL(__NR_time, sys_time)
466#define __NR_futex 202 466#define __NR_futex 202
467__SYSCALL(__NR_futex, sys_futex) 467__SYSCALL(__NR_futex, sys_futex)
468#define __NR_sched_setaffinity 203 468#define __NR_sched_setaffinity 203
@@ -608,6 +608,7 @@ do { \
608#define __ARCH_WANT_SYS_SIGPENDING 608#define __ARCH_WANT_SYS_SIGPENDING
609#define __ARCH_WANT_SYS_SIGPROCMASK 609#define __ARCH_WANT_SYS_SIGPROCMASK
610#define __ARCH_WANT_SYS_RT_SIGACTION 610#define __ARCH_WANT_SYS_RT_SIGACTION
611#define __ARCH_WANT_SYS_TIME
611#define __ARCH_WANT_COMPAT_SYS_TIME 612#define __ARCH_WANT_COMPAT_SYS_TIME
612#endif 613#endif
613 614
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 12b5732dc6e5..3670cc7695da 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -223,6 +223,26 @@ static inline int atomic_sub_return(int i, atomic_t * v)
223 */ 223 */
224#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0) 224#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
225 225
226#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
227
228/**
229 * atomic_add_unless - add unless the number is a given value
230 * @v: pointer of type atomic_t
231 * @a: the amount to add to v...
232 * @u: ...unless v is equal to u.
233 *
234 * Atomically adds @a to @v, so long as it was not @u.
235 * Returns non-zero if @v was not @u, and zero otherwise.
236 */
237#define atomic_add_unless(v, a, u) \
238({ \
239 int c, old; \
240 c = atomic_read(v); \
241 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
242 c = old; \
243 c != (u); \
244})
245#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
226 246
227static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 247static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
228{ 248{
diff --git a/include/linux/acct.h b/include/linux/acct.h
index 93c5b3cdf951..9a66401073fc 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -16,6 +16,8 @@
16#define _LINUX_ACCT_H 16#define _LINUX_ACCT_H
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/jiffies.h>
20
19#include <asm/param.h> 21#include <asm/param.h>
20#include <asm/byteorder.h> 22#include <asm/byteorder.h>
21 23
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 403d71dcb7c8..49fd37629ee4 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -124,7 +124,7 @@ struct kiocb {
124 (x)->ki_users = 1; \ 124 (x)->ki_users = 1; \
125 (x)->ki_key = KIOCB_SYNC_KEY; \ 125 (x)->ki_key = KIOCB_SYNC_KEY; \
126 (x)->ki_filp = (filp); \ 126 (x)->ki_filp = (filp); \
127 (x)->ki_ctx = &tsk->active_mm->default_kioctx; \ 127 (x)->ki_ctx = NULL; \
128 (x)->ki_cancel = NULL; \ 128 (x)->ki_cancel = NULL; \
129 (x)->ki_dtor = NULL; \ 129 (x)->ki_dtor = NULL; \
130 (x)->ki_obj.tsk = tsk; \ 130 (x)->ki_obj.tsk = tsk; \
@@ -210,8 +210,15 @@ struct kioctx *lookup_ioctx(unsigned long ctx_id);
210int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 210int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
211 struct iocb *iocb)); 211 struct iocb *iocb));
212 212
213#define get_ioctx(kioctx) do { if (unlikely(atomic_read(&(kioctx)->users) <= 0)) BUG(); atomic_inc(&(kioctx)->users); } while (0) 213#define get_ioctx(kioctx) do { \
214#define put_ioctx(kioctx) do { if (unlikely(atomic_dec_and_test(&(kioctx)->users))) __put_ioctx(kioctx); else if (unlikely(atomic_read(&(kioctx)->users) < 0)) BUG(); } while (0) 214 BUG_ON(unlikely(atomic_read(&(kioctx)->users) <= 0)); \
215 atomic_inc(&(kioctx)->users); \
216} while (0)
217#define put_ioctx(kioctx) do { \
218 BUG_ON(unlikely(atomic_read(&(kioctx)->users) <= 0)); \
219 if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
220 __put_ioctx(kioctx); \
221} while (0)
215 222
216#define in_aio() !is_sync_wait(current->io_wait) 223#define in_aio() !is_sync_wait(current->io_wait)
217/* may be used for debugging */ 224/* may be used for debugging */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cb3c3ef50f50..38c2fb7ebe09 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -84,6 +84,16 @@ static __inline__ int get_bitmask_order(unsigned int count)
84 return order; /* We could be slightly more clever with -1 here... */ 84 return order; /* We could be slightly more clever with -1 here... */
85} 85}
86 86
87static __inline__ int get_count_order(unsigned int count)
88{
89 int order;
90
91 order = fls(count) - 1;
92 if (count & (count - 1))
93 order++;
94 return order;
95}
96
87/* 97/*
88 * hweightN: returns the hamming weight (i.e. the number 98 * hweightN: returns the hamming weight (i.e. the number
89 * of bits set) of a N-bit word 99 * of bits set) of a N-bit word
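get_count_order(), added above, is an fls()-based log2 rounded up so that the result covers count. Worked values, assuming the usual fls() semantics (1-based index of the highest set bit):

    count = 1  ->  fls = 1, power of two          ->  order 0
    count = 4  ->  fls = 3, power of two          ->  order 2
    count = 5  ->  fls = 3, 5 & 4 != 0, round up  ->  order 3

so 1 << get_count_order(n) is the smallest power of two greater than or equal to n.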
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 025a7f084dbd..a33a31e71bbc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -406,6 +406,7 @@ struct request_queue
406 406
407 atomic_t refcnt; 407 atomic_t refcnt;
408 408
409 unsigned int nr_sorted;
409 unsigned int in_flight; 410 unsigned int in_flight;
410 411
411 /* 412 /*
@@ -631,6 +632,7 @@ static inline void elv_dispatch_add_tail(struct request_queue *q,
631{ 632{
632 if (q->last_merge == rq) 633 if (q->last_merge == rq)
633 q->last_merge = NULL; 634 q->last_merge = NULL;
635 q->nr_sorted--;
634 636
635 q->end_sector = rq_end_sector(rq); 637 q->end_sector = rq_end_sector(rq);
636 q->boundary_rq = rq; 638 q->boundary_rq = rq;
diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h
new file mode 100644
index 000000000000..605ebe24bb2e
--- /dev/null
+++ b/include/linux/cm4000_cs.h
@@ -0,0 +1,66 @@
1#ifndef _CM4000_H_
2#define _CM4000_H_
3
4#define MAX_ATR 33
5
6#define CM4000_MAX_DEV 4
7
8/* those two structures are passed via ioctl() from/to userspace. They are
 9 * used by existing userspace programs, so I kept the awkward "bIFSD" naming
10 * not to break compilation of userspace apps. -HW */
11
12typedef struct atreq {
13 int32_t atr_len;
14 unsigned char atr[64];
15 int32_t power_act;
16 unsigned char bIFSD;
17 unsigned char bIFSC;
18} atreq_t;
19
20
21/* what is particularly stupid in the original driver is the arch-dependent
22 * member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace
23 * will lay out the structure members differently than the 64bit kernel.
24 *
25 * I've changed "ptsreq.protocol" from "unsigned long" to "u_int32_t".
26 * On 32bit this will make no difference. With 64bit kernels, it will make
27 * 32bit apps work, too.
28 */
29
30typedef struct ptsreq {
31 u_int32_t protocol; /*T=0: 2^0, T=1: 2^1*/
32 unsigned char flags;
33 unsigned char pts1;
34 unsigned char pts2;
35 unsigned char pts3;
36} ptsreq_t;
37
38#define CM_IOC_MAGIC 'c'
39#define CM_IOC_MAXNR 255
40
41#define CM_IOCGSTATUS _IOR (CM_IOC_MAGIC, 0, unsigned char *)
42#define CM_IOCGATR _IOWR(CM_IOC_MAGIC, 1, atreq_t *)
43#define CM_IOCSPTS _IOW (CM_IOC_MAGIC, 2, ptsreq_t *)
44#define CM_IOCSRDR _IO (CM_IOC_MAGIC, 3)
45#define CM_IOCARDOFF _IO (CM_IOC_MAGIC, 4)
46
47#define CM_IOSDBGLVL _IOW(CM_IOC_MAGIC, 250, int*)
48
49/* card and device states */
50#define CM_CARD_INSERTED 0x01
51#define CM_CARD_POWERED 0x02
52#define CM_ATR_PRESENT 0x04
53#define CM_ATR_VALID 0x08
54#define CM_STATE_VALID 0x0f
55/* extra info only from CM4000 */
56#define CM_NO_READER 0x10
57#define CM_BAD_CARD 0x20
58
59
60#ifdef __KERNEL__
61
62#define DEVICE_NAME "cmm"
63#define MODULE_NAME "cm4000_cs"
64
65#endif /* __KERNEL__ */
66#endif /* _CM4000_H_ */
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 2209ad3499a3..174f3379e5d9 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -259,6 +259,14 @@ COMPATIBLE_IOCTL(RTC_RD_TIME)
259COMPATIBLE_IOCTL(RTC_SET_TIME) 259COMPATIBLE_IOCTL(RTC_SET_TIME)
260COMPATIBLE_IOCTL(RTC_WKALM_SET) 260COMPATIBLE_IOCTL(RTC_WKALM_SET)
261COMPATIBLE_IOCTL(RTC_WKALM_RD) 261COMPATIBLE_IOCTL(RTC_WKALM_RD)
262/*
263 * These two are only for the sbus rtc driver, but
264 * hwclock tries them on every rtc device first when
265 * running on sparc. On other architectures the entries
266 * are useless but harmless.
267 */
268COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
269COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
262/* Little m */ 270/* Little m */
263COMPATIBLE_IOCTL(MTIOCTOP) 271COMPATIBLE_IOCTL(MTIOCTOP)
264/* Socket level stuff */ 272/* Socket level stuff */
diff --git a/include/linux/file.h b/include/linux/file.h
index d3b1a15d5f21..418b6101b59a 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -33,13 +33,13 @@ struct fdtable {
33 * Open file table structure 33 * Open file table structure
34 */ 34 */
35struct files_struct { 35struct files_struct {
36 atomic_t count; 36 atomic_t count;
37 spinlock_t file_lock; /* Protects all the below members. Nests inside tsk->alloc_lock */
38 struct fdtable *fdt; 37 struct fdtable *fdt;
39 struct fdtable fdtab; 38 struct fdtable fdtab;
40 fd_set close_on_exec_init; 39 fd_set close_on_exec_init;
41 fd_set open_fds_init; 40 fd_set open_fds_init;
42 struct file * fd_array[NR_OPEN_DEFAULT]; 41 struct file * fd_array[NR_OPEN_DEFAULT];
42 spinlock_t file_lock; /* Protects concurrent writers. Nests inside tsk->alloc_lock */
43}; 43};
44 44
45#define files_fdtable(files) (rcu_dereference((files)->fdt)) 45#define files_fdtable(files) (rcu_dereference((files)->fdt))
diff --git a/include/linux/font.h b/include/linux/font.h
index 8aac48c37f3d..53b129f07f6f 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -31,7 +31,6 @@ struct font_desc {
31#define SUN12x22_IDX 7 31#define SUN12x22_IDX 7
32#define ACORN8x8_IDX 8 32#define ACORN8x8_IDX 8
33#define MINI4x6_IDX 9 33#define MINI4x6_IDX 9
34#define RL_IDX 10
35 34
36extern const struct font_desc font_vga_8x8, 35extern const struct font_desc font_vga_8x8,
37 font_vga_8x16, 36 font_vga_8x16,
@@ -42,7 +41,6 @@ extern const struct font_desc font_vga_8x8,
42 font_sun_8x16, 41 font_sun_8x16,
43 font_sun_12x22, 42 font_sun_12x22,
44 font_acorn_8x8, 43 font_acorn_8x8,
45 font_rl,
46 font_mini_4x6; 44 font_mini_4x6;
47 45
48/* Find a font with a specific name */ 46/* Find a font with a specific name */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 114d5d59f695..934aa9bda481 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -4,7 +4,7 @@
4 * Definitions for any platform device related flags or structures for 4 * Definitions for any platform device related flags or structures for
5 * Freescale processor devices 5 * Freescale processor devices
6 * 6 *
7 * Maintainer: Kumar Gala (kumar.gala@freescale.com) 7 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
8 * 8 *
9 * Copyright 2004 Freescale Semiconductor, Inc 9 * Copyright 2004 Freescale Semiconductor, Inc
10 * 10 *
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 8eeaa53a68c9..eef5ccdcd731 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -78,7 +78,7 @@ struct hd_struct {
78 sector_t start_sect; 78 sector_t start_sect;
79 sector_t nr_sects; 79 sector_t nr_sects;
80 struct kobject kobj; 80 struct kobject kobj;
81 unsigned ios[2], sectors[2]; 81 unsigned ios[2], sectors[2]; /* READs and WRITEs */
82 int policy, partno; 82 int policy, partno;
83}; 83};
84 84
@@ -89,7 +89,7 @@ struct hd_struct {
89#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 89#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
90 90
91struct disk_stats { 91struct disk_stats {
92 unsigned sectors[2]; 92 unsigned sectors[2]; /* READs and WRITEs */
93 unsigned ios[2]; 93 unsigned ios[2];
94 unsigned merges[2]; 94 unsigned merges[2];
95 unsigned ticks[2]; 95 unsigned ticks[2];
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c3779432a723..313dfe9b443a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -14,6 +14,13 @@ struct vm_area_struct;
14/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */ 14/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
15#define __GFP_DMA ((__force gfp_t)0x01u) 15#define __GFP_DMA ((__force gfp_t)0x01u)
16#define __GFP_HIGHMEM ((__force gfp_t)0x02u) 16#define __GFP_HIGHMEM ((__force gfp_t)0x02u)
17#ifdef CONFIG_DMA_IS_DMA32
18#define __GFP_DMA32 ((__force gfp_t)0x01) /* ZONE_DMA is ZONE_DMA32 */
19#elif BITS_PER_LONG < 64
20#define __GFP_DMA32 ((__force gfp_t)0x00) /* ZONE_NORMAL is ZONE_DMA32 */
21#else
22#define __GFP_DMA32 ((__force gfp_t)0x04) /* Has own ZONE_DMA32 */
23#endif
17 24
18/* 25/*
19 * Action modifiers - doesn't change the zoning 26 * Action modifiers - doesn't change the zoning
@@ -39,8 +46,7 @@ struct vm_area_struct;
39#define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */ 46#define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */
40#define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */ 47#define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */
41#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ 48#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
42#define __GFP_NORECLAIM ((__force gfp_t)0x20000u) /* No realy zone reclaim during allocation */ 49#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
43#define __GFP_HARDWALL ((__force gfp_t)0x40000u) /* Enforce hardwall cpuset memory allocs */
44 50
45#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */ 51#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
46#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) 52#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
@@ -49,7 +55,7 @@ struct vm_area_struct;
49#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ 55#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
50 __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \ 56 __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
51 __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \ 57 __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
52 __GFP_NOMEMALLOC|__GFP_NORECLAIM|__GFP_HARDWALL) 58 __GFP_NOMEMALLOC|__GFP_HARDWALL)
53 59
54#define GFP_ATOMIC (__GFP_HIGH) 60#define GFP_ATOMIC (__GFP_HIGH)
55#define GFP_NOIO (__GFP_WAIT) 61#define GFP_NOIO (__GFP_WAIT)
@@ -64,6 +70,10 @@ struct vm_area_struct;
64 70
65#define GFP_DMA __GFP_DMA 71#define GFP_DMA __GFP_DMA
66 72
73/* 4GB DMA on some platforms */
74#define GFP_DMA32 __GFP_DMA32
75
76
67#define gfp_zone(mask) ((__force int)((mask) & (__force gfp_t)GFP_ZONEMASK)) 77#define gfp_zone(mask) ((__force int)((mask) & (__force gfp_t)GFP_ZONEMASK))
68 78
69/* 79/*
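GFP_DMA32/__GFP_DMA32 introduced above select the new ZONE_DMA32 on configurations that have one (see the mmzone.h hunk further down) and quietly collapse onto ZONE_DMA or ZONE_NORMAL elsewhere. A hedged usage sketch for a driver whose device can only address 32-bit physical memory (the helper name is made up):

#include <linux/gfp.h>
#include <linux/mm.h>

/* hypothetical helper: one order-0 page the device can reach with 32-bit DMA */
static struct page *alloc_dma32_page(void)
{
        return alloc_pages(GFP_KERNEL | GFP_DMA32, 0);
}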
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 5912874ca83c..71d2b8a723b9 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -90,6 +90,8 @@ extern void synchronize_irq(unsigned int irq);
90#define nmi_enter() irq_enter() 90#define nmi_enter() irq_enter()
91#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET) 91#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET)
92 92
93struct task_struct;
94
93#ifndef CONFIG_VIRT_CPU_ACCOUNTING 95#ifndef CONFIG_VIRT_CPU_ACCOUNTING
94static inline void account_user_vtime(struct task_struct *tsk) 96static inline void account_user_vtime(struct task_struct *tsk)
95{ 97{
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0cea162b08c0..1056717ee501 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -102,8 +102,8 @@ static inline unsigned long hugetlb_total_pages(void)
102#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) 102#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
103 103
104#ifndef HPAGE_MASK 104#ifndef HPAGE_MASK
105#define HPAGE_MASK 0 /* Keep the compiler happy */ 105#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
106#define HPAGE_SIZE 0 106#define HPAGE_SIZE PAGE_SIZE
107#endif 107#endif
108 108
109#endif /* !CONFIG_HUGETLB_PAGE */ 109#endif /* !CONFIG_HUGETLB_PAGE */
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 74abaecdb572..1543daaa9c5e 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -107,6 +107,7 @@
107#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */ 107#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */
108#define I2C_DRIVERID_SAA7127 72 /* saa7124 video encoder */ 108#define I2C_DRIVERID_SAA7127 72 /* saa7124 video encoder */
109#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ 109#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
110#define I2C_DRIVERID_AKITAIOEXP 74 /* IO Expander on Sharp SL-C1000 */
110 111
111#define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */ 112#define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */
112#define I2C_DRIVERID_EXP1 0xF1 113#define I2C_DRIVERID_EXP1 0xF1
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 68ab5f2ab9cd..dcfd2ecccb5d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -51,7 +51,6 @@
51 .page_table_lock = SPIN_LOCK_UNLOCKED, \ 51 .page_table_lock = SPIN_LOCK_UNLOCKED, \
52 .mmlist = LIST_HEAD_INIT(name.mmlist), \ 52 .mmlist = LIST_HEAD_INIT(name.mmlist), \
53 .cpu_vm_mask = CPU_MASK_ALL, \ 53 .cpu_vm_mask = CPU_MASK_ALL, \
54 .default_kioctx = INIT_KIOCTX(name.default_kioctx, name), \
55} 54}
56 55
57#define INIT_SIGNALS(sig) { \ 56#define INIT_SIGNALS(sig) { \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0a90205184b0..41f150a3d2dd 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -9,6 +9,7 @@
9#include <linux/preempt.h> 9#include <linux/preempt.h>
10#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11#include <linux/hardirq.h> 11#include <linux/hardirq.h>
12#include <linux/sched.h>
12#include <asm/atomic.h> 13#include <asm/atomic.h>
13#include <asm/ptrace.h> 14#include <asm/ptrace.h>
14#include <asm/system.h> 15#include <asm/system.h>
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7b115feca4df..1013a42d10b1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -206,12 +206,6 @@ struct vm_operations_struct {
206struct mmu_gather; 206struct mmu_gather;
207struct inode; 207struct inode;
208 208
209#ifdef ARCH_HAS_ATOMIC_UNSIGNED
210typedef unsigned page_flags_t;
211#else
212typedef unsigned long page_flags_t;
213#endif
214
215/* 209/*
216 * Each physical page in the system has a struct page associated with 210 * Each physical page in the system has a struct page associated with
217 * it to keep track of whatever it is we are using the page for at the 211 * it to keep track of whatever it is we are using the page for at the
@@ -219,7 +213,7 @@ typedef unsigned long page_flags_t;
219 * a page. 213 * a page.
220 */ 214 */
221struct page { 215struct page {
222 page_flags_t flags; /* Atomic flags, some possibly 216 unsigned long flags; /* Atomic flags, some possibly
223 * updated asynchronously */ 217 * updated asynchronously */
224 atomic_t _count; /* Usage count, see below. */ 218 atomic_t _count; /* Usage count, see below. */
225 atomic_t _mapcount; /* Count of ptes mapped in mms, 219 atomic_t _mapcount; /* Count of ptes mapped in mms,
@@ -435,7 +429,7 @@ static inline void put_page(struct page *page)
435#endif 429#endif
436 430
437/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */ 431/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
438#define SECTIONS_PGOFF ((sizeof(page_flags_t)*8) - SECTIONS_WIDTH) 432#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
439#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) 433#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
440#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) 434#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
441 435
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f5fa3082fd6a..2c8edad5dccf 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -71,10 +71,11 @@ struct per_cpu_pageset {
71#endif 71#endif
72 72
73#define ZONE_DMA 0 73#define ZONE_DMA 0
74#define ZONE_NORMAL 1 74#define ZONE_DMA32 1
75#define ZONE_HIGHMEM 2 75#define ZONE_NORMAL 2
76#define ZONE_HIGHMEM 3
76 77
77#define MAX_NR_ZONES 3 /* Sync this with ZONES_SHIFT */ 78#define MAX_NR_ZONES 4 /* Sync this with ZONES_SHIFT */
78#define ZONES_SHIFT 2 /* ceil(log2(MAX_NR_ZONES)) */ 79#define ZONES_SHIFT 2 /* ceil(log2(MAX_NR_ZONES)) */
79 80
80 81
@@ -108,9 +109,10 @@ struct per_cpu_pageset {
108 109
109/* 110/*
110 * On machines where it is needed (eg PCs) we divide physical memory 111 * On machines where it is needed (eg PCs) we divide physical memory
111 * into multiple physical zones. On a PC we have 3 zones: 112 * into multiple physical zones. On a PC we have 4 zones:
112 * 113 *
113 * ZONE_DMA < 16 MB ISA DMA capable memory 114 * ZONE_DMA < 16 MB ISA DMA capable memory
115 * ZONE_DMA32 0 MB Empty
114 * ZONE_NORMAL 16-896 MB direct mapped by the kernel 116 * ZONE_NORMAL 16-896 MB direct mapped by the kernel
115 * ZONE_HIGHMEM > 896 MB only page cache and user processes 117 * ZONE_HIGHMEM > 896 MB only page cache and user processes
116 */ 118 */
@@ -329,7 +331,7 @@ void get_zone_counts(unsigned long *active, unsigned long *inactive,
329void build_all_zonelists(void); 331void build_all_zonelists(void);
330void wakeup_kswapd(struct zone *zone, int order); 332void wakeup_kswapd(struct zone *zone, int order);
331int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 333int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
332 int alloc_type, int can_try_harder, gfp_t gfp_high); 334 int classzone_idx, int alloc_flags);
333 335
334#ifdef CONFIG_HAVE_MEMORY_PRESENT 336#ifdef CONFIG_HAVE_MEMORY_PRESENT
335void memory_present(int nid, unsigned long start, unsigned long end); 337void memory_present(int nid, unsigned long start, unsigned long end);
@@ -433,7 +435,9 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
433 435
434#include <linux/topology.h> 436#include <linux/topology.h>
435/* Returns the number of the current Node. */ 437/* Returns the number of the current Node. */
438#ifndef numa_node_id
436#define numa_node_id() (cpu_to_node(raw_smp_processor_id())) 439#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
440#endif
437 441
438#ifndef CONFIG_NEED_MULTIPLE_NODES 442#ifndef CONFIG_NEED_MULTIPLE_NODES
439 443
@@ -453,12 +457,12 @@ extern struct pglist_data contig_page_data;
453#include <asm/sparsemem.h> 457#include <asm/sparsemem.h>
454#endif 458#endif
455 459
456#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED) 460#if BITS_PER_LONG == 32
457/* 461/*
458 * with 32 bit page->flags field, we reserve 8 bits for node/zone info. 462 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
459 * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes. 463 * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
460 */ 464 */
461#define FLAGS_RESERVED 8 465#define FLAGS_RESERVED 9
462 466
463#elif BITS_PER_LONG == 64 467#elif BITS_PER_LONG == 64
464/* 468/*
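As a worked example of the new 32-bit page->flags budget (assuming no sparsemem, i.e. SECTIONS_WIDTH = 0, and the 6-bit node / 3-bit zone split from the comment above): SECTIONS_PGOFF = 32, NODES_PGOFF = 32 - 6 = 26, ZONES_PGOFF = 26 - 3 = 23, so node and zone numbers occupy bits 23-31 and ordinary page flags keep bits 0-22, which is why FLAGS_RESERVED grows from 8 to 9.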
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 72975fa8795d..934a2479f160 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -112,7 +112,6 @@ struct nfnl_callback
112{ 112{
113 int (*call)(struct sock *nl, struct sk_buff *skb, 113 int (*call)(struct sock *nl, struct sk_buff *skb,
114 struct nlmsghdr *nlh, struct nfattr *cda[], int *errp); 114 struct nlmsghdr *nlh, struct nfattr *cda[], int *errp);
115 kernel_cap_t cap_required; /* capabilities required for this msg */
116 u_int16_t attr_count; /* number of nfattr's */ 115 u_int16_t attr_count; /* number of nfattr's */
117}; 116};
118 117
@@ -154,11 +153,14 @@ extern void nfattr_parse(struct nfattr *tb[], int maxattr,
154 153
155#define nfattr_bad_size(tb, max, cta_min) \ 154#define nfattr_bad_size(tb, max, cta_min) \
156({ int __i, __res = 0; \ 155({ int __i, __res = 0; \
157 for (__i=0; __i<max; __i++) \ 156 for (__i=0; __i<max; __i++) { \
157 if (!cta_min[__i]) \
158 continue; \
158 if (tb[__i] && NFA_PAYLOAD(tb[__i]) < cta_min[__i]){ \ 159 if (tb[__i] && NFA_PAYLOAD(tb[__i]) < cta_min[__i]){ \
159 __res = 1; \ 160 __res = 1; \
160 break; \ 161 break; \
161 } \ 162 } \
163 } \
162 __res; \ 164 __res; \
163}) 165})
164 166
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ba6c310a055f..ee700c6eb442 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -53,12 +53,12 @@ void release_pages(struct page **pages, int nr, int cold);
53 53
54static inline struct page *page_cache_alloc(struct address_space *x) 54static inline struct page *page_cache_alloc(struct address_space *x)
55{ 55{
56 return alloc_pages(mapping_gfp_mask(x)|__GFP_NORECLAIM, 0); 56 return alloc_pages(mapping_gfp_mask(x), 0);
57} 57}
58 58
59static inline struct page *page_cache_alloc_cold(struct address_space *x) 59static inline struct page *page_cache_alloc_cold(struct address_space *x)
60{ 60{
61 return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD|__GFP_NORECLAIM, 0); 61 return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
62} 62}
63 63
64typedef int filler_t(void *, struct page *); 64typedef int filler_t(void *, struct page *);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d00f8ba7f22b..d4c1c8fd2925 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -805,6 +805,10 @@
805#define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051 805#define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051
806#define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058 806#define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058
807#define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059 807#define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059
808#define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066
809#define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069
810#define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a
811#define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b
808#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645 812#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645
809 813
810#define PCI_VENDOR_ID_YAMAHA 0x1073 814#define PCI_VENDOR_ID_YAMAHA 0x1073
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5451eb1e781d..fb8d2d24e4bb 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -38,7 +38,7 @@ extern void free_percpu(const void *);
38 38
39#else /* CONFIG_SMP */ 39#else /* CONFIG_SMP */
40 40
41#define per_cpu_ptr(ptr, cpu) (ptr) 41#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
42 42
43static inline void *__alloc_percpu(size_t size, size_t align) 43static inline void *__alloc_percpu(size_t size, size_t align)
44{ 44{
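On !SMP, per_cpu_ptr() becomes a GCC statement expression so the cpu argument is still evaluated (its side effects are kept and unused-argument warnings avoided) while the result is simply the pointer. A small sketch of the difference; pick_counter is hypothetical:

#include <linux/smp.h>

#define per_cpu_ptr_old(ptr, cpu)  (ptr)                      /* old UP form: cpu is discarded unevaluated */
#define per_cpu_ptr_new(ptr, cpu)  ({ (void)(cpu); (ptr); })  /* new UP form: cpu is evaluated, then ignored */

static int *pick_counter(int *base)
{
        /* with the old form the smp_processor_id() call would vanish entirely */
        return per_cpu_ptr_new(base, smp_processor_id());
}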
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 1514098d156d..5be87ba3b7ac 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -94,55 +94,6 @@ struct pm_dev
94 struct list_head entry; 94 struct list_head entry;
95}; 95};
96 96
97#ifdef CONFIG_PM
98
99extern int pm_active;
100
101#define PM_IS_ACTIVE() (pm_active != 0)
102
103/*
104 * Register a device with power management
105 */
106struct pm_dev __deprecated *
107pm_register(pm_dev_t type, unsigned long id, pm_callback callback);
108
109/*
110 * Unregister a device with power management
111 */
112void __deprecated pm_unregister(struct pm_dev *dev);
113
114/*
115 * Unregister all devices with matching callback
116 */
117void __deprecated pm_unregister_all(pm_callback callback);
118
119/*
120 * Send a request to all devices
121 */
122int __deprecated pm_send_all(pm_request_t rqst, void *data);
123
124#else /* CONFIG_PM */
125
126#define PM_IS_ACTIVE() 0
127
128static inline struct pm_dev *pm_register(pm_dev_t type,
129 unsigned long id,
130 pm_callback callback)
131{
132 return NULL;
133}
134
135static inline void pm_unregister(struct pm_dev *dev) {}
136
137static inline void pm_unregister_all(pm_callback callback) {}
138
139static inline int pm_send_all(pm_request_t rqst, void *data)
140{
141 return 0;
142}
143
144#endif /* CONFIG_PM */
145
146/* Functions above this comment are list-based old-style power 97/* Functions above this comment are list-based old-style power
147 * management. Please avoid using them. */ 98 * management. Please avoid using them. */
148 99
diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h
new file mode 100644
index 000000000000..1252b45face1
--- /dev/null
+++ b/include/linux/pm_legacy.h
@@ -0,0 +1,56 @@
1#ifndef __LINUX_PM_LEGACY_H__
2#define __LINUX_PM_LEGACY_H__
3
4#include <linux/config.h>
5
6#ifdef CONFIG_PM_LEGACY
7
8extern int pm_active;
9
10#define PM_IS_ACTIVE() (pm_active != 0)
11
12/*
13 * Register a device with power management
14 */
15struct pm_dev __deprecated *
16pm_register(pm_dev_t type, unsigned long id, pm_callback callback);
17
18/*
19 * Unregister a device with power management
20 */
21void __deprecated pm_unregister(struct pm_dev *dev);
22
23/*
24 * Unregister all devices with matching callback
25 */
26void __deprecated pm_unregister_all(pm_callback callback);
27
28/*
29 * Send a request to all devices
30 */
31int __deprecated pm_send_all(pm_request_t rqst, void *data);
32
33#else /* CONFIG_PM_LEGACY */
34
35#define PM_IS_ACTIVE() 0
36
37static inline struct pm_dev *pm_register(pm_dev_t type,
38 unsigned long id,
39 pm_callback callback)
40{
41 return NULL;
42}
43
44static inline void pm_unregister(struct pm_dev *dev) {}
45
46static inline void pm_unregister_all(pm_callback callback) {}
47
48static inline int pm_send_all(pm_request_t rqst, void *data)
49{
50 return 0;
51}
52
53#endif /* CONFIG_PM_LEGACY */
54
55#endif /* __LINUX_PM_LEGACY_H__ */
56
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index dd98c54a23b4..d9a2f5254a51 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/config.h> 9#include <linux/config.h>
10#include <linux/thread_info.h>
10#include <linux/linkage.h> 11#include <linux/linkage.h>
11 12
12#ifdef CONFIG_DEBUG_PREEMPT 13#ifdef CONFIG_DEBUG_PREEMPT
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2bbf968b23d9..2038bd27b041 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -357,7 +357,6 @@ struct mm_struct {
357 /* aio bits */ 357 /* aio bits */
358 rwlock_t ioctx_list_lock; 358 rwlock_t ioctx_list_lock;
359 struct kioctx *ioctx_list; 359 struct kioctx *ioctx_list;
360 struct kioctx default_kioctx;
361}; 360};
362 361
363struct sighand_struct { 362struct sighand_struct {
@@ -1233,32 +1232,49 @@ static inline void task_unlock(struct task_struct *p)
1233 spin_unlock(&p->alloc_lock); 1232 spin_unlock(&p->alloc_lock);
1234} 1233}
1235 1234
1235#ifndef __HAVE_THREAD_FUNCTIONS
1236
1237#define task_thread_info(task) (task)->thread_info
1238
1239static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
1240{
1241 *task_thread_info(p) = *task_thread_info(org);
1242 task_thread_info(p)->task = p;
1243}
1244
1245static inline unsigned long *end_of_stack(struct task_struct *p)
1246{
1247 return (unsigned long *)(p->thread_info + 1);
1248}
1249
1250#endif
1251
1236/* set thread flags in other task's structures 1252/* set thread flags in other task's structures
1237 * - see asm/thread_info.h for TIF_xxxx flags available 1253 * - see asm/thread_info.h for TIF_xxxx flags available
1238 */ 1254 */
1239static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 1255static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1240{ 1256{
1241 set_ti_thread_flag(tsk->thread_info,flag); 1257 set_ti_thread_flag(task_thread_info(tsk), flag);
1242} 1258}
1243 1259
1244static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 1260static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1245{ 1261{
1246 clear_ti_thread_flag(tsk->thread_info,flag); 1262 clear_ti_thread_flag(task_thread_info(tsk), flag);
1247} 1263}
1248 1264
1249static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 1265static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1250{ 1266{
1251 return test_and_set_ti_thread_flag(tsk->thread_info,flag); 1267 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1252} 1268}
1253 1269
1254static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 1270static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1255{ 1271{
1256 return test_and_clear_ti_thread_flag(tsk->thread_info,flag); 1272 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1257} 1273}
1258 1274
1259static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 1275static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1260{ 1276{
1261 return test_ti_thread_flag(tsk->thread_info,flag); 1277 return test_ti_thread_flag(task_thread_info(tsk), flag);
1262} 1278}
1263 1279
1264static inline void set_tsk_need_resched(struct task_struct *tsk) 1280static inline void set_tsk_need_resched(struct task_struct *tsk)
@@ -1329,12 +1345,12 @@ extern void signal_wake_up(struct task_struct *t, int resume_stopped);
1329 1345
1330static inline unsigned int task_cpu(const struct task_struct *p) 1346static inline unsigned int task_cpu(const struct task_struct *p)
1331{ 1347{
1332 return p->thread_info->cpu; 1348 return task_thread_info(p)->cpu;
1333} 1349}
1334 1350
1335static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) 1351static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1336{ 1352{
1337 p->thread_info->cpu = cpu; 1353 task_thread_info(p)->cpu = cpu;
1338} 1354}
1339 1355
1340#else 1356#else
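task_thread_info(), setup_thread_stack() and end_of_stack() above are default definitions that an architecture can replace by defining __HAVE_THREAD_FUNCTIONS. One thing the default end_of_stack() makes easy is stack-overflow checking, since it points at the lowest usable stack word directly above thread_info; a purely illustrative sketch (the magic value and both helpers are invented here, not part of this patch):

#include <linux/sched.h>

#define STACK_END_MAGIC_SKETCH  0x57AC6E9DUL    /* hypothetical poison value */

static inline void poison_stack_end(struct task_struct *p)
{
        *end_of_stack(p) = STACK_END_MAGIC_SKETCH;      /* mark the lowest stack word, just above thread_info */
}

static inline int stack_overflowed(struct task_struct *p)
{
        return *end_of_stack(p) != STACK_END_MAGIC_SKETCH;      /* clobbered means the stack ran into thread_info */
}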
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index b63ce7014093..fa1ff3b165fe 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -2,11 +2,10 @@
2#define __LINUX_SMPLOCK_H 2#define __LINUX_SMPLOCK_H
3 3
4#include <linux/config.h> 4#include <linux/config.h>
5#ifdef CONFIG_LOCK_KERNEL
5#include <linux/sched.h> 6#include <linux/sched.h>
6#include <linux/spinlock.h> 7#include <linux/spinlock.h>
7 8
8#ifdef CONFIG_LOCK_KERNEL
9
10#define kernel_locked() (current->lock_depth >= 0) 9#define kernel_locked() (current->lock_depth >= 0)
11 10
12extern int __lockfunc __reacquire_kernel_lock(void); 11extern int __lockfunc __reacquire_kernel_lock(void);
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index d252f45a0f9b..1c4eb41dbd89 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -27,31 +27,6 @@ extern long do_no_restart_syscall(struct restart_block *parm);
27 * - pass TIF_xxxx constants to these functions 27 * - pass TIF_xxxx constants to these functions
28 */ 28 */
29 29
30static inline void set_thread_flag(int flag)
31{
32 set_bit(flag,&current_thread_info()->flags);
33}
34
35static inline void clear_thread_flag(int flag)
36{
37 clear_bit(flag,&current_thread_info()->flags);
38}
39
40static inline int test_and_set_thread_flag(int flag)
41{
42 return test_and_set_bit(flag,&current_thread_info()->flags);
43}
44
45static inline int test_and_clear_thread_flag(int flag)
46{
47 return test_and_clear_bit(flag,&current_thread_info()->flags);
48}
49
50static inline int test_thread_flag(int flag)
51{
52 return test_bit(flag,&current_thread_info()->flags);
53}
54
55static inline void set_ti_thread_flag(struct thread_info *ti, int flag) 30static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
56{ 31{
57 set_bit(flag,&ti->flags); 32 set_bit(flag,&ti->flags);
@@ -77,15 +52,19 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
77 return test_bit(flag,&ti->flags); 52 return test_bit(flag,&ti->flags);
78} 53}
79 54
80static inline void set_need_resched(void) 55#define set_thread_flag(flag) \
81{ 56 set_ti_thread_flag(current_thread_info(), flag)
82 set_thread_flag(TIF_NEED_RESCHED); 57#define clear_thread_flag(flag) \
83} 58 clear_ti_thread_flag(current_thread_info(), flag)
84 59#define test_and_set_thread_flag(flag) \
85static inline void clear_need_resched(void) 60 test_and_set_ti_thread_flag(current_thread_info(), flag)
86{ 61#define test_and_clear_thread_flag(flag) \
87 clear_thread_flag(TIF_NEED_RESCHED); 62 test_and_clear_ti_thread_flag(current_thread_info(), flag)
88} 63#define test_thread_flag(flag) \
64 test_ti_thread_flag(current_thread_info(), flag)
65
66#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
67#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
89 68
90#endif 69#endif
91 70
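Turning set_thread_flag() and friends into macros means thread_info.h no longer needs current_thread_info() or the TIF_* constants to be visible when it is parsed, only at the point of use, which relaxes header-ordering constraints (note the linux/preempt.h hunk above, which now includes linux/thread_info.h directly). For example, set_need_resched() now expands at its call site to set_ti_thread_flag(current_thread_info(), TIF_NEED_RESCHED).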
diff --git a/include/linux/time.h b/include/linux/time.h
index 8e83f4e778bb..bfbe92d0767c 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -101,7 +101,7 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
101static inline void 101static inline void
102set_normalized_timespec (struct timespec *ts, time_t sec, long nsec) 102set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
103{ 103{
104 while (nsec > NSEC_PER_SEC) { 104 while (nsec >= NSEC_PER_SEC) {
105 nsec -= NSEC_PER_SEC; 105 nsec -= NSEC_PER_SEC;
106 ++sec; 106 ++sec;
107 } 107 }
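The change from '>' to '>=' matters exactly at the boundary: set_normalized_timespec(&ts, 5, NSEC_PER_SEC) used to leave ts = { 5, 1000000000 }, which is not a normalized timespec, whereas with '>=' the loop runs once more and yields ts = { 6, 0 }.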
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 748d04385256..856d232c7562 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -819,7 +819,7 @@ typedef void (*usb_complete_t)(struct urb *, struct pt_regs *);
819 */ 819 */
820struct urb 820struct urb
821{ 821{
822 /* private, usb core and host controller only fields in the urb */ 822 /* private: usb core and host controller only fields in the urb */
823 struct kref kref; /* reference count of the URB */ 823 struct kref kref; /* reference count of the URB */
824 spinlock_t lock; /* lock for the URB */ 824 spinlock_t lock; /* lock for the URB */
825 void *hcpriv; /* private data for host controller */ 825 void *hcpriv; /* private data for host controller */
@@ -827,7 +827,7 @@ struct urb
827 atomic_t use_count; /* concurrent submissions counter */ 827 atomic_t use_count; /* concurrent submissions counter */
828 u8 reject; /* submissions will fail */ 828 u8 reject; /* submissions will fail */
829 829
830 /* public, documented fields in the urb that can be used by drivers */ 830 /* public: documented fields in the urb that can be used by drivers */
831 struct list_head urb_list; /* list head for use by the urb's 831 struct list_head urb_list; /* list head for use by the urb's
832 * current owner */ 832 * current owner */
833 struct usb_device *dev; /* (in) pointer to associated device */ 833 struct usb_device *dev; /* (in) pointer to associated device */
@@ -1045,7 +1045,7 @@ struct usb_sg_request {
1045 size_t bytes; 1045 size_t bytes;
1046 1046
1047 /* 1047 /*
1048 * members below are private to usbcore, 1048 * members below are private: to usbcore,
1049 * and are not provided for driver access! 1049 * and are not provided for driver access!
1050 */ 1050 */
1051 spinlock_t lock; 1051 spinlock_t lock;
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index a114fff6568b..1cded681eb6d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -636,6 +636,7 @@ typedef __u64 v4l2_std_id;
636#define V4L2_STD_SECAM_K ((v4l2_std_id)0x00100000) 636#define V4L2_STD_SECAM_K ((v4l2_std_id)0x00100000)
637#define V4L2_STD_SECAM_K1 ((v4l2_std_id)0x00200000) 637#define V4L2_STD_SECAM_K1 ((v4l2_std_id)0x00200000)
638#define V4L2_STD_SECAM_L ((v4l2_std_id)0x00400000) 638#define V4L2_STD_SECAM_L ((v4l2_std_id)0x00400000)
639#define V4L2_STD_SECAM_LC ((v4l2_std_id)0x00800000)
639 640
640/* ATSC/HDTV */ 641/* ATSC/HDTV */
641#define V4L2_STD_ATSC_8_VSB ((v4l2_std_id)0x01000000) 642#define V4L2_STD_ATSC_8_VSB ((v4l2_std_id)0x01000000)
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index 0f1ba95ec8d6..ad3e9bb670c3 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -49,6 +49,7 @@ struct ir_input_state {
49 49
50extern IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE]; 50extern IR_KEYTAB_TYPE ir_codes_rc5_tv[IR_KEYTAB_SIZE];
51extern IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE]; 51extern IR_KEYTAB_TYPE ir_codes_winfast[IR_KEYTAB_SIZE];
52extern IR_KEYTAB_TYPE ir_codes_pinnacle[IR_KEYTAB_SIZE];
52extern IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE]; 53extern IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE];
53extern IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE]; 54extern IR_KEYTAB_TYPE ir_codes_hauppauge_new[IR_KEYTAB_SIZE];
54extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE]; 55extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE];
diff --git a/include/media/ir-kbd-i2c.h b/include/media/ir-kbd-i2c.h
index 00fa57eb9fde..730f21ed91db 100644
--- a/include/media/ir-kbd-i2c.h
+++ b/include/media/ir-kbd-i2c.h
@@ -19,4 +19,6 @@ struct IR_i2c {
19 char phys[32]; 19 char phys[32];
20 int (*get_key)(struct IR_i2c*, u32*, u32*); 20 int (*get_key)(struct IR_i2c*, u32*, u32*);
21}; 21};
22
23int get_key_pinnacle(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
22#endif 24#endif
diff --git a/include/media/tuner.h b/include/media/tuner.h
index 9184e534b7ef..faa0f8e3091b 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -113,6 +113,7 @@
113#define TUNER_PHILIPS_TD1316 67 113#define TUNER_PHILIPS_TD1316 67
114 114
115#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */ 115#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */
116#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */
116 117
117#define NOTUNER 0 118#define NOTUNER 0
118#define PAL 1 /* PAL_BG */ 119#define PAL 1 /* PAL_BG */
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
new file mode 100644
index 000000000000..d3fd48157eb8
--- /dev/null
+++ b/include/media/v4l2-common.h
@@ -0,0 +1,110 @@
+/*
+    v4l2 common internal API header
+
+    This header contains internal shared ioctl definitions for use by the
+    internal low-level v4l2 drivers.
+    Each ioctl begins with VIDIOC_INT_ to clearly mark that it is an internal
+    define,
+
+    Copyright (C) 2005  Hans Verkuil <hverkuil@xs4all.nl>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef V4L2_COMMON_H_
+#define V4L2_COMMON_H_
+
+/* VIDIOC_INT_AUDIO_CLOCK_FREQ */
+enum v4l2_audio_clock_freq {
+	V4L2_AUDCLK_32_KHZ  = 32000,
+	V4L2_AUDCLK_441_KHZ = 44100,
+	V4L2_AUDCLK_48_KHZ  = 48000,
+};
+
+/* VIDIOC_INT_G_REGISTER and VIDIOC_INT_S_REGISTER */
+struct v4l2_register {
+	u32 i2c_id; 		/* I2C driver ID of the I2C chip. 0 for the I2C adapter. */
+	unsigned long reg;
+	u32 val;
+};
+
+/* VIDIOC_INT_DECODE_VBI_LINE */
+struct v4l2_decode_vbi_line {
+	u32 is_second_field;	/* Set to 0 for the first (odd) field,
+				   set to 1 for the second (even) field. */
+	u8 *p; 			/* Pointer to the sliced VBI data from the decoder.
+				   On exit points to the start of the payload. */
+	u32 line; 		/* Line number of the sliced VBI data (1-23) */
+	u32 type; 		/* VBI service type (V4L2_SLICED_*). 0 if no service found */
+};
+
+/* VIDIOC_INT_G_CHIP_IDENT: identifies the actual chip installed on the board */
+enum v4l2_chip_ident {
+	/* general idents: reserved range 0-49 */
+	V4L2_IDENT_UNKNOWN = 0,
+
+	/* module saa7115: reserved range 100-149 */
+	V4L2_IDENT_SAA7114 = 104,
+	V4L2_IDENT_SAA7115 = 105,
+
+	/* module saa7127: reserved range 150-199 */
+	V4L2_IDENT_SAA7127 = 157,
+	V4L2_IDENT_SAA7129 = 159,
+
+	/* module cx25840: reserved range 200-249 */
+	V4L2_IDENT_CX25840 = 240,
+	V4L2_IDENT_CX25841 = 241,
+	V4L2_IDENT_CX25842 = 242,
+	V4L2_IDENT_CX25843 = 243,
+};
+
+/* only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
+#define VIDIOC_INT_S_REGISTER 		_IOR ('d', 100, struct v4l2_register)
+#define VIDIOC_INT_G_REGISTER 		_IOWR('d', 101, struct v4l2_register)
+
+/* Reset the I2C chip */
+#define VIDIOC_INT_RESET 		_IO  ('d', 102)
+
+/* Set the frequency of the audio clock output.
+   Used to slave an audio processor to the video decoder, ensuring that audio
+   and video remain synchronized. */
+#define VIDIOC_INT_AUDIO_CLOCK_FREQ 	_IOR ('d', 103, enum v4l2_audio_clock_freq)
+
+/* Video decoders that support sliced VBI need to implement this ioctl.
+   Field p of the v4l2_sliced_vbi_line struct is set to the start of the VBI
+   data that was generated by the decoder. The driver then parses the sliced
+   VBI data and sets the other fields in the struct accordingly. The pointer p
+   is updated to point to the start of the payload which can be copied
+   verbatim into the data field of the v4l2_sliced_vbi_data struct. If no
+   valid VBI data was found, then the type field is set to 0 on return. */
+#define VIDIOC_INT_DECODE_VBI_LINE 	_IOWR('d', 104, struct v4l2_decode_vbi_line)
+
+/* Used to generate VBI signals on a video signal. v4l2_sliced_vbi_data is
+   filled with the data packets that should be output. Note that if you set
+   the line field to 0, then that VBI signal is disabled. */
+#define VIDIOC_INT_S_VBI_DATA 		_IOW ('d', 105, struct v4l2_sliced_vbi_data)
+
+/* Used to obtain the sliced VBI packet from a readback register. Not all
+   video decoders support this. If no data is available because the readback
+   register contains invalid or erroneous data -EIO is returned. Note that
+   you must fill in the 'id' member and the 'field' member (to determine
+   whether CC data from the first or second field should be obtained). */
+#define VIDIOC_INT_G_VBI_DATA 		_IOWR('d', 106, struct v4l2_sliced_vbi_data *)
+
+/* Returns the chip identifier or V4L2_IDENT_UNKNOWN if no identification can
+   be made. */
+#define VIDIOC_INT_G_CHIP_IDENT		_IOR ('d', 107, enum v4l2_chip_ident *)
+
+#endif /* V4L2_COMMON_H_ */
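These VIDIOC_INT_* ioctls are never exposed to user space; in this era they travel from a bridge driver to its I2C sub-device drivers through the drivers' command handlers. A rough sketch under that assumption, using i2c_clients_command() to broadcast on the adapter; the function name, variables, and chosen clock frequency are illustrative, not part of the patch:

#include <linux/i2c.h>
#include <media/v4l2-common.h>

/* Sketch: reset the decoder chips on the bus, then slave their audio clock
 * output so audio and video stay synchronized, as the comment on
 * VIDIOC_INT_AUDIO_CLOCK_FREQ describes. */
static void example_init_decoders(struct i2c_adapter *adap)
{
	enum v4l2_audio_clock_freq freq = V4L2_AUDCLK_48_KHZ;

	i2c_clients_command(adap, VIDIOC_INT_RESET, NULL);
	i2c_clients_command(adap, VIDIOC_INT_AUDIO_CLOCK_FREQ, &freq);
}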