author     Len Brown <len.brown@intel.com>  2005-12-06 17:31:30 -0500
committer  Len Brown <len.brown@intel.com>  2005-12-06 17:31:30 -0500
commit     3d5271f9883cba7b54762bc4fe027d4172f06db7 (patch)
tree       ab8a881a14478598a0c8bda0d26c62cdccfffd6d /include/asm-parisc
parent     378b2556f4e09fa6f87ff0cb5c4395ff28257d02 (diff)
parent     9115a6c787596e687df03010d97fccc5e0762506 (diff)
Pull release into acpica branch
Diffstat (limited to 'include/asm-parisc')
-rw-r--r--  include/asm-parisc/assembly.h        |  71
-rw-r--r--  include/asm-parisc/atomic.h          |  20
-rw-r--r--  include/asm-parisc/bitops.h          | 290
-rw-r--r--  include/asm-parisc/cacheflush.h      |  35
-rw-r--r--  include/asm-parisc/dma-mapping.h     |   8
-rw-r--r--  include/asm-parisc/errno.h           |   1
-rw-r--r--  include/asm-parisc/grfioctl.h        |   2
-rw-r--r--  include/asm-parisc/ide.h             |   1
-rw-r--r--  include/asm-parisc/irq.h             |   5
-rw-r--r--  include/asm-parisc/led.h             |   3
-rw-r--r--  include/asm-parisc/mmzone.h          |   6
-rw-r--r--  include/asm-parisc/parisc-device.h   |   7
-rw-r--r--  include/asm-parisc/pci.h             |   2
-rw-r--r--  include/asm-parisc/pgtable.h         |   5
-rw-r--r--  include/asm-parisc/processor.h       |  19
-rw-r--r--  include/asm-parisc/psw.h             |  51
-rw-r--r--  include/asm-parisc/ptrace.h          |   2
-rw-r--r--  include/asm-parisc/semaphore.h       |   3
-rw-r--r--  include/asm-parisc/smp.h             |   7
-rw-r--r--  include/asm-parisc/spinlock.h        |  24
-rw-r--r--  include/asm-parisc/spinlock_types.h  |   8
-rw-r--r--  include/asm-parisc/system.h          |  48
-rw-r--r--  include/asm-parisc/tlbflush.h        |  24
-rw-r--r--  include/asm-parisc/types.h           |   2
-rw-r--r--  include/asm-parisc/unistd.h          |  17
25 files changed, 376 insertions(+), 285 deletions(-)
diff --git a/include/asm-parisc/assembly.h b/include/asm-parisc/assembly.h
index 30b023411fef..3ce3440d1b0c 100644
--- a/include/asm-parisc/assembly.h
+++ b/include/asm-parisc/assembly.h
@@ -21,7 +21,9 @@
 #ifndef _PARISC_ASSEMBLY_H
 #define _PARISC_ASSEMBLY_H
 
-#ifdef __LP64__
+#define CALLEE_FLOAT_FRAME_SIZE	80
+
+#ifdef CONFIG_64BIT
 #define LDREG	ldd
 #define STREG	std
 #define LDREGX	ldd,s
@@ -30,8 +32,8 @@
 #define SHRREG	shrd
 #define RP_OFFSET	16
 #define FRAME_SIZE	128
-#define CALLEE_SAVE_FRAME_SIZE	144
-#else
+#define CALLEE_REG_FRAME_SIZE	144
+#else	/* CONFIG_64BIT */
 #define LDREG	ldw
 #define STREG	stw
 #define LDREGX	ldwx,s
@@ -40,9 +42,11 @@
 #define SHRREG	shr
 #define RP_OFFSET	20
 #define FRAME_SIZE	64
-#define CALLEE_SAVE_FRAME_SIZE	128
+#define CALLEE_REG_FRAME_SIZE	128
 #endif
 
+#define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)
+
 #ifdef CONFIG_PA20
 #define BL	b,l
 # ifdef CONFIG_64BIT
@@ -300,9 +304,35 @@
 	fldd,mb	-8(\regs), %fr0
 	.endm
 
+	.macro	callee_save_float
+	fstd,ma	%fr12, 8(%r30)
+	fstd,ma	%fr13, 8(%r30)
+	fstd,ma	%fr14, 8(%r30)
+	fstd,ma	%fr15, 8(%r30)
+	fstd,ma	%fr16, 8(%r30)
+	fstd,ma	%fr17, 8(%r30)
+	fstd,ma	%fr18, 8(%r30)
+	fstd,ma	%fr19, 8(%r30)
+	fstd,ma	%fr20, 8(%r30)
+	fstd,ma	%fr21, 8(%r30)
+	.endm
+
+	.macro	callee_rest_float
+	fldd,mb	-8(%r30), %fr21
+	fldd,mb	-8(%r30), %fr20
+	fldd,mb	-8(%r30), %fr19
+	fldd,mb	-8(%r30), %fr18
+	fldd,mb	-8(%r30), %fr17
+	fldd,mb	-8(%r30), %fr16
+	fldd,mb	-8(%r30), %fr15
+	fldd,mb	-8(%r30), %fr14
+	fldd,mb	-8(%r30), %fr13
+	fldd,mb	-8(%r30), %fr12
+	.endm
+
 #ifdef __LP64__
 	.macro	callee_save
-	std,ma	%r3, CALLEE_SAVE_FRAME_SIZE(%r30)
+	std,ma	%r3, CALLEE_REG_FRAME_SIZE(%r30)
 	mfctl	%cr27, %r3
 	std	%r4, -136(%r30)
 	std	%r5, -128(%r30)
@@ -340,13 +370,13 @@
 	ldd	-128(%r30), %r5
 	ldd	-136(%r30), %r4
 	mtctl	%r3, %cr27
-	ldd,mb	-CALLEE_SAVE_FRAME_SIZE(%r30), %r3
+	ldd,mb	-CALLEE_REG_FRAME_SIZE(%r30), %r3
 	.endm
 
 #else /* ! __LP64__ */
 
 	.macro	callee_save
-	stw,ma	%r3, CALLEE_SAVE_FRAME_SIZE(%r30)
+	stw,ma	%r3, CALLEE_REG_FRAME_SIZE(%r30)
 	mfctl	%cr27, %r3
 	stw	%r4, -124(%r30)
 	stw	%r5, -120(%r30)
@@ -384,7 +414,7 @@
 	ldw	-120(%r30), %r5
 	ldw	-124(%r30), %r4
 	mtctl	%r3, %cr27
-	ldw,mb	-CALLEE_SAVE_FRAME_SIZE(%r30), %r3
+	ldw,mb	-CALLEE_REG_FRAME_SIZE(%r30), %r3
 	.endm
 #endif /* ! __LP64__ */
 
@@ -450,5 +480,30 @@
 	REST_CR	(%cr22, PT_PSW	(\regs))
 	.endm
 
+
+	/* First step to create a "relied upon translation"
+	 * See PA 2.0 Arch. page F-4 and F-5.
+	 *
+	 * The ssm was originally necessary due to a "PCxT bug".
+	 * But someone decided it needed to be added to the architecture
+	 * and this "feature" went into rev3 of PA-RISC 1.1 Arch Manual.
+	 * It's been carried forward into PA 2.0 Arch as well. :^(
+	 *
+	 * "ssm 0,%r0" is a NOP with side effects (prefetch barrier).
+	 * rsm/ssm prevents the ifetch unit from speculatively fetching
+	 * instructions past this line in the code stream.
+	 * PA 2.0 processor will single step all insn in the same QUAD (4 insn).
+	 */
+	.macro	pcxt_ssm_bug
+	rsm	PSW_SM_I,%r0
+	nop	/* 1 */
+	nop	/* 2 */
+	nop	/* 3 */
+	nop	/* 4 */
+	nop	/* 5 */
+	nop	/* 6 */
+	nop	/* 7 */
+	.endm
+
 #endif /* __ASSEMBLY__ */
 #endif
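The frame-size split above is easy to sanity-check: callee_save_float issues ten 8-byte fstd,ma stores (%fr12..%fr21), which is exactly the 80-byte CALLEE_FLOAT_FRAME_SIZE now stacked on top of the general-register frame. A minimal C sketch of that arithmetic, using the 64-bit values from the hunk:

	/* Sketch: the frame-size arithmetic behind the assembly.h change
	 * above (64-bit values; the 32-bit branch uses 128 instead of 144). */
	#include <assert.h>

	#define CALLEE_FLOAT_FRAME_SIZE	80	/* 10 double-word FP regs: %fr12..%fr21 */
	#define CALLEE_REG_FRAME_SIZE	144	/* general-register save area */
	#define CALLEE_SAVE_FRAME_SIZE	(CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)

	int main(void)
	{
		assert(CALLEE_FLOAT_FRAME_SIZE == 10 * 8);	/* ten fstd,ma stores */
		assert(CALLEE_SAVE_FRAME_SIZE == 224);
		return 0;
	}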
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 048a2c7fd0c0..983e9a2b6042 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -164,6 +164,26 @@ static __inline__ int atomic_read(const atomic_t *v)
 }
 
 /* exported interface */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
 #define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i),(v))))
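The atomic_add_unless() statement-expression above is the standard compare-and-swap retry loop. The same logic written as a plain function may be easier to follow; this is only a sketch of the macro's behaviour, not a drop-in replacement:

	/* Sketch: the cmpxchg retry loop behind atomic_add_unless().
	 * atomic_read() and atomic_cmpxchg() are the interfaces from the hunk. */
	static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
	{
		int c = atomic_read(v);

		while (c != u) {
			int old = atomic_cmpxchg(v, c, c + a);
			if (old == c)	/* no one raced us: the add happened */
				break;
			c = old;	/* lost the race: retry with the fresh value */
		}
		return c != u;		/* non-zero iff v was not u */
	}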
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index af7db694b22d..55b98c67fd82 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -2,7 +2,7 @@
 #define _PARISC_BITOPS_H
 
 #include <linux/compiler.h>
-#include <asm/spinlock.h>
+#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
 
@@ -12,193 +12,157 @@
  * to include/asm-i386/bitops.h or kerneldoc
  */
 
-#ifdef __LP64__
-#   define SHIFT_PER_LONG 6
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG 64
-#endif
-#else
-#   define SHIFT_PER_LONG 5
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG 32
-#endif
-#endif
-
-#define CHOP_SHIFTCOUNT(x) ((x) & (BITS_PER_LONG - 1))
+#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
 
 
 #define smp_mb__before_clear_bit()      smp_mb()
 #define smp_mb__after_clear_bit()       smp_mb()
 
-static __inline__ void set_bit(int nr, volatile unsigned long * address)
+/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
+ * on use of volatile and __*_bit() (set/clear/change):
+ *	*_bit() want use of volatile.
+ *	__*_bit() are "relaxed" and don't use spinlock or volatile.
+ */
+
+static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	*addr |= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __set_bit(int nr, volatile unsigned long * address)
+static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	*addr |= mask;
+	*m |= 1UL << CHOP_SHIFTCOUNT(nr);
 }
 
-static __inline__ void clear_bit(int nr, volatile unsigned long * address)
+static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	*addr &= ~mask;
+	*addr &= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * address)
+static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	*addr &= ~mask;
+	*m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
 }
 
-static __inline__ void change_bit(int nr, volatile unsigned long * address)
+static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	*addr ^= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __change_bit(int nr, volatile unsigned long * address)
+static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	*addr ^= mask;
+	*m ^= 1UL << CHOP_SHIFTCOUNT(nr);
 }
 
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr |= mask;
+	oldbit = *addr;
+	*addr = oldbit | mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
+	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr |= mask;
+	oldbit = *addr;
+	*addr = oldbit | mask;
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr &= ~mask;
+	oldbit = *addr;
+	*addr = oldbit & ~mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
+	unsigned long oldbit;
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr &= ~mask;
+	oldbit = *addr;
+	*addr = oldbit & ~mask;
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr ^= mask;
+	oldbit = *addr;
+	*addr = oldbit ^ mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
+	unsigned long oldbit;
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr ^= mask;
+	oldbit = *addr;
+	*addr = oldbit ^ mask;
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int test_bit(int nr, const volatile unsigned long *address)
 {
-	unsigned long mask;
-	const unsigned long *addr = (const unsigned long *)address;
-
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);
 
 	return !!(*addr & mask);
 }
@@ -229,7 +193,7 @@ static __inline__ unsigned long __ffs(unsigned long x)
 	unsigned long ret;
 
 	__asm__(
-#if BITS_PER_LONG > 32
+#ifdef __LP64__
 		" ldi       63,%1\n"
 		" extrd,u,*<>  %0,63,32,%%r0\n"
 		" extrd,u,*TR  %0,31,32,%0\n"	/* move top 32-bits down */
@@ -304,14 +268,7 @@ static __inline__ int fls(int x)
  * hweightN: returns the hamming weight (i.e. the number
  * of bits set) of a N-bit word
  */
-#define hweight64(x)						\
-({								\
-	unsigned long __x = (x);				\
-	unsigned int __w;					\
-	__w = generic_hweight32((unsigned int) __x);		\
-	__w += generic_hweight32((unsigned int) (__x>>32));	\
-	__w;							\
-})
+#define hweight64(x) generic_hweight64(x)
 #define hweight32(x) generic_hweight32(x)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x) generic_hweight8(x)
@@ -324,7 +281,13 @@ static __inline__ int fls(int x)
  */
 static inline int sched_find_first_bit(const unsigned long *b)
 {
-#ifndef __LP64__
+#ifdef __LP64__
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 64;
+	return __ffs(b[2]) + 128;
+#else
 	if (unlikely(b[0]))
 		return __ffs(b[0]);
 	if (unlikely(b[1]))
@@ -334,14 +297,6 @@ static inline int sched_find_first_bit(const unsigned long *b)
 	if (b[3])
 		return __ffs(b[3]) + 96;
 	return __ffs(b[4]) + 128;
-#else
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(((unsigned int)b[1])))
-		return __ffs(b[1]) + 64;
-	if (b[1] >> 32)
-		return __ffs(b[1] >> 32) + 96;
-	return __ffs(b[2]) + 128;
 #endif
 }
 
@@ -391,7 +346,7 @@ found_middle:
 
 static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
 {
-	const unsigned long *p = addr + (offset >> 6);
+	const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
 	unsigned long tmp;
 
@@ -445,71 +400,90 @@ found_middle:
  * test_and_{set,clear}_bit guarantee atomicity without
  * disabling interrupts.
  */
-#ifdef __LP64__
-#define ext2_set_bit(nr, addr)		__test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr)	test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr)	__test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
-#else
-#define ext2_set_bit(nr, addr)		__test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr)	test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr)	__test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
-#endif
 
-#endif /* __KERNEL__ */
+/* '3' is bits per byte */
+#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
 
-static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
-{
-	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;
+#define ext2_test_bit(nr, addr) \
+			test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_set_bit(nr, addr) \
+		__test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_clear_bit(nr, addr) \
+		__test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
 
-	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
-}
+#define ext2_set_bit_atomic(l,nr,addr) \
+		test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_clear_bit_atomic(l,nr,addr) \
+		test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+
+#endif	/* __KERNEL__ */
 
-/*
- * This implementation of ext2_find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
- */
 
 #define ext2_find_first_zero_bit(addr, size) \
 	ext2_find_next_zero_bit((addr), (size), 0)
 
-extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
-	unsigned long size, unsigned long offset)
+/* include/linux/byteorder does not support "unsigned long" type */
+static inline unsigned long ext2_swabp(unsigned long * x)
 {
-	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
-	unsigned int result = offset & ~31UL;
-	unsigned int tmp;
+#ifdef __LP64__
+	return (unsigned long) __swab64p((u64 *) x);
+#else
+	return (unsigned long) __swab32p((u32 *) x);
+#endif
+}
+
+/* include/linux/byteorder doesn't support "unsigned long" type */
+static inline unsigned long ext2_swab(unsigned long y)
+{
+#ifdef __LP64__
+	return (unsigned long) __swab64((u64) y);
+#else
+	return (unsigned long) __swab32((u32) y);
+#endif
+}
+
+static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+{
+	unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
+	unsigned long result = offset & ~(BITS_PER_LONG - 1);
+	unsigned long tmp;
 
 	if (offset >= size)
 		return size;
 	size -= result;
-	offset &= 31UL;
+	offset &= (BITS_PER_LONG - 1UL);
 	if (offset) {
-		tmp = cpu_to_le32p(p++);
-		tmp |= ~0UL >> (32-offset);
-		if (size < 32)
+		tmp = ext2_swabp(p++);
+		tmp |= (~0UL >> (BITS_PER_LONG - offset));
+		if (size < BITS_PER_LONG)
 			goto found_first;
-		if (tmp != ~0U)
+		if (~tmp)
 			goto found_middle;
-		size -= 32;
-		result += 32;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
 	}
-	while (size >= 32) {
-		if ((tmp = cpu_to_le32p(p++)) != ~0U)
-			goto found_middle;
-		result += 32;
-		size -= 32;
+
+	while (size & ~(BITS_PER_LONG - 1)) {
+		if (~(tmp = *(p++)))
+			goto found_middle_swap;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
 	}
 	if (!size)
 		return result;
-	tmp = cpu_to_le32p(p);
+	tmp = ext2_swabp(p);
 found_first:
-	tmp |= ~0U << size;
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size; /* Nope. Skip ffz */
 found_middle:
 	return result + ffz(tmp);
+
+found_middle_swap:
+	return result + ffz(ext2_swab(tmp));
 }
 
+
 /* Bitmap functions for the minix filesystem.  */
 #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
 #define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
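The LE_BYTE_ADDR xor above generalises the old hard-coded 0x38/0x18 constants: it converts a little-endian (ext2 on-disk) bit number into the equivalent bit number inside a big-endian long by mirroring the byte index while leaving the bit-within-byte position alone. A standalone sketch, assuming 64-bit longs as in the LP64 case:

	/* Sketch: the (nr ^ LE_BYTE_ADDR) remapping used by the ext2_*_bit()
	 * macros above. On LP64 LE_BYTE_ADDR is 0x38, so the xor flips which
	 * byte of the long the bit lands in, keeping the bit-in-byte offset. */
	#include <stdio.h>

	#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)	/* 0x38 on LP64 */

	int main(void)
	{
		unsigned int nr = 9;	/* little-endian bit 9 = byte 1, bit 1 */
		unsigned int be = nr ^ LE_BYTE_ADDR;

		/* byte index is mirrored (1 -> 6); bit-in-byte (1) is unchanged */
		printf("le byte %u bit %u -> be bit %u (byte %u bit %u)\n",
		       nr >> 3, nr & 7, be, be >> 3, be & 7);
		return 0;
	}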
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index aa592d8c0e39..1bc3c83ee74b 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -100,30 +100,34 @@ static inline void flush_cache_range(struct vm_area_struct *vma,
 
 /* Simple function to work out if we have an existing address translation
  * for a user space vma. */
-static inline pte_t *__translation_exists(struct mm_struct *mm,
-					  unsigned long addr)
+static inline int translation_exists(struct vm_area_struct *vma,
+				     unsigned long addr, unsigned long pfn)
 {
-	pgd_t *pgd = pgd_offset(mm, addr);
+	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t pte;
 
 	if(pgd_none(*pgd))
-		return NULL;
+		return 0;
 
 	pmd = pmd_offset(pgd, addr);
 	if(pmd_none(*pmd) || pmd_bad(*pmd))
-		return NULL;
+		return 0;
 
-	pte = pte_offset_map(pmd, addr);
+	/* We cannot take the pte lock here: flush_cache_page is usually
+	 * called with pte lock already held.  Whereas flush_dcache_page
+	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
+	 * the vma itself is secure, but the pte might come or go racily.
+	 */
+	pte = *pte_offset_map(pmd, addr);
+	/* But pte_unmap() does nothing on this architecture */
 
-	/* The PA flush mappings show up as pte_none, but they're
-	 * valid none the less */
-	if(pte_none(*pte) && ((pte_val(*pte) & _PAGE_FLUSH) == 0))
-		return NULL;
-	return pte;
-}
-#define	translation_exists(vma, addr)	__translation_exists((vma)->vm_mm, addr)
+	/* Filter out coincidental file entries and swap entries */
+	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
+		return 0;
 
+	return pte_pfn(pte) == pfn;
+}
 
 /* Private function to flush a page from the cache of a non-current
  * process.  cr25 contains the Page Directory of the current user
@@ -175,9 +179,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
 	BUG_ON(!vma->vm_mm->context);
 
-	if(likely(translation_exists(vma, vmaddr)))
+	if (likely(translation_exists(vma, vmaddr, pfn)))
 		__flush_cache_page(vma, vmaddr);
 
 }
 #endif
-
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index 4db84f969e9e..74d4ac6f2151 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -9,8 +9,8 @@
 /* See Documentation/DMA-mapping.txt */
 struct hppa_dma_ops {
 	int  (*dma_supported)(struct device *dev, u64 mask);
-	void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, int flag);
-	void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, int flag);
+	void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
+	void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
 	void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
 	dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
 	void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
@@ -49,14 +49,14 @@ extern struct hppa_dma_ops *hppa_dma_ops;
 
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   int flag)
+		   gfp_t flag)
 {
 	return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
 }
 
 static inline void *
 dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		      int flag)
+		      gfp_t flag)
 {
 	return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
 }
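The int to gfp_t switch above is part of the tree-wide gfp_t type-safety conversion: once gfp_t is a __bitwise type, passing a bare int to these allocators becomes a sparse/compiler-visible error. A hedged caller sketch (helper name and the fixed length are illustrative, not part of this header):

	#include <linux/dma-mapping.h>

	/* Hypothetical helper: allocate a coherent DMA buffer for a device.
	 * GFP_KERNEL is a gfp_t and now type-checks against the prototypes
	 * above; a bare int flag would not. */
	static void *alloc_dma_buf(struct device *dev, size_t len, dma_addr_t *dma)
	{
		return dma_alloc_coherent(dev, len, dma, GFP_KERNEL);
	}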
diff --git a/include/asm-parisc/errno.h b/include/asm-parisc/errno.h
index 08464c405471..e2f3ddc796be 100644
--- a/include/asm-parisc/errno.h
+++ b/include/asm-parisc/errno.h
@@ -114,6 +114,7 @@
 
 #define ENOTSUP		252	/* Function not implemented (POSIX.4 / HPUX) */
 #define ECANCELLED	253	/* aio request was canceled before complete (POSIX.4 / HPUX) */
+#define ECANCELED	ECANCELLED	/* SuSv3 and Solaris wants one 'L' */
 
 /* for robust mutexes */
 #define EOWNERDEAD	254	/* Owner died */
diff --git a/include/asm-parisc/grfioctl.h b/include/asm-parisc/grfioctl.h
index d3cfc0168fb1..6a910311b56b 100644
--- a/include/asm-parisc/grfioctl.h
+++ b/include/asm-parisc/grfioctl.h
@@ -69,6 +69,8 @@
69#define CRT_ID_TVRX S9000_ID_98765 /* TVRX (gto/falcon) */ 69#define CRT_ID_TVRX S9000_ID_98765 /* TVRX (gto/falcon) */
70#define CRT_ID_ARTIST S9000_ID_ARTIST /* Artist */ 70#define CRT_ID_ARTIST S9000_ID_ARTIST /* Artist */
71#define CRT_ID_SUMMIT 0x2FC1066B /* Summit FX2, FX4, FX6 ... */ 71#define CRT_ID_SUMMIT 0x2FC1066B /* Summit FX2, FX4, FX6 ... */
72#define CRT_ID_LEGO 0x35ACDA30 /* Lego FX5, FX10 ... */
73#define CRT_ID_PINNACLE 0x35ACDA16 /* Pinnacle FXe */
72 74
73/* structure for ioctl(GCDESCRIBE) */ 75/* structure for ioctl(GCDESCRIBE) */
74 76
diff --git a/include/asm-parisc/ide.h b/include/asm-parisc/ide.h
index 3243cf2cd227..b27bf7aeb256 100644
--- a/include/asm-parisc/ide.h
+++ b/include/asm-parisc/ide.h
@@ -22,7 +22,6 @@
 
 #define ide_request_irq(irq,hand,flg,dev,id)	request_irq((irq),(hand),(flg),(dev),(id))
 #define ide_free_irq(irq,dev_id)		free_irq((irq), (dev_id))
-#define ide_check_region(from,extent)		check_region((from), (extent))
 #define ide_request_region(from,extent,name)	request_region((from), (extent), (name))
 #define ide_release_region(from,extent)		release_region((from), (extent))
 /* Generic I/O and MEMIO string operations.  */
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index f876bdf22056..b0a30e2c9813 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -8,6 +8,7 @@
 #define _ASM_PARISC_IRQ_H
 
 #include <linux/config.h>
+#include <linux/cpumask.h>
 #include <asm/types.h>
 
 #define NO_IRQ		(-1)
@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
 extern int txn_claim_irq(int);
 extern unsigned int txn_alloc_data(unsigned int);
 extern unsigned long txn_alloc_addr(unsigned int);
+extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
 
 extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
-
-extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
+extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
 
 /* soft power switch support (power.c) */
 extern struct tasklet_struct power_tasklet;
diff --git a/include/asm-parisc/led.h b/include/asm-parisc/led.h
index 1ac8ab6c580d..efadfd543ec6 100644
--- a/include/asm-parisc/led.h
+++ b/include/asm-parisc/led.h
@@ -23,9 +23,6 @@
 
 #define LED_CMD_REG_NONE	0	/* NULL == no addr for the cmd register */
 
-/* led tasklet struct */
-extern struct tasklet_struct led_tasklet;
-
 /* register_led_driver() */
 int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg);
 
diff --git a/include/asm-parisc/mmzone.h b/include/asm-parisc/mmzone.h
index 595d3dce120a..ae039f4fd711 100644
--- a/include/asm-parisc/mmzone.h
+++ b/include/asm-parisc/mmzone.h
@@ -27,12 +27,6 @@ extern struct node_map_data node_data[];
 })
 #define node_localnr(pfn, nid)		((pfn) - node_start_pfn(nid))
 
-#define local_mapnr(kvaddr)						\
-({									\
-	unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT;		\
-	(__pfn - node_start_pfn(pfn_to_nid(__pfn)));			\
-})
-
 #define pfn_to_page(pfn)						\
 ({									\
 	unsigned long __pfn = (pfn);					\
diff --git a/include/asm-parisc/parisc-device.h b/include/asm-parisc/parisc-device.h
index ef69ab4b17a9..1d247e32a608 100644
--- a/include/asm-parisc/parisc-device.h
+++ b/include/asm-parisc/parisc-device.h
@@ -1,7 +1,7 @@
 #include <linux/device.h>
 
 struct parisc_device {
-	unsigned long   hpa;		/* Hard Physical Address */
+	struct resource hpa;		/* Hard Physical Address */
 	struct parisc_device_id id;
 	struct parisc_driver *driver;	/* Driver for this device */
 	char		name[80];	/* The hardware description */
@@ -39,6 +39,11 @@ struct parisc_driver {
 #define to_parisc_driver(d)	container_of(d, struct parisc_driver, drv)
 #define parisc_parent(d)	to_parisc_device(d->dev.parent)
 
+static inline char *parisc_pathname(struct parisc_device *d)
+{
+	return d->dev.bus_id;
+}
+
 static inline void
 parisc_set_drvdata(struct parisc_device *d, void *p)
 {
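With hpa promoted from a raw unsigned long to a struct resource, drivers pick the base address out of hpa.start rather than using hpa directly. A hedged sketch of the resulting call shape (the probe function, mapping length, and error handling are illustrative assumptions, not this header's API):

	/* Sketch: a driver mapping registers from the resource-typed hpa. */
	static int example_probe(struct parisc_device *dev)
	{
		void __iomem *regs = ioremap(dev->hpa.start, 4096);

		if (!regs)
			return -ENOMEM;
		printk(KERN_INFO "mapped device %s\n", parisc_pathname(dev));
		return 0;
	}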
diff --git a/include/asm-parisc/pci.h b/include/asm-parisc/pci.h
index d0b761f690b5..fa39d07d49e9 100644
--- a/include/asm-parisc/pci.h
+++ b/include/asm-parisc/pci.h
@@ -69,7 +69,7 @@ struct pci_hba_data {
 #define PCI_PORT_HBA(a)		((a) >> HBA_PORT_SPACE_BITS)
 #define PCI_PORT_ADDR(a)	((a) & (HBA_PORT_SPACE_SIZE - 1))
 
-#if CONFIG_64BIT
+#ifdef CONFIG_64BIT
 #define PCI_F_EXTEND		0xffffffff00000000UL
 #define PCI_IS_LMMIO(hba,a)	pci_is_lmmio(hba,a)
 
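That one-character fix matters: Kconfig booleans are either defined as 1 or not defined at all, so `#if CONFIG_64BIT` silently evaluates the undefined macro as 0 on 32-bit configs (and trips -Wundef where enabled), while `#ifdef` tests for existence. A minimal illustration:

	/* Sketch: why #ifdef is the right test for Kconfig booleans.
	 * On a 32-bit config CONFIG_64BIT is simply not defined. */

	#if CONFIG_64BIT	/* undefined macro: treated as 0, may warn */
	/* skipped on 32-bit, but -Wundef complains about the bare use */
	#endif

	#ifdef CONFIG_64BIT	/* correct: tests whether the macro exists */
	/* 64-bit-only definitions go here */
	#endif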
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 820c6e712cd7..b4554711c3e7 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -12,6 +12,7 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/mm.h>		/* for vm_area_struct */
 #include <asm/processor.h>
 #include <asm/cache.h>
 #include <asm/bitops.h>
@@ -418,7 +419,6 @@ extern void paging_init (void);
 
 #define PG_dcache_dirty         PG_arch_1
 
-struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 
 /* Encode and de-code a swap entry */
@@ -464,6 +464,7 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned
 
 extern spinlock_t pa_dbit_lock;
 
+struct mm_struct;
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t old_pte;
@@ -501,6 +502,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
+
 #define MK_IOSPACE_PFN(space, pfn)	(pfn)
 #define GET_IOSPACE(pfn)		0
 #define GET_PFN(pfn)			(pfn)
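The new pgprot_noncached() simply ORs the uncached bit into a page protection value. A hedged usage sketch, in the shape of a driver mmap() handler of this era (handler name is illustrative; io_remap_pfn_range is defined just above):

	/* Sketch: mapping device memory uncached into userspace. */
	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		return io_remap_pfn_range(vma, vma->vm_start,
					  vma->vm_pgoff,
					  vma->vm_end - vma->vm_start,
					  vma->vm_page_prot);
	}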
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h
index a9dfadd05658..aae40e8c3aa8 100644
--- a/include/asm-parisc/processor.h
+++ b/include/asm-parisc/processor.h
@@ -122,8 +122,27 @@ struct thread_struct {
 };
 
 /* Thread struct flags. */
+#define PARISC_UAC_NOPRINT	(1UL << 0)	/* see prctl and unaligned.c */
+#define PARISC_UAC_SIGBUS	(1UL << 1)
 #define PARISC_KERNEL_DEATH	(1UL << 31)	/* see die_if_kernel()... */
 
+#define PARISC_UAC_SHIFT	0
+#define PARISC_UAC_MASK		(PARISC_UAC_NOPRINT|PARISC_UAC_SIGBUS)
+
+#define SET_UNALIGN_CTL(task,value)                                       \
+        ({                                                                \
+        (task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \
+                                | (((value) << PARISC_UAC_SHIFT) &        \
+                                   PARISC_UAC_MASK));                     \
+        0;                                                                \
+        })
+
+#define GET_UNALIGN_CTL(task,addr)                                        \
+        ({                                                                \
+        put_user(((task)->thread.flags & PARISC_UAC_MASK)                 \
+                 >> PARISC_UAC_SHIFT, (int __user *) (addr));             \
+        })
+
 #define INIT_THREAD { \
 	regs:	{	gr: { 0, }, \
 			fr: { 0, }, \
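SET_UNALIGN_CTL/GET_UNALIGN_CTL are the per-arch hooks behind the generic PR_SET_UNALIGN/PR_GET_UNALIGN prctls; the PR_UNALIGN_* values line up with the PARISC_UAC_* flags above. A userspace sketch of how the flags get exercised:

	/* Sketch: controlling unaligned-access handling via prctl(2). */
	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		unsigned int cur;

		/* deliver SIGBUS for unaligned accesses instead of fixing up */
		prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0);

		prctl(PR_GET_UNALIGN, &cur, 0, 0, 0);
		printf("unaligned-access control: %#x\n", cur);
		return 0;
	}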
diff --git a/include/asm-parisc/psw.h b/include/asm-parisc/psw.h
index 51323029f377..4334d6ca2add 100644
--- a/include/asm-parisc/psw.h
+++ b/include/asm-parisc/psw.h
@@ -1,4 +1,7 @@
 #ifndef _PARISC_PSW_H
+
+#include <linux/config.h>
+
 #define	PSW_I	0x00000001
 #define	PSW_D	0x00000002
 #define	PSW_P	0x00000004
@@ -9,6 +12,16 @@
 #define	PSW_G	0x00000040	/* PA1.x only */
 #define PSW_O	0x00000080	/* PA2.0 only */
 
+/* ssm/rsm instructions number PSW_W and PSW_E differently */
+#define PSW_SM_I	PSW_I	/* Enable External Interrupts */
+#define PSW_SM_D	PSW_D
+#define PSW_SM_P	PSW_P
+#define PSW_SM_Q	PSW_Q	/* Enable Interrupt State Collection */
+#define PSW_SM_R	PSW_R	/* Enable Recover Counter Trap */
+#define PSW_SM_W	0x200	/* PA2.0 only : Enable Wide Mode */
+
+#define PSW_SM_QUIET	PSW_SM_R+PSW_SM_Q+PSW_SM_P+PSW_SM_D+PSW_SM_I
+
 #define PSW_CB	0x0000ff00
 
 #define	PSW_M	0x00010000
@@ -30,33 +43,21 @@
 #define	PSW_Z	0x40000000	/* PA1.x only */
 #define	PSW_Y	0x80000000	/* PA1.x only */
 
-#ifdef __LP64__
-#define PSW_HI_CB 0x000000ff    /* PA2.0 only */
+#ifdef CONFIG_64BIT
+#  define PSW_HI_CB 0x000000ff	/* PA2.0 only */
 #endif
 
-/* PSW bits to be used with ssm/rsm */
-#define PSW_SM_I	0x1
-#define PSW_SM_D	0x2
-#define PSW_SM_P	0x4
-#define PSW_SM_Q	0x8
-#define PSW_SM_R	0x10
-#define PSW_SM_F	0x20
-#define PSW_SM_G	0x40
-#define PSW_SM_O	0x80
-#define PSW_SM_E	0x100
-#define PSW_SM_W	0x200
-
-#ifdef __LP64__
-#  define USER_PSW      (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
-#  define KERNEL_PSW    (PSW_W | PSW_C | PSW_Q | PSW_P | PSW_D)
-#  define REAL_MODE_PSW (PSW_W | PSW_Q)
-#  define USER_PSW_MASK (PSW_W | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
-#  define USER_PSW_HI_MASK (PSW_HI_CB)
+#ifdef CONFIG_64BIT
+#  define USER_PSW_HI_MASK	PSW_HI_CB
+#  define WIDE_PSW		PSW_W
 #else
-#  define USER_PSW      (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
-#  define KERNEL_PSW    (PSW_C | PSW_Q | PSW_P | PSW_D)
-#  define REAL_MODE_PSW (PSW_Q)
-#  define USER_PSW_MASK (PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
+#  define WIDE_PSW		0
 #endif
 
+/* Used when setting up for rfi */
+#define KERNEL_PSW    (WIDE_PSW | PSW_C | PSW_Q | PSW_P | PSW_D)
+#define REAL_MODE_PSW (WIDE_PSW | PSW_Q)
+#define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
+#define USER_PSW      (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
+
 #endif
diff --git a/include/asm-parisc/ptrace.h b/include/asm-parisc/ptrace.h
index 3f428aa371a4..93f990e418f1 100644
--- a/include/asm-parisc/ptrace.h
+++ b/include/asm-parisc/ptrace.h
@@ -49,7 +49,7 @@ struct pt_regs {
 #define user_mode(regs)			(((regs)->iaoq[0] & 3) ? 1 : 0)
 #define user_space(regs)		(((regs)->iasq[1] != 0) ? 1 : 0)
 #define instruction_pointer(regs)	((regs)->iaoq[0] & ~3)
-#define profile_pc(regs) instruction_pointer(regs)
+unsigned long profile_pc(struct pt_regs *);
 extern void show_regs(struct pt_regs *);
 #endif
 
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h
index f78bb2e34538..c9ee41cd0707 100644
--- a/include/asm-parisc/semaphore.h
+++ b/include/asm-parisc/semaphore.h
@@ -49,9 +49,6 @@ struct semaphore {
 	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
 }
 
-#define __MUTEX_INITIALIZER(name) \
-	__SEMAPHORE_INITIALIZER(name,1)
-
 #define __DECLARE_SEMAPHORE_GENERIC(name,count) \
 	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
 
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 9413f67a540b..dbdbd2e9fdf9 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
 #define cpu_logical_map(cpu)	(cpu)
 
 extern void smp_send_reschedule(int cpu);
+extern void smp_send_all_nop(void);
 
 #endif /* !ASSEMBLY */
 
@@ -53,7 +54,11 @@ extern unsigned long cpu_present_mask;
 
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+static inline void smp_send_all_nop(void) { return; }
+
+#endif
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
 #define ANY_PROC_ID		0xFF		/* Any processor magic marker */
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 43eaa6e742e0..16c2ac075fc5 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -5,29 +5,31 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
- * since it only has load-and-zero. Moreover, at least on some PA processors,
- * the semaphore address has to be 16-byte aligned.
- */
-
 static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
 	return *a == 0;
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 #define __raw_spin_unlock_wait(x) \
 		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void __raw_spin_lock(raw_spinlock_t *x)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+					 unsigned long flags)
 {
 	volatile unsigned int *a;
 
 	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
-		while (*a == 0);
+		while (*a == 0)
+			if (flags & PSW_SM_I) {
+				local_irq_enable();
+				cpu_relax();
+				local_irq_disable();
+			} else
+				cpu_relax();
 	mb();
 }
 
@@ -65,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
 
 static  __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter++;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 static  __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter--;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 /* write_lock is less trivial.  We optimistically grab the lock and check
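The rewritten lock loop bounds interrupt latency: while spinning on a contended lock that was taken with interrupts disabled, it briefly re-enables interrupts between polls. A plain-C sketch of that shape (try_ldcw(), still_locked(), and the irqs_were_enabled flag are hypothetical stand-ins for the ldcw acquire, the plain read of the lock word, and the (flags & PSW_SM_I) test):

	/* Sketch: "spin with interrupts briefly re-enabled", as in
	 * __raw_spin_lock_flags() above. */
	static void spin_lock_flags_sketch(volatile unsigned int *a,
					   int irqs_were_enabled)
	{
		while (try_ldcw(a) == 0) {		/* acquire failed: word was 0 */
			while (still_locked(a)) {	/* spin on plain reads only */
				if (irqs_were_enabled) {
					local_irq_enable();	/* bound irq latency */
					cpu_relax();
					local_irq_disable();
				} else {
					cpu_relax();
				}
			}
		}
	}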
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h
index 785bba822fbf..d6b479bdb886 100644
--- a/include/asm-parisc/spinlock_types.h
+++ b/include/asm-parisc/spinlock_types.h
@@ -6,11 +6,15 @@
 #endif
 
 typedef struct {
+#ifdef CONFIG_PA20
+	volatile unsigned int slock;
+# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#else
 	volatile unsigned int lock[4];
+# define __RAW_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
+#endif
 } raw_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
-
 typedef struct {
 	raw_spinlock_t lock;
 	volatile int counter;
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index 26ff844a21c1..f3928d3a80cb 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -138,13 +138,7 @@ static inline void set_eiem(unsigned long val)
 #define set_wmb(var, value)		do { var = value; wmb(); } while (0)
 
 
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
-#define __ldcw(a) ({ \
-	unsigned __ret; \
-	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
-	__ret; \
-})
-
+#ifndef CONFIG_PA20
 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
    and GCC only guarantees 8-byte alignment for stack locals, we can't
    be assured of 16-byte alignment for atomic lock data even if we
@@ -152,37 +146,41 @@ static inline void set_eiem(unsigned long val)
    we use a struct containing an array of four ints for the atomic lock
    type and dynamically select the 16-byte aligned int from the array
    for the semaphore.  */
+
 #define __PA_LDCW_ALIGNMENT 16
 #define __ldcw_align(a) ({ \
 	unsigned long __ret = (unsigned long) &(a)->lock[0]; \
 	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
 	(volatile unsigned int *) __ret; \
 })
+#define LDCW	"ldcw"
 
-#ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
-#endif
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull" <jim.hull of hp.com>
+   I've attached a summary of the change, but basically, for PA 2.0, as
+   long as the ",CO" (coherent operation) completer is specified, then the
+   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+   ldcd). */
 
-#define KERNEL_START (0x10100000 - 0x1000)
+#define __PA_LDCW_ALIGNMENT 4
+#define __ldcw_align(a) ((volatile unsigned int *)a)
+#define LDCW	"ldcw,co"
 
-/* This is for the serialisation of PxTLB broadcasts.  At least on the
- * N class systems, only one PxTLB inter processor broadcast can be
- * active at any one time on the Merced bus.  This tlb purge
- * synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. */
-#ifdef CONFIG_SMP
-extern spinlock_t pa_tlb_lock;
-
-#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
-#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
-
-#else
-
-#define purge_tlb_start(x) do { } while(0)
-#define purge_tlb_end(x) do { } while (0)
+#endif /*!CONFIG_PA20*/
 
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+#define __ldcw(a) ({ \
+	unsigned __ret; \
+	__asm__ __volatile__(LDCW " 0(%1),%0" : "=r" (__ret) : "r" (a)); \
+	__ret; \
+})
+
+#ifdef CONFIG_SMP
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
 #endif
 
+#define KERNEL_START (0x10100000 - 0x1000)
 #define arch_align_stack(x) (x)
 
 #endif
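The ldcw story explains both the lock[4] array in spinlock_types.h and the ",co" completer above: ldcw atomically loads a word and clears it to zero, so 1 means unlocked and 0 means locked, and pre-PA2.0 parts require the word to be 16-byte aligned. A C model of the instruction's semantics (only a sketch; the real operation is a single atomic memory access):

	/* Sketch: what ldcw does, modeled non-atomically in C. */
	static unsigned int ldcw_model(volatile unsigned int *a)
	{
		unsigned int old = *a;	/* returns 1 iff we won the lock */
		*a = 0;			/* the word is left "locked" either way */
		return old;
	}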
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index eb27b78930e8..c9ec39c6fc6c 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -7,6 +7,20 @@
 #include <linux/mm.h>
 #include <asm/mmu_context.h>
 
+
+/* This is for the serialisation of PxTLB broadcasts.  At least on the
+ * N class systems, only one PxTLB inter processor broadcast can be
+ * active at any one time on the Merced bus.  This tlb purge
+ * synchronisation is fairly lightweight and harmless so we activate
+ * it on all SMP systems not just the N class.  We also need to have
+ * preemption disabled on uniprocessor machines, and spin_lock does that
+ * nicely.
+ */
+extern spinlock_t pa_tlb_lock;
+
+#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
+#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
+
 extern void flush_tlb_all(void);
 
 /*
@@ -64,29 +78,25 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 {
 	unsigned long npages;
 
-
 	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (npages >= 512)  /* XXX arbitrary, should be tuned */
+	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
 		flush_tlb_all();
 	else {
-
 		mtsp(vma->vm_mm->context,1);
+		purge_tlb_start();
 		if (split_tlb) {
-			purge_tlb_start();
 			while (npages--) {
 				pdtlb(start);
 				pitlb(start);
 				start += PAGE_SIZE;
 			}
-			purge_tlb_end();
 		} else {
-			purge_tlb_start();
 			while (npages--) {
 				pdtlb(start);
 				start += PAGE_SIZE;
 			}
-			purge_tlb_end();
 		}
+		purge_tlb_end();
 	}
 }
 
diff --git a/include/asm-parisc/types.h b/include/asm-parisc/types.h
index d21b9d0d63ea..34fdce361a5a 100644
--- a/include/asm-parisc/types.h
+++ b/include/asm-parisc/types.h
@@ -33,8 +33,10 @@ typedef unsigned long long __u64;
 
 #ifdef __LP64__
 #define BITS_PER_LONG 64
+#define SHIFT_PER_LONG 6
 #else
 #define BITS_PER_LONG 32
+#define SHIFT_PER_LONG 5
 #endif
 
 #ifndef __ASSEMBLY__
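SHIFT_PER_LONG is log2(BITS_PER_LONG), which is why bitops.h can drop its private copies: `nr >> SHIFT_PER_LONG` gives the word index and `nr & (BITS_PER_LONG - 1)` the bit index. A quick self-contained check of the relationship, using the 64-bit values:

	/* Sketch: the word/bit split used throughout bitops.h. */
	#include <assert.h>

	#define BITS_PER_LONG 64
	#define SHIFT_PER_LONG 6

	int main(void)
	{
		unsigned long nr = 200;

		assert((1UL << SHIFT_PER_LONG) == BITS_PER_LONG);
		assert((nr >> SHIFT_PER_LONG) == nr / BITS_PER_LONG);	  /* word 3 */
		assert((nr & (BITS_PER_LONG - 1)) == nr % BITS_PER_LONG); /* bit 8 */
		return 0;
	}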
diff --git a/include/asm-parisc/unistd.h b/include/asm-parisc/unistd.h
index 6a9f0cadff58..80b7b98c70a1 100644
--- a/include/asm-parisc/unistd.h
+++ b/include/asm-parisc/unistd.h
@@ -687,8 +687,8 @@
 #define __NR_shmget             (__NR_Linux + 194)
 #define __NR_shmctl             (__NR_Linux + 195)
 
-#define __NR_getpmsg		(__NR_Linux + 196) /* some people actually want streams */
-#define __NR_putpmsg		(__NR_Linux + 197) /* some people actually want streams */
+#define __NR_getpmsg		(__NR_Linux + 196) /* Somebody *wants* streams? */
+#define __NR_putpmsg		(__NR_Linux + 197)
 
 #define __NR_lstat64		(__NR_Linux + 198)
 #define __NR_truncate64		(__NR_Linux + 199)
@@ -755,8 +755,14 @@
 #define __NR_mbind		(__NR_Linux + 260)
 #define __NR_get_mempolicy	(__NR_Linux + 261)
 #define __NR_set_mempolicy	(__NR_Linux + 262)
+#define __NR_vserver		(__NR_Linux + 263)
+#define __NR_add_key		(__NR_Linux + 264)
+#define __NR_request_key	(__NR_Linux + 265)
+#define __NR_keyctl		(__NR_Linux + 266)
+#define __NR_ioprio_set		(__NR_Linux + 267)
+#define __NR_ioprio_get		(__NR_Linux + 268)
 
-#define __NR_Linux_syscalls	263
+#define __NR_Linux_syscalls	269
 
 #define HPUX_GATEWAY_ADDR       0xC0000004
 #define LINUX_GATEWAY_ADDR      0x100
@@ -807,10 +813,10 @@
 #define K_INLINE_SYSCALL(name, nr, args...)	({			\
 	long __sys_res;							\
 	{								\
-		register unsigned long __res asm("r28");		\
+		register unsigned long __res __asm__("r28");		\
 		K_LOAD_ARGS_##nr(args)					\
 		/* FIXME: HACK stw/ldw r19 around syscall */		\
-		asm volatile(						\
+		__asm__ volatile(					\
 			K_STW_ASM_PIC					\
 			"	ble  0x100(%%sr2, %%r0)\n"		\
 			"	ldi  %1, %%r20\n"			\
@@ -1005,7 +1011,6 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
 						struct pt_regs *regs);
 int sys_vfork(struct pt_regs *regs);
 int sys_pipe(int *fildes);
-long sys_ptrace(long request, pid_t pid, long addr, long data);
 struct sigaction;
 asmlinkage long sys_rt_sigaction(int sig,
 				const struct sigaction __user *act,
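The six new syscall numbers are reachable from userspace through syscall(2) once the C library's headers carry them. A hedged sketch for the ioprio pair (it assumes <asm/unistd.h> on the build machine is this header, so __NR_ioprio_get resolves; the IOPRIO_WHO_PROCESS value of 1 and the "0 = current process" convention come from the generic ioprio interface, not from this diff):

	/* Sketch: invoking one of the newly numbered syscalls. */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		/* ioprio_get(IOPRIO_WHO_PROCESS = 1, 0 = current process) */
		long prio = syscall(__NR_ioprio_get, 1, 0);

		printf("ioprio_get -> %ld\n", prio);
		return 0;
	}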