Diffstat (limited to 'include/asm-parisc')
-rw-r--r--  include/asm-parisc/assembly.h         71
-rw-r--r--  include/asm-parisc/bitops.h          290
-rw-r--r--  include/asm-parisc/errno.h             1
-rw-r--r--  include/asm-parisc/grfioctl.h          2
-rw-r--r--  include/asm-parisc/led.h               3
-rw-r--r--  include/asm-parisc/parisc-device.h     7
-rw-r--r--  include/asm-parisc/pci.h               2
-rw-r--r--  include/asm-parisc/pgtable.h           2
-rw-r--r--  include/asm-parisc/processor.h        19
-rw-r--r--  include/asm-parisc/psw.h              51
-rw-r--r--  include/asm-parisc/ptrace.h            2
-rw-r--r--  include/asm-parisc/spinlock.h          5
-rw-r--r--  include/asm-parisc/spinlock_types.h    8
-rw-r--r--  include/asm-parisc/system.h           48
-rw-r--r--  include/asm-parisc/tlbflush.h         29
-rw-r--r--  include/asm-parisc/types.h             2
-rw-r--r--  include/asm-parisc/unistd.h           16
17 files changed, 318 insertions, 240 deletions
diff --git a/include/asm-parisc/assembly.h b/include/asm-parisc/assembly.h
index 30b023411fef..3ce3440d1b0c 100644
--- a/include/asm-parisc/assembly.h
+++ b/include/asm-parisc/assembly.h
@@ -21,7 +21,9 @@
 #ifndef _PARISC_ASSEMBLY_H
 #define _PARISC_ASSEMBLY_H
 
-#ifdef __LP64__
+#define CALLEE_FLOAT_FRAME_SIZE 80
+
+#ifdef CONFIG_64BIT
 #define LDREG ldd
 #define STREG std
 #define LDREGX ldd,s
@@ -30,8 +32,8 @@
 #define SHRREG shrd
 #define RP_OFFSET 16
 #define FRAME_SIZE 128
-#define CALLEE_SAVE_FRAME_SIZE 144
-#else
+#define CALLEE_REG_FRAME_SIZE 144
+#else /* CONFIG_64BIT */
 #define LDREG ldw
 #define STREG stw
 #define LDREGX ldwx,s
@@ -40,9 +42,11 @@
 #define SHRREG shr
 #define RP_OFFSET 20
 #define FRAME_SIZE 64
-#define CALLEE_SAVE_FRAME_SIZE 128
+#define CALLEE_REG_FRAME_SIZE 128
 #endif
 
+#define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)
+
 #ifdef CONFIG_PA20
 #define BL b,l
 # ifdef CONFIG_64BIT
@@ -300,9 +304,35 @@
 	fldd,mb -8(\regs), %fr0
 	.endm
 
+	.macro	callee_save_float
+	fstd,ma %fr12, 8(%r30)
+	fstd,ma %fr13, 8(%r30)
+	fstd,ma %fr14, 8(%r30)
+	fstd,ma %fr15, 8(%r30)
+	fstd,ma %fr16, 8(%r30)
+	fstd,ma %fr17, 8(%r30)
+	fstd,ma %fr18, 8(%r30)
+	fstd,ma %fr19, 8(%r30)
+	fstd,ma %fr20, 8(%r30)
+	fstd,ma %fr21, 8(%r30)
+	.endm
+
+	.macro	callee_rest_float
+	fldd,mb -8(%r30), %fr21
+	fldd,mb -8(%r30), %fr20
+	fldd,mb -8(%r30), %fr19
+	fldd,mb -8(%r30), %fr18
+	fldd,mb -8(%r30), %fr17
+	fldd,mb -8(%r30), %fr16
+	fldd,mb -8(%r30), %fr15
+	fldd,mb -8(%r30), %fr14
+	fldd,mb -8(%r30), %fr13
+	fldd,mb -8(%r30), %fr12
+	.endm
+
 #ifdef __LP64__
 	.macro	callee_save
-	std,ma %r3, CALLEE_SAVE_FRAME_SIZE(%r30)
+	std,ma %r3, CALLEE_REG_FRAME_SIZE(%r30)
 	mfctl %cr27, %r3
 	std %r4, -136(%r30)
 	std %r5, -128(%r30)
@@ -340,13 +370,13 @@
 	ldd -128(%r30), %r5
 	ldd -136(%r30), %r4
 	mtctl %r3, %cr27
-	ldd,mb -CALLEE_SAVE_FRAME_SIZE(%r30), %r3
+	ldd,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3
 	.endm
 
 #else /* ! __LP64__ */
 
 	.macro	callee_save
-	stw,ma %r3, CALLEE_SAVE_FRAME_SIZE(%r30)
+	stw,ma %r3, CALLEE_REG_FRAME_SIZE(%r30)
 	mfctl %cr27, %r3
 	stw %r4, -124(%r30)
 	stw %r5, -120(%r30)
@@ -384,7 +414,7 @@
 	ldw -120(%r30), %r5
 	ldw -124(%r30), %r4
 	mtctl %r3, %cr27
-	ldw,mb -CALLEE_SAVE_FRAME_SIZE(%r30), %r3
+	ldw,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3
 	.endm
 #endif /* ! __LP64__ */
 
@@ -450,5 +480,30 @@
 	REST_CR (%cr22, PT_PSW (\regs))
 	.endm
 
+
+	/* First step to create a "relied upon translation"
+	 * See PA 2.0 Arch. page F-4 and F-5.
+	 *
+	 * The ssm was originally necessary due to a "PCxT bug".
+	 * But someone decided it needed to be added to the architecture
+	 * and this "feature" went into rev3 of PA-RISC 1.1 Arch Manual.
+	 * It's been carried forward into PA 2.0 Arch as well. :^(
+	 *
+	 * "ssm 0,%r0" is a NOP with side effects (prefetch barrier).
+	 * rsm/ssm prevents the ifetch unit from speculatively fetching
+	 * instructions past this line in the code stream.
+	 * PA 2.0 processor will single step all insn in the same QUAD (4 insn).
+	 */
+	.macro	pcxt_ssm_bug
+	rsm	PSW_SM_I,%r0
+	nop	/* 1 */
+	nop	/* 2 */
+	nop	/* 3 */
+	nop	/* 4 */
+	nop	/* 5 */
+	nop	/* 6 */
+	nop	/* 7 */
+	.endm
+
 #endif /* __ASSEMBLY__ */
 #endif
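A note on the numbers above: the new float area holds the ten callee-saved FP registers %fr12..%fr21, stored with fstd (8 bytes each), which is where CALLEE_FLOAT_FRAME_SIZE = 80 comes from; the combined CALLEE_SAVE_FRAME_SIZE therefore becomes 224 on 64-bit and 208 on 32-bit. A quick arithmetic check, as an illustrative C sketch (not part of the patch):

    #include <stdio.h>

    #define CALLEE_FLOAT_FRAME_SIZE 80   /* %fr12..%fr21, fstd = 8 bytes each */

    int main(void)
    {
            int nregs = 21 - 12 + 1;     /* fr12..fr21 inclusive -> 10 */
            printf("float frame: %d regs * 8 = %d bytes\n", nregs, nregs * 8);
            printf("64-bit CALLEE_SAVE_FRAME_SIZE = %d\n", 144 + CALLEE_FLOAT_FRAME_SIZE);
            printf("32-bit CALLEE_SAVE_FRAME_SIZE = %d\n", 128 + CALLEE_FLOAT_FRAME_SIZE);
            return 0;
    }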
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index af7db694b22d..55b98c67fd82 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -2,7 +2,7 @@
 #define _PARISC_BITOPS_H
 
 #include <linux/compiler.h>
-#include <asm/spinlock.h>
+#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
 
@@ -12,193 +12,157 @@
  * to include/asm-i386/bitops.h or kerneldoc
  */
 
-#ifdef __LP64__
-# define SHIFT_PER_LONG 6
-#ifndef BITS_PER_LONG
-# define BITS_PER_LONG 64
-#endif
-#else
-# define SHIFT_PER_LONG 5
-#ifndef BITS_PER_LONG
-# define BITS_PER_LONG 32
-#endif
-#endif
-
-#define CHOP_SHIFTCOUNT(x) ((x) & (BITS_PER_LONG - 1))
+#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
 
 
 #define smp_mb__before_clear_bit()      smp_mb()
 #define smp_mb__after_clear_bit()       smp_mb()
 
-static __inline__ void set_bit(int nr, volatile unsigned long * address)
+/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
+ * on use of volatile and __*_bit() (set/clear/change):
+ * *_bit() want use of volatile.
+ * __*_bit() are "relaxed" and don't use spinlock or volatile.
+ */
+
+static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	*addr |= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __set_bit(int nr, volatile unsigned long * address)
+static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	*addr |= mask;
+	*m |= 1UL << CHOP_SHIFTCOUNT(nr);
 }
 
-static __inline__ void clear_bit(int nr, volatile unsigned long * address)
+static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	*addr &= ~mask;
+	*addr &= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * address)
+static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	*addr &= ~mask;
+	*m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
 }
 
-static __inline__ void change_bit(int nr, volatile unsigned long * address)
+static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
 	*addr ^= mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __change_bit(int nr, volatile unsigned long * address)
+static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
+	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	*addr ^= mask;
+	*m ^= 1UL << CHOP_SHIFTCOUNT(nr);
 }
 
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr |= mask;
+	oldbit = *addr;
+	*addr = oldbit | mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
+	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr |= mask;
+	oldbit = *addr;
+	*addr = oldbit | mask;
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr &= ~mask;
+	oldbit = *addr;
+	*addr = oldbit & ~mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
+	unsigned long oldbit;
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr &= ~mask;
+	oldbit = *addr;
+	*addr = oldbit & ~mask;
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * address)
+static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long oldbit;
 	unsigned long flags;
 
 	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
 	_atomic_spin_lock_irqsave(addr, flags);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr ^= mask;
+	oldbit = *addr;
+	*addr = oldbit ^ mask;
 	_atomic_spin_unlock_irqrestore(addr, flags);
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
 {
-	unsigned long mask;
-	unsigned long *addr = (unsigned long *) address;
-	int oldbit;
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
+	unsigned long oldbit;
 
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
-	oldbit = (*addr & mask) ? 1 : 0;
-	*addr ^= mask;
+	oldbit = *addr;
+	*addr = oldbit ^ mask;
 
-	return oldbit;
+	return (oldbit & mask) ? 1 : 0;
 }
 
 static __inline__ int test_bit(int nr, const volatile unsigned long *address)
 {
-	unsigned long mask;
-	const unsigned long *addr = (const unsigned long *)address;
-
-	addr += (nr >> SHIFT_PER_LONG);
-	mask = 1L << CHOP_SHIFTCOUNT(nr);
+	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+	const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);
 
 	return !!(*addr & mask);
 }
@@ -229,7 +193,7 @@ static __inline__ unsigned long __ffs(unsigned long x)
 	unsigned long ret;
 
 	__asm__(
-#if BITS_PER_LONG > 32
+#ifdef __LP64__
 		" ldi 63,%1\n"
 		" extrd,u,*<> %0,63,32,%%r0\n"
 		" extrd,u,*TR %0,31,32,%0\n" /* move top 32-bits down */
@@ -304,14 +268,7 @@ static __inline__ int fls(int x)
  * hweightN: returns the hamming weight (i.e. the number
  * of bits set) of a N-bit word
  */
-#define hweight64(x) \
-({ \
-	unsigned long __x = (x); \
-	unsigned int __w; \
-	__w = generic_hweight32((unsigned int) __x); \
-	__w += generic_hweight32((unsigned int) (__x>>32)); \
-	__w; \
-})
+#define hweight64(x) generic_hweight64(x)
 #define hweight32(x) generic_hweight32(x)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x) generic_hweight8(x)
@@ -324,7 +281,13 @@ static __inline__ int fls(int x)
  */
 static inline int sched_find_first_bit(const unsigned long *b)
 {
-#ifndef __LP64__
+#ifdef __LP64__
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 64;
+	return __ffs(b[2]) + 128;
+#else
 	if (unlikely(b[0]))
 		return __ffs(b[0]);
 	if (unlikely(b[1]))
@@ -334,14 +297,6 @@ static inline int sched_find_first_bit(const unsigned long *b)
 	if (b[3])
 		return __ffs(b[3]) + 96;
 	return __ffs(b[4]) + 128;
-#else
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(((unsigned int)b[1])))
-		return __ffs(b[1]) + 64;
-	if (b[1] >> 32)
-		return __ffs(b[1] >> 32) + 96;
-	return __ffs(b[2]) + 128;
 #endif
 }
 
@@ -391,7 +346,7 @@ found_middle:
 
 static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
 {
-	const unsigned long *p = addr + (offset >> 6);
+	const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
 	unsigned long tmp;
 
@@ -445,71 +400,90 @@ found_middle:
  * test_and_{set,clear}_bit guarantee atomicity without
  * disabling interrupts.
  */
-#ifdef __LP64__
-#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
-#else
-#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_set_bit_atomic(l,nr,addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
-#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
-#endif
 
-#endif /* __KERNEL__ */
+/* '3' is bits per byte */
+#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
 
-static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
-{
-	__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
+#define ext2_test_bit(nr, addr) \
+		test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_set_bit(nr, addr) \
+		__test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_clear_bit(nr, addr) \
+		__test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
 
-	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
-}
+#define ext2_set_bit_atomic(l,nr,addr) \
+		test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+#define ext2_clear_bit_atomic(l,nr,addr) \
+		test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
+
+#endif /* __KERNEL__ */
 
-/*
- * This implementation of ext2_find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
- */
 
 #define ext2_find_first_zero_bit(addr, size) \
 	ext2_find_next_zero_bit((addr), (size), 0)
 
-extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
-	unsigned long size, unsigned long offset)
+/* include/linux/byteorder does not support "unsigned long" type */
+static inline unsigned long ext2_swabp(unsigned long * x)
 {
-	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
-	unsigned int result = offset & ~31UL;
-	unsigned int tmp;
+#ifdef __LP64__
+	return (unsigned long) __swab64p((u64 *) x);
+#else
+	return (unsigned long) __swab32p((u32 *) x);
+#endif
+}
+
+/* include/linux/byteorder doesn't support "unsigned long" type */
+static inline unsigned long ext2_swab(unsigned long y)
+{
+#ifdef __LP64__
+	return (unsigned long) __swab64((u64) y);
+#else
+	return (unsigned long) __swab32((u32) y);
+#endif
+}
+
+static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+{
+	unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
+	unsigned long result = offset & ~(BITS_PER_LONG - 1);
+	unsigned long tmp;
 
 	if (offset >= size)
 		return size;
 	size -= result;
-	offset &= 31UL;
+	offset &= (BITS_PER_LONG - 1UL);
 	if (offset) {
-		tmp = cpu_to_le32p(p++);
-		tmp |= ~0UL >> (32-offset);
-		if (size < 32)
+		tmp = ext2_swabp(p++);
+		tmp |= (~0UL >> (BITS_PER_LONG - offset));
+		if (size < BITS_PER_LONG)
 			goto found_first;
-		if (tmp != ~0U)
+		if (~tmp)
 			goto found_middle;
-		size -= 32;
-		result += 32;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
 	}
-	while (size >= 32) {
-		if ((tmp = cpu_to_le32p(p++)) != ~0U)
-			goto found_middle;
-		result += 32;
-		size -= 32;
+
+	while (size & ~(BITS_PER_LONG - 1)) {
+		if (~(tmp = *(p++)))
+			goto found_middle_swap;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
 	}
 	if (!size)
 		return result;
-	tmp = cpu_to_le32p(p);
+	tmp = ext2_swabp(p);
 found_first:
-	tmp |= ~0U << size;
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size; /* Nope. Skip ffz */
 found_middle:
 	return result + ffz(tmp);
+
+found_middle_swap:
+	return result + ffz(ext2_swab(tmp));
 }
 
+
 /* Bitmap functions for the minix filesystem. */
 #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
 #define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
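Two pieces of arithmetic carry this whole file: every bit op derives a word index with nr >> SHIFT_PER_LONG and a mask with CHOP_SHIFTCOUNT(nr), and the ext2 ops map little-endian on-disk bit numbers onto a big-endian native long by XOR-ing with LE_BYTE_ADDR (0x38 on 64-bit, 0x18 on 32-bit, exactly the constants the old #ifdef pair hard-coded). An illustrative standalone C demo of both calculations (names and the demo itself are mine, not from the patch):

    #include <stdio.h>

    #define BITS_PER_LONG  (8 * sizeof(unsigned long))
    #define SHIFT_PER_LONG (BITS_PER_LONG == 64 ? 6 : 5)
    #define CHOP_SHIFTCOUNT(x) (((unsigned long)(x)) & (BITS_PER_LONG - 1))
    #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)

    int main(void)
    {
            unsigned long nr = 77;
            /* word index and mask, exactly as set_bit() derives them */
            unsigned long word = nr >> SHIFT_PER_LONG;
            unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
            printf("bit %lu -> word %lu, mask %#lx\n", nr, word, mask);

            /* ext2 bitmaps are little-endian byte streams; XOR-ing the
               bit number with LE_BYTE_ADDR flips the byte order within
               one native big-endian long. */
            printf("LE_BYTE_ADDR = %#lx, ext2 bit 0 -> native bit %lu\n",
                   (unsigned long)LE_BYTE_ADDR, 0UL ^ LE_BYTE_ADDR);
            return 0;
    }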
diff --git a/include/asm-parisc/errno.h b/include/asm-parisc/errno.h
index 08464c405471..e2f3ddc796be 100644
--- a/include/asm-parisc/errno.h
+++ b/include/asm-parisc/errno.h
@@ -114,6 +114,7 @@
 
 #define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
 #define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
+#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */
 
 /* for robust mutexes */
 #define EOWNERDEAD 254 /* Owner died */
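Both spellings now resolve to 253 on parisc, so code written against the SUSv3 name compiles unchanged. A minimal hedged sketch (hypothetical caller, not from the patch):

    #include <errno.h>

    /* rv is a return value from an aio-style call */
    static int was_cancelled(int rv)
    {
            return rv < 0 && errno == ECANCELED;  /* == ECANCELLED on parisc */
    }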
diff --git a/include/asm-parisc/grfioctl.h b/include/asm-parisc/grfioctl.h
index d3cfc0168fb1..6a910311b56b 100644
--- a/include/asm-parisc/grfioctl.h
+++ b/include/asm-parisc/grfioctl.h
@@ -69,6 +69,8 @@
 #define CRT_ID_TVRX S9000_ID_98765 /* TVRX (gto/falcon) */
 #define CRT_ID_ARTIST S9000_ID_ARTIST /* Artist */
 #define CRT_ID_SUMMIT 0x2FC1066B /* Summit FX2, FX4, FX6 ... */
+#define CRT_ID_LEGO 0x35ACDA30 /* Lego FX5, FX10 ... */
+#define CRT_ID_PINNACLE 0x35ACDA16 /* Pinnacle FXe */
 
 /* structure for ioctl(GCDESCRIBE) */
 
diff --git a/include/asm-parisc/led.h b/include/asm-parisc/led.h
index 1ac8ab6c580d..efadfd543ec6 100644
--- a/include/asm-parisc/led.h
+++ b/include/asm-parisc/led.h
@@ -23,9 +23,6 @@
 
 #define LED_CMD_REG_NONE 0 /* NULL == no addr for the cmd register */
 
-/* led tasklet struct */
-extern struct tasklet_struct led_tasklet;
-
 /* register_led_driver() */
 int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg);
 
diff --git a/include/asm-parisc/parisc-device.h b/include/asm-parisc/parisc-device.h
index ef69ab4b17a9..1d247e32a608 100644
--- a/include/asm-parisc/parisc-device.h
+++ b/include/asm-parisc/parisc-device.h
@@ -1,7 +1,7 @@
 #include <linux/device.h>
 
 struct parisc_device {
-	unsigned long hpa;		/* Hard Physical Address */
+	struct resource hpa;		/* Hard Physical Address */
 	struct parisc_device_id id;
 	struct parisc_driver *driver;	/* Driver for this device */
 	char name[80];			/* The hardware description */
@@ -39,6 +39,11 @@ struct parisc_driver {
 #define to_parisc_driver(d) container_of(d, struct parisc_driver, drv)
 #define parisc_parent(d) to_parisc_device(d->dev.parent)
 
+static inline char *parisc_pathname(struct parisc_device *d)
+{
+	return d->dev.bus_id;
+}
+
 static inline void
 parisc_set_drvdata(struct parisc_device *d, void *p)
 {
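Turning hpa from a bare unsigned long into a struct resource means every driver that treated it as an address must now read the resource's start field. A hedged sketch of the driver-side fallout (example_probe and the 4096 length are hypothetical; struct resource's .start member is standard):

    /* Sketch only: with hpa now a struct resource, the raw address
     * moves to dev->hpa.start, and parisc_pathname() gives the bus id. */
    static int example_probe(struct parisc_device *dev)
    {
            void __iomem *base = ioremap(dev->hpa.start, 4096); /* was: dev->hpa */
            if (!base)
                    return -ENOMEM;
            printk(KERN_INFO "%s found at 0x%lx\n",
                   parisc_pathname(dev), (unsigned long)dev->hpa.start);
            iounmap(base);
            return 0;
    }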
diff --git a/include/asm-parisc/pci.h b/include/asm-parisc/pci.h
index d0b761f690b5..fa39d07d49e9 100644
--- a/include/asm-parisc/pci.h
+++ b/include/asm-parisc/pci.h
@@ -69,7 +69,7 @@ struct pci_hba_data {
 #define PCI_PORT_HBA(a) ((a) >> HBA_PORT_SPACE_BITS)
 #define PCI_PORT_ADDR(a) ((a) & (HBA_PORT_SPACE_SIZE - 1))
 
-#if CONFIG_64BIT
+#ifdef CONFIG_64BIT
 #define PCI_F_EXTEND 0xffffffff00000000UL
 #define PCI_IS_LMMIO(hba,a) pci_is_lmmio(hba,a)
 
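The one-character fix matters because boolean Kconfig options are either defined (as 1) or not defined at all: #if on an undefined identifier silently evaluates to 0 and trips -Wundef, while #ifdef is the idiomatic presence test. Illustrative preprocessor behaviour (my example, not from the patch):

    #undef CONFIG_64BIT
    #if CONFIG_64BIT        /* undefined: evaluates as 0, warns under -Wundef */
    #error "never reached"
    #endif

    #define CONFIG_64BIT 1
    #ifdef CONFIG_64BIT     /* the idiomatic test for a bool Kconfig option */
    /* 64-bit-only definitions go here */
    #endif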
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 820c6e712cd7..c28fb6f48c6c 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -501,6 +501,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
+
 #define MK_IOSPACE_PFN(space, pfn) (pfn)
 #define GET_IOSPACE(pfn) 0
 #define GET_PFN(pfn) (pfn)
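pgprot_noncached() simply ORs _PAGE_NO_CACHE into the page protection, which is what drivers need before mapping device registers to user space. A hedged usage sketch (example_mmap is a hypothetical mmap method; io_remap_pfn_range is the macro defined just above):

    /* Sketch: mark a register window uncacheable before mapping it. */
    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
    }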
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h
index a9dfadd05658..aae40e8c3aa8 100644
--- a/include/asm-parisc/processor.h
+++ b/include/asm-parisc/processor.h
@@ -122,8 +122,27 @@ struct thread_struct {
 };
 
 /* Thread struct flags. */
+#define PARISC_UAC_NOPRINT	(1UL << 0)	/* see prctl and unaligned.c */
+#define PARISC_UAC_SIGBUS	(1UL << 1)
 #define PARISC_KERNEL_DEATH	(1UL << 31)	/* see die_if_kernel()... */
 
+#define PARISC_UAC_SHIFT	0
+#define PARISC_UAC_MASK		(PARISC_UAC_NOPRINT|PARISC_UAC_SIGBUS)
+
+#define SET_UNALIGN_CTL(task,value) \
+({ \
+	(task)->thread.flags = (((task)->thread.flags & ~PARISC_UAC_MASK) \
+				| (((value) << PARISC_UAC_SHIFT) & \
+				   PARISC_UAC_MASK)); \
+	0; \
+})
+
+#define GET_UNALIGN_CTL(task,addr) \
+({ \
+	put_user(((task)->thread.flags & PARISC_UAC_MASK) \
+		 >> PARISC_UAC_SHIFT, (int __user *) (addr)); \
+})
+
 #define INIT_THREAD { \
 	regs: { gr: { 0, }, \
 		fr: { 0, }, \
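SET_UNALIGN_CTL/GET_UNALIGN_CTL are the per-architecture hooks behind the generic prctl(PR_SET_UNALIGN)/prctl(PR_GET_UNALIGN) path, and the PR_UNALIGN_NOPRINT/PR_UNALIGN_SIGBUS values line up with the two new flag bits. A hedged userspace sketch exercising them:

    #include <stdio.h>
    #include <sys/prctl.h>   /* PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, ... */

    int main(void)
    {
            unsigned int ctl;
            /* stop the kernel from logging unaligned-access fixups */
            prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);
            prctl(PR_GET_UNALIGN, &ctl);
            printf("unaligned-access control: %u\n", ctl);
            return 0;
    }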
diff --git a/include/asm-parisc/psw.h b/include/asm-parisc/psw.h
index 51323029f377..4334d6ca2add 100644
--- a/include/asm-parisc/psw.h
+++ b/include/asm-parisc/psw.h
@@ -1,4 +1,7 @@
 #ifndef _PARISC_PSW_H
+
+#include <linux/config.h>
+
 #define PSW_I 0x00000001
 #define PSW_D 0x00000002
 #define PSW_P 0x00000004
@@ -9,6 +12,16 @@
 #define PSW_G 0x00000040 /* PA1.x only */
 #define PSW_O 0x00000080 /* PA2.0 only */
 
+/* ssm/rsm instructions number PSW_W and PSW_E differently */
+#define PSW_SM_I	PSW_I	/* Enable External Interrupts */
+#define PSW_SM_D	PSW_D
+#define PSW_SM_P	PSW_P
+#define PSW_SM_Q	PSW_Q	/* Enable Interrupt State Collection */
+#define PSW_SM_R	PSW_R	/* Enable Recover Counter Trap */
+#define PSW_SM_W	0x200	/* PA2.0 only : Enable Wide Mode */
+
+#define PSW_SM_QUIET	PSW_SM_R+PSW_SM_Q+PSW_SM_P+PSW_SM_D+PSW_SM_I
+
 #define PSW_CB 0x0000ff00
 
 #define PSW_M 0x00010000
@@ -30,33 +43,21 @@
 #define PSW_Z 0x40000000 /* PA1.x only */
 #define PSW_Y 0x80000000 /* PA1.x only */
 
-#ifdef __LP64__
-#define PSW_HI_CB 0x000000ff /* PA2.0 only */
+#ifdef CONFIG_64BIT
+# define PSW_HI_CB 0x000000ff /* PA2.0 only */
 #endif
 
-/* PSW bits to be used with ssm/rsm */
-#define PSW_SM_I	0x1
-#define PSW_SM_D	0x2
-#define PSW_SM_P	0x4
-#define PSW_SM_Q	0x8
-#define PSW_SM_R	0x10
-#define PSW_SM_F	0x20
-#define PSW_SM_G	0x40
-#define PSW_SM_O	0x80
-#define PSW_SM_E	0x100
-#define PSW_SM_W	0x200
-
-#ifdef __LP64__
-# define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
-# define KERNEL_PSW (PSW_W | PSW_C | PSW_Q | PSW_P | PSW_D)
-# define REAL_MODE_PSW (PSW_W | PSW_Q)
-# define USER_PSW_MASK (PSW_W | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
-# define USER_PSW_HI_MASK (PSW_HI_CB)
-#else
-# define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
-# define KERNEL_PSW (PSW_C | PSW_Q | PSW_P | PSW_D)
-# define REAL_MODE_PSW (PSW_Q)
-# define USER_PSW_MASK (PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
+#ifdef CONFIG_64BIT
+# define USER_PSW_HI_MASK	PSW_HI_CB
+# define WIDE_PSW		PSW_W
+#else
+# define WIDE_PSW		0
 #endif
 
+/* Used when setting up for rfi */
+#define KERNEL_PSW	(WIDE_PSW | PSW_C | PSW_Q | PSW_P | PSW_D)
+#define REAL_MODE_PSW	(WIDE_PSW | PSW_Q)
+#define USER_PSW_MASK	(WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
+#define USER_PSW	(PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
+
 #endif
diff --git a/include/asm-parisc/ptrace.h b/include/asm-parisc/ptrace.h
index 3f428aa371a4..93f990e418f1 100644
--- a/include/asm-parisc/ptrace.h
+++ b/include/asm-parisc/ptrace.h
@@ -49,7 +49,7 @@ struct pt_regs {
 #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0)
 #define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0)
 #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3)
-#define profile_pc(regs) instruction_pointer(regs)
+unsigned long profile_pc(struct pt_regs *);
 extern void show_regs(struct pt_regs *);
 #endif
 
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 43eaa6e742e0..7c3f406a746a 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -5,11 +5,6 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
- * since it only has load-and-zero. Moreover, at least on some PA processors,
- * the semaphore address has to be 16-byte aligned.
- */
-
 static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h
index 785bba822fbf..d6b479bdb886 100644
--- a/include/asm-parisc/spinlock_types.h
+++ b/include/asm-parisc/spinlock_types.h
@@ -6,11 +6,15 @@
 #endif
 
 typedef struct {
+#ifdef CONFIG_PA20
+	volatile unsigned int slock;
+# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#else
 	volatile unsigned int lock[4];
+# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+#endif
 } raw_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
-
 typedef struct {
 	raw_spinlock_t lock;
 	volatile int counter;
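The split matters because the unlocked initialiser must match the struct shape: one naturally aligned word on PA 2.0 (CONFIG_PA20), four words otherwise so that __ldcw_align() in system.h can pick the 16-byte-aligned element at run time. A minimal hedged sketch of what a static definition expands to (example_lock is a hypothetical name):

    /* PA 2.0:       { 1 }             -- single slock word        */
    /* older CPUs:   { { 1, 1, 1, 1 } } -- lock[4], align at runtime */
    static raw_spinlock_t example_lock = __RAW_SPIN_LOCK_UNLOCKED;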
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index 26ff844a21c1..f3928d3a80cb 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -138,13 +138,7 @@ static inline void set_eiem(unsigned long val)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
-#define __ldcw(a) ({ \
-	unsigned __ret; \
-	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
-	__ret; \
-})
-
+#ifndef CONFIG_PA20
 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
    and GCC only guarantees 8-byte alignment for stack locals, we can't
    be assured of 16-byte alignment for atomic lock data even if we
@@ -152,37 +146,41 @@ static inline void set_eiem(unsigned long val)
    we use a struct containing an array of four ints for the atomic lock
    type and dynamically select the 16-byte aligned int from the array
    for the semaphore. */
+
 #define __PA_LDCW_ALIGNMENT 16
 #define __ldcw_align(a) ({ \
 	unsigned long __ret = (unsigned long) &(a)->lock[0]; \
 	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
 	(volatile unsigned int *) __ret; \
 })
+#define LDCW "ldcw"
 
-#ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
-#endif
+#else /*CONFIG_PA20*/
+/* From: "Jim Hull" <jim.hull of hp.com>
+   I've attached a summary of the change, but basically, for PA 2.0, as
+   long as the ",CO" (coherent operation) completer is specified, then the
+   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+   ldcd). */
 
-#define KERNEL_START (0x10100000 - 0x1000)
+#define __PA_LDCW_ALIGNMENT 4
+#define __ldcw_align(a) ((volatile unsigned int *)a)
+#define LDCW "ldcw,co"
 
-/* This is for the serialisation of PxTLB broadcasts. At least on the
- * N class systems, only one PxTLB inter processor broadcast can be
- * active at any one time on the Merced bus. This tlb purge
- * synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. */
-#ifdef CONFIG_SMP
-extern spinlock_t pa_tlb_lock;
+#endif /*!CONFIG_PA20*/
 
-#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
-#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
-
-#else
-
-#define purge_tlb_start(x) do { } while(0)
-#define purge_tlb_end(x) do { } while (0)
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+#define __ldcw(a) ({ \
+	unsigned __ret; \
+	__asm__ __volatile__(LDCW " 0(%1),%0" : "=r" (__ret) : "r" (a)); \
+	__ret; \
+})
 
+#ifdef CONFIG_SMP
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
 #endif
 
+#define KERNEL_START (0x10100000 - 0x1000)
 #define arch_align_stack(x) (x)
 
 #endif
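How __ldcw_align() and the parameterised LDCW string fit together: on PA 2.0 the "ldcw,co" completer relaxes the 16-byte alignment rule, so the align step collapses to a cast, while older CPUs still scan lock[4] for the aligned word. A hedged sketch of a lock built on top of them (example_* names are mine; the 1-unlocked/0-locked convention is the one stated in the comment removed from spinlock.h, since ldcw is load-and-zero):

    /* Sketch only, not the kernel's __raw_spin_* implementation. */
    static inline int example_trylock(raw_spinlock_t *x)
    {
            volatile unsigned int *a = __ldcw_align(x); /* plain cast on PA 2.0 */
            return __ldcw(a) != 0;   /* old value 1 -> we took the lock */
    }

    static inline void example_unlock(raw_spinlock_t *x)
    {
            volatile unsigned int *a = __ldcw_align(x);
            *a = 1;                  /* store 1 to release */
    }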
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index eb27b78930e8..84af4ab1fe51 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -7,6 +7,26 @@
 #include <linux/mm.h>
 #include <asm/mmu_context.h>
 
+
+/* This is for the serialisation of PxTLB broadcasts. At least on the
+ * N class systems, only one PxTLB inter processor broadcast can be
+ * active at any one time on the Merced bus. This tlb purge
+ * synchronisation is fairly lightweight and harmless so we activate
+ * it on all SMP systems not just the N class. */
+#ifdef CONFIG_SMP
+extern spinlock_t pa_tlb_lock;
+
+#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
+#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
+
+#else
+
+#define purge_tlb_start(x) do { } while(0)
+#define purge_tlb_end(x) do { } while (0)
+
+#endif
+
+
 extern void flush_tlb_all(void);
 
 /*
@@ -64,29 +84,26 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 {
 	unsigned long npages;
 
-
 	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (npages >= 512)  /* XXX arbitrary, should be tuned */
+	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
 		flush_tlb_all();
 	else {
 
 		mtsp(vma->vm_mm->context,1);
+		purge_tlb_start();
 		if (split_tlb) {
-			purge_tlb_start();
 			while (npages--) {
 				pdtlb(start);
 				pitlb(start);
 				start += PAGE_SIZE;
 			}
-			purge_tlb_end();
 		} else {
-			purge_tlb_start();
 			while (npages--) {
 				pdtlb(start);
 				start += PAGE_SIZE;
 			}
-			purge_tlb_end();
 		}
+		purge_tlb_end();
 	}
 }
 
diff --git a/include/asm-parisc/types.h b/include/asm-parisc/types.h
index d21b9d0d63ea..34fdce361a5a 100644
--- a/include/asm-parisc/types.h
+++ b/include/asm-parisc/types.h
@@ -33,8 +33,10 @@ typedef unsigned long long __u64;
 
 #ifdef __LP64__
 #define BITS_PER_LONG 64
+#define SHIFT_PER_LONG 6
 #else
 #define BITS_PER_LONG 32
+#define SHIFT_PER_LONG 5
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/include/asm-parisc/unistd.h b/include/asm-parisc/unistd.h
index 6a9f0cadff58..e7a620c5c5e6 100644
--- a/include/asm-parisc/unistd.h
+++ b/include/asm-parisc/unistd.h
@@ -687,8 +687,8 @@
 #define __NR_shmget (__NR_Linux + 194)
 #define __NR_shmctl (__NR_Linux + 195)
 
-#define __NR_getpmsg (__NR_Linux + 196) /* some people actually want streams */
-#define __NR_putpmsg (__NR_Linux + 197) /* some people actually want streams */
+#define __NR_getpmsg (__NR_Linux + 196) /* Somebody *wants* streams? */
+#define __NR_putpmsg (__NR_Linux + 197)
 
 #define __NR_lstat64 (__NR_Linux + 198)
 #define __NR_truncate64 (__NR_Linux + 199)
@@ -755,8 +755,14 @@
 #define __NR_mbind (__NR_Linux + 260)
 #define __NR_get_mempolicy (__NR_Linux + 261)
 #define __NR_set_mempolicy (__NR_Linux + 262)
+#define __NR_vserver (__NR_Linux + 263)
+#define __NR_add_key (__NR_Linux + 264)
+#define __NR_request_key (__NR_Linux + 265)
+#define __NR_keyctl (__NR_Linux + 266)
+#define __NR_ioprio_set (__NR_Linux + 267)
+#define __NR_ioprio_get (__NR_Linux + 268)
 
-#define __NR_Linux_syscalls 263
+#define __NR_Linux_syscalls 269
 
 #define HPUX_GATEWAY_ADDR 0xC0000004
 #define LINUX_GATEWAY_ADDR 0x100
@@ -807,10 +813,10 @@
 #define K_INLINE_SYSCALL(name, nr, args...) ({ \
 	long __sys_res; \
 	{ \
-		register unsigned long __res asm("r28"); \
+		register unsigned long __res __asm__("r28"); \
 		K_LOAD_ARGS_##nr(args) \
 		/* FIXME: HACK stw/ldw r19 around syscall */ \
-		asm volatile( \
+		__asm__ volatile( \
 		K_STW_ASM_PIC \
 		" ble 0x100(%%sr2, %%r0)\n" \
 		" ldi %1, %%r20\n" \
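Until libc catches up, the newly numbered calls are reachable through syscall(2). A hedged userspace sketch for ioprio_set (the IOPRIO_* constants shown are the generic Linux values and are assumptions here, not part of this header):

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define IOPRIO_WHO_PROCESS 1
    #define IOPRIO_CLASS_BE    2
    #define IOPRIO_PRIO_VALUE(cls, data) (((cls) << 13) | (data))

    int main(void)
    {
            /* __NR_ioprio_set == __NR_Linux + 267 after this patch */
            long rc = syscall(__NR_ioprio_set, IOPRIO_WHO_PROCESS, 0,
                              IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
            printf("ioprio_set -> %ld\n", rc);
            return 0;
    }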