Diffstat (limited to 'include/asm-parisc')
 include/asm-parisc/atomic.h      |   3
 include/asm-parisc/bitops.h      | 286
 include/asm-parisc/cache.h       |   4
 include/asm-parisc/cacheflush.h  |  17
 include/asm-parisc/compat.h      |   5
 include/asm-parisc/io.h          | 113
 include/asm-parisc/local.h       |  16
 include/asm-parisc/mmzone.h      |  17
 include/asm-parisc/page.h        |  61
 include/asm-parisc/pci.h         |   5
 include/asm-parisc/pdc.h         |   2
 include/asm-parisc/pdc_chassis.h |   5
 include/asm-parisc/poll.h        |   1
 include/asm-parisc/spinlock.h    |  16
 include/asm-parisc/thread_info.h |   3
 15 files changed, 109 insertions(+), 445 deletions(-)
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 4dc7253ff5d0..403ea97316cf 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -210,6 +210,8 @@ static __inline__ int atomic_read(const atomic_t *v)
 
 #define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
 
+#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
+
 #define ATOMIC_INIT(i)	((atomic_t) { (i) })
 
 #define smp_mb__before_atomic_dec()	smp_mb()
@@ -267,6 +269,7 @@ atomic64_read(const atomic64_t *v)
 
 #define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
 #define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)
 
 #endif /* __LP64__ */
 
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index 15d8c2b51584..900561922c4c 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -35,13 +35,6 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
-{
-	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
-
-	*m |= 1UL << CHOP_SHIFTCOUNT(nr);
-}
-
 static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
@@ -53,13 +46,6 @@ static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
-{
-	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
-
-	*m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
-}
-
 static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -71,13 +57,6 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 	_atomic_spin_unlock_irqrestore(addr, flags);
 }
 
-static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
-{
-	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);
-
-	*m ^= 1UL << CHOP_SHIFTCOUNT(nr);
-}
-
 static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -93,18 +72,6 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
-{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
-	unsigned long oldbit;
-	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
-
-	oldbit = *addr;
-	*addr = oldbit | mask;
-
-	return (oldbit & mask) ? 1 : 0;
-}
-
 static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -120,18 +87,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
-{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
-	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
-	unsigned long oldbit;
-
-	oldbit = *addr;
-	*addr = oldbit & ~mask;
-
-	return (oldbit & mask) ? 1 : 0;
-}
-
 static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
@@ -147,25 +102,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 	return (oldbit & mask) ? 1 : 0;
 }
 
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
-{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
-	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
-	unsigned long oldbit;
-
-	oldbit = *addr;
-	*addr = oldbit ^ mask;
-
-	return (oldbit & mask) ? 1 : 0;
-}
-
-static __inline__ int test_bit(int nr, const volatile unsigned long *address)
-{
-	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
-	const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);
-
-	return !!(*addr & mask);
-}
+#include <asm-generic/bitops/non-atomic.h>
 
 #ifdef __KERNEL__
 
@@ -219,8 +156,7 @@ static __inline__ unsigned long __ffs(unsigned long x)
 	return ret;
 }
 
-/* Undefined if no bit is zero. */
-#define ffz(x)	__ffs(~x)
+#include <asm-generic/bitops/ffz.h>
 
 /*
  * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
@@ -263,155 +199,22 @@ static __inline__ int fls(int x)
 
 	return ret;
 }
-#define fls64(x)   generic_fls64(x)
 
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
-#ifdef __LP64__
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
-#else
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
-		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
-#endif
-}
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/sched.h>
 
 #endif /* __KERNEL__ */
 
-/*
- * This implementation of find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h.
- */
-#define find_first_zero_bit(addr, size) \
-	find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
-{
-	const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
-	unsigned long result = offset & ~(BITS_PER_LONG-1);
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= (BITS_PER_LONG-1);
-	if (offset) {
-		tmp = *(p++);
-		tmp |= ~0UL >> (BITS_PER_LONG-offset);
-		if (size < BITS_PER_LONG)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= BITS_PER_LONG;
-		result += BITS_PER_LONG;
-	}
-	while (size & ~(BITS_PER_LONG -1)) {
-		if (~(tmp = *(p++)))
-			goto found_middle;
-		result += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-found_first:
-	tmp |= ~0UL << size;
-found_middle:
-	return result + ffz(tmp);
-}
-
-static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
-{
-	const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
-	unsigned long result = offset & ~(BITS_PER_LONG-1);
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= (BITS_PER_LONG-1);
-	if (offset) {
-		tmp = *(p++);
-		tmp &= (~0UL << offset);
-		if (size < BITS_PER_LONG)
-			goto found_first;
-		if (tmp)
-			goto found_middle;
-		size -= BITS_PER_LONG;
-		result += BITS_PER_LONG;
-	}
-	while (size & ~(BITS_PER_LONG-1)) {
-		if ((tmp = *(p++)))
-			goto found_middle;
-		result += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp &= (~0UL >> (BITS_PER_LONG - size));
-	if (tmp == 0UL)	/* Are any bits set? */
-		return result + size;	/* Nope. */
-found_middle:
-	return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
-	find_next_bit((addr), (size), 0)
-
-#define _EXT2_HAVE_ASM_BITOPS_
+#include <asm-generic/bitops/find.h>
 
 #ifdef __KERNEL__
-/*
- * test_and_{set,clear}_bit guarantee atomicity without
- * disabling interrupts.
- */
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
 
 /* '3' is bits per byte */
 #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
 
-#define ext2_test_bit(nr, addr) \
-			test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
-#define ext2_set_bit(nr, addr) \
-		__test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr) \
-		__test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
-
 #define ext2_set_bit_atomic(l,nr,addr) \
 		test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
 #define ext2_clear_bit_atomic(l,nr,addr) \
@@ -419,77 +222,6 @@ found_middle:
 
 #endif /* __KERNEL__ */
 
-
-#define ext2_find_first_zero_bit(addr, size) \
-	ext2_find_next_zero_bit((addr), (size), 0)
-
-/* include/linux/byteorder does not support "unsigned long" type */
-static inline unsigned long ext2_swabp(unsigned long * x)
-{
-#ifdef __LP64__
-	return (unsigned long) __swab64p((u64 *) x);
-#else
-	return (unsigned long) __swab32p((u32 *) x);
-#endif
-}
-
-/* include/linux/byteorder doesn't support "unsigned long" type */
-static inline unsigned long ext2_swab(unsigned long y)
-{
-#ifdef __LP64__
-	return (unsigned long) __swab64((u64) y);
-#else
-	return (unsigned long) __swab32((u32) y);
-#endif
-}
-
-static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
-{
-	unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
-	unsigned long result = offset & ~(BITS_PER_LONG - 1);
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= (BITS_PER_LONG - 1UL);
-	if (offset) {
-		tmp = ext2_swabp(p++);
-		tmp |= (~0UL >> (BITS_PER_LONG - offset));
-		if (size < BITS_PER_LONG)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= BITS_PER_LONG;
-		result += BITS_PER_LONG;
-	}
-
-	while (size & ~(BITS_PER_LONG - 1)) {
-		if (~(tmp = *(p++)))
-			goto found_middle_swap;
-		result += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-	}
-	if (!size)
-		return result;
-	tmp = ext2_swabp(p);
-found_first:
-	tmp |= ~0UL << size;
-	if (tmp == ~0UL)	/* Are any bits zero? */
-		return result + size; /* Nope. Skip ffz */
-found_middle:
-	return result + ffz(tmp);
-
-found_middle_swap:
-	return result + ffz(ext2_swab(tmp));
-}
-
-
-/* Bitmap functions for the minix filesystem.  */
-#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
-#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
+#include <asm-generic/bitops/minix-le.h>
 
 #endif /* _PARISC_BITOPS_H */
diff --git a/include/asm-parisc/cache.h b/include/asm-parisc/cache.h
index 93f179f13ce8..c831665473cb 100644
--- a/include/asm-parisc/cache.h
+++ b/include/asm-parisc/cache.h
@@ -29,6 +29,8 @@
 
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 extern void flush_data_cache_local(void *);  /* flushes local data-cache only */
 extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */
 #ifdef CONFIG_SMP
@@ -46,7 +48,7 @@ extern void flush_user_icache_range_asm(unsigned long, unsigned long);
 extern void flush_kernel_icache_range_asm(unsigned long, unsigned long);
 extern void flush_user_dcache_range_asm(unsigned long, unsigned long);
 extern void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
-extern void flush_kernel_dcache_page(void *);
+extern void flush_kernel_dcache_page_asm(void *);
 extern void flush_kernel_icache_page(void *);
 extern void disable_sr_hashing(void);   /* turns off space register hashing */
 extern void disable_sr_hashing_asm(int); /* low level support for above */
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index c53af9ff41b5..76b6b7d6046a 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -62,7 +62,7 @@ extern void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_unlock(mapping) \
 	write_unlock_irq(&(mapping)->tree_lock)
 
-#define flush_icache_page(vma,page)	do { flush_kernel_dcache_page(page_address(page)); flush_kernel_icache_page(page_address(page)); } while (0)
+#define flush_icache_page(vma,page)	do { flush_kernel_dcache_page(page); flush_kernel_icache_page(page_address(page)); } while (0)
 
 #define flush_icache_range(s,e)		do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)
 
@@ -184,6 +184,21 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 
 }
 
+static inline void
+flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+	if (PageAnon(page))
+		flush_user_dcache_page(vmaddr);
+}
+#define ARCH_HAS_FLUSH_ANON_PAGE
+
+static inline void
+flush_kernel_dcache_page(struct page *page)
+{
+	flush_kernel_dcache_page_asm(page_address(page));
+}
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 #endif
diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h
index 38b918feead9..289624d8b2d4 100644
--- a/include/asm-parisc/compat.h
+++ b/include/asm-parisc/compat.h
@@ -138,6 +138,11 @@ static inline void __user *compat_ptr(compat_uptr_t uptr)
 	return (void __user *)(unsigned long)uptr;
 }
 
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+	return (u32)(unsigned long)uptr;
+}
+
 static __inline__ void __user *compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = &current->thread.regs;
diff --git a/include/asm-parisc/io.h b/include/asm-parisc/io.h
index 0db00adc942a..29da31194b91 100644
--- a/include/asm-parisc/io.h
+++ b/include/asm-parisc/io.h
@@ -25,35 +25,11 @@ extern unsigned long parisc_vmerge_max_size;
  * eg dev->hpa or 0xfee00000.
  */
 
-#ifdef CONFIG_DEBUG_IOREMAP
-#ifdef CONFIG_64BIT
-#define NYBBLE_SHIFT 60
-#else
-#define NYBBLE_SHIFT 28
-#endif
-extern void gsc_bad_addr(unsigned long addr);
-extern void __raw_bad_addr(const volatile void __iomem *addr);
-#define gsc_check_addr(addr)					\
-	if ((addr >> NYBBLE_SHIFT) != 0xf) {			\
-		gsc_bad_addr(addr);				\
-		addr |= 0xfUL << NYBBLE_SHIFT;			\
-	}
-#define __raw_check_addr(addr)					\
-	if (((unsigned long)addr >> NYBBLE_SHIFT) != 0xe)	\
-		__raw_bad_addr(addr);				\
-	addr = (void __iomem *)((unsigned long)addr | (0xfUL << NYBBLE_SHIFT));
-#else
-#define gsc_check_addr(addr)
-#define __raw_check_addr(addr)
-#endif
-
 static inline unsigned char gsc_readb(unsigned long addr)
 {
 	long flags;
 	unsigned char ret;
 
-	gsc_check_addr(addr);
-
 	__asm__ __volatile__(
 	"	rsm	2,%0\n"
 	"	ldbx	0(%2),%1\n"
@@ -68,8 +44,6 @@ static inline unsigned short gsc_readw(unsigned long addr)
 	long flags;
 	unsigned short ret;
 
-	gsc_check_addr(addr);
-
 	__asm__ __volatile__(
 	"	rsm	2,%0\n"
 	"	ldhx	0(%2),%1\n"
@@ -83,8 +57,6 @@ static inline unsigned int gsc_readl(unsigned long addr)
 {
 	u32 ret;
 
-	gsc_check_addr(addr);
-
 	__asm__ __volatile__(
 	"	ldwax	0(%1),%0\n"
 	: "=r" (ret) : "r" (addr) );
@@ -95,7 +67,6 @@ static inline unsigned int gsc_readl(unsigned long addr)
 static inline unsigned long long gsc_readq(unsigned long addr)
 {
 	unsigned long long ret;
-	gsc_check_addr(addr);
 
 #ifdef __LP64__
 	__asm__ __volatile__(
@@ -112,8 +83,6 @@ static inline unsigned long long gsc_readq(unsigned long addr)
 static inline void gsc_writeb(unsigned char val, unsigned long addr)
 {
 	long flags;
-	gsc_check_addr(addr);
-
 	__asm__ __volatile__(
 	"	rsm	2,%0\n"
 	"	stbs	%1,0(%2)\n"
@@ -124,8 +93,6 @@ static inline void gsc_writeb(unsigned char val, unsigned long addr)
 static inline void gsc_writew(unsigned short val, unsigned long addr)
 {
 	long flags;
-	gsc_check_addr(addr);
-
 	__asm__ __volatile__(
 	"	rsm	2,%0\n"
 	"	sths	%1,0(%2)\n"
@@ -135,8 +102,6 @@ static inline void gsc_writew(unsigned short val, unsigned long addr)
 
 static inline void gsc_writel(unsigned int val, unsigned long addr)
 {
-	gsc_check_addr(addr);
-
 	__asm__ __volatile__(
 	"	stwas	%0,0(%1)\n"
 	: :  "r" (val), "r" (addr) );
@@ -144,8 +109,6 @@ static inline void gsc_writel(unsigned int val, unsigned long addr)
 
 static inline void gsc_writeq(unsigned long long val, unsigned long addr)
 {
-	gsc_check_addr(addr);
-
 #ifdef __LP64__
 	__asm__ __volatile__(
 	"	stda	%0,0(%1)\n"
@@ -180,14 +143,7 @@ extern inline void * ioremap_nocache(unsigned long offset, unsigned long size)
 
 extern void iounmap(void __iomem *addr);
 
-/*
- * USE_HPPA_IOREMAP is the magic flag to enable or disable real ioremap()
- * functionality.  It's currently disabled because it may not work on some
- * machines.
- */
-#define USE_HPPA_IOREMAP 0
 
-#if USE_HPPA_IOREMAP
 static inline unsigned char __raw_readb(const volatile void __iomem *addr)
 {
 	return (*(volatile unsigned char __force *) (addr));
@@ -221,57 +177,6 @@ static inline void __raw_writeq(unsigned long long b, volatile void __iomem *add
 {
 	*(volatile unsigned long long __force *) addr = b;
 }
-#else /* !USE_HPPA_IOREMAP */
-static inline unsigned char __raw_readb(const volatile void __iomem *addr)
-{
-	__raw_check_addr(addr);
-
-	return gsc_readb((unsigned long) addr);
-}
-static inline unsigned short __raw_readw(const volatile void __iomem *addr)
-{
-	__raw_check_addr(addr);
-
-	return gsc_readw((unsigned long) addr);
-}
-static inline unsigned int __raw_readl(const volatile void __iomem *addr)
-{
-	__raw_check_addr(addr);
-
-	return gsc_readl((unsigned long) addr);
-}
-static inline unsigned long long __raw_readq(const volatile void __iomem *addr)
-{
-	__raw_check_addr(addr);
-
-	return gsc_readq((unsigned long) addr);
-}
-
-static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr)
-{
-	__raw_check_addr(addr);
-
-	gsc_writeb(b, (unsigned long) addr);
-}
-static inline void __raw_writew(unsigned short b, volatile void __iomem *addr)
-{
-	__raw_check_addr(addr);
-
-	gsc_writew(b, (unsigned long) addr);
-}
-static inline void __raw_writel(unsigned int b, volatile void __iomem *addr)
-{
-	__raw_check_addr(addr);
-
-	gsc_writel(b, (unsigned long) addr);
-}
-static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr)
-{
-	__raw_check_addr(addr);
-
-	gsc_writeq(b, (unsigned long) addr);
-}
-#endif /* !USE_HPPA_IOREMAP */
 
 /* readb can never be const, so use __fswab instead of le*_to_cpu */
 #define readb(addr) __raw_readb(addr)
@@ -294,22 +199,6 @@ void memset_io(volatile void __iomem *addr, unsigned char val, int count);
 void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
 void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
 
-/* Support old drivers which don't ioremap.
- * NB this interface is scheduled to disappear in 2.5
- */
-
-#define __isa_addr(x)	(void __iomem *)(F_EXTEND(0xfc000000) | (x))
-#define isa_readb(a)	readb(__isa_addr(a))
-#define isa_readw(a)	readw(__isa_addr(a))
-#define isa_readl(a)	readl(__isa_addr(a))
-#define isa_writeb(b,a)	writeb((b), __isa_addr(a))
-#define isa_writew(b,a)	writew((b), __isa_addr(a))
-#define isa_writel(b,a)	writel((b), __isa_addr(a))
-#define isa_memset_io(a,b,c)	memset_io(__isa_addr(a), (b), (c))
-#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a), __isa_addr(b), (c))
-#define isa_memcpy_toio(a,b,c)	memcpy_toio(__isa_addr(a), (b), (c))
-
-
 /*
  * XXX - We don't have csum_partial_copy_fromio() yet, so we cheat here and
  * just copy it.  The net code will then do the checksum later. Presently
@@ -318,8 +207,6 @@ void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
 
 #define eth_io_copy_and_sum(skb,src,len,unused) \
   memcpy_fromio((skb)->data,(src),(len))
-#define isa_eth_io_copy_and_sum(skb,src,len,unused) \
-  isa_memcpy_fromio((skb)->data,(src),(len))
 
 /* Port-space IO */
 
diff --git a/include/asm-parisc/local.h b/include/asm-parisc/local.h
index 892b3b2c4962..d0f550912755 100644
--- a/include/asm-parisc/local.h
+++ b/include/asm-parisc/local.h
@@ -4,16 +4,16 @@
 #include <linux/percpu.h>
 #include <asm/atomic.h>
 
-typedef atomic_t local_t;
+typedef atomic_long_t local_t;
 
-#define LOCAL_INIT(i)	ATOMIC_INIT(i)
-#define local_read(v)	atomic_read(v)
-#define local_set(v,i)	atomic_set(v,i)
+#define LOCAL_INIT(i)	ATOMIC_LONG_INIT(i)
+#define local_read(v)	atomic_long_read(v)
+#define local_set(v,i)	atomic_long_set(v,i)
 
-#define local_inc(v)	atomic_inc(v)
-#define local_dec(v)	atomic_dec(v)
-#define local_add(i, v)	atomic_add(i, v)
-#define local_sub(i, v)	atomic_sub(i, v)
+#define local_inc(v)	atomic_long_inc(v)
+#define local_dec(v)	atomic_long_dec(v)
+#define local_add(i, v)	atomic_long_add(i, v)
+#define local_sub(i, v)	atomic_long_sub(i, v)
 
 #define __local_inc(v)	((v)->counter++)
 #define __local_dec(v)	((v)->counter--)
diff --git a/include/asm-parisc/mmzone.h b/include/asm-parisc/mmzone.h
index ae039f4fd711..ceb9b73199d1 100644
--- a/include/asm-parisc/mmzone.h
+++ b/include/asm-parisc/mmzone.h
@@ -25,23 +25,6 @@ extern struct node_map_data node_data[];
 	pg_data_t *__pgdat = NODE_DATA(nid);				\
 	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
 })
-#define node_localnr(pfn, nid)		((pfn) - node_start_pfn(nid))
-
-#define pfn_to_page(pfn)						\
-({									\
-	unsigned long __pfn = (pfn);					\
-	int __node  = pfn_to_nid(__pfn);				\
-	&NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)];	\
-})
-
-#define page_to_pfn(pg)							\
-({									\
-	struct page *__page = pg;					\
-	struct zone *__zone = page_zone(__page);			\
-	BUG_ON(__zone == NULL);						\
-	(unsigned long)(__page - __zone->zone_mem_map)			\
-		+ __zone->zone_start_pfn;				\
-})
 
 /* We have these possible memory map layouts:
  * Astro: 0-3.75, 67.75-68, 4-64
diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h
index 4a6752b0afed..45e02aa5bf4b 100644
--- a/include/asm-parisc/page.h
+++ b/include/asm-parisc/page.h
@@ -26,7 +26,7 @@ static inline void
 copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg)
 {
 	copy_user_page_asm(vto, vfrom);
-	flush_kernel_dcache_page(vto);
+	flush_kernel_dcache_page_asm(vto);
 	/* XXX: ppc flushes icache too, should we? */
 }
 
@@ -40,14 +40,19 @@ clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 /*
  * These are used to make use of C type-checking..
  */
-#ifdef __LP64__
-typedef struct { unsigned long pte; } pte_t;
-#else
-typedef struct {
-	unsigned long pte;
-	unsigned long flags;
-} pte_t;
+#define STRICT_MM_TYPECHECKS
+#ifdef STRICT_MM_TYPECHECKS
+typedef struct { unsigned long pte;
+#if !defined(CONFIG_64BIT)
+	unsigned long future_flags;
+ /* XXX: it's possible to remove future_flags and change BITS_PER_PTE_ENTRY
+	to 2, but then strangely the identical 32bit kernel boots on a
+	c3000(pa20), but not any longer on a 715(pa11).
+	Still investigating... HelgeD.
+  */
 #endif
+} pte_t; /* either 32 or 64bit */
+
 /* NOTE: even on 64 bits, these entries are __u32 because we allocate
  * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
 typedef struct { __u32 pmd; } pmd_t;
@@ -55,25 +60,44 @@ typedef struct { __u32 pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
 #define pte_val(x)	((x).pte)
-#ifdef __LP64__
-#define pte_flags(x)	(*(__u32 *)&((x).pte))
-#else
-#define pte_flags(x)	((x).flags)
-#endif
-
 /* These do not work lvalues, so make sure we don't use them as such. */
 #define pmd_val(x)	((x).pmd + 0)
 #define pgd_val(x)	((x).pgd + 0)
 #define pgprot_val(x)	((x).pgprot)
 
-#define __pmd_val_set(x,n) (x).pmd = (n)
-#define __pgd_val_set(x,n) (x).pgd = (n)
-
 #define __pte(x)	((pte_t) { (x) } )
 #define __pmd(x)	((pmd_t) { (x) } )
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
+#define __pmd_val_set(x,n) (x).pmd = (n)
+#define __pgd_val_set(x,n) (x).pgd = (n)
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef __u32 pmd_t;
+typedef __u32 pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)	(x)
+#define pmd_val(x)	(x)
+#define pgd_val(x)	(x)
+#define pgprot_val(x)	(x)
+
+#define __pte(x)	(x)
+#define __pmd(x)	(x)
+#define __pgd(x)	(x)
+#define __pgprot(x)	(x)
+
+#define __pmd_val_set(x,n) (x) = (n)
+#define __pgd_val_set(x,n) (x) = (n)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+
 typedef struct __physmem_range {
 	unsigned long start_pfn;
 	unsigned long pages;       /* PAGE_SIZE pages */
@@ -130,8 +154,6 @@ extern int npmem_ranges;
 #define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
 
 #ifndef CONFIG_DISCONTIGMEM
-#define pfn_to_page(pfn)	(mem_map + (pfn))
-#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
 #endif /* CONFIG_DISCONTIGMEM */
 
@@ -152,6 +174,7 @@ extern int npmem_ranges;
 
 #endif /* __KERNEL__ */
 
+#include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
 #endif /* _PARISC_PAGE_H */
diff --git a/include/asm-parisc/pci.h b/include/asm-parisc/pci.h
index fe7f6a2f5aa7..77bbafb7f73e 100644
--- a/include/asm-parisc/pci.h
+++ b/include/asm-parisc/pci.h
@@ -289,4 +289,9 @@ static inline void pcibios_add_platform_entries(struct pci_dev *dev)
 {
 }
 
+static inline void pcibios_penalize_isa_irq(int irq, int active)
+{
+	/* We don't need to penalize isa irq's */
+}
+
 #endif /* __ASM_PARISC_PCI_H */
diff --git a/include/asm-parisc/pdc.h b/include/asm-parisc/pdc.h
index 8e23e4c674f6..0a3face6c480 100644
--- a/include/asm-parisc/pdc.h
+++ b/include/asm-parisc/pdc.h
@@ -333,7 +333,7 @@ struct pdc_model { /* for PDC_MODEL */
 	unsigned long curr_key;
 };
 
-/* Values for PDC_MODEL_CAPABILITES non-equivalent virtual aliasing support */
+/* Values for PDC_MODEL_CAPABILITIES non-equivalent virtual aliasing support */
 
 #define PDC_MODEL_IOPDIR_FDC		(1 << 2)	/* see sba_iommu.c */
 #define PDC_MODEL_NVA_MASK		(3 << 4)
diff --git a/include/asm-parisc/pdc_chassis.h b/include/asm-parisc/pdc_chassis.h
index adac9ac2743f..a609273dc6bf 100644
--- a/include/asm-parisc/pdc_chassis.h
+++ b/include/asm-parisc/pdc_chassis.h
@@ -6,9 +6,8 @@
  *
  *
  * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/include/asm-parisc/poll.h b/include/asm-parisc/poll.h
index 1c1da86934cf..20e4d03c74cb 100644
--- a/include/asm-parisc/poll.h
+++ b/include/asm-parisc/poll.h
@@ -16,6 +16,7 @@
 #define POLLWRBAND	0x0200
 #define POLLMSG		0x0400
 #define POLLREMOVE	0x1000
+#define POLLRDHUP	0x2000
 
 struct pollfd {
 	int fd;
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 16c2ac075fc5..a93960e232cf 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -134,14 +134,22 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 	return 1;
 }
 
-static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw)
+/*
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
 {
-	return rw->counter > 0;
+	return rw->counter >= 0;
 }
 
-static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw)
+/*
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
 {
-	return rw->counter < 0;
+	return !rw->counter;
 }
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-parisc/thread_info.h b/include/asm-parisc/thread_info.h
index ac32f140b83a..f2f83b04cd8b 100644
--- a/include/asm-parisc/thread_info.h
+++ b/include/asm-parisc/thread_info.h
@@ -49,7 +49,8 @@ struct thread_info {
 
 #endif /* !__ASSEMBLY */
 
-#define PREEMPT_ACTIVE		0x10000000
+#define PREEMPT_ACTIVE_BIT	28
+#define PREEMPT_ACTIVE		(1 << PREEMPT_ACTIVE_BIT)
 
 /*
  * thread information flags