Diffstat (limited to 'include/asm-mips')
-rw-r--r--  include/asm-mips/bitops.h  | 465
-rw-r--r--  include/asm-mips/byteorder.h  |  18
-rw-r--r--  include/asm-mips/compat.h  |  13
-rw-r--r--  include/asm-mips/cpu-features.h  |   3
-rw-r--r--  include/asm-mips/cpu-info.h  |   1
-rw-r--r--  include/asm-mips/futex.h  |   6
-rw-r--r--  include/asm-mips/hazards.h  | 180
-rw-r--r--  include/asm-mips/io.h  | 100
-rw-r--r--  include/asm-mips/linkage.h  |   4
-rw-r--r--  include/asm-mips/mach-cobalt/cobalt.h  |   2
-rw-r--r--  include/asm-mips/mach-generic/mangle-port.h  |  36
-rw-r--r--  include/asm-mips/mach-ip27/mangle-port.h  |   9
-rw-r--r--  include/asm-mips/mach-ip32/mangle-port.h  |   9
-rw-r--r--  include/asm-mips/mach-mips/cpu-feature-overrides.h  |   4
-rw-r--r--  include/asm-mips/mc146818-time.h  |  33
-rw-r--r--  include/asm-mips/mmu_context.h  |   7
-rw-r--r--  include/asm-mips/mmzone.h  |  14
-rw-r--r--  include/asm-mips/page.h  |   3
-rw-r--r--  include/asm-mips/pgtable-32.h  |   2
-rw-r--r--  include/asm-mips/poll.h  |   1
-rw-r--r--  include/asm-mips/r4kcache.h  |   4
-rw-r--r--  include/asm-mips/serial.h  |  85
-rw-r--r--  include/asm-mips/sibyte/sb1250.h  |   2
-rw-r--r--  include/asm-mips/sibyte/sb1250_scd.h  |   5
-rw-r--r--  include/asm-mips/signal.h  |  20
-rw-r--r--  include/asm-mips/sn/klconfig.h  |   2
-rw-r--r--  include/asm-mips/sn/mapped_kernel.h  |   4
-rw-r--r--  include/asm-mips/sn/sn0/hubio.h  |  12
-rw-r--r--  include/asm-mips/stackframe.h  |  20
-rw-r--r--  include/asm-mips/system.h  |   8
-rw-r--r--  include/asm-mips/termbits.h  |   2
-rw-r--r--  include/asm-mips/thread_info.h  |   2
-rw-r--r--  include/asm-mips/time.h  |  12
-rw-r--r--  include/asm-mips/types.h  |   5
34 files changed, 306 insertions(+), 787 deletions(-)
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 8e802059fe67..a1728f8c0705 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -105,22 +105,6 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
105} 105}
106 106
107/* 107/*
108 * __set_bit - Set a bit in memory
109 * @nr: the bit to set
110 * @addr: the address to start counting from
111 *
112 * Unlike set_bit(), this function is non-atomic and may be reordered.
113 * If it's called on the same region of memory simultaneously, the effect
114 * may be that only one operation succeeds.
115 */
116static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
117{
118 unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
119
120 *m |= 1UL << (nr & SZLONG_MASK);
121}
122
123/*
124 * clear_bit - Clears a bit in memory 108 * clear_bit - Clears a bit in memory
125 * @nr: Bit to clear 109 * @nr: Bit to clear
126 * @addr: Address to start counting from 110 * @addr: Address to start counting from
@@ -169,22 +153,6 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
169} 153}
170 154
171/* 155/*
172 * __clear_bit - Clears a bit in memory
173 * @nr: Bit to clear
174 * @addr: Address to start counting from
175 *
176 * Unlike clear_bit(), this function is non-atomic and may be reordered.
177 * If it's called on the same region of memory simultaneously, the effect
178 * may be that only one operation succeeds.
179 */
180static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
181{
182 unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
183
184 *m &= ~(1UL << (nr & SZLONG_MASK));
185}
186
187/*
188 * change_bit - Toggle a bit in memory 156 * change_bit - Toggle a bit in memory
189 * @nr: Bit to change 157 * @nr: Bit to change
190 * @addr: Address to start counting from 158 * @addr: Address to start counting from
@@ -235,22 +203,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
235} 203}
236 204
237/* 205/*
238 * __change_bit - Toggle a bit in memory
239 * @nr: the bit to change
240 * @addr: the address to start counting from
241 *
242 * Unlike change_bit(), this function is non-atomic and may be reordered.
243 * If it's called on the same region of memory simultaneously, the effect
244 * may be that only one operation succeeds.
245 */
246static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
247{
248 unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
249
250 *m ^= 1UL << (nr & SZLONG_MASK);
251}
252
253/*
254 * test_and_set_bit - Set a bit and return its old value 206 * test_and_set_bit - Set a bit and return its old value
255 * @nr: Bit to set 207 * @nr: Bit to set
256 * @addr: Address to count from 208 * @addr: Address to count from
@@ -321,30 +273,6 @@ static inline int test_and_set_bit(unsigned long nr,
321} 273}
322 274
323/* 275/*
324 * __test_and_set_bit - Set a bit and return its old value
325 * @nr: Bit to set
326 * @addr: Address to count from
327 *
328 * This operation is non-atomic and can be reordered.
329 * If two examples of this operation race, one can appear to succeed
330 * but actually fail. You must protect multiple accesses with a lock.
331 */
332static inline int __test_and_set_bit(unsigned long nr,
333 volatile unsigned long *addr)
334{
335 volatile unsigned long *a = addr;
336 unsigned long mask;
337 int retval;
338
339 a += nr >> SZLONG_LOG;
340 mask = 1UL << (nr & SZLONG_MASK);
341 retval = (mask & *a) != 0;
342 *a |= mask;
343
344 return retval;
345}
346
347/*
348 * test_and_clear_bit - Clear a bit and return its old value 276 * test_and_clear_bit - Clear a bit and return its old value
349 * @nr: Bit to clear 277 * @nr: Bit to clear
350 * @addr: Address to count from 278 * @addr: Address to count from
@@ -417,30 +345,6 @@ static inline int test_and_clear_bit(unsigned long nr,
417} 345}
418 346
419/* 347/*
420 * __test_and_clear_bit - Clear a bit and return its old value
421 * @nr: Bit to clear
422 * @addr: Address to count from
423 *
424 * This operation is non-atomic and can be reordered.
425 * If two examples of this operation race, one can appear to succeed
426 * but actually fail. You must protect multiple accesses with a lock.
427 */
428static inline int __test_and_clear_bit(unsigned long nr,
429 volatile unsigned long * addr)
430{
431 volatile unsigned long *a = addr;
432 unsigned long mask;
433 int retval;
434
435 a += (nr >> SZLONG_LOG);
436 mask = 1UL << (nr & SZLONG_MASK);
437 retval = ((mask & *a) != 0);
438 *a &= ~mask;
439
440 return retval;
441}
442
443/*
444 * test_and_change_bit - Change a bit and return its old value 348 * test_and_change_bit - Change a bit and return its old value
445 * @nr: Bit to change 349 * @nr: Bit to change
446 * @addr: Address to count from 350 * @addr: Address to count from
@@ -509,43 +413,11 @@ static inline int test_and_change_bit(unsigned long nr,
509 } 413 }
510} 414}
511 415
512/*
513 * __test_and_change_bit - Change a bit and return its old value
514 * @nr: Bit to change
515 * @addr: Address to count from
516 *
517 * This operation is non-atomic and can be reordered.
518 * If two examples of this operation race, one can appear to succeed
519 * but actually fail. You must protect multiple accesses with a lock.
520 */
521static inline int __test_and_change_bit(unsigned long nr,
522 volatile unsigned long *addr)
523{
524 volatile unsigned long *a = addr;
525 unsigned long mask;
526 int retval;
527
528 a += (nr >> SZLONG_LOG);
529 mask = 1UL << (nr & SZLONG_MASK);
530 retval = ((mask & *a) != 0);
531 *a ^= mask;
532
533 return retval;
534}
535
536#undef __bi_flags 416#undef __bi_flags
537#undef __bi_local_irq_save 417#undef __bi_local_irq_save
538#undef __bi_local_irq_restore 418#undef __bi_local_irq_restore
539 419
540/* 420#include <asm-generic/bitops/non-atomic.h>
541 * test_bit - Determine whether a bit is set
542 * @nr: bit number to test
543 * @addr: Address to start counting from
544 */
545static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
546{
547 return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
548}
549 421
550/* 422/*
551 * Return the bit position (0..63) of the most significant 1 bit in a word 423 * Return the bit position (0..63) of the most significant 1 bit in a word
@@ -580,6 +452,8 @@ static inline int __ilog2(unsigned long x)
580 return 63 - lz; 452 return 63 - lz;
581} 453}
582 454
455#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
456
583/* 457/*
584 * __ffs - find first bit in word. 458 * __ffs - find first bit in word.
585 * @word: The word to search 459 * @word: The word to search
@@ -589,31 +463,7 @@ static inline int __ilog2(unsigned long x)
589 */ 463 */
590static inline unsigned long __ffs(unsigned long word) 464static inline unsigned long __ffs(unsigned long word)
591{ 465{
592#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
593 return __ilog2(word & -word); 466 return __ilog2(word & -word);
594#else
595 int b = 0, s;
596
597#ifdef CONFIG_32BIT
598 s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
599 s = 8; if (word << 24 != 0) s = 0; b += s; word >>= s;
600 s = 4; if (word << 28 != 0) s = 0; b += s; word >>= s;
601 s = 2; if (word << 30 != 0) s = 0; b += s; word >>= s;
602 s = 1; if (word << 31 != 0) s = 0; b += s;
603
604 return b;
605#endif
606#ifdef CONFIG_64BIT
607 s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
608 s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
609 s = 8; if (word << 56 != 0) s = 0; b += s; word >>= s;
610 s = 4; if (word << 60 != 0) s = 0; b += s; word >>= s;
611 s = 2; if (word << 62 != 0) s = 0; b += s; word >>= s;
612 s = 1; if (word << 63 != 0) s = 0; b += s;
613
614 return b;
615#endif
616#endif
617} 467}
618 468
619/* 469/*
@@ -652,321 +502,38 @@ static inline unsigned long ffz(unsigned long word)
652 */ 502 */
653static inline unsigned long fls(unsigned long word) 503static inline unsigned long fls(unsigned long word)
654{ 504{
655#ifdef CONFIG_32BIT
656#ifdef CONFIG_CPU_MIPS32 505#ifdef CONFIG_CPU_MIPS32
657 __asm__ ("clz %0, %1" : "=r" (word) : "r" (word)); 506 __asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
658 507
659 return 32 - word; 508 return 32 - word;
660#else
661 {
662 int r = 32, s;
663
664 if (word == 0)
665 return 0;
666
667 s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
668 s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
669 s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
670 s = 2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s;
671 s = 1; if ((word & 0x80000000)) s = 0; r -= s;
672
673 return r;
674 }
675#endif 509#endif
676#endif /* CONFIG_32BIT */
677 510
678#ifdef CONFIG_64BIT
679#ifdef CONFIG_CPU_MIPS64 511#ifdef CONFIG_CPU_MIPS64
680
681 __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word)); 512 __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
682 513
683 return 64 - word; 514 return 64 - word;
684#else
685 {
686 int r = 64, s;
687
688 if (word == 0)
689 return 0;
690
691 s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
692 s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
693 s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
694 s = 4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s;
695 s = 2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s;
696 s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;
697
698 return r;
699 }
700#endif 515#endif
701#endif /* CONFIG_64BIT */
702} 516}
703 517
704#define fls64(x) generic_fls64(x) 518#else
705
706/*
707 * find_next_zero_bit - find the first zero bit in a memory region
708 * @addr: The address to base the search on
709 * @offset: The bitnumber to start searching at
710 * @size: The maximum size to search
711 */
712static inline unsigned long find_next_zero_bit(const unsigned long *addr,
713 unsigned long size, unsigned long offset)
714{
715 const unsigned long *p = addr + (offset >> SZLONG_LOG);
716 unsigned long result = offset & ~SZLONG_MASK;
717 unsigned long tmp;
718
719 if (offset >= size)
720 return size;
721 size -= result;
722 offset &= SZLONG_MASK;
723 if (offset) {
724 tmp = *(p++);
725 tmp |= ~0UL >> (_MIPS_SZLONG-offset);
726 if (size < _MIPS_SZLONG)
727 goto found_first;
728 if (~tmp)
729 goto found_middle;
730 size -= _MIPS_SZLONG;
731 result += _MIPS_SZLONG;
732 }
733 while (size & ~SZLONG_MASK) {
734 if (~(tmp = *(p++)))
735 goto found_middle;
736 result += _MIPS_SZLONG;
737 size -= _MIPS_SZLONG;
738 }
739 if (!size)
740 return result;
741 tmp = *p;
742
743found_first:
744 tmp |= ~0UL << size;
745 if (tmp == ~0UL) /* Are any bits zero? */
746 return result + size; /* Nope. */
747found_middle:
748 return result + ffz(tmp);
749}
750 519
751#define find_first_zero_bit(addr, size) \ 520#include <asm-generic/bitops/__ffs.h>
752 find_next_zero_bit((addr), (size), 0) 521#include <asm-generic/bitops/ffs.h>
522#include <asm-generic/bitops/ffz.h>
523#include <asm-generic/bitops/fls.h>
753 524
754/* 525#endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */
755 * find_next_bit - find the next set bit in a memory region
756 * @addr: The address to base the search on
757 * @offset: The bitnumber to start searching at
758 * @size: The maximum size to search
759 */
760static inline unsigned long find_next_bit(const unsigned long *addr,
761 unsigned long size, unsigned long offset)
762{
763 const unsigned long *p = addr + (offset >> SZLONG_LOG);
764 unsigned long result = offset & ~SZLONG_MASK;
765 unsigned long tmp;
766
767 if (offset >= size)
768 return size;
769 size -= result;
770 offset &= SZLONG_MASK;
771 if (offset) {
772 tmp = *(p++);
773 tmp &= ~0UL << offset;
774 if (size < _MIPS_SZLONG)
775 goto found_first;
776 if (tmp)
777 goto found_middle;
778 size -= _MIPS_SZLONG;
779 result += _MIPS_SZLONG;
780 }
781 while (size & ~SZLONG_MASK) {
782 if ((tmp = *(p++)))
783 goto found_middle;
784 result += _MIPS_SZLONG;
785 size -= _MIPS_SZLONG;
786 }
787 if (!size)
788 return result;
789 tmp = *p;
790
791found_first:
792 tmp &= ~0UL >> (_MIPS_SZLONG - size);
793 if (tmp == 0UL) /* Are any bits set? */
794 return result + size; /* Nope. */
795found_middle:
796 return result + __ffs(tmp);
797}
798 526
799/* 527#include <asm-generic/bitops/fls64.h>
800 * find_first_bit - find the first set bit in a memory region 528#include <asm-generic/bitops/find.h>
801 * @addr: The address to start the search at
802 * @size: The maximum size to search
803 *
804 * Returns the bit-number of the first set bit, not the number of the byte
805 * containing a bit.
806 */
807#define find_first_bit(addr, size) \
808 find_next_bit((addr), (size), 0)
809 529
810#ifdef __KERNEL__ 530#ifdef __KERNEL__
811 531
812/* 532#include <asm-generic/bitops/sched.h>
813 * Every architecture must define this function. It's the fastest 533#include <asm-generic/bitops/hweight.h>
814 * way of searching a 140-bit bitmap where the first 100 bits are 534#include <asm-generic/bitops/ext2-non-atomic.h>
815 * unlikely to be set. It's guaranteed that at least one of the 140 535#include <asm-generic/bitops/ext2-atomic.h>
816 * bits is cleared. 536#include <asm-generic/bitops/minix.h>
817 */
818static inline int sched_find_first_bit(const unsigned long *b)
819{
820#ifdef CONFIG_32BIT
821 if (unlikely(b[0]))
822 return __ffs(b[0]);
823 if (unlikely(b[1]))
824 return __ffs(b[1]) + 32;
825 if (unlikely(b[2]))
826 return __ffs(b[2]) + 64;
827 if (b[3])
828 return __ffs(b[3]) + 96;
829 return __ffs(b[4]) + 128;
830#endif
831#ifdef CONFIG_64BIT
832 if (unlikely(b[0]))
833 return __ffs(b[0]);
834 if (unlikely(b[1]))
835 return __ffs(b[1]) + 64;
836 return __ffs(b[2]) + 128;
837#endif
838}
839
840/*
841 * hweightN - returns the hamming weight of a N-bit word
842 * @x: the word to weigh
843 *
844 * The Hamming Weight of a number is the total number of bits set in it.
845 */
846
847#define hweight64(x) generic_hweight64(x)
848#define hweight32(x) generic_hweight32(x)
849#define hweight16(x) generic_hweight16(x)
850#define hweight8(x) generic_hweight8(x)
851
852static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
853{
854 unsigned char *ADDR = (unsigned char *) addr;
855 int mask, retval;
856
857 ADDR += nr >> 3;
858 mask = 1 << (nr & 0x07);
859 retval = (mask & *ADDR) != 0;
860 *ADDR |= mask;
861
862 return retval;
863}
864
865static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
866{
867 unsigned char *ADDR = (unsigned char *) addr;
868 int mask, retval;
869
870 ADDR += nr >> 3;
871 mask = 1 << (nr & 0x07);
872 retval = (mask & *ADDR) != 0;
873 *ADDR &= ~mask;
874
875 return retval;
876}
877
878static inline int test_le_bit(unsigned long nr, const unsigned long * addr)
879{
880 const unsigned char *ADDR = (const unsigned char *) addr;
881 int mask;
882
883 ADDR += nr >> 3;
884 mask = 1 << (nr & 0x07);
885
886 return ((mask & *ADDR) != 0);
887}
888
889static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
890 unsigned long size, unsigned long offset)
891{
892 unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
893 unsigned long result = offset & ~SZLONG_MASK;
894 unsigned long tmp;
895
896 if (offset >= size)
897 return size;
898 size -= result;
899 offset &= SZLONG_MASK;
900 if (offset) {
901 tmp = cpu_to_lelongp(p++);
902 tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
903 if (size < _MIPS_SZLONG)
904 goto found_first;
905 if (~tmp)
906 goto found_middle;
907 size -= _MIPS_SZLONG;
908 result += _MIPS_SZLONG;
909 }
910 while (size & ~SZLONG_MASK) {
911 if (~(tmp = cpu_to_lelongp(p++)))
912 goto found_middle;
913 result += _MIPS_SZLONG;
914 size -= _MIPS_SZLONG;
915 }
916 if (!size)
917 return result;
918 tmp = cpu_to_lelongp(p);
919
920found_first:
921 tmp |= ~0UL << size;
922 if (tmp == ~0UL) /* Are any bits zero? */
923 return result + size; /* Nope. */
924
925found_middle:
926 return result + ffz(tmp);
927}
928
929#define find_first_zero_le_bit(addr, size) \
930 find_next_zero_le_bit((addr), (size), 0)
931
932#define ext2_set_bit(nr,addr) \
933 __test_and_set_le_bit((nr),(unsigned long*)addr)
934#define ext2_clear_bit(nr, addr) \
935 __test_and_clear_le_bit((nr),(unsigned long*)addr)
936 #define ext2_set_bit_atomic(lock, nr, addr) \
937({ \
938 int ret; \
939 spin_lock(lock); \
940 ret = ext2_set_bit((nr), (addr)); \
941 spin_unlock(lock); \
942 ret; \
943})
944
945#define ext2_clear_bit_atomic(lock, nr, addr) \
946({ \
947 int ret; \
948 spin_lock(lock); \
949 ret = ext2_clear_bit((nr), (addr)); \
950 spin_unlock(lock); \
951 ret; \
952})
953#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
954#define ext2_find_first_zero_bit(addr, size) \
955 find_first_zero_le_bit((unsigned long*)addr, size)
956#define ext2_find_next_zero_bit(addr, size, off) \
957 find_next_zero_le_bit((unsigned long*)addr, size, off)
958
959/*
960 * Bitmap functions for the minix filesystem.
961 *
962 * FIXME: These assume that Minix uses the native byte/bitorder.
963 * This limits the Minix filesystem's value for data exchange very much.
964 */
965#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
966#define minix_set_bit(nr,addr) set_bit(nr,addr)
967#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
968#define minix_test_bit(nr,addr) test_bit(nr,addr)
969#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
970 537
971#endif /* __KERNEL__ */ 538#endif /* __KERNEL__ */
972 539
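The bitops.h diff above replaces MIPS-private copies of the non-atomic bit operations, the bit-search helpers, hweight, and the ext2/minix bitmap wrappers with the corresponding asm-generic/bitops/*.h headers. As a rough sketch (not the verbatim generic header), the non-atomic operations pulled in from asm-generic/bitops/non-atomic.h are equivalent to the code being deleted here:

static inline void __set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;

	*p |= mask;			/* non-atomic: caller serializes access */
}

static inline int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;	/* previous value of the bit */
}

So the header loses nothing; it merely stops duplicating code that every architecture without special bit instructions shares.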
diff --git a/include/asm-mips/byteorder.h b/include/asm-mips/byteorder.h
index 584f8128fffd..aefc02f16fd8 100644
--- a/include/asm-mips/byteorder.h
+++ b/include/asm-mips/byteorder.h
@@ -39,6 +39,24 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
39} 39}
40#define __arch__swab32(x) ___arch__swab32(x) 40#define __arch__swab32(x) ___arch__swab32(x)
41 41
42#ifdef CONFIG_CPU_MIPS64_R2
43
44static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
45{
46 __asm__(
47 " dsbh %0, %1 \n"
48 " dshd %0, %0 \n"
49 " drotr %0, %0, 32 \n"
50 : "=r" (x)
51 : "r" (x));
52
53 return x;
54}
55
56#define __arch__swab64(x) ___arch__swab64(x)
57
58#endif /* CONFIG_CPU_MIPS64_R2 */
59
42#endif /* CONFIG_CPU_MIPSR2 */ 60#endif /* CONFIG_CPU_MIPSR2 */
43 61
44#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) 62#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h
index 35d2604fe69c..986511db54a6 100644
--- a/include/asm-mips/compat.h
+++ b/include/asm-mips/compat.h
@@ -128,17 +128,22 @@ typedef u32 compat_sigset_word;
128 */ 128 */
129typedef u32 compat_uptr_t; 129typedef u32 compat_uptr_t;
130 130
131static inline void *compat_ptr(compat_uptr_t uptr) 131static inline void __user *compat_ptr(compat_uptr_t uptr)
132{ 132{
133 return (void *)(long)uptr; 133 return (void __user *)(long)uptr;
134} 134}
135 135
136static inline void *compat_alloc_user_space(long len) 136static inline compat_uptr_t ptr_to_compat(void __user *uptr)
137{
138 return (u32)(unsigned long)uptr;
139}
140
141static inline void __user *compat_alloc_user_space(long len)
137{ 142{
138 struct pt_regs *regs = (struct pt_regs *) 143 struct pt_regs *regs = (struct pt_regs *)
139 ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; 144 ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
140 145
141 return (void *) (regs->regs[29] - len); 146 return (void __user *) (regs->regs[29] - len);
142} 147}
143#if defined (__MIPSEL__) 148#if defined (__MIPSEL__)
144#define __COMPAT_ENDIAN_SWAP__ 1 149#define __COMPAT_ENDIAN_SWAP__ 1
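The compat.h hunk tags the user-space pointers returned by compat_ptr() and compat_alloc_user_space() with __user and adds ptr_to_compat() as the inverse conversion. A hedged usage sketch follows; the structure and handler are invented for illustration and are not part of the patch:

/* Illustration only: round-tripping a 32-bit pointer from a compat task. */
struct demo_compat_args {
	compat_uptr_t buf;			/* user pointer as stored by a 32-bit task */
	unsigned int len;
};

static int demo_handler(struct demo_compat_args *a)
{
	void __user *buf = compat_ptr(a->buf);	/* widen before copy_from_user() and friends */

	if (ptr_to_compat(buf) != a->buf)	/* narrowing back must be lossless */
		return -EINVAL;
	return 0;
}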
diff --git a/include/asm-mips/cpu-features.h b/include/asm-mips/cpu-features.h
index 78c9cc2735d5..3f2b6d9ac45e 100644
--- a/include/asm-mips/cpu-features.h
+++ b/include/asm-mips/cpu-features.h
@@ -96,6 +96,9 @@
96#ifndef cpu_has_ic_fills_f_dc 96#ifndef cpu_has_ic_fills_f_dc
97#define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC) 97#define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC)
98#endif 98#endif
99#ifndef cpu_has_pindexed_dcache
100#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
101#endif
99 102
100/* 103/*
101 * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors 104 * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h
index d5cf519f8fcc..140be1c67da7 100644
--- a/include/asm-mips/cpu-info.h
+++ b/include/asm-mips/cpu-info.h
@@ -39,6 +39,7 @@ struct cache_desc {
39#define MIPS_CACHE_ALIASES 0x00000004 /* Cache could have aliases */ 39#define MIPS_CACHE_ALIASES 0x00000004 /* Cache could have aliases */
40#define MIPS_CACHE_IC_F_DC 0x00000008 /* Ic can refill from D-cache */ 40#define MIPS_CACHE_IC_F_DC 0x00000008 /* Ic can refill from D-cache */
41#define MIPS_IC_SNOOPS_REMOTE 0x00000010 /* Ic snoops remote stores */ 41#define MIPS_IC_SNOOPS_REMOTE 0x00000010 /* Ic snoops remote stores */
42#define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
42 43
43struct cpuinfo_mips { 44struct cpuinfo_mips {
44 unsigned long udelay_val; 45 unsigned long udelay_val;
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index 2454c44a8f54..a554089991f2 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -99,5 +99,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
99 return ret; 99 return ret;
100} 100}
101 101
102static inline int
103futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
104{
105 return -ENOSYS;
106}
107
102#endif 108#endif
103#endif 109#endif
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index 6111a0ce58c4..feb29a793888 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -3,7 +3,9 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2003, 2004 Ralf Baechle 6 * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
7 * Copyright (C) MIPS Technologies, Inc.
8 * written by Ralf Baechle <ralf@linux-mips.org>
7 */ 9 */
8#ifndef _ASM_HAZARDS_H 10#ifndef _ASM_HAZARDS_H
9#define _ASM_HAZARDS_H 11#define _ASM_HAZARDS_H
@@ -74,8 +76,7 @@
74#define irq_disable_hazard 76#define irq_disable_hazard
75 _ehb 77 _ehb
76 78
77#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \ 79#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
78 defined(CONFIG_CPU_SB1)
79 80
80/* 81/*
81 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. 82 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
@@ -99,13 +100,13 @@
99#else /* __ASSEMBLY__ */ 100#else /* __ASSEMBLY__ */
100 101
101__asm__( 102__asm__(
102 " .macro _ssnop \n\t" 103 " .macro _ssnop \n"
103 " sll $0, $0, 1 \n\t" 104 " sll $0, $0, 1 \n"
104 " .endm \n\t" 105 " .endm \n"
105 " \n\t" 106 " \n"
106 " .macro _ehb \n\t" 107 " .macro _ehb \n"
107 " sll $0, $0, 3 \n\t" 108 " sll $0, $0, 3 \n"
108 " .endm \n\t"); 109 " .endm \n");
109 110
110#ifdef CONFIG_CPU_RM9000 111#ifdef CONFIG_CPU_RM9000
111 112
@@ -117,17 +118,21 @@ __asm__(
117 118
118#define mtc0_tlbw_hazard() \ 119#define mtc0_tlbw_hazard() \
119 __asm__ __volatile__( \ 120 __asm__ __volatile__( \
120 ".set\tmips32\n\t" \ 121 " .set mips32 \n" \
121 "_ssnop; _ssnop; _ssnop; _ssnop\n\t" \ 122 " _ssnop \n" \
122 ".set\tmips0") 123 " _ssnop \n" \
124 " _ssnop \n" \
125 " _ssnop \n" \
126 " .set mips0 \n")
123 127
124#define tlbw_use_hazard() \ 128#define tlbw_use_hazard() \
125 __asm__ __volatile__( \ 129 __asm__ __volatile__( \
126 ".set\tmips32\n\t" \ 130 " .set mips32 \n" \
127 "_ssnop; _ssnop; _ssnop; _ssnop\n\t" \ 131 " _ssnop \n" \
128 ".set\tmips0") 132 " _ssnop \n" \
129 133 " _ssnop \n" \
130#define back_to_back_c0_hazard() do { } while (0) 134 " _ssnop \n" \
135 " .set mips0 \n")
131 136
132#else 137#else
133 138
@@ -136,15 +141,25 @@ __asm__(
136 */ 141 */
137#define mtc0_tlbw_hazard() \ 142#define mtc0_tlbw_hazard() \
138 __asm__ __volatile__( \ 143 __asm__ __volatile__( \
139 ".set noreorder\n\t" \ 144 " .set noreorder \n" \
140 "nop; nop; nop; nop; nop; nop;\n\t" \ 145 " nop \n" \
141 ".set reorder\n\t") 146 " nop \n" \
147 " nop \n" \
148 " nop \n" \
149 " nop \n" \
150 " nop \n" \
151 " .set reorder \n")
142 152
143#define tlbw_use_hazard() \ 153#define tlbw_use_hazard() \
144 __asm__ __volatile__( \ 154 __asm__ __volatile__( \
145 ".set noreorder\n\t" \ 155 " .set noreorder \n" \
146 "nop; nop; nop; nop; nop; nop;\n\t" \ 156 " nop \n" \
147 ".set reorder\n\t") 157 " nop \n" \
158 " nop \n" \
159 " nop \n" \
160 " nop \n" \
161 " nop \n" \
162 " .set reorder \n")
148 163
149#endif 164#endif
150 165
@@ -156,49 +171,26 @@ __asm__(
156 171
157#ifdef CONFIG_CPU_MIPSR2 172#ifdef CONFIG_CPU_MIPSR2
158 173
159__asm__( 174__asm__(" .macro irq_enable_hazard \n"
160 " .macro\tirq_enable_hazard \n\t" 175 " _ehb \n"
161 " _ehb \n\t" 176 " .endm \n"
162 " .endm \n\t" 177 " \n"
163 " \n\t" 178 " .macro irq_disable_hazard \n"
164 " .macro\tirq_disable_hazard \n\t" 179 " _ehb \n"
165 " _ehb \n\t" 180 " .endm \n");
166 " .endm \n\t"
167 " \n\t"
168 " .macro\tback_to_back_c0_hazard \n\t"
169 " _ehb \n\t"
170 " .endm");
171
172#define irq_enable_hazard() \
173 __asm__ __volatile__( \
174 "irq_enable_hazard")
175 181
176#define irq_disable_hazard() \ 182#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
177 __asm__ __volatile__( \
178 "irq_disable_hazard")
179
180#define back_to_back_c0_hazard() \
181 __asm__ __volatile__( \
182 "back_to_back_c0_hazard")
183
184#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
185 defined(CONFIG_CPU_SB1)
186 183
187/* 184/*
188 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. 185 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
189 */ 186 */
190 187
191__asm__( 188__asm__(
192 " .macro\tirq_enable_hazard \n\t" 189 " .macro irq_enable_hazard \n"
193 " .endm \n\t" 190 " .endm \n"
194 " \n\t" 191 " \n"
195 " .macro\tirq_disable_hazard \n\t" 192 " .macro irq_disable_hazard \n"
196 " .endm"); 193 " .endm \n");
197
198#define irq_enable_hazard() do { } while (0)
199#define irq_disable_hazard() do { } while (0)
200
201#define back_to_back_c0_hazard() do { } while (0)
202 194
203#else 195#else
204 196
@@ -209,29 +201,63 @@ __asm__(
209 */ 201 */
210 202
211__asm__( 203__asm__(
212 " # \n\t" 204 " # \n"
213 " # There is a hazard but we do not care \n\t" 205 " # There is a hazard but we do not care \n"
214 " # \n\t" 206 " # \n"
215 " .macro\tirq_enable_hazard \n\t" 207 " .macro\tirq_enable_hazard \n"
216 " .endm \n\t" 208 " .endm \n"
217 " \n\t" 209 " \n"
218 " .macro\tirq_disable_hazard \n\t" 210 " .macro\tirq_disable_hazard \n"
219 " _ssnop; _ssnop; _ssnop \n\t" 211 " _ssnop \n"
220 " .endm"); 212 " _ssnop \n"
213 " _ssnop \n"
214 " .endm \n");
221 215
222#define irq_enable_hazard() do { } while (0) 216#endif
217
218#define irq_enable_hazard() \
219 __asm__ __volatile__("irq_enable_hazard")
223#define irq_disable_hazard() \ 220#define irq_disable_hazard() \
224 __asm__ __volatile__( \ 221 __asm__ __volatile__("irq_disable_hazard")
225 "irq_disable_hazard")
226 222
227#define back_to_back_c0_hazard() \ 223
228 __asm__ __volatile__( \ 224/*
229 " .set noreorder \n" \ 225 * Back-to-back hazards -
230 " nop; nop; nop \n" \ 226 *
231 " .set reorder \n") 227 * What is needed to separate a move to cp0 from a subsequent read from the
228 * same cp0 register?
229 */
230#ifdef CONFIG_CPU_MIPSR2
231
232__asm__(" .macro back_to_back_c0_hazard \n"
233 " _ehb \n"
234 " .endm \n");
235
236#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
237 defined(CONFIG_CPU_SB1)
238
239__asm__(" .macro back_to_back_c0_hazard \n"
240 " .endm \n");
241
242#else
243
244__asm__(" .macro back_to_back_c0_hazard \n"
245 " .set noreorder \n"
246 " _ssnop \n"
247 " _ssnop \n"
248 " _ssnop \n"
249 " .set reorder \n"
250 " .endm");
232 251
233#endif 252#endif
234 253
254#define back_to_back_c0_hazard() \
255 __asm__ __volatile__("back_to_back_c0_hazard")
256
257
258/*
259 * Instruction execution hazard
260 */
235#ifdef CONFIG_CPU_MIPSR2 261#ifdef CONFIG_CPU_MIPSR2
236/* 262/*
237 * gcc has a tradition of misscompiling the previous construct using the 263 * gcc has a tradition of misscompiling the previous construct using the
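In the hazards.h rework above, back_to_back_c0_hazard becomes an assembler macro selected per CPU class (an ehb on MIPS R2, nothing on R10000/RM9000/SB1, three ssnops elsewhere) with a single C wrapper. The new comment asks what must separate a move to a CP0 register from a subsequent read of the same register; a minimal usage sketch, assuming the read_c0_*/write_c0_* accessors from <asm/mipsregs.h>:

/* Sketch only: make a CP0 update visible to an immediately following read. */
static inline unsigned int demo_set_status(unsigned int bits)
{
	unsigned int val = read_c0_status();

	write_c0_status(val | bits);
	back_to_back_c0_hazard();	/* ehb / ssnops / nothing, per CPU class */
	return read_c0_status();	/* now reflects the update on all of them */
}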
diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index 8c011aa61afa..6b17eb9d79a5 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -4,7 +4,7 @@
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1994, 1995 Waldorf GmbH 6 * Copyright (C) 1994, 1995 Waldorf GmbH
7 * Copyright (C) 1994 - 2000 Ralf Baechle 7 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. 9 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
10 * Author: Maciej W. Rozycki <macro@mips.com> 10 * Author: Maciej W. Rozycki <macro@mips.com>
@@ -40,56 +40,13 @@
40 * hardware. An example use would be for flash memory that's used for 40 * hardware. An example use would be for flash memory that's used for
41 * execute in place. 41 * execute in place.
42 */ 42 */
43# define __raw_ioswabb(x) (x) 43# define __raw_ioswabb(a,x) (x)
44# define __raw_ioswabw(x) (x) 44# define __raw_ioswabw(a,x) (x)
45# define __raw_ioswabl(x) (x) 45# define __raw_ioswabl(a,x) (x)
46# define __raw_ioswabq(x) (x) 46# define __raw_ioswabq(a,x) (x)
47# define ____raw_ioswabq(x) (x) 47# define ____raw_ioswabq(a,x) (x)
48 48
49/* 49/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
50 * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
51 * less sane hardware forces software to fiddle with this...
52 *
53 * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
54 * you can't have the numerical value of data and byte addresses within
55 * multibyte quantities both preserved at the same time. Hence two
56 * variations of functions: non-prefixed ones that preserve the value
57 * and prefixed ones that preserve byte addresses. The latters are
58 * typically used for moving raw data between a peripheral and memory (cf.
59 * string I/O functions), hence the "__mem_" prefix.
60 */
61#if defined(CONFIG_SWAP_IO_SPACE)
62
63# define ioswabb(x) (x)
64# define __mem_ioswabb(x) (x)
65# ifdef CONFIG_SGI_IP22
66/*
67 * IP22 seems braindead enough to swap 16bits values in hardware, but
68 * not 32bits. Go figure... Can't tell without documentation.
69 */
70# define ioswabw(x) (x)
71# define __mem_ioswabw(x) le16_to_cpu(x)
72# else
73# define ioswabw(x) le16_to_cpu(x)
74# define __mem_ioswabw(x) (x)
75# endif
76# define ioswabl(x) le32_to_cpu(x)
77# define __mem_ioswabl(x) (x)
78# define ioswabq(x) le64_to_cpu(x)
79# define __mem_ioswabq(x) (x)
80
81#else
82
83# define ioswabb(x) (x)
84# define __mem_ioswabb(x) (x)
85# define ioswabw(x) (x)
86# define __mem_ioswabw(x) cpu_to_le16(x)
87# define ioswabl(x) (x)
88# define __mem_ioswabl(x) cpu_to_le32(x)
89# define ioswabq(x) (x)
90# define __mem_ioswabq(x) cpu_to_le32(x)
91
92#endif
93 50
94#define IO_SPACE_LIMIT 0xffff 51#define IO_SPACE_LIMIT 0xffff
95 52
@@ -103,8 +60,20 @@
103 */ 60 */
104extern const unsigned long mips_io_port_base; 61extern const unsigned long mips_io_port_base;
105 62
106#define set_io_port_base(base) \ 63/*
107 do { * (unsigned long *) &mips_io_port_base = (base); } while (0) 64 * Gcc will generate code to load the value of mips_io_port_base after each
65 * function call which may be fairly wasteful in some cases. So we don't
66 * play quite by the book. We tell gcc mips_io_port_base is a long variable
67 * which solves the code generation issue. Now we need to violate the
68 * aliasing rules a little to make initialization possible and finally we
69 * will need the barrier() to fight side effects of the aliasing chat.
70 * This trickery will eventually collapse under gcc's optimizer. Oh well.
71 */
72static inline void set_io_port_base(unsigned long base)
73{
74 * (unsigned long *) &mips_io_port_base = base;
75 barrier();
76}
108 77
109/* 78/*
110 * Thanks to James van Artsdalen for a better timing-fix than 79 * Thanks to James van Artsdalen for a better timing-fix than
@@ -334,7 +303,7 @@ static inline void pfx##write##bwlq(type val, \
334 \ 303 \
335 __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ 304 __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
336 \ 305 \
337 __val = pfx##ioswab##bwlq(val); \ 306 __val = pfx##ioswab##bwlq(__mem, val); \
338 \ 307 \
339 if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ 308 if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
340 *__mem = __val; \ 309 *__mem = __val; \
@@ -389,7 +358,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
389 BUG(); \ 358 BUG(); \
390 } \ 359 } \
391 \ 360 \
392 return pfx##ioswab##bwlq(__val); \ 361 return pfx##ioswab##bwlq(__mem, __val); \
393} 362}
394 363
395#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \ 364#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
@@ -399,10 +368,9 @@ static inline void pfx##out##bwlq##p(type val, unsigned long port) \
399 volatile type *__addr; \ 368 volatile type *__addr; \
400 type __val; \ 369 type __val; \
401 \ 370 \
402 port = __swizzle_addr_##bwlq(port); \ 371 __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
403 __addr = (void *)(mips_io_port_base + port); \
404 \ 372 \
405 __val = pfx##ioswab##bwlq(val); \ 373 __val = pfx##ioswab##bwlq(__addr, val); \
406 \ 374 \
407 /* Really, we want this to be atomic */ \ 375 /* Really, we want this to be atomic */ \
408 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ 376 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
@@ -416,15 +384,14 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
416 volatile type *__addr; \ 384 volatile type *__addr; \
417 type __val; \ 385 type __val; \
418 \ 386 \
419 port = __swizzle_addr_##bwlq(port); \ 387 __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
420 __addr = (void *)(mips_io_port_base + port); \
421 \ 388 \
422 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ 389 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
423 \ 390 \
424 __val = *__addr; \ 391 __val = *__addr; \
425 slow; \ 392 slow; \
426 \ 393 \
427 return pfx##ioswab##bwlq(__val); \ 394 return pfx##ioswab##bwlq(__addr, __val); \
428} 395}
429 396
430#define __BUILD_MEMORY_PFX(bus, bwlq, type) \ 397#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
@@ -589,24 +556,11 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
589 */ 556 */
590#define __ISA_IO_base ((char *)(isa_slot_offset)) 557#define __ISA_IO_base ((char *)(isa_slot_offset))
591 558
592#define isa_readb(a) readb(__ISA_IO_base + (a))
593#define isa_readw(a) readw(__ISA_IO_base + (a))
594#define isa_readl(a) readl(__ISA_IO_base + (a))
595#define isa_readq(a) readq(__ISA_IO_base + (a))
596#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
597#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
598#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
599#define isa_writeq(q,a) writeq(q,__ISA_IO_base + (a))
600#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
601#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
602#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
603
604/* 559/*
605 * We don't have csum_partial_copy_fromio() yet, so we cheat here and 560 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
606 * just copy it. The net code will then do the checksum later. 561 * just copy it. The net code will then do the checksum later.
607 */ 562 */
608#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len)) 563#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
609#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))
610 564
611/* 565/*
612 * check_signature - find BIOS signatures 566 * check_signature - find BIOS signatures
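A key change in the io.h diff is that the ioswab*() and __mem_ioswab*() helpers now take the target address as a first argument, and their definitions move out of io.h into the per-platform mangle-port.h headers (see the mach-generic, mach-ip27 and mach-ip32 hunks below). That lets a platform decide per access window whether to byte-swap, instead of making one global CONFIG_SWAP_IO_SPACE choice. A purely hypothetical mangle-port fragment to illustrate the idea; the address window tested here is invented:

/* Hypothetical: swap 16-bit accesses only for one little-endian device window. */
# define ioswabw(a, x)							\
	((((unsigned long)(a) & 0x1f000000UL) == 0x1f000000UL) ?	\
		le16_to_cpu(x) : (x))
# define __mem_ioswabw(a, x) (x)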
diff --git a/include/asm-mips/linkage.h b/include/asm-mips/linkage.h
index 291c2d01c44f..b6185d3cfe68 100644
--- a/include/asm-mips/linkage.h
+++ b/include/asm-mips/linkage.h
@@ -1,6 +1,8 @@
1#ifndef __ASM_LINKAGE_H 1#ifndef __ASM_LINKAGE_H
2#define __ASM_LINKAGE_H 2#define __ASM_LINKAGE_H
3 3
4/* Nothing to see here... */ 4#ifdef __ASSEMBLY__
5#include <asm/asm.h>
6#endif
5 7
6#endif 8#endif
diff --git a/include/asm-mips/mach-cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h
index 78e1df2095fb..b3c5ecbec03c 100644
--- a/include/asm-mips/mach-cobalt/cobalt.h
+++ b/include/asm-mips/mach-cobalt/cobalt.h
@@ -113,4 +113,6 @@ do { \
113# define COBALT_KEY_SELECT (1 << 7) 113# define COBALT_KEY_SELECT (1 << 7)
114# define COBALT_KEY_MASK 0xfe 114# define COBALT_KEY_MASK 0xfe
115 115
116#define COBALT_UART ((volatile unsigned char *) CKSEG1ADDR(0x1c800000))
117
116#endif /* __ASM_COBALT_H */ 118#endif /* __ASM_COBALT_H */
diff --git a/include/asm-mips/mach-generic/mangle-port.h b/include/asm-mips/mach-generic/mangle-port.h
index 4a98d83b8ec7..6e1b0c075de7 100644
--- a/include/asm-mips/mach-generic/mangle-port.h
+++ b/include/asm-mips/mach-generic/mangle-port.h
@@ -13,4 +13,40 @@
13#define __swizzle_addr_l(port) (port) 13#define __swizzle_addr_l(port) (port)
14#define __swizzle_addr_q(port) (port) 14#define __swizzle_addr_q(port) (port)
15 15
16/*
17 * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
18 * less sane hardware forces software to fiddle with this...
19 *
20 * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
21 * you can't have the numerical value of data and byte addresses within
22 * multibyte quantities both preserved at the same time. Hence two
23 * variations of functions: non-prefixed ones that preserve the value
24 * and prefixed ones that preserve byte addresses. The latters are
25 * typically used for moving raw data between a peripheral and memory (cf.
26 * string I/O functions), hence the "__mem_" prefix.
27 */
28#if defined(CONFIG_SWAP_IO_SPACE)
29
30# define ioswabb(a,x) (x)
31# define __mem_ioswabb(a,x) (x)
32# define ioswabw(a,x) le16_to_cpu(x)
33# define __mem_ioswabw(a,x) (x)
34# define ioswabl(a,x) le32_to_cpu(x)
35# define __mem_ioswabl(a,x) (x)
36# define ioswabq(a,x) le64_to_cpu(x)
37# define __mem_ioswabq(a,x) (x)
38
39#else
40
41# define ioswabb(a,x) (x)
42# define __mem_ioswabb(a,x) (x)
43# define ioswabw(a,x) (x)
44# define __mem_ioswabw(a,x) cpu_to_le16(x)
45# define ioswabl(a,x) (x)
46# define __mem_ioswabl(a,x) cpu_to_le32(x)
47# define ioswabq(a,x) (x)
48# define __mem_ioswabq(a,x) cpu_to_le32(x)
49
50#endif
51
16#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */ 52#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
diff --git a/include/asm-mips/mach-ip27/mangle-port.h b/include/asm-mips/mach-ip27/mangle-port.h
index f76c44880451..d615312a451a 100644
--- a/include/asm-mips/mach-ip27/mangle-port.h
+++ b/include/asm-mips/mach-ip27/mangle-port.h
@@ -13,4 +13,13 @@
13#define __swizzle_addr_l(port) (port) 13#define __swizzle_addr_l(port) (port)
14#define __swizzle_addr_q(port) (port) 14#define __swizzle_addr_q(port) (port)
15 15
16# define ioswabb(a,x) (x)
17# define __mem_ioswabb(a,x) (x)
18# define ioswabw(a,x) (x)
19# define __mem_ioswabw(a,x) cpu_to_le16(x)
20# define ioswabl(a,x) (x)
21# define __mem_ioswabl(a,x) cpu_to_le32(x)
22# define ioswabq(a,x) (x)
23# define __mem_ioswabq(a,x) cpu_to_le32(x)
24
16#endif /* __ASM_MACH_IP27_MANGLE_PORT_H */ 25#endif /* __ASM_MACH_IP27_MANGLE_PORT_H */
diff --git a/include/asm-mips/mach-ip32/mangle-port.h b/include/asm-mips/mach-ip32/mangle-port.h
index 6e25b52ed8f2..81320eb55324 100644
--- a/include/asm-mips/mach-ip32/mangle-port.h
+++ b/include/asm-mips/mach-ip32/mangle-port.h
@@ -14,4 +14,13 @@
14#define __swizzle_addr_l(port) (port) 14#define __swizzle_addr_l(port) (port)
15#define __swizzle_addr_q(port) (port) 15#define __swizzle_addr_q(port) (port)
16 16
17# define ioswabb(a,x) (x)
18# define __mem_ioswabb(a,x) (x)
19# define ioswabw(a,x) (x)
20# define __mem_ioswabw(a,x) cpu_to_le16(x)
21# define ioswabl(a,x) (x)
22# define __mem_ioswabl(a,x) cpu_to_le32(x)
23# define ioswabq(a,x) (x)
24# define __mem_ioswabq(a,x) cpu_to_le32(x)
25
17#endif /* __ASM_MACH_IP32_MANGLE_PORT_H */ 26#endif /* __ASM_MACH_IP32_MANGLE_PORT_H */
diff --git a/include/asm-mips/mach-mips/cpu-feature-overrides.h b/include/asm-mips/mach-mips/cpu-feature-overrides.h
index 9f92aed17754..e06af6c86f86 100644
--- a/include/asm-mips/mach-mips/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-mips/cpu-feature-overrides.h
@@ -29,7 +29,11 @@
29/* #define cpu_has_prefetch ? */ 29/* #define cpu_has_prefetch ? */
30#define cpu_has_mcheck 1 30#define cpu_has_mcheck 1
31/* #define cpu_has_ejtag ? */ 31/* #define cpu_has_ejtag ? */
32#ifdef CONFIG_CPU_HAS_LLSC
32#define cpu_has_llsc 1 33#define cpu_has_llsc 1
34#else
35#define cpu_has_llsc 0
36#endif
33/* #define cpu_has_vtag_icache ? */ 37/* #define cpu_has_vtag_icache ? */
34/* #define cpu_has_dc_aliases ? */ 38/* #define cpu_has_dc_aliases ? */
35/* #define cpu_has_ic_fills_f_dc ? */ 39/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/include/asm-mips/mc146818-time.h b/include/asm-mips/mc146818-time.h
index 47214861093b..41ac8d363c67 100644
--- a/include/asm-mips/mc146818-time.h
+++ b/include/asm-mips/mc146818-time.h
@@ -86,43 +86,14 @@ static inline int mc146818_set_rtc_mmss(unsigned long nowtime)
86 return retval; 86 return retval;
87} 87}
88 88
89/*
90 * Returns true if a clock update is in progress
91 */
92static inline unsigned char rtc_is_updating(void)
93{
94 unsigned char uip;
95 unsigned long flags;
96
97 spin_lock_irqsave(&rtc_lock, flags);
98 uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
99 spin_unlock_irqrestore(&rtc_lock, flags);
100 return uip;
101}
102
103static inline unsigned long mc146818_get_cmos_time(void) 89static inline unsigned long mc146818_get_cmos_time(void)
104{ 90{
105 unsigned int year, mon, day, hour, min, sec; 91 unsigned int year, mon, day, hour, min, sec;
106 int i;
107 unsigned long flags; 92 unsigned long flags;
108 93
109 /*
110 * The Linux interpretation of the CMOS clock register contents:
111 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
112 * RTC registers show the second which has precisely just started.
113 * Let's hope other operating systems interpret the RTC the same way.
114 */
115
116 /* read RTC exactly on falling edge of update flag */
117 for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
118 if (rtc_is_updating())
119 break;
120 for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
121 if (!rtc_is_updating())
122 break;
123
124 spin_lock_irqsave(&rtc_lock, flags); 94 spin_lock_irqsave(&rtc_lock, flags);
125 do { /* Isn't this overkill ? UIP above should guarantee consistency */ 95
96 do {
126 sec = CMOS_READ(RTC_SECONDS); 97 sec = CMOS_READ(RTC_SECONDS);
127 min = CMOS_READ(RTC_MINUTES); 98 min = CMOS_READ(RTC_MINUTES);
128 hour = CMOS_READ(RTC_HOURS); 99 hour = CMOS_READ(RTC_HOURS);
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index 19cdf7642e66..61cf22588137 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -33,12 +33,7 @@ extern unsigned long pgd_current[];
33 write_c0_context((unsigned long) smp_processor_id() << 25); \ 33 write_c0_context((unsigned long) smp_processor_id() << 25); \
34 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 34 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
35#endif 35#endif
36#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 36#ifdef CONFIG_64BIT
37#define TLBMISS_HANDLER_SETUP() \
38 write_c0_context((unsigned long) &pgd_current[smp_processor_id()] << 23); \
39 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
40#endif
41#if defined(CONFIG_64BIT) && defined(CONFIG_BUILD_ELF64)
42#define TLBMISS_HANDLER_SETUP() \ 37#define TLBMISS_HANDLER_SETUP() \
43 write_c0_context((unsigned long) smp_processor_id() << 26); \ 38 write_c0_context((unsigned long) smp_processor_id() << 26); \
44 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 39 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
diff --git a/include/asm-mips/mmzone.h b/include/asm-mips/mmzone.h
index 011caebac369..7bde4432092b 100644
--- a/include/asm-mips/mmzone.h
+++ b/include/asm-mips/mmzone.h
@@ -22,20 +22,6 @@
22 NODE_DATA(__n)->node_spanned_pages) : 0);\ 22 NODE_DATA(__n)->node_spanned_pages) : 0);\
23}) 23})
24 24
25#define pfn_to_page(pfn) \
26({ \
27 unsigned long __pfn = (pfn); \
28 pg_data_t *__pg = NODE_DATA(pfn_to_nid(__pfn)); \
29 __pg->node_mem_map + (__pfn - __pg->node_start_pfn); \
30})
31
32#define page_to_pfn(p) \
33({ \
34 struct page *__p = (p); \
35 struct zone *__z = page_zone(__p); \
36 ((__p - __z->zone_mem_map) + __z->zone_start_pfn); \
37})
38
39/* XXX: FIXME -- wli */ 25/* XXX: FIXME -- wli */
40#define kern_addr_valid(addr) (0) 26#define kern_addr_valid(addr) (0)
41 27
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index ee25a779bf49..a1eab136ff6c 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -140,8 +140,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
140#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 140#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
141 141
142#ifndef CONFIG_NEED_MULTIPLE_NODES 142#ifndef CONFIG_NEED_MULTIPLE_NODES
143#define pfn_to_page(pfn) (mem_map + (pfn))
144#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
145#define pfn_valid(pfn) ((pfn) < max_mapnr) 143#define pfn_valid(pfn) ((pfn) < max_mapnr)
146#endif 144#endif
147 145
@@ -160,6 +158,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
160#define WANT_PAGE_VIRTUAL 158#define WANT_PAGE_VIRTUAL
161#endif 159#endif
162 160
161#include <asm-generic/memory_model.h>
163#include <asm-generic/page.h> 162#include <asm-generic/page.h>
164 163
165#endif /* _ASM_PAGE_H */ 164#endif /* _ASM_PAGE_H */
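page.h (together with the mmzone.h hunk above) drops the hand-rolled pfn_to_page()/page_to_pfn() pair in favour of <asm-generic/memory_model.h>, which generates them from the configured memory model. For the flat-memory case the generic header boils down to roughly the following (a sketch, not the verbatim header; ARCH_PFN_OFFSET defaults to 0, matching the deleted definitions):

#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)

while the discontiguous-memory flavour performs the node lookup that mmzone.h used to open-code.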
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 0cff64ce0fb8..4d6bc45df594 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -206,7 +206,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
206 /* fixme */ 206 /* fixme */
207#define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f)) 207#define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f))
208#define pgoff_to_pte(off) \ 208#define pgoff_to_pte(off) \
209 ((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)}) 209 ((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)})
210 210
211#else 211#else
212#define pte_to_pgoff(_pte) \ 212#define pte_to_pgoff(_pte) \
diff --git a/include/asm-mips/poll.h b/include/asm-mips/poll.h
index a000f1f789e3..70881f8c5c50 100644
--- a/include/asm-mips/poll.h
+++ b/include/asm-mips/poll.h
@@ -17,6 +17,7 @@
17/* These seem to be more or less nonstandard ... */ 17/* These seem to be more or less nonstandard ... */
18#define POLLMSG 0x0400 18#define POLLMSG 0x0400
19#define POLLREMOVE 0x1000 19#define POLLREMOVE 0x1000
20#define POLLRDHUP 0x2000
20 21
21struct pollfd { 22struct pollfd {
22 int fd; 23 int fd;
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h
index 9632c27dad15..90c374700977 100644
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -257,7 +257,8 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
257 \ 257 \
258static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ 258static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
259{ \ 259{ \
260 unsigned long start = page; \ 260 unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
261 unsigned long start = INDEX_BASE + (page & indexmask); \
261 unsigned long end = start + PAGE_SIZE; \ 262 unsigned long end = start + PAGE_SIZE; \
262 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \ 263 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
263 unsigned long ws_end = current_cpu_data.desc.ways << \ 264 unsigned long ws_end = current_cpu_data.desc.ways << \
@@ -302,5 +303,6 @@ __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
302__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, ) 303__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
303/* blast_inv_dcache_range */ 304/* blast_inv_dcache_range */
304__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, ) 305__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
306__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
305 307
306#endif /* _ASM_R4KCACHE_H */ 308#endif /* _ASM_R4KCACHE_H */
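The indexed page-blast change above stops using the page's own address as the loop start and instead rebases the cache index at INDEX_BASE, masking with waysize - 1. A worked example under an assumed geometry (numbers invented for illustration): a 32 KB, 4-way data cache has waysize = 8 KB, so indexmask = 0x1fff and waybit = 13. For page = 0x12345000 that gives

	start  = INDEX_BASE + (0x12345000 & 0x1fff) = INDEX_BASE + 0x1000
	end    = start + PAGE_SIZE
	ws_inc = 1UL << 13 = 0x2000	/* step to the same index in the next way */
	ws_end = 4 << 13   = 0x8000	/* stop after the last way */

whereas the old code would have started the index walk at 0x12345000 itself.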
diff --git a/include/asm-mips/serial.h b/include/asm-mips/serial.h
index e796d75f027e..7b2366412203 100644
--- a/include/asm-mips/serial.h
+++ b/include/asm-mips/serial.h
@@ -103,88 +103,6 @@
103#define IVR_SERIAL_PORT_DEFNS 103#define IVR_SERIAL_PORT_DEFNS
104#endif 104#endif
105 105
106#ifdef CONFIG_SERIAL_AU1X00
107#include <asm/mach-au1x00/au1000.h>
108#ifdef CONFIG_SOC_AU1000
109#define AU1000_SERIAL_PORT_DEFNS \
110 { .baud_base = 0, .port = UART0_ADDR, \
111 .iomem_base = (unsigned char *)UART0_ADDR, \
112 .irq = AU1000_UART0_INT, .flags = STD_COM_FLAGS, \
113 .iomem_reg_shift = 2 }, \
114 { .baud_base = 0, .port = UART1_ADDR, \
115 .iomem_base = (unsigned char *)UART1_ADDR, \
116 .irq = AU1000_UART1_INT, .flags = STD_COM_FLAGS, \
117 .iomem_reg_shift = 2 }, \
118 { .baud_base = 0, .port = UART2_ADDR, \
119 .iomem_base = (unsigned char *)UART2_ADDR, \
120 .irq = AU1000_UART2_INT, .flags = STD_COM_FLAGS, \
121 .iomem_reg_shift = 2 }, \
122 { .baud_base = 0, .port = UART3_ADDR, \
123 .iomem_base = (unsigned char *)UART3_ADDR, \
124 .irq = AU1000_UART3_INT, .flags = STD_COM_FLAGS, \
125 .iomem_reg_shift = 2 },
126#endif
127
128#ifdef CONFIG_SOC_AU1500
129#define AU1000_SERIAL_PORT_DEFNS \
130 { .baud_base = 0, .port = UART0_ADDR, \
131 .iomem_base = (unsigned char *)UART0_ADDR, \
132 .irq = AU1500_UART0_INT, .flags = STD_COM_FLAGS, \
133 .iomem_reg_shift = 2 }, \
134 { .baud_base = 0, .port = UART3_ADDR, \
135 .iomem_base = (unsigned char *)UART3_ADDR, \
136 .irq = AU1500_UART3_INT, .flags = STD_COM_FLAGS, \
137 .iomem_reg_shift = 2 },
138#endif
139
140#ifdef CONFIG_SOC_AU1100
141#define AU1000_SERIAL_PORT_DEFNS \
142 { .baud_base = 0, .port = UART0_ADDR, \
143 .iomem_base = (unsigned char *)UART0_ADDR, \
144 .irq = AU1100_UART0_INT, .flags = STD_COM_FLAGS, \
145 .iomem_reg_shift = 2 }, \
146 { .baud_base = 0, .port = UART1_ADDR, \
147 .iomem_base = (unsigned char *)UART1_ADDR, \
148 .irq = AU1100_UART1_INT, .flags = STD_COM_FLAGS, \
149 .iomem_reg_shift = 2 }, \
150 { .baud_base = 0, .port = UART3_ADDR, \
151 .iomem_base = (unsigned char *)UART3_ADDR, \
152 .irq = AU1100_UART3_INT, .flags = STD_COM_FLAGS, \
153 .iomem_reg_shift = 2 },
154#endif
155
156#ifdef CONFIG_SOC_AU1550
157#define AU1000_SERIAL_PORT_DEFNS \
158 { .baud_base = 0, .port = UART0_ADDR, \
159 .iomem_base = (unsigned char *)UART0_ADDR, \
160 .irq = AU1550_UART0_INT, .flags = STD_COM_FLAGS, \
161 .iomem_reg_shift = 2 }, \
162 { .baud_base = 0, .port = UART1_ADDR, \
163 .iomem_base = (unsigned char *)UART1_ADDR, \
164 .irq = AU1550_UART1_INT, .flags = STD_COM_FLAGS, \
165 .iomem_reg_shift = 2 }, \
166 { .baud_base = 0, .port = UART3_ADDR, \
167 .iomem_base = (unsigned char *)UART3_ADDR, \
168 .irq = AU1550_UART3_INT, .flags = STD_COM_FLAGS,\
169 .iomem_reg_shift = 2 },
170#endif
171
172#ifdef CONFIG_SOC_AU1200
173#define AU1000_SERIAL_PORT_DEFNS \
174 { .baud_base = 0, .port = UART0_ADDR, \
175 .iomem_base = (unsigned char *)UART0_ADDR, \
176 .irq = AU1200_UART0_INT, .flags = STD_COM_FLAGS, \
177 .iomem_reg_shift = 2 }, \
178 { .baud_base = 0, .port = UART1_ADDR, \
179 .iomem_base = (unsigned char *)UART1_ADDR, \
180 .irq = AU1200_UART1_INT, .flags = STD_COM_FLAGS, \
181 .iomem_reg_shift = 2 },
182#endif
183
184#else
185#define AU1000_SERIAL_PORT_DEFNS
186#endif
187
188#ifdef CONFIG_HAVE_STD_PC_SERIAL_PORT 106#ifdef CONFIG_HAVE_STD_PC_SERIAL_PORT
189#define STD_SERIAL_PORT_DEFNS \ 107#define STD_SERIAL_PORT_DEFNS \
190 /* UART CLK PORT IRQ FLAGS */ \ 108 /* UART CLK PORT IRQ FLAGS */ \
@@ -331,7 +249,6 @@
331 MOMENCO_OCELOT_G_SERIAL_PORT_DEFNS \ 249 MOMENCO_OCELOT_G_SERIAL_PORT_DEFNS \
332 MOMENCO_OCELOT_C_SERIAL_PORT_DEFNS \ 250 MOMENCO_OCELOT_C_SERIAL_PORT_DEFNS \
333 MOMENCO_OCELOT_SERIAL_PORT_DEFNS \ 251 MOMENCO_OCELOT_SERIAL_PORT_DEFNS \
334 MOMENCO_OCELOT_3_SERIAL_PORT_DEFNS \ 252 MOMENCO_OCELOT_3_SERIAL_PORT_DEFNS
335 AU1000_SERIAL_PORT_DEFNS
336 253
337#endif /* _ASM_SERIAL_H */ 254#endif /* _ASM_SERIAL_H */
diff --git a/include/asm-mips/sibyte/sb1250.h b/include/asm-mips/sibyte/sb1250.h
index a474c29cd701..b09e16c93ca0 100644
--- a/include/asm-mips/sibyte/sb1250.h
+++ b/include/asm-mips/sibyte/sb1250.h
@@ -45,8 +45,8 @@ extern unsigned int soc_type;
45extern unsigned int periph_rev; 45extern unsigned int periph_rev;
46extern unsigned int zbbus_mhz; 46extern unsigned int zbbus_mhz;
47 47
48extern void sb1250_hpt_setup(void);
48extern void sb1250_time_init(void); 49extern void sb1250_time_init(void);
49extern unsigned long sb1250_gettimeoffset(void);
50extern void sb1250_mask_irq(int cpu, int irq); 50extern void sb1250_mask_irq(int cpu, int irq);
51extern void sb1250_unmask_irq(int cpu, int irq); 51extern void sb1250_unmask_irq(int cpu, int irq);
52extern void sb1250_smp_finish(void); 52extern void sb1250_smp_finish(void);
diff --git a/include/asm-mips/sibyte/sb1250_scd.h b/include/asm-mips/sibyte/sb1250_scd.h
index a667bc14a7cd..f4178bdcfcb0 100644
--- a/include/asm-mips/sibyte/sb1250_scd.h
+++ b/include/asm-mips/sibyte/sb1250_scd.h
@@ -359,14 +359,15 @@
359 */ 359 */
360 360
361#define V_SCD_TIMER_FREQ 1000000 361#define V_SCD_TIMER_FREQ 1000000
362#define V_SCD_TIMER_WIDTH 23
362 363
363#define S_SCD_TIMER_INIT 0 364#define S_SCD_TIMER_INIT 0
364#define M_SCD_TIMER_INIT _SB_MAKEMASK(20,S_SCD_TIMER_INIT) 365#define M_SCD_TIMER_INIT _SB_MAKEMASK(V_SCD_TIMER_WIDTH,S_SCD_TIMER_INIT)
365#define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_INIT) 366#define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_INIT)
366#define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x,S_SCD_TIMER_INIT,M_SCD_TIMER_INIT) 367#define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x,S_SCD_TIMER_INIT,M_SCD_TIMER_INIT)
367 368
368#define S_SCD_TIMER_CNT 0 369#define S_SCD_TIMER_CNT 0
369#define M_SCD_TIMER_CNT _SB_MAKEMASK(20,S_SCD_TIMER_CNT) 370#define M_SCD_TIMER_CNT _SB_MAKEMASK(V_SCD_TIMER_WIDTH,S_SCD_TIMER_CNT)
370#define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_CNT) 371#define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x,S_SCD_TIMER_CNT)
371#define G_SCD_TIMER_CNT(x) _SB_GETVALUE(x,S_SCD_TIMER_CNT,M_SCD_TIMER_CNT) 372#define G_SCD_TIMER_CNT(x) _SB_GETVALUE(x,S_SCD_TIMER_CNT,M_SCD_TIMER_CNT)
372 373
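
The point of the new V_SCD_TIMER_WIDTH constant is that the timer init and count fields grow from 20 to 23 bits and both masks stay in sync automatically. A small standalone sketch, assuming the usual _SB_MAKEMASK(width, shift) shape of ((1ULL << width) - 1) << shift (the real helper lives in sb1250_defs.h and is not part of this hunk):

	#include <stdio.h>
	#include <inttypes.h>

	/* Assumed shape of the Sibyte mask helper, for illustration only. */
	#define SB_MAKEMASK(width, shift) ((((uint64_t)1 << (width)) - 1) << (shift))

	int main(void)
	{
		/* Old: hard-coded 20-bit field.  New: 23 bits via the shared width. */
		printf("old mask: 0x%" PRIx64 "\n", SB_MAKEMASK(20, 0)); /* 0xfffff  */
		printf("new mask: 0x%" PRIx64 "\n", SB_MAKEMASK(23, 0)); /* 0x7fffff */
		return 0;
	}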
diff --git a/include/asm-mips/signal.h b/include/asm-mips/signal.h
index 6fe903e09c62..d8349e4b55ee 100644
--- a/include/asm-mips/signal.h
+++ b/include/asm-mips/signal.h
@@ -147,16 +147,34 @@ struct k_sigaction {
147 147
148/* IRIX compatible stack_t */ 148/* IRIX compatible stack_t */
149typedef struct sigaltstack { 149typedef struct sigaltstack {
150 void *ss_sp; 150 void __user *ss_sp;
151 size_t ss_size; 151 size_t ss_size;
152 int ss_flags; 152 int ss_flags;
153} stack_t; 153} stack_t;
154 154
155#ifdef __KERNEL__ 155#ifdef __KERNEL__
156#include <asm/sigcontext.h> 156#include <asm/sigcontext.h>
157#include <asm/siginfo.h>
157 158
158#define ptrace_signal_deliver(regs, cookie) do { } while (0) 159#define ptrace_signal_deliver(regs, cookie) do { } while (0)
159 160
161struct pt_regs;
162extern void do_signal(struct pt_regs *regs);
163extern void do_signal32(struct pt_regs *regs);
164
165extern int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
166 int signr, sigset_t *set);
167extern int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
168 int signr, sigset_t *set, siginfo_t *info);
169
170extern int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
171 int signr, sigset_t *set);
172extern int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
173 int signr, sigset_t *set, siginfo_t *info);
174
175extern int setup_rt_frame_n32(struct k_sigaction * ka, struct pt_regs *regs,
176 int signr, sigset_t *set, siginfo_t *info);
177
160#endif /* __KERNEL__ */ 178#endif /* __KERNEL__ */
161 179
162#endif /* _ASM_SIGNAL_H */ 180#endif /* _ASM_SIGNAL_H */
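
The __user annotation added to ss_sp is a sparse address-space marker: it records that the pointer refers to user memory and must not be dereferenced directly in kernel code. A hedged sketch of the access pattern it enforces; the helper name peek_altstack_word is illustrative, only copy_from_user() is a real kernel interface:

	#include <asm/uaccess.h>	/* copy_from_user() */
	#include <asm/errno.h>

	/* Sketch only: dereferencing uss->ss_sp directly would be flagged by
	 * sparse; user memory has to go through the uaccess helpers. */
	static int peek_altstack_word(const stack_t __user *uss, unsigned long *out)
	{
		stack_t ss;

		if (copy_from_user(&ss, uss, sizeof(ss)))
			return -EFAULT;
		/* ss.ss_sp is itself still a user pointer. */
		if (copy_from_user(out, ss.ss_sp, sizeof(*out)))
			return -EFAULT;
		return 0;
	}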
diff --git a/include/asm-mips/sn/klconfig.h b/include/asm-mips/sn/klconfig.h
index d028e28d6239..9709ff701d9b 100644
--- a/include/asm-mips/sn/klconfig.h
+++ b/include/asm-mips/sn/klconfig.h
@@ -99,7 +99,7 @@ typedef s32 klconf_off_t;
99#define ENABLE_BOARD 0x01 99#define ENABLE_BOARD 0x01
100#define FAILED_BOARD 0x02 100#define FAILED_BOARD 0x02
101#define DUPLICATE_BOARD 0x04 /* Boards like midplanes/routers which 101#define DUPLICATE_BOARD 0x04 /* Boards like midplanes/routers which
102 are discovered twice. Use one of them */ 102 are discovered twice. Use one of them */
103#define VISITED_BOARD 0x08 /* Used for compact hub numbering. */ 103#define VISITED_BOARD 0x08 /* Used for compact hub numbering. */
104#define LOCAL_MASTER_IO6 0x10 /* master io6 for that node */ 104#define LOCAL_MASTER_IO6 0x10 /* master io6 for that node */
105#define GLOBAL_MASTER_IO6 0x20 105#define GLOBAL_MASTER_IO6 0x20
diff --git a/include/asm-mips/sn/mapped_kernel.h b/include/asm-mips/sn/mapped_kernel.h
index 3a17846df849..59edb20f8ec5 100644
--- a/include/asm-mips/sn/mapped_kernel.h
+++ b/include/asm-mips/sn/mapped_kernel.h
@@ -23,11 +23,7 @@
23#include <linux/config.h> 23#include <linux/config.h>
24#include <asm/addrspace.h> 24#include <asm/addrspace.h>
25 25
26#ifdef CONFIG_BUILD_ELF64
27#define REP_BASE CAC_BASE 26#define REP_BASE CAC_BASE
28#else
29#define REP_BASE CKSEG0
30#endif
31 27
32#ifdef CONFIG_MAPPED_KERNEL 28#ifdef CONFIG_MAPPED_KERNEL
33 29
diff --git a/include/asm-mips/sn/sn0/hubio.h b/include/asm-mips/sn/sn0/hubio.h
index 80cf6a52ed3b..f314da21b970 100644
--- a/include/asm-mips/sn/sn0/hubio.h
+++ b/include/asm-mips/sn/sn0/hubio.h
@@ -229,7 +229,7 @@ typedef union hubii_ilcsr_u {
229 icsr_llp_en: 1, /* LLP enable bit */ 229 icsr_llp_en: 1, /* LLP enable bit */
230		icsr_rsvd2:	  1,	/* reserved */			230		icsr_rsvd2:	  1,	/* reserved */
231 icsr_wrm_reset: 1, /* Warm reset bit */ 231 icsr_wrm_reset: 1, /* Warm reset bit */
232 icsr_rsvd1: 2, /* Data ready offset */ 232 icsr_rsvd1: 2, /* Data ready offset */
233 icsr_null_to: 6; /* Null timeout */ 233 icsr_null_to: 6; /* Null timeout */
234 234
235 } icsr_fields_s; 235 } icsr_fields_s;
@@ -274,9 +274,9 @@ typedef union io_perf_sel {
274 u64 perf_sel_reg; 274 u64 perf_sel_reg;
275 struct { 275 struct {
276 u64 perf_rsvd : 48, 276 u64 perf_rsvd : 48,
277 perf_icct : 8, 277 perf_icct : 8,
278 perf_ippr1 : 4, 278 perf_ippr1 : 4,
279 perf_ippr0 : 4; 279 perf_ippr0 : 4;
280 } perf_sel_bits; 280 } perf_sel_bits;
281} io_perf_sel_t; 281} io_perf_sel_t;
282 282
@@ -287,8 +287,8 @@ typedef union io_perf_cnt {
287 u64 perf_cnt; 287 u64 perf_cnt;
288 struct { 288 struct {
289 u64 perf_rsvd1 : 32, 289 u64 perf_rsvd1 : 32,
290 perf_rsvd2 : 12, 290 perf_rsvd2 : 12,
291 perf_cnt : 20; 291 perf_cnt : 20;
292 } perf_cnt_bits; 292 } perf_cnt_bits;
293} io_perf_cnt_t; 293} io_perf_cnt_t;
294 294
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index a8919dcc93c8..2acf3e844f00 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -63,17 +63,7 @@
63 addu k1, k0 63 addu k1, k0
64 LONG_L k1, %lo(kernelsp)(k1) 64 LONG_L k1, %lo(kernelsp)(k1)
65#endif 65#endif
66#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 66#ifdef CONFIG_64BIT
67 MFC0 k1, CP0_CONTEXT
68 dsra k1, 23
69 lui k0, %hi(pgd_current)
70 addiu k0, %lo(pgd_current)
71 dsubu k1, k0
72 lui k0, %hi(kernelsp)
73 daddu k1, k0
74 LONG_L k1, %lo(kernelsp)(k1)
75#endif
76#if defined(CONFIG_64BIT) && defined(CONFIG_BUILD_ELF64)
77 MFC0 k1, CP0_CONTEXT 67 MFC0 k1, CP0_CONTEXT
78 lui k0, %highest(kernelsp) 68 lui k0, %highest(kernelsp)
79 dsrl k1, 23 69 dsrl k1, 23
@@ -91,11 +81,7 @@
91 mfc0 \temp, CP0_CONTEXT 81 mfc0 \temp, CP0_CONTEXT
92 srl \temp, 23 82 srl \temp, 23
93#endif 83#endif
94#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 84#ifdef CONFIG_64BIT
95 lw \temp, TI_CPU(gp)
96 dsll \temp, 3
97#endif
98#if defined(CONFIG_64BIT) && defined(CONFIG_BUILD_ELF64)
99 MFC0 \temp, CP0_CONTEXT 85 MFC0 \temp, CP0_CONTEXT
100 dsrl \temp, 23 86 dsrl \temp, 23
101#endif 87#endif
@@ -103,7 +89,7 @@
103 .endm 89 .endm
104#else 90#else
105 .macro get_saved_sp /* Uniprocessor variation */ 91 .macro get_saved_sp /* Uniprocessor variation */
106#if defined(CONFIG_64BIT) && defined(CONFIG_BUILD_ELF64) 92#ifdef CONFIG_64BIT
107 lui k1, %highest(kernelsp) 93 lui k1, %highest(kernelsp)
108 daddiu k1, %higher(kernelsp) 94 daddiu k1, %higher(kernelsp)
109 dsll k1, k1, 16 95 dsll k1, k1, 16
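
The %highest/%higher/%hi/%lo sequence that remains here builds the full 64-bit address of kernelsp sixteen bits at a time; the removed non-ELF64 variants were workarounds for builds that could not use those relocations. Roughly, ignoring the carry adjustments the assembler applies to each piece, the composition looks like the sketch below (illustrative C, not code from this tree):

	#include <stdint.h>

	/* Sketch: how the lui/daddiu/dsll sequence composes a 64-bit symbol
	 * address from 16-bit relocation pieces (carry fix-ups omitted). */
	static uint64_t compose(uint16_t highest, uint16_t higher,
				uint16_t hi, uint16_t lo)
	{
		uint64_t a;

		a  = (uint64_t)highest << 16;	/* lui    k1, %highest(kernelsp) */
		a += higher;			/* daddiu k1, %higher(kernelsp)  */
		a <<= 16;			/* dsll   k1, k1, 16             */
		a += hi;			/* daddiu k1, %hi(kernelsp)      */
		a <<= 16;			/* dsll   k1, k1, 16             */
		return a + lo;			/* %lo() folded into the load    */
	}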
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index ddae9bae31af..4097fac5ac3c 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -286,10 +286,10 @@ extern void __xchg_called_with_bad_pointer(void);
286static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) 286static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
287{ 287{
288 switch (size) { 288 switch (size) {
289 case 4: 289 case 4:
290 return __xchg_u32(ptr, x); 290 return __xchg_u32(ptr, x);
291 case 8: 291 case 8:
292 return __xchg_u64(ptr, x); 292 return __xchg_u64(ptr, x);
293 } 293 }
294 __xchg_called_with_bad_pointer(); 294 __xchg_called_with_bad_pointer();
295 return x; 295 return x;
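
Call sites are untouched by this hunk: only sizes 4 and 8 are handled, and any other size falls through to __xchg_called_with_bad_pointer(), which is deliberately left undefined so the mistake surfaces at link time. A short usage sketch via the usual xchg() wrapper macro (the helper name claim_token is illustrative):

	/* Sketch: atomically publish a new value and return the old one.
	 * sizeof(unsigned long) is 4 or 8, so __xchg handles it. */
	static unsigned long claim_token(volatile unsigned long *token)
	{
		return xchg(token, 1UL);
	}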
diff --git a/include/asm-mips/termbits.h b/include/asm-mips/termbits.h
index c29c65b7818e..fa6d04dac56b 100644
--- a/include/asm-mips/termbits.h
+++ b/include/asm-mips/termbits.h
@@ -77,7 +77,7 @@ struct termios {
77#define IXANY 0004000 /* Any character will restart after stop. */ 77#define IXANY 0004000 /* Any character will restart after stop. */
78#define IXOFF 0010000 /* Enable start/stop input control. */ 78#define IXOFF 0010000 /* Enable start/stop input control. */
79#define IMAXBEL 0020000 /* Ring bell when input queue is full. */ 79#define IMAXBEL 0020000 /* Ring bell when input queue is full. */
80#define IUTF8 0040000 /* Input is UTF8 */ 80#define IUTF8 0040000 /* Input is UTF-8 */
81 81
82/* c_oflag bits */ 82/* c_oflag bits */
83#define OPOST 0000001 /* Perform output processing. */ 83#define OPOST 0000001 /* Perform output processing. */
diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h
index fa193f861e71..f8d97dafd2f4 100644
--- a/include/asm-mips/thread_info.h
+++ b/include/asm-mips/thread_info.h
@@ -31,7 +31,7 @@ struct thread_info {
31 int preempt_count; /* 0 => preemptable, <0 => BUG */ 31 int preempt_count; /* 0 => preemptable, <0 => BUG */
32 32
33 mm_segment_t addr_limit; /* thread address space: 33 mm_segment_t addr_limit; /* thread address space:
34						0-0xBFFFFFFF for user-thread	34						0-0xBFFFFFFF for user-thread
35 0-0xFFFFFFFF for kernel-thread 35 0-0xFFFFFFFF for kernel-thread
36 */ 36 */
37 struct restart_block restart_block; 37 struct restart_block restart_block;
diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h
index 9cc3564cc2c9..d897c8bb554d 100644
--- a/include/asm-mips/time.h
+++ b/include/asm-mips/time.h
@@ -26,14 +26,14 @@ extern spinlock_t rtc_lock;
26 26
27/* 27/*
28 * RTC ops. By default, they point to no-RTC functions. 28 * RTC ops. By default, they point to no-RTC functions.
29 * rtc_get_time - mktime(year, mon, day, hour, min, sec) in seconds. 29 * rtc_mips_get_time - mktime(year, mon, day, hour, min, sec) in seconds.
30 * rtc_set_time - reverse the above translation and set time to RTC. 30 * rtc_mips_set_time - reverse the above translation and set time to RTC.
31 * rtc_set_mmss - similar to rtc_set_time, but only min and sec need 31 * rtc_mips_set_mmss - similar to rtc_set_time, but only min and sec need
32 * to be set. Used by RTC sync-up. 32 * to be set. Used by RTC sync-up.
33 */ 33 */
34extern unsigned long (*rtc_get_time)(void); 34extern unsigned long (*rtc_mips_get_time)(void);
35extern int (*rtc_set_time)(unsigned long); 35extern int (*rtc_mips_set_time)(unsigned long);
36extern int (*rtc_set_mmss)(unsigned long); 36extern int (*rtc_mips_set_mmss)(unsigned long);
37 37
38/* 38/*
39 * Timer interrupt functions. 39 * Timer interrupt functions.
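
Platform setup code follows the rename: a board's time initialization now assigns the rtc_mips_* pointers instead of the old names. A minimal sketch; my_board_read_rtc()/my_board_write_rtc() and plat_time_setup() are placeholders, not functions from this tree:

	/* Sketch of a platform hooking up the renamed RTC ops. */
	void plat_time_setup(void)
	{
		rtc_mips_get_time = my_board_read_rtc;	/* seconds since epoch */
		rtc_mips_set_time = my_board_write_rtc;	/* reverse translation */
	}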
diff --git a/include/asm-mips/types.h b/include/asm-mips/types.h
index 421b3aea14cc..cd2813d8e136 100644
--- a/include/asm-mips/types.h
+++ b/include/asm-mips/types.h
@@ -99,6 +99,11 @@ typedef u64 sector_t;
99#define HAVE_SECTOR_T 99#define HAVE_SECTOR_T
100#endif 100#endif
101 101
102#ifdef CONFIG_LSF
103typedef u64 blkcnt_t;
104#define HAVE_BLKCNT_T
105#endif
106
102#endif /* __ASSEMBLY__ */ 107#endif /* __ASSEMBLY__ */
103 108
104#endif /* __KERNEL__ */ 109#endif /* __KERNEL__ */
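
Under CONFIG_LSF the block count type becomes 64-bit even on 32-bit MIPS, mirroring the sector_t handling just above it: with 512-byte blocks a 32-bit count tops out around 2 TiB. A small illustrative sketch (values chosen only to show the overflow point):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t blkcnt_t;	/* as under CONFIG_LSF */

	int main(void)
	{
		/* 2^33 blocks would wrap a 32-bit count; as a 64-bit blkcnt_t
		 * it cleanly describes a 4 TiB file. */
		blkcnt_t blocks = (blkcnt_t)1 << 33;
		printf("%llu blocks = %llu TiB\n",
		       (unsigned long long)blocks,
		       (unsigned long long)((blocks * 512) >> 40));
		return 0;
	}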