-rw-r--r--  arch/mips/Kconfig                |   18
-rw-r--r--  arch/mips/kernel/linux32.c       |  578
-rw-r--r--  arch/mips/kernel/scall64-n32.S   |   14
-rw-r--r--  arch/mips/kernel/smp.c           |    6
-rw-r--r--  arch/mips/lib-32/Makefile        |    2
-rw-r--r--  arch/mips/lib-32/csum_partial.S  |  240
-rw-r--r--  arch/mips/lib-64/Makefile        |    2
-rw-r--r--  arch/mips/lib-64/csum_partial.S  |  242
-rw-r--r--  arch/mips/lib/Makefile           |    4
-rw-r--r--  arch/mips/lib/csum_partial.S     |  258
-rw-r--r--  arch/mips/sibyte/swarm/setup.c   |    8
-rw-r--r--  include/asm-mips/atomic.h        |   39
-rw-r--r--  include/asm-mips/barrier.h       |  132
-rw-r--r--  include/asm-mips/bitops.h        |   27
-rw-r--r--  include/asm-mips/compat.h        |   68
-rw-r--r--  include/asm-mips/futex.h         |   22
-rw-r--r--  include/asm-mips/sn/klconfig.h   |    2
-rw-r--r--  include/asm-mips/spinlock.h      |   53
-rw-r--r--  include/asm-mips/system.h        |  156
19 files changed, 605 insertions(+), 1266 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 27f83e642968..4d64960be035 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -16,6 +16,7 @@ config MIPS_MTX1
 	bool "4G Systems MTX-1 board"
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
+	select RESOURCES_64BIT if PCI
 	select SOC_AU1500
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -32,6 +33,7 @@ config MIPS_PB1000
 	select SOC_AU1000
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
+	select RESOURCES_64BIT if PCI
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -41,6 +43,7 @@ config MIPS_PB1100
 	select SOC_AU1100
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
+	select RESOURCES_64BIT if PCI
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -50,6 +53,7 @@ config MIPS_PB1500
 	select SOC_AU1500
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -59,6 +63,7 @@ config MIPS_PB1550
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
 	select MIPS_DISABLE_OBSOLETE_IDE
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -67,6 +72,7 @@ config MIPS_PB1200
 	select SOC_AU1200
 	select DMA_NONCOHERENT
 	select MIPS_DISABLE_OBSOLETE_IDE
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -75,6 +81,7 @@ config MIPS_DB1000
 	select SOC_AU1000
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -91,6 +98,7 @@ config MIPS_DB1500
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
 	select MIPS_DISABLE_OBSOLETE_IDE
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -101,6 +109,7 @@ config MIPS_DB1550
 	select HW_HAS_PCI
 	select DMA_NONCOHERENT
 	select MIPS_DISABLE_OBSOLETE_IDE
+	select RESOURCES_64BIT if PCI
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -1268,6 +1277,7 @@ config CPU_RM9000
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
+	select WEAK_ORDERING
 
 config CPU_SB1
 	bool "SB1"
@@ -1276,6 +1286,7 @@ config CPU_SB1
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
+	select WEAK_ORDERING
 
 endchoice
 
@@ -1336,6 +1347,8 @@ config SYS_HAS_CPU_RM9000
 config SYS_HAS_CPU_SB1
 	bool
 
+config WEAK_ORDERING
+	bool
 endmenu
 
 #
@@ -1940,6 +1953,11 @@ config COMPAT
 	depends on MIPS32_COMPAT
 	default y
 
+config SYSVIPC_COMPAT
+	bool
+	depends on COMPAT && SYSVIPC
+	default y
+
 config MIPS32_O32
 	bool "Kernel support for o32 binaries"
 	depends on MIPS32_COMPAT
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 7a3ebbeba1f3..b061c9aa6302 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -382,531 +382,6 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
 	return ret;
 }
 
-struct msgbuf32 { s32 mtype; char mtext[1]; };
-
-struct ipc_perm32
-{
-	key_t key;
-	__compat_uid_t uid;
-	__compat_gid_t gid;
-	__compat_uid_t cuid;
-	__compat_gid_t cgid;
-	compat_mode_t mode;
-	unsigned short seq;
-};
-
-struct ipc64_perm32 {
-	key_t key;
-	__compat_uid_t uid;
-	__compat_gid_t gid;
-	__compat_uid_t cuid;
-	__compat_gid_t cgid;
-	compat_mode_t mode;
-	unsigned short seq;
-	unsigned short __pad1;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct semid_ds32 {
-	struct ipc_perm32 sem_perm;	/* permissions .. see ipc.h */
-	compat_time_t sem_otime;	/* last semop time */
-	compat_time_t sem_ctime;	/* last change time */
-	u32 sem_base;			/* ptr to first semaphore in array */
-	u32 sem_pending;		/* pending operations to be processed */
-	u32 sem_pending_last;		/* last pending operation */
-	u32 undo;			/* undo requests on this array */
-	unsigned short sem_nsems;	/* no. of semaphores in array */
-};
-
-struct semid64_ds32 {
-	struct ipc64_perm32 sem_perm;
-	compat_time_t sem_otime;
-	compat_time_t sem_ctime;
-	unsigned int sem_nsems;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct msqid_ds32
-{
-	struct ipc_perm32 msg_perm;
-	u32 msg_first;
-	u32 msg_last;
-	compat_time_t msg_stime;
-	compat_time_t msg_rtime;
-	compat_time_t msg_ctime;
-	u32 wwait;
-	u32 rwait;
-	unsigned short msg_cbytes;
-	unsigned short msg_qnum;
-	unsigned short msg_qbytes;
-	compat_ipc_pid_t msg_lspid;
-	compat_ipc_pid_t msg_lrpid;
-};
-
-struct msqid64_ds32 {
-	struct ipc64_perm32 msg_perm;
-	compat_time_t msg_stime;
-	unsigned int __unused1;
-	compat_time_t msg_rtime;
-	unsigned int __unused2;
-	compat_time_t msg_ctime;
-	unsigned int __unused3;
-	unsigned int msg_cbytes;
-	unsigned int msg_qnum;
-	unsigned int msg_qbytes;
-	compat_pid_t msg_lspid;
-	compat_pid_t msg_lrpid;
-	unsigned int __unused4;
-	unsigned int __unused5;
-};
-
-struct shmid_ds32 {
-	struct ipc_perm32 shm_perm;
-	int shm_segsz;
-	compat_time_t shm_atime;
-	compat_time_t shm_dtime;
-	compat_time_t shm_ctime;
-	compat_ipc_pid_t shm_cpid;
-	compat_ipc_pid_t shm_lpid;
-	unsigned short shm_nattch;
-};
-
-struct shmid64_ds32 {
-	struct ipc64_perm32 shm_perm;
-	compat_size_t shm_segsz;
-	compat_time_t shm_atime;
-	compat_time_t shm_dtime;
-	compat_time_t shm_ctime;
-	compat_pid_t shm_cpid;
-	compat_pid_t shm_lpid;
-	unsigned int shm_nattch;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct ipc_kludge32 {
-	u32 msgp;
-	s32 msgtyp;
-};
-
-static int
-do_sys32_semctl(int first, int second, int third, void __user *uptr)
-{
-	union semun fourth;
-	u32 pad;
-	int err, err2;
-	struct semid64_ds s;
-	mm_segment_t old_fs;
-
-	if (!uptr)
-		return -EINVAL;
-	err = -EFAULT;
-	if (get_user (pad, (u32 __user *)uptr))
-		return err;
-	if ((third & ~IPC_64) == SETVAL)
-		fourth.val = (int)pad;
-	else
-		fourth.__pad = (void __user *)A(pad);
-	switch (third & ~IPC_64) {
-	case IPC_INFO:
-	case IPC_RMID:
-	case IPC_SET:
-	case SEM_INFO:
-	case GETVAL:
-	case GETPID:
-	case GETNCNT:
-	case GETZCNT:
-	case GETALL:
-	case SETVAL:
-	case SETALL:
-		err = sys_semctl (first, second, third, fourth);
-		break;
-
-	case IPC_STAT:
-	case SEM_STAT:
-		fourth.__pad = (struct semid64_ds __user *)&s;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_semctl(first, second, third | IPC_64, fourth);
-		set_fs(old_fs);
-
-		if (third & IPC_64) {
-			struct semid64_ds32 __user *usp64 = (struct semid64_ds32 __user *) A(pad);
-
-			if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
-			err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
-			err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
-			err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
-			err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
-			err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
-			err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
-			err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
-			err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
-			err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
-		} else {
-			struct semid_ds32 __user *usp32 = (struct semid_ds32 __user *) A(pad);
-
-			if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
-			err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
-			err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
-			err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
-			err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
-			err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
-			err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
-			err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
-			err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
-			err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
-		}
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	default:
-		err = - EINVAL;
-		break;
-	}
-
-	return err;
-}
-
-static int
-do_sys32_msgsnd (int first, int second, int third, void __user *uptr)
-{
-	struct msgbuf32 __user *up = (struct msgbuf32 __user *)uptr;
-	struct msgbuf *p;
-	mm_segment_t old_fs;
-	int err;
-
-	if (second < 0)
-		return -EINVAL;
-	p = kmalloc (second + sizeof (struct msgbuf)
-				    + 4, GFP_USER);
-	if (!p)
-		return -ENOMEM;
-	err = get_user (p->mtype, &up->mtype);
-	if (err)
-		goto out;
-	err |= __copy_from_user (p->mtext, &up->mtext, second);
-	if (err)
-		goto out;
-	old_fs = get_fs ();
-	set_fs (KERNEL_DS);
-	err = sys_msgsnd (first, (struct msgbuf __user *)p, second, third);
-	set_fs (old_fs);
-out:
-	kfree (p);
-
-	return err;
-}
-
-static int
-do_sys32_msgrcv (int first, int second, int msgtyp, int third,
-		 int version, void __user *uptr)
-{
-	struct msgbuf32 __user *up;
-	struct msgbuf *p;
-	mm_segment_t old_fs;
-	int err;
-
-	if (!version) {
-		struct ipc_kludge32 __user *uipck = (struct ipc_kludge32 __user *)uptr;
-		struct ipc_kludge32 ipck;
-
-		err = -EINVAL;
-		if (!uptr)
-			goto out;
-		err = -EFAULT;
-		if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge32)))
-			goto out;
-		uptr = (void __user *)AA(ipck.msgp);
-		msgtyp = ipck.msgtyp;
-	}
-
-	if (second < 0)
-		return -EINVAL;
-	err = -ENOMEM;
-	p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
-	if (!p)
-		goto out;
-	old_fs = get_fs ();
-	set_fs (KERNEL_DS);
-	err = sys_msgrcv (first, (struct msgbuf __user *)p, second + 4, msgtyp, third);
-	set_fs (old_fs);
-	if (err < 0)
-		goto free_then_out;
-	up = (struct msgbuf32 __user *)uptr;
-	if (put_user (p->mtype, &up->mtype) ||
-	    __copy_to_user (&up->mtext, p->mtext, err))
-		err = -EFAULT;
-free_then_out:
-	kfree (p);
-out:
-	return err;
-}
-
-static int
-do_sys32_msgctl (int first, int second, void __user *uptr)
-{
-	int err = -EINVAL, err2;
-	struct msqid64_ds m;
-	struct msqid_ds32 __user *up32 = (struct msqid_ds32 __user *)uptr;
-	struct msqid64_ds32 __user *up64 = (struct msqid64_ds32 __user *)uptr;
-	mm_segment_t old_fs;
-
-	switch (second & ~IPC_64) {
-	case IPC_INFO:
-	case IPC_RMID:
-	case MSG_INFO:
-		err = sys_msgctl (first, second, (struct msqid_ds __user *)uptr);
-		break;
-
-	case IPC_SET:
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err = __get_user(m.msg_perm.uid, &up64->msg_perm.uid);
-			err |= __get_user(m.msg_perm.gid, &up64->msg_perm.gid);
-			err |= __get_user(m.msg_perm.mode, &up64->msg_perm.mode);
-			err |= __get_user(m.msg_qbytes, &up64->msg_qbytes);
-		} else {
-			if (!access_ok(VERIFY_READ, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err = __get_user(m.msg_perm.uid, &up32->msg_perm.uid);
-			err |= __get_user(m.msg_perm.gid, &up32->msg_perm.gid);
-			err |= __get_user(m.msg_perm.mode, &up32->msg_perm.mode);
-			err |= __get_user(m.msg_qbytes, &up32->msg_qbytes);
-		}
-		if (err)
-			break;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-		set_fs(old_fs);
-		break;
-
-	case IPC_STAT:
-	case MSG_STAT:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-		set_fs(old_fs);
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(m.msg_perm.key, &up64->msg_perm.key);
-			err2 |= __put_user(m.msg_perm.uid, &up64->msg_perm.uid);
-			err2 |= __put_user(m.msg_perm.gid, &up64->msg_perm.gid);
-			err2 |= __put_user(m.msg_perm.cuid, &up64->msg_perm.cuid);
-			err2 |= __put_user(m.msg_perm.cgid, &up64->msg_perm.cgid);
-			err2 |= __put_user(m.msg_perm.mode, &up64->msg_perm.mode);
-			err2 |= __put_user(m.msg_perm.seq, &up64->msg_perm.seq);
-			err2 |= __put_user(m.msg_stime, &up64->msg_stime);
-			err2 |= __put_user(m.msg_rtime, &up64->msg_rtime);
-			err2 |= __put_user(m.msg_ctime, &up64->msg_ctime);
-			err2 |= __put_user(m.msg_cbytes, &up64->msg_cbytes);
-			err2 |= __put_user(m.msg_qnum, &up64->msg_qnum);
-			err2 |= __put_user(m.msg_qbytes, &up64->msg_qbytes);
-			err2 |= __put_user(m.msg_lspid, &up64->msg_lspid);
-			err2 |= __put_user(m.msg_lrpid, &up64->msg_lrpid);
-			if (err2)
-				err = -EFAULT;
-		} else {
-			if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(m.msg_perm.key, &up32->msg_perm.key);
-			err2 |= __put_user(m.msg_perm.uid, &up32->msg_perm.uid);
-			err2 |= __put_user(m.msg_perm.gid, &up32->msg_perm.gid);
-			err2 |= __put_user(m.msg_perm.cuid, &up32->msg_perm.cuid);
-			err2 |= __put_user(m.msg_perm.cgid, &up32->msg_perm.cgid);
-			err2 |= __put_user(m.msg_perm.mode, &up32->msg_perm.mode);
-			err2 |= __put_user(m.msg_perm.seq, &up32->msg_perm.seq);
-			err2 |= __put_user(m.msg_stime, &up32->msg_stime);
-			err2 |= __put_user(m.msg_rtime, &up32->msg_rtime);
-			err2 |= __put_user(m.msg_ctime, &up32->msg_ctime);
-			err2 |= __put_user(m.msg_cbytes, &up32->msg_cbytes);
-			err2 |= __put_user(m.msg_qnum, &up32->msg_qnum);
-			err2 |= __put_user(m.msg_qbytes, &up32->msg_qbytes);
-			err2 |= __put_user(m.msg_lspid, &up32->msg_lspid);
-			err2 |= __put_user(m.msg_lrpid, &up32->msg_lrpid);
-			if (err2)
-				err = -EFAULT;
-		}
-		break;
-	}
-
-	return err;
-}
-
-static int
-do_sys32_shmat (int first, int second, int third, int version, void __user *uptr)
-{
-	unsigned long raddr;
-	u32 __user *uaddr = (u32 __user *)A((u32)third);
-	int err = -EINVAL;
-
-	if (version == 1)
-		return err;
-	err = do_shmat (first, uptr, second, &raddr);
-	if (err)
-		return err;
-	err = put_user (raddr, uaddr);
-	return err;
-}
-
-struct shm_info32 {
-	int used_ids;
-	u32 shm_tot, shm_rss, shm_swp;
-	u32 swap_attempts, swap_successes;
-};
-
-static int
-do_sys32_shmctl (int first, int second, void __user *uptr)
-{
-	struct shmid64_ds32 __user *up64 = (struct shmid64_ds32 __user *)uptr;
-	struct shmid_ds32 __user *up32 = (struct shmid_ds32 __user *)uptr;
-	struct shm_info32 __user *uip = (struct shm_info32 __user *)uptr;
-	int err = -EFAULT, err2;
-	struct shmid64_ds s64;
-	mm_segment_t old_fs;
-	struct shm_info si;
-	struct shmid_ds s;
-
-	switch (second & ~IPC_64) {
-	case IPC_INFO:
-		second = IPC_INFO; /* So that we don't have to translate it */
-	case IPC_RMID:
-	case SHM_LOCK:
-	case SHM_UNLOCK:
-		err = sys_shmctl(first, second, (struct shmid_ds __user *)uptr);
-		break;
-	case IPC_SET:
-		if (second & IPC_64) {
-			err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
-			err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
-			err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
-		} else {
-			err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
-			err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
-			err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
-		}
-		if (err)
-			break;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second & ~IPC_64, (struct shmid_ds __user *)&s);
-		set_fs(old_fs);
-		break;
-
-	case IPC_STAT:
-	case SHM_STAT:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second | IPC_64, (void __user *) &s64);
-		set_fs(old_fs);
-		if (err < 0)
-			break;
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
-			err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
-			err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
-			err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
-			err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
-			err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
-			err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
-			err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
-			err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
-			err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
-			err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
-			err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
-			err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
-			err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
-		} else {
-			if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
-			err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
-			err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
-			err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
-			err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
-			err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
-			err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
-			err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
-			err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
-			err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
-			err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
-			err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
-			err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
-			err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
-		}
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	case SHM_INFO:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second, (void __user *)&si);
-		set_fs(old_fs);
-		if (err < 0)
-			break;
-		err2 = put_user(si.used_ids, &uip->used_ids);
-		err2 |= __put_user(si.shm_tot, &uip->shm_tot);
-		err2 |= __put_user(si.shm_rss, &uip->shm_rss);
-		err2 |= __put_user(si.shm_swp, &uip->shm_swp);
-		err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
-		err2 |= __put_user (si.swap_successes, &uip->swap_successes);
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	default:
-		err = -EINVAL;
-		break;
-	}
-
-	return err;
-}
-
-static int sys32_semtimedop(int semid, struct sembuf __user *tsems, int nsems,
-			    const struct compat_timespec __user *timeout32)
-{
-	struct compat_timespec t32;
-	struct timespec __user *t64 = compat_alloc_user_space(sizeof(*t64));
-
-	if (copy_from_user(&t32, timeout32, sizeof(t32)))
-		return -EFAULT;
-
-	if (put_user(t32.tv_sec, &t64->tv_sec) ||
-	    put_user(t32.tv_nsec, &t64->tv_nsec))
-		return -EFAULT;
-
-	return sys_semtimedop(semid, tsems, nsems, t64);
-}
-
 asmlinkage long
 sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 {
@@ -918,48 +393,43 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 	switch (call) {
 	case SEMOP:
 		/* struct sembuf is the same on 32 and 64bit :)) */
-		err = sys_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
-				      NULL);
+		err = sys_semtimedop(first, compat_ptr(ptr), second, NULL);
 		break;
 	case SEMTIMEDOP:
-		err = sys32_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
-					(const struct compat_timespec __user *)AA(fifth));
+		err = compat_sys_semtimedop(first, compat_ptr(ptr), second,
+					    compat_ptr(fifth));
 		break;
 	case SEMGET:
-		err = sys_semget (first, second, third);
+		err = sys_semget(first, second, third);
 		break;
 	case SEMCTL:
-		err = do_sys32_semctl (first, second, third,
-				       (void __user *)AA(ptr));
+		err = compat_sys_semctl(first, second, third, compat_ptr(ptr));
 		break;
-
 	case MSGSND:
-		err = do_sys32_msgsnd (first, second, third,
-				       (void __user *)AA(ptr));
+		err = compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
 		break;
 	case MSGRCV:
-		err = do_sys32_msgrcv (first, second, fifth, third,
-				       version, (void __user *)AA(ptr));
+		err = compat_sys_msgrcv(first, second, fifth, third,
+					version, compat_ptr(ptr));
 		break;
 	case MSGGET:
-		err = sys_msgget ((key_t) first, second);
+		err = sys_msgget((key_t) first, second);
 		break;
 	case MSGCTL:
-		err = do_sys32_msgctl (first, second, (void __user *)AA(ptr));
+		err = compat_sys_msgctl(first, second, compat_ptr(ptr));
 		break;
-
 	case SHMAT:
-		err = do_sys32_shmat (first, second, third,
-				      version, (void __user *)AA(ptr));
+		err = compat_sys_shmat(first, second, third, version,
+				       compat_ptr(ptr));
 		break;
 	case SHMDT:
-		err = sys_shmdt ((char __user *)A(ptr));
+		err = sys_shmdt(compat_ptr(ptr));
 		break;
 	case SHMGET:
-		err = sys_shmget (first, (unsigned)second, third);
+		err = sys_shmget(first, (unsigned)second, third);
 		break;
 	case SHMCTL:
-		err = do_sys32_shmctl (first, second, (void __user *)AA(ptr));
+		err = compat_sys_shmctl(first, second, compat_ptr(ptr));
 		break;
 	default:
 		err = -EINVAL;
@@ -969,18 +439,16 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 	return err;
 }
 
-asmlinkage long sys32_shmat(int shmid, char __user *shmaddr,
-			  int shmflg, int32_t __user *addr)
+#ifdef CONFIG_MIPS32_N32
+asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, union semun arg)
 {
-	unsigned long raddr;
-	int err;
-
-	err = do_shmat(shmid, shmaddr, shmflg, &raddr);
-	if (err)
-		return err;
-
-	return put_user(raddr, addr);
+	/* compat_sys_semctl expects a pointer to union semun */
+	u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
+	if (put_user(ptr_to_compat(arg.__pad), uptr))
+		return -EFAULT;
+	return compat_sys_semctl(semid, semnum, cmd, uptr);
 }
+#endif
 
 struct sysctl_args32
 {
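
The rewrite above drops the hand-rolled do_sys32_* translators in favour of the generic compat_sys_* IPC helpers, and widens 32-bit user pointers with compat_ptr() instead of the local A()/AA() macros. A minimal sketch of the idea behind compat_ptr(), with simplified types (this is not the kernel's exact text):

	typedef unsigned int compat_uptr_t;	/* 32-bit user pointer, as an integer */

	/* Zero-extend a 32-bit user address to a full-width pointer.  Going
	 * through unsigned long avoids the sign extension that a direct
	 * integer-to-pointer cast of a signed 32-bit value would perform on
	 * a 64-bit kernel. */
	static inline void *compat_ptr_sketch(compat_uptr_t uptr)
	{
		return (void *)(unsigned long)uptr;
	}
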
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 5b18f265d75b..34567d81f940 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -149,8 +149,8 @@ EXPORT(sysn32_call_table)
 	PTR	sys_mincore
 	PTR	sys_madvise
 	PTR	sys_shmget
-	PTR	sys32_shmat
-	PTR	sys_shmctl			/* 6030 */
+	PTR	sys_shmat
+	PTR	compat_sys_shmctl		/* 6030 */
 	PTR	sys_dup
 	PTR	sys_dup2
 	PTR	sys_pause
@@ -184,12 +184,12 @@ EXPORT(sysn32_call_table)
 	PTR	sys32_newuname
 	PTR	sys_semget
 	PTR	sys_semop
-	PTR	sys_semctl
+	PTR	sysn32_semctl
 	PTR	sys_shmdt			/* 6065 */
 	PTR	sys_msgget
-	PTR	sys_msgsnd
-	PTR	sys_msgrcv
-	PTR	sys_msgctl
+	PTR	compat_sys_msgsnd
+	PTR	compat_sys_msgrcv
+	PTR	compat_sys_msgctl
 	PTR	compat_sys_fcntl		/* 6070 */
 	PTR	sys_flock
 	PTR	sys_fsync
@@ -335,7 +335,7 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_fcntl64
 	PTR	sys_set_tid_address
 	PTR	sys_restart_syscall
-	PTR	sys_semtimedop			/* 6215 */
+	PTR	compat_sys_semtimedop		/* 6215 */
 	PTR	sys_fadvise64_64
 	PTR	compat_sys_statfs64
 	PTR	compat_sys_fstatfs64
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 49db516789e0..f2a8701e414d 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -172,7 +172,7 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 
 	spin_lock(&smp_call_lock);
 	call_data = &data;
-	mb();
+	smp_mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
 	for_each_online_cpu(i)
@@ -204,7 +204,7 @@ void smp_call_function_interrupt(void)
 	 * Notify initiating CPU that I've grabbed the data and am
 	 * about to execute the function.
 	 */
-	mb();
+	smp_mb();
 	atomic_inc(&call_data->started);
 
 	/*
@@ -215,7 +215,7 @@ void smp_call_function_interrupt(void)
 	irq_exit();
 
 	if (wait) {
-		mb();
+		smp_mb();
 		atomic_inc(&call_data->finished);
 	}
 }
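
Switching these call sites from mb() to smp_mb() matters once WEAK_ORDERING is configurable: smp_mb() emits a sync only on weakly ordered SMP builds, while mb() pays for full I/O ordering unconditionally. A rough C model of the handshake the barrier protects, using a C11 fence to stand in for smp_mb() (illustrative only, hypothetical names):

	#include <stdatomic.h>

	struct call_data { void (*func)(void *); void *info; };
	static struct call_data *call_data;	/* read by the other CPUs */

	/* Initiator: the descriptor must be globally visible before the
	 * IPI that tells other CPUs to look at it. */
	static void publish(struct call_data *data)
	{
		call_data = data;
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
		/* ...send IPIs to the other CPUs... */
	}
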
diff --git a/arch/mips/lib-32/Makefile b/arch/mips/lib-32/Makefile
index ad285786e74b..dcd4d2ed2ac4 100644
--- a/arch/mips/lib-32/Makefile
+++ b/arch/mips/lib-32/Makefile
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial.o memset.o watch.o
+lib-y	+= memset.o watch.o
 
 obj-$(CONFIG_CPU_MIPS32)	+= dump_tlb.o
 obj-$(CONFIG_CPU_MIPS64)	+= dump_tlb.o
diff --git a/arch/mips/lib-32/csum_partial.S b/arch/mips/lib-32/csum_partial.S
deleted file mode 100644
index ea257dbdcc40..000000000000
--- a/arch/mips/lib-32/csum_partial.S
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ralf Baechle
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-#define ADDC(sum,reg)						\
-	addu	sum, reg;					\
-	sltu	v1, sum, reg;					\
-	addu	sum, v1
-
-#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3)		\
-	lw	t0, (offset + 0x00)(src);			\
-	lw	t1, (offset + 0x04)(src);			\
-	lw	t2, (offset + 0x08)(src);			\
-	lw	t3, (offset + 0x0c)(src);			\
-	ADDC(sum, t0);						\
-	ADDC(sum, t1);						\
-	ADDC(sum, t2);						\
-	ADDC(sum, t3);						\
-	lw	t0, (offset + 0x10)(src);			\
-	lw	t1, (offset + 0x14)(src);			\
-	lw	t2, (offset + 0x18)(src);			\
-	lw	t3, (offset + 0x1c)(src);			\
-	ADDC(sum, t0);						\
-	ADDC(sum, t1);						\
-	ADDC(sum, t2);						\
-	ADDC(sum, t3);						\
-
-/*
- * a0: source address
- * a1: length of the area to checksum
- * a2: partial checksum
- */
-
-#define src a0
-#define dest a1
-#define sum v0
-
-	.text
-	.set	noreorder
-
-/* unknown src alignment and < 8 bytes to go */
-small_csumcpy:
-	move	a1, t2
-
-	andi	t0, a1, 4
-	beqz	t0, 1f
-	andi	t0, a1, 2
-
-	/* Still a full word to go */
-	ulw	t1, (src)
-	addiu	src, 4
-	ADDC(sum, t1)
-
-1:	move	t1, zero
-	beqz	t0, 1f
-	andi	t0, a1, 1
-
-	/* Still a halfword to go */
-	ulhu	t1, (src)
-	addiu	src, 2
-
-1:	beqz	t0, 1f
-	sll	t1, t1, 16
-
-	lbu	t2, (src)
-	nop
-
-#ifdef __MIPSEB__
-	sll	t2, t2, 8
-#endif
-	or	t1, t2
-
-1:	ADDC(sum, t1)
-
-	/* fold checksum */
-	sll	v1, sum, 16
-	addu	sum, v1
-	sltu	v1, sum, v1
-	srl	sum, sum, 16
-	addu	sum, v1
-
-	/* odd buffer alignment? */
-	beqz	t7, 1f
-	nop
-	sll	v1, sum, 8
-	srl	sum, sum, 8
-	or	sum, v1
-	andi	sum, 0xffff
-1:
-	.set	reorder
-	/* Add the passed partial csum. */
-	ADDC(sum, a2)
-	jr	ra
-	.set	noreorder
-
-/* ------------------------------------------------------------------------- */
-
-	.align	5
-LEAF(csum_partial)
-	move	sum, zero
-	move	t7, zero
-
-	sltiu	t8, a1, 0x8
-	bnez	t8, small_csumcpy		/* < 8 bytes to copy */
-	move	t2, a1
-
-	beqz	a1, out
-	andi	t7, src, 0x1			/* odd buffer? */
-
-hword_align:
-	beqz	t7, word_align
-	andi	t8, src, 0x2
-
-	lbu	t0, (src)
-	subu	a1, a1, 0x1
-#ifdef __MIPSEL__
-	sll	t0, t0, 8
-#endif
-	ADDC(sum, t0)
-	addu	src, src, 0x1
-	andi	t8, src, 0x2
-
-word_align:
-	beqz	t8, dword_align
-	sltiu	t8, a1, 56
-
-	lhu	t0, (src)
-	subu	a1, a1, 0x2
-	ADDC(sum, t0)
-	sltiu	t8, a1, 56
-	addu	src, src, 0x2
-
-dword_align:
-	bnez	t8, do_end_words
-	move	t8, a1
-
-	andi	t8, src, 0x4
-	beqz	t8, qword_align
-	andi	t8, src, 0x8
-
-	lw	t0, 0x00(src)
-	subu	a1, a1, 0x4
-	ADDC(sum, t0)
-	addu	src, src, 0x4
-	andi	t8, src, 0x8
-
-qword_align:
-	beqz	t8, oword_align
-	andi	t8, src, 0x10
-
-	lw	t0, 0x00(src)
-	lw	t1, 0x04(src)
-	subu	a1, a1, 0x8
-	ADDC(sum, t0)
-	ADDC(sum, t1)
-	addu	src, src, 0x8
-	andi	t8, src, 0x10
-
-oword_align:
-	beqz	t8, begin_movement
-	srl	t8, a1, 0x7
-
-	lw	t3, 0x08(src)
-	lw	t4, 0x0c(src)
-	lw	t0, 0x00(src)
-	lw	t1, 0x04(src)
-	ADDC(sum, t3)
-	ADDC(sum, t4)
-	ADDC(sum, t0)
-	ADDC(sum, t1)
-	subu	a1, a1, 0x10
-	addu	src, src, 0x10
-	srl	t8, a1, 0x7
-
-begin_movement:
-	beqz	t8, 1f
-	andi	t2, a1, 0x40
-
-move_128bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
-	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
-	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
-	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
-	subu	t8, t8, 0x01
-	bnez	t8, move_128bytes
-	addu	src, src, 0x80
-
-1:
-	beqz	t2, 1f
-	andi	t2, a1, 0x20
-
-move_64bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
-	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
-	addu	src, src, 0x40
-
-1:
-	beqz	t2, do_end_words
-	andi	t8, a1, 0x1c
-
-move_32bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
-	andi	t8, a1, 0x1c
-	addu	src, src, 0x20
-
-do_end_words:
-	beqz	t8, maybe_end_cruft
-	srl	t8, t8, 0x2
-
-end_words:
-	lw	t0, (src)
-	subu	t8, t8, 0x1
-	ADDC(sum, t0)
-	bnez	t8, end_words
-	addu	src, src, 0x4
-
-maybe_end_cruft:
-	andi	t2, a1, 0x3
-
-small_memcpy:
-	j	small_csumcpy; move a1, t2
-	beqz	t2, out
-	move	a1, t2
-
-end_bytes:
-	lb	t0, (src)
-	subu	a1, a1, 0x1
-	bnez	a2, end_bytes
-	addu	src, src, 0x1
-
-out:
-	jr	ra
-	move	v0, sum
-	END(csum_partial)
diff --git a/arch/mips/lib-64/Makefile b/arch/mips/lib-64/Makefile
index ad285786e74b..dcd4d2ed2ac4 100644
--- a/arch/mips/lib-64/Makefile
+++ b/arch/mips/lib-64/Makefile
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial.o memset.o watch.o
+lib-y	+= memset.o watch.o
 
 obj-$(CONFIG_CPU_MIPS32)	+= dump_tlb.o
 obj-$(CONFIG_CPU_MIPS64)	+= dump_tlb.o
diff --git a/arch/mips/lib-64/csum_partial.S b/arch/mips/lib-64/csum_partial.S
deleted file mode 100644
index 25aba660cc9c..000000000000
--- a/arch/mips/lib-64/csum_partial.S
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Quick'n'dirty IP checksum ...
- *
- * Copyright (C) 1998, 1999 Ralf Baechle
- * Copyright (C) 1999 Silicon Graphics, Inc.
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-#define ADDC(sum,reg)						\
-	addu	sum, reg;					\
-	sltu	v1, sum, reg;					\
-	addu	sum, v1
-
-#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3)		\
-	lw	t0, (offset + 0x00)(src);			\
-	lw	t1, (offset + 0x04)(src);			\
-	lw	t2, (offset + 0x08)(src);			\
-	lw	t3, (offset + 0x0c)(src);			\
-	ADDC(sum, t0);						\
-	ADDC(sum, t1);						\
-	ADDC(sum, t2);						\
-	ADDC(sum, t3);						\
-	lw	t0, (offset + 0x10)(src);			\
-	lw	t1, (offset + 0x14)(src);			\
-	lw	t2, (offset + 0x18)(src);			\
-	lw	t3, (offset + 0x1c)(src);			\
-	ADDC(sum, t0);						\
-	ADDC(sum, t1);						\
-	ADDC(sum, t2);						\
-	ADDC(sum, t3);						\
-
-/*
- * a0: source address
- * a1: length of the area to checksum
- * a2: partial checksum
- */
-
-#define src a0
-#define sum v0
-
-	.text
-	.set	noreorder
-
-/* unknown src alignment and < 8 bytes to go */
-small_csumcpy:
-	move	a1, ta2
-
-	andi	ta0, a1, 4
-	beqz	ta0, 1f
-	andi	ta0, a1, 2
-
-	/* Still a full word to go */
-	ulw	ta1, (src)
-	daddiu	src, 4
-	ADDC(sum, ta1)
-
-1:	move	ta1, zero
-	beqz	ta0, 1f
-	andi	ta0, a1, 1
-
-	/* Still a halfword to go */
-	ulhu	ta1, (src)
-	daddiu	src, 2
-
-1:	beqz	ta0, 1f
-	sll	ta1, ta1, 16
-
-	lbu	ta2, (src)
-	nop
-
-#ifdef __MIPSEB__
-	sll	ta2, ta2, 8
-#endif
-	or	ta1, ta2
-
-1:	ADDC(sum, ta1)
-
-	/* fold checksum */
-	sll	v1, sum, 16
-	addu	sum, v1
-	sltu	v1, sum, v1
-	srl	sum, sum, 16
-	addu	sum, v1
-
-	/* odd buffer alignment? */
-	beqz	t3, 1f
-	nop
-	sll	v1, sum, 8
-	srl	sum, sum, 8
-	or	sum, v1
-	andi	sum, 0xffff
-1:
-	.set	reorder
-	/* Add the passed partial csum. */
-	ADDC(sum, a2)
-	jr	ra
-	.set	noreorder
-
-/* ------------------------------------------------------------------------- */
-
-	.align	5
-LEAF(csum_partial)
-	move	sum, zero
-	move	t3, zero
-
-	sltiu	t8, a1, 0x8
-	bnez	t8, small_csumcpy		/* < 8 bytes to copy */
-	move	ta2, a1
-
-	beqz	a1, out
-	andi	t3, src, 0x1			/* odd buffer? */
-
-hword_align:
-	beqz	t3, word_align
-	andi	t8, src, 0x2
-
-	lbu	ta0, (src)
-	dsubu	a1, a1, 0x1
-#ifdef __MIPSEL__
-	sll	ta0, ta0, 8
-#endif
-	ADDC(sum, ta0)
-	daddu	src, src, 0x1
-	andi	t8, src, 0x2
-
-word_align:
-	beqz	t8, dword_align
-	sltiu	t8, a1, 56
-
-	lhu	ta0, (src)
-	dsubu	a1, a1, 0x2
-	ADDC(sum, ta0)
-	sltiu	t8, a1, 56
-	daddu	src, src, 0x2
-
-dword_align:
-	bnez	t8, do_end_words
-	move	t8, a1
-
-	andi	t8, src, 0x4
-	beqz	t8, qword_align
-	andi	t8, src, 0x8
-
-	lw	ta0, 0x00(src)
-	dsubu	a1, a1, 0x4
-	ADDC(sum, ta0)
-	daddu	src, src, 0x4
-	andi	t8, src, 0x8
-
-qword_align:
-	beqz	t8, oword_align
-	andi	t8, src, 0x10
-
-	lw	ta0, 0x00(src)
-	lw	ta1, 0x04(src)
-	dsubu	a1, a1, 0x8
-	ADDC(sum, ta0)
-	ADDC(sum, ta1)
-	daddu	src, src, 0x8
-	andi	t8, src, 0x10
-
-oword_align:
-	beqz	t8, begin_movement
-	dsrl	t8, a1, 0x7
-
-	lw	ta3, 0x08(src)
-	lw	t0, 0x0c(src)
-	lw	ta0, 0x00(src)
-	lw	ta1, 0x04(src)
-	ADDC(sum, ta3)
-	ADDC(sum, t0)
-	ADDC(sum, ta0)
-	ADDC(sum, ta1)
-	dsubu	a1, a1, 0x10
-	daddu	src, src, 0x10
-	dsrl	t8, a1, 0x7
-
-begin_movement:
-	beqz	t8, 1f
-	andi	ta2, a1, 0x40
-
-move_128bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
-	CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
-	CSUM_BIGCHUNK(src, 0x40, sum, ta0, ta1, ta3, t0)
-	CSUM_BIGCHUNK(src, 0x60, sum, ta0, ta1, ta3, t0)
-	dsubu	t8, t8, 0x01
-	bnez	t8, move_128bytes
-	daddu	src, src, 0x80
-
-1:
-	beqz	ta2, 1f
-	andi	ta2, a1, 0x20
-
-move_64bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
-	CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
-	daddu	src, src, 0x40
-
-1:
-	beqz	ta2, do_end_words
-	andi	t8, a1, 0x1c
-
-move_32bytes:
-	CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
-	andi	t8, a1, 0x1c
-	daddu	src, src, 0x20
-
-do_end_words:
-	beqz	t8, maybe_end_cruft
-	dsrl	t8, t8, 0x2
-
-end_words:
-	lw	ta0, (src)
-	dsubu	t8, t8, 0x1
-	ADDC(sum, ta0)
-	bnez	t8, end_words
-	daddu	src, src, 0x4
-
-maybe_end_cruft:
-	andi	ta2, a1, 0x3
-
-small_memcpy:
-	j	small_csumcpy; move a1, ta2	/* XXX ??? */
-	beqz	t2, out
-	move	a1, ta2
-
-end_bytes:
-	lb	ta0, (src)
-	dsubu	a1, a1, 0x1
-	bnez	a2, end_bytes
-	daddu	src, src, 0x1
-
-out:
-	jr	ra
-	move	v0, sum
-	END(csum_partial)
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index b225543f5302..888b61ea12fe 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,8 +2,8 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial_copy.o memcpy.o promlib.o strlen_user.o strncpy_user.o \
-	   strnlen_user.o uncached.o
+lib-y	+= csum_partial.o csum_partial_copy.o memcpy.o promlib.o \
+	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
 
 obj-y	+= iomap.o
 
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
new file mode 100644
index 000000000000..15611d9df7ac
--- /dev/null
+++ b/arch/mips/lib/csum_partial.S
@@ -0,0 +1,258 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Quick'n'dirty IP checksum ...
+ *
+ * Copyright (C) 1998, 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_64BIT
+#define T0	ta0
+#define T1	ta1
+#define T2	ta2
+#define T3	ta3
+#define T4	t0
+#define T7	t3
+#else
+#define T0	t0
+#define T1	t1
+#define T2	t2
+#define T3	t3
+#define T4	t4
+#define T7	t7
+#endif
+
+#define ADDC(sum,reg)						\
+	addu	sum, reg;					\
+	sltu	v1, sum, reg;					\
+	addu	sum, v1
+
+#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
+	lw	_t0, (offset + 0x00)(src);			\
+	lw	_t1, (offset + 0x04)(src);			\
+	lw	_t2, (offset + 0x08)(src);			\
+	lw	_t3, (offset + 0x0c)(src);			\
+	ADDC(sum, _t0);						\
+	ADDC(sum, _t1);						\
+	ADDC(sum, _t2);						\
+	ADDC(sum, _t3);						\
+	lw	_t0, (offset + 0x10)(src);			\
+	lw	_t1, (offset + 0x14)(src);			\
+	lw	_t2, (offset + 0x18)(src);			\
+	lw	_t3, (offset + 0x1c)(src);			\
+	ADDC(sum, _t0);						\
+	ADDC(sum, _t1);						\
+	ADDC(sum, _t2);						\
+	ADDC(sum, _t3);						\
+
+/*
+ * a0: source address
+ * a1: length of the area to checksum
+ * a2: partial checksum
+ */
+
+#define src a0
+#define sum v0
+
+	.text
+	.set	noreorder
+
+/* unknown src alignment and < 8 bytes to go */
+small_csumcpy:
+	move	a1, T2
+
+	andi	T0, a1, 4
+	beqz	T0, 1f
+	andi	T0, a1, 2
+
+	/* Still a full word to go */
+	ulw	T1, (src)
+	PTR_ADDIU	src, 4
+	ADDC(sum, T1)
+
+1:	move	T1, zero
+	beqz	T0, 1f
+	andi	T0, a1, 1
+
+	/* Still a halfword to go */
+	ulhu	T1, (src)
+	PTR_ADDIU	src, 2
+
+1:	beqz	T0, 1f
+	sll	T1, T1, 16
+
+	lbu	T2, (src)
+	nop
+
+#ifdef __MIPSEB__
+	sll	T2, T2, 8
+#endif
+	or	T1, T2
+
+1:	ADDC(sum, T1)
+
+	/* fold checksum */
+	sll	v1, sum, 16
+	addu	sum, v1
+	sltu	v1, sum, v1
+	srl	sum, sum, 16
+	addu	sum, v1
+
+	/* odd buffer alignment? */
+	beqz	T7, 1f
+	nop
+	sll	v1, sum, 8
+	srl	sum, sum, 8
+	or	sum, v1
+	andi	sum, 0xffff
+1:
+	.set	reorder
+	/* Add the passed partial csum. */
+	ADDC(sum, a2)
+	jr	ra
+	.set	noreorder
+
+/* ------------------------------------------------------------------------- */
+
+	.align	5
+LEAF(csum_partial)
+	move	sum, zero
+	move	T7, zero
+
+	sltiu	t8, a1, 0x8
+	bnez	t8, small_csumcpy		/* < 8 bytes to copy */
+	move	T2, a1
+
+	beqz	a1, out
+	andi	T7, src, 0x1			/* odd buffer? */
+
+hword_align:
+	beqz	T7, word_align
+	andi	t8, src, 0x2
+
+	lbu	T0, (src)
+	LONG_SUBU	a1, a1, 0x1
+#ifdef __MIPSEL__
+	sll	T0, T0, 8
+#endif
+	ADDC(sum, T0)
+	PTR_ADDU	src, src, 0x1
+	andi	t8, src, 0x2
+
+word_align:
+	beqz	t8, dword_align
+	sltiu	t8, a1, 56
+
+	lhu	T0, (src)
+	LONG_SUBU	a1, a1, 0x2
+	ADDC(sum, T0)
+	sltiu	t8, a1, 56
+	PTR_ADDU	src, src, 0x2
+
+dword_align:
+	bnez	t8, do_end_words
+	move	t8, a1
+
+	andi	t8, src, 0x4
+	beqz	t8, qword_align
+	andi	t8, src, 0x8
+
+	lw	T0, 0x00(src)
+	LONG_SUBU	a1, a1, 0x4
+	ADDC(sum, T0)
+	PTR_ADDU	src, src, 0x4
+	andi	t8, src, 0x8
+
+qword_align:
+	beqz	t8, oword_align
+	andi	t8, src, 0x10
+
+	lw	T0, 0x00(src)
+	lw	T1, 0x04(src)
+	LONG_SUBU	a1, a1, 0x8
+	ADDC(sum, T0)
+	ADDC(sum, T1)
+	PTR_ADDU	src, src, 0x8
+	andi	t8, src, 0x10
+
+oword_align:
+	beqz	t8, begin_movement
+	LONG_SRL	t8, a1, 0x7
+
+	lw	T3, 0x08(src)
+	lw	T4, 0x0c(src)
+	lw	T0, 0x00(src)
+	lw	T1, 0x04(src)
+	ADDC(sum, T3)
+	ADDC(sum, T4)
+	ADDC(sum, T0)
+	ADDC(sum, T1)
+	LONG_SUBU	a1, a1, 0x10
+	PTR_ADDU	src, src, 0x10
+	LONG_SRL	t8, a1, 0x7
+
+begin_movement:
+	beqz	t8, 1f
+	andi	T2, a1, 0x40
+
+move_128bytes:
+	CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+	CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
+	CSUM_BIGCHUNK(src, 0x40, sum, T0, T1, T3, T4)
+	CSUM_BIGCHUNK(src, 0x60, sum, T0, T1, T3, T4)
+	LONG_SUBU	t8, t8, 0x01
+	bnez	t8, move_128bytes
+	PTR_ADDU	src, src, 0x80
+
+1:
+	beqz	T2, 1f
+	andi	T2, a1, 0x20
+
+move_64bytes:
+	CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+	CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
+	PTR_ADDU	src, src, 0x40
+
+1:
+	beqz	T2, do_end_words
+	andi	t8, a1, 0x1c
+
+move_32bytes:
+	CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+	andi	t8, a1, 0x1c
+	PTR_ADDU	src, src, 0x20
+
+do_end_words:
+	beqz	t8, maybe_end_cruft
+	LONG_SRL	t8, t8, 0x2
+
+end_words:
+	lw	T0, (src)
+	LONG_SUBU	t8, t8, 0x1
+	ADDC(sum, T0)
+	bnez	t8, end_words
+	PTR_ADDU	src, src, 0x4
+
+maybe_end_cruft:
+	andi	T2, a1, 0x3
+
+small_memcpy:
+	j	small_csumcpy; move a1, T2	/* XXX ??? */
+	beqz	t2, out
+	move	a1, T2
+
+end_bytes:
+	lb	T0, (src)
+	LONG_SUBU	a1, a1, 0x1
+	bnez	a2, end_bytes
+	PTR_ADDU	src, src, 0x1
+
+out:
+	jr	ra
+	move	v0, sum
+	END(csum_partial)
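
The unified file replaces the 32-bit and 64-bit copies with one source: the T0..T7 macros pick registers that are safe under each ABI, and the PTR_/LONG_ macros expand to 32- or 64-bit address arithmetic. The checksum math itself is unchanged. For reference, a C model of what the ADDC macro and the final fold compute (illustrative, not part of the patch):

	#include <stdint.h>

	/* ADDC: 32-bit add with end-around carry, the core of the
	 * one's-complement checksum. */
	static uint32_t addc(uint32_t sum, uint32_t v)
	{
		sum += v;
		return sum + (sum < v);		/* fold the carry back in */
	}

	/* Final fold: reduce the 32-bit accumulator to 16 bits. */
	static uint16_t csum_fold16(uint32_t sum)
	{
		sum = (sum >> 16) + (sum & 0xffff);
		sum += sum >> 16;		/* one more possible carry */
		return (uint16_t)sum;
	}
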
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index ac342f5643c9..defa1f1452ad 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -43,7 +43,7 @@
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 #include <asm/sibyte/sb1250_regs.h>
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 #include <asm/sibyte/sb1250_genbus.h>
 #include <asm/sibyte/board.h>
@@ -53,7 +53,7 @@ extern void bcm1480_setup(void);
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 extern void sb1250_setup(void);
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 
 extern int xicor_probe(void);
@@ -90,7 +90,7 @@ void __init plat_timer_setup(struct irqaction *irq)
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 	sb1250_time_init();
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 }
 
@@ -111,7 +111,7 @@ void __init plat_mem_setup(void)
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 	sb1250_setup();
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 
 	panic_timeout = 5;	/* For debug. */
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 7978d8e11647..c1a2409bb52a 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -15,6 +15,7 @@
 #define _ASM_ATOMIC_H
 
 #include <linux/irqflags.h>
+#include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
 
@@ -130,6 +131,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -140,7 +143,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	sc	%0, %2				\n"
 		"	beqzl	%0, 1b				\n"
 		"	addu	%0, %1, %3			\n"
-		"	sync					\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -155,7 +157,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	sc	%0, %2				\n"
 		"	beqz	%0, 1b				\n"
 		"	addu	%0, %1, %3			\n"
-		"	sync					\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -170,6 +171,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -177,6 +180,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -187,7 +192,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		"	sc	%0, %2				\n"
 		"	beqzl	%0, 1b				\n"
 		"	subu	%0, %1, %3			\n"
-		"	sync					\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -202,7 +206,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		"	sc	%0, %2				\n"
 		"	beqz	%0, 1b				\n"
 		"	subu	%0, %1, %3			\n"
-		"	sync					\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -217,6 +220,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -232,6 +237,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -245,7 +252,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqzl	%0, 1b				\n"
 		"	subu	%0, %1, %3			\n"
 		"	.set	reorder				\n"
-		"	sync					\n"
 		"1:						\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -264,7 +270,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqz	%0, 1b				\n"
 		"	subu	%0, %1, %3			\n"
 		"	.set	reorder				\n"
-		"	sync					\n"
 		"1:						\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -281,6 +286,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -375,7 +382,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 
 #ifdef CONFIG_64BIT
 
-typedef struct { volatile __s64 counter; } atomic64_t;
+typedef struct { volatile long counter; } atomic64_t;
 
 #define ATOMIC64_INIT(i)    { (i) }
 
@@ -484,6 +491,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -494,7 +503,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		"	scd	%0, %2				\n"
 		"	beqzl	%0, 1b				\n"
 		"	addu	%0, %1, %3			\n"
-		"	sync					\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -509,7 +517,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		"	scd	%0, %2				\n"
 		"	beqz	%0, 1b				\n"
 		"	addu	%0, %1, %3			\n"
-		"	sync					\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -524,6 +531,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -531,6 +540,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -541,7 +552,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		"	scd	%0, %2				\n"
 		"	beqzl	%0, 1b				\n"
 		"	subu	%0, %1, %3			\n"
-		"	sync					\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -556,7 +566,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		"	scd	%0, %2				\n"
 		"	beqz	%0, 1b				\n"
 		"	subu	%0, %1, %3			\n"
-		"	sync					\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -571,6 +580,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -586,6 +597,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -599,7 +612,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqzl	%0, 1b				\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	.set	reorder				\n"
-		"	sync					\n"
 		"1:						\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -618,7 +630,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqz	%0, 1b				\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	.set	reorder				\n"
-		"	sync					\n"
 		"1:						\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -635,6 +646,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
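
The pattern is uniform: the trailing sync inside each ll/sc loop is removed and an smp_mb() is placed before and after the whole sequence, so the returning atomics keep full-barrier semantics while the barrier compiles away on strongly ordered or non-SMP configurations. In outline, over hypothetical ll()/sc() helpers (a sketch, not the real implementation):

	/* Barrier placement around a load-linked/store-conditional loop. */
	static inline int atomic_add_return_sketch(int i, atomic_t *v)
	{
		int old, new;

		smp_mb();				/* order earlier accesses */
		do {
			old = ll(&v->counter);		/* load-linked */
			new = old + i;
		} while (!sc(&v->counter, new));	/* store-conditional */
		smp_mb();				/* order later accesses */

		return new;
	}
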
diff --git a/include/asm-mips/barrier.h b/include/asm-mips/barrier.h
new file mode 100644
index 000000000000..ed82631b0017
--- /dev/null
+++ b/include/asm-mips/barrier.h
@@ -0,0 +1,132 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
7 */
8#ifndef __ASM_BARRIER_H
9#define __ASM_BARRIER_H
10
11/*
12 * read_barrier_depends - Flush all pending reads that subsequent reads
13 * depend on.
14 *
15 * No data-dependent reads from memory-like regions are ever reordered
16 * over this barrier. All reads preceding this primitive are guaranteed
17 * to access memory (but not necessarily other CPUs' caches) before any
18 * reads following this primitive that depend on the data returned by
19 * any of the preceding reads. This primitive is much lighter weight than
20 * rmb() on most CPUs, and is never heavier weight than is
21 * rmb().
22 *
23 * These ordering constraints are respected by both the local CPU
24 * and the compiler.
25 *
26 * Ordering is not guaranteed by anything other than these primitives,
27 * not even by data dependencies. See the documentation for
28 * memory_barrier() for examples and URLs to more information.
29 *
30 * For example, the following code would force ordering (the initial
31 * value of "a" is zero, "b" is one, and "p" is "&a"):
32 *
33 * <programlisting>
34 * CPU 0 CPU 1
35 *
36 * b = 2;
37 * memory_barrier();
38 * p = &b; q = p;
39 * read_barrier_depends();
40 * d = *q;
41 * </programlisting>
42 *
43 * because the read of "*q" depends on the read of "p" and these
44 * two reads are separated by a read_barrier_depends(). However,
45 * the following code, with the same initial values for "a" and "b":
46 *
47 * <programlisting>
48 * CPU 0 CPU 1
49 *
50 * a = 2;
51 * memory_barrier();
52 * b = 3; y = b;
53 * read_barrier_depends();
54 * x = a;
55 * </programlisting>
56 *
57 * does not enforce ordering, since there is no data dependency between
58 * the read of "a" and the read of "b". Therefore, on some CPUs, such
59 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
60 * in cases like this where there are no data dependencies.
61 */
62
63#define read_barrier_depends() do { } while(0)
64#define smp_read_barrier_depends() do { } while(0)
65
66#ifdef CONFIG_CPU_HAS_SYNC
67#define __sync() \
68 __asm__ __volatile__( \
69 ".set push\n\t" \
70 ".set noreorder\n\t" \
71 ".set mips2\n\t" \
72 "sync\n\t" \
73 ".set pop" \
74 : /* no output */ \
75 : /* no input */ \
76 : "memory")
77#else
78#define __sync() do { } while(0)
79#endif
80
81#define __fast_iob() \
82 __asm__ __volatile__( \
83 ".set push\n\t" \
84 ".set noreorder\n\t" \
85 "lw $0,%0\n\t" \
86 "nop\n\t" \
87 ".set pop" \
88 : /* no output */ \
89 : "m" (*(int *)CKSEG1) \
90 : "memory")
91
92#define fast_wmb() __sync()
93#define fast_rmb() __sync()
94#define fast_mb() __sync()
95#define fast_iob() \
96 do { \
97 __sync(); \
98 __fast_iob(); \
99 } while (0)
100
101#ifdef CONFIG_CPU_HAS_WB
102
103#include <asm/wbflush.h>
104
105#define wmb() fast_wmb()
106#define rmb() fast_rmb()
107#define mb() wbflush()
108#define iob() wbflush()
109
110#else /* !CONFIG_CPU_HAS_WB */
111
112#define wmb() fast_wmb()
113#define rmb() fast_rmb()
114#define mb() fast_mb()
115#define iob() fast_iob()
116
117#endif /* !CONFIG_CPU_HAS_WB */
118
119#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
120#define __WEAK_ORDERING_MB " sync \n"
121#else
122#define __WEAK_ORDERING_MB " \n"
123#endif
124
125#define smp_mb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
126#define smp_rmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
127#define smp_wmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
128
129#define set_mb(var, value) \
130 do { var = value; smp_mb(); } while (0)
131
132#endif /* __ASM_BARRIER_H */
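Because smp_mb(), smp_rmb() and smp_wmb() all expand to __WEAK_ORDERING_MB, a sync is emitted only when both CONFIG_WEAK_ORDERING and CONFIG_SMP are configured; otherwise the macros reduce to a compiler barrier. A compilable sketch of that expansion, treating the two CONFIG_ symbols as plain preprocessor defines and using __sync_synchronize() as a portable stand-in for the MIPS sync instruction:

#include <stdio.h>

#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
#define sketch_smp_mb() __sync_synchronize()  /* stands in for "sync" */
#else
#define sketch_smp_mb() __asm__ __volatile__("" : : : "memory")  /* compiler barrier only */
#endif

int data, flag;

int main(void)
{
	data = 42;
	sketch_smp_mb();  /* keeps the data store ordered before the flag store */
	flag = 1;
	printf("%d %d\n", data, flag);
	return 0;
}

Building this with -DCONFIG_WEAK_ORDERING -DCONFIG_SMP selects the real barrier, mirroring how the Kconfig options select it in the kernel build.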
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index b9007411b60f..06445de1324b 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org) 6 * Copyright (c) 1994 - 1997, 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (c) 1999, 2000 Silicon Graphics, Inc. 7 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
8 */ 8 */
9#ifndef _ASM_BITOPS_H 9#ifndef _ASM_BITOPS_H
@@ -12,6 +12,7 @@
12#include <linux/compiler.h> 12#include <linux/compiler.h>
13#include <linux/irqflags.h> 13#include <linux/irqflags.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <asm/barrier.h>
15#include <asm/bug.h> 16#include <asm/bug.h>
16#include <asm/byteorder.h> /* sigh ... */ 17#include <asm/byteorder.h> /* sigh ... */
17#include <asm/cpu-features.h> 18#include <asm/cpu-features.h>
@@ -204,9 +205,6 @@ static inline int test_and_set_bit(unsigned long nr,
204 " " __SC "%2, %1 \n" 205 " " __SC "%2, %1 \n"
205 " beqzl %2, 1b \n" 206 " beqzl %2, 1b \n"
206 " and %2, %0, %3 \n" 207 " and %2, %0, %3 \n"
207#ifdef CONFIG_SMP
208 " sync \n"
209#endif
210 " .set mips0 \n" 208 " .set mips0 \n"
211 : "=&r" (temp), "=m" (*m), "=&r" (res) 209 : "=&r" (temp), "=m" (*m), "=&r" (res)
212 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) 210 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -226,9 +224,6 @@ static inline int test_and_set_bit(unsigned long nr,
226 " " __SC "%2, %1 \n" 224 " " __SC "%2, %1 \n"
227 " beqz %2, 1b \n" 225 " beqz %2, 1b \n"
228 " and %2, %0, %3 \n" 226 " and %2, %0, %3 \n"
229#ifdef CONFIG_SMP
230 " sync \n"
231#endif
232 " .set pop \n" 227 " .set pop \n"
233 : "=&r" (temp), "=m" (*m), "=&r" (res) 228 : "=&r" (temp), "=m" (*m), "=&r" (res)
234 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) 229 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -250,6 +245,8 @@ static inline int test_and_set_bit(unsigned long nr,
250 245
251 return retval; 246 return retval;
252 } 247 }
248
249 smp_mb();
253} 250}
254 251
255/* 252/*
@@ -275,9 +272,6 @@ static inline int test_and_clear_bit(unsigned long nr,
275 " " __SC "%2, %1 \n" 272 " " __SC "%2, %1 \n"
276 " beqzl %2, 1b \n" 273 " beqzl %2, 1b \n"
277 " and %2, %0, %3 \n" 274 " and %2, %0, %3 \n"
278#ifdef CONFIG_SMP
279 " sync \n"
280#endif
281 " .set mips0 \n" 275 " .set mips0 \n"
282 : "=&r" (temp), "=m" (*m), "=&r" (res) 276 : "=&r" (temp), "=m" (*m), "=&r" (res)
283 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) 277 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -298,9 +292,6 @@ static inline int test_and_clear_bit(unsigned long nr,
298 " " __SC "%2, %1 \n" 292 " " __SC "%2, %1 \n"
299 " beqz %2, 1b \n" 293 " beqz %2, 1b \n"
300 " and %2, %0, %3 \n" 294 " and %2, %0, %3 \n"
301#ifdef CONFIG_SMP
302 " sync \n"
303#endif
304 " .set pop \n" 295 " .set pop \n"
305 : "=&r" (temp), "=m" (*m), "=&r" (res) 296 : "=&r" (temp), "=m" (*m), "=&r" (res)
306 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) 297 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -322,6 +313,8 @@ static inline int test_and_clear_bit(unsigned long nr,
322 313
323 return retval; 314 return retval;
324 } 315 }
316
317 smp_mb();
325} 318}
326 319
327/* 320/*
@@ -346,9 +339,6 @@ static inline int test_and_change_bit(unsigned long nr,
346 " " __SC "%2, %1 \n" 339 " " __SC "%2, %1 \n"
347 " beqzl %2, 1b \n" 340 " beqzl %2, 1b \n"
348 " and %2, %0, %3 \n" 341 " and %2, %0, %3 \n"
349#ifdef CONFIG_SMP
350 " sync \n"
351#endif
352 " .set mips0 \n" 342 " .set mips0 \n"
353 : "=&r" (temp), "=m" (*m), "=&r" (res) 343 : "=&r" (temp), "=m" (*m), "=&r" (res)
354 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) 344 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -368,9 +358,6 @@ static inline int test_and_change_bit(unsigned long nr,
368 " " __SC "\t%2, %1 \n" 358 " " __SC "\t%2, %1 \n"
369 " beqz %2, 1b \n" 359 " beqz %2, 1b \n"
370 " and %2, %0, %3 \n" 360 " and %2, %0, %3 \n"
371#ifdef CONFIG_SMP
372 " sync \n"
373#endif
374 " .set pop \n" 361 " .set pop \n"
375 : "=&r" (temp), "=m" (*m), "=&r" (res) 362 : "=&r" (temp), "=m" (*m), "=&r" (res)
376 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) 363 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -391,6 +378,8 @@ static inline int test_and_change_bit(unsigned long nr,
391 378
392 return retval; 379 return retval;
393 } 380 }
381
382 smp_mb();
394} 383}
395 384
396#include <asm-generic/bitops/non-atomic.h> 385#include <asm-generic/bitops/non-atomic.h>
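The bitops change relies on the same convention: callers of test_and_set_bit() and friends expect a full memory barrier, and that guarantee now comes from an explicit smp_mb() rather than a sync embedded in the asm. A hedged userspace sketch of the contract itself (atomically set one bit, return its old value, with full ordering), using a GCC builtin in place of the ll/sc loop:

#include <stdio.h>

static unsigned long word;

static int test_and_set_bit_sketch(unsigned long nr, unsigned long *addr)
{
	unsigned long mask = 1UL << nr;
	/* __sync_fetch_and_or is documented as a full barrier, matching
	 * the smp_mb() the kernel variant now executes */
	unsigned long old = __sync_fetch_and_or(addr, mask);

	return (old & mask) != 0;
}

int main(void)
{
	printf("%d\n", test_and_set_bit_sketch(3, &word));  /* 0: bit was clear */
	printf("%d\n", test_and_set_bit_sketch(3, &word));  /* 1: bit was set */
	return 0;
}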
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h
index 900f472fdd2b..55a0152feb08 100644
--- a/include/asm-mips/compat.h
+++ b/include/asm-mips/compat.h
@@ -32,6 +32,7 @@ typedef struct {
32 s32 val[2]; 32 s32 val[2];
33} compat_fsid_t; 33} compat_fsid_t;
34typedef s32 compat_timer_t; 34typedef s32 compat_timer_t;
35typedef s32 compat_key_t;
35 36
36typedef s32 compat_int_t; 37typedef s32 compat_int_t;
37typedef s32 compat_long_t; 38typedef s32 compat_long_t;
@@ -146,4 +147,71 @@ static inline void __user *compat_alloc_user_space(long len)
146 return (void __user *) (regs->regs[29] - len); 147 return (void __user *) (regs->regs[29] - len);
147} 148}
148 149
150struct compat_ipc64_perm {
151 compat_key_t key;
152 __compat_uid32_t uid;
153 __compat_gid32_t gid;
154 __compat_uid32_t cuid;
155 __compat_gid32_t cgid;
156 compat_mode_t mode;
157 unsigned short seq;
158 unsigned short __pad2;
159 compat_ulong_t __unused1;
160 compat_ulong_t __unused2;
161};
162
163struct compat_semid64_ds {
164 struct compat_ipc64_perm sem_perm;
165 compat_time_t sem_otime;
166 compat_time_t sem_ctime;
167 compat_ulong_t sem_nsems;
168 compat_ulong_t __unused1;
169 compat_ulong_t __unused2;
170};
171
172struct compat_msqid64_ds {
173 struct compat_ipc64_perm msg_perm;
174#ifndef CONFIG_CPU_LITTLE_ENDIAN
175 compat_ulong_t __unused1;
176#endif
177 compat_time_t msg_stime;
178#ifdef CONFIG_CPU_LITTLE_ENDIAN
179 compat_ulong_t __unused1;
180#endif
181#ifndef CONFIG_CPU_LITTLE_ENDIAN
182 compat_ulong_t __unused2;
183#endif
184 compat_time_t msg_rtime;
185#ifdef CONFIG_CPU_LITTLE_ENDIAN
186 compat_ulong_t __unused2;
187#endif
188#ifndef CONFIG_CPU_LITTLE_ENDIAN
189 compat_ulong_t __unused3;
190#endif
191 compat_time_t msg_ctime;
192#ifdef CONFIG_CPU_LITTLE_ENDIAN
193 compat_ulong_t __unused3;
194#endif
195 compat_ulong_t msg_cbytes;
196 compat_ulong_t msg_qnum;
197 compat_ulong_t msg_qbytes;
198 compat_pid_t msg_lspid;
199 compat_pid_t msg_lrpid;
200 compat_ulong_t __unused4;
201 compat_ulong_t __unused5;
202};
203
204struct compat_shmid64_ds {
205 struct compat_ipc64_perm shm_perm;
206 compat_size_t shm_segsz;
207 compat_time_t shm_atime;
208 compat_time_t shm_dtime;
209 compat_time_t shm_ctime;
210 compat_pid_t shm_cpid;
211 compat_pid_t shm_lpid;
212 compat_ulong_t shm_nattch;
213 compat_ulong_t __unused1;
214 compat_ulong_t __unused2;
215};
216
149#endif /* _ASM_COMPAT_H */ 217#endif /* _ASM_COMPAT_H */
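The endian-conditional padding in compat_msqid64_ds exists because each 32-bit compat_time_t must land in the half of the native 64-bit field that actually holds the value: on big-endian MIPS that is the second word, so the pad word precedes it, while on little-endian it is the first word, so the pad follows. A small standalone sketch of one such slot (types and field names are illustrative, not the kernel definitions):

#include <stdio.h>
#include <stdint.h>

struct native_slot { int64_t msg_stime; };  /* what a 64-bit kernel sees */

struct compat_slot {                        /* same 8 bytes, 32-bit view */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	uint32_t pad;        /* value lives in the low (second) word */
	int32_t  msg_stime;
#else
	int32_t  msg_stime;  /* value lives in the low (first) word */
	uint32_t pad;
#endif
};

int main(void)
{
	printf("native %zu bytes, compat %zu bytes\n",
	       sizeof(struct native_slot), sizeof(struct compat_slot));
	return 0;
}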
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index ed023eae0674..927a216bd530 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -1,19 +1,21 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org)
7 */
1#ifndef _ASM_FUTEX_H 8#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H 9#define _ASM_FUTEX_H
3 10
4#ifdef __KERNEL__ 11#ifdef __KERNEL__
5 12
6#include <linux/futex.h> 13#include <linux/futex.h>
14#include <asm/barrier.h>
7#include <asm/errno.h> 15#include <asm/errno.h>
8#include <asm/uaccess.h> 16#include <asm/uaccess.h>
9#include <asm/war.h> 17#include <asm/war.h>
10 18
11#ifdef CONFIG_SMP
12#define __FUTEX_SMP_SYNC " sync \n"
13#else
14#define __FUTEX_SMP_SYNC
15#endif
16
17#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 19#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
18{ \ 20{ \
19 if (cpu_has_llsc && R10000_LLSC_WAR) { \ 21 if (cpu_has_llsc && R10000_LLSC_WAR) { \
@@ -27,7 +29,7 @@
27 " .set mips3 \n" \ 29 " .set mips3 \n" \
28 "2: sc $1, %2 \n" \ 30 "2: sc $1, %2 \n" \
29 " beqzl $1, 1b \n" \ 31 " beqzl $1, 1b \n" \
30 __FUTEX_SMP_SYNC \ 32 __WEAK_ORDERING_MB \
31 "3: \n" \ 33 "3: \n" \
32 " .set pop \n" \ 34 " .set pop \n" \
33 " .set mips0 \n" \ 35 " .set mips0 \n" \
@@ -53,7 +55,7 @@
53 " .set mips3 \n" \ 55 " .set mips3 \n" \
54 "2: sc $1, %2 \n" \ 56 "2: sc $1, %2 \n" \
55 " beqz $1, 1b \n" \ 57 " beqz $1, 1b \n" \
56 __FUTEX_SMP_SYNC \ 58 __WEAK_ORDERING_MB \
57 "3: \n" \ 59 "3: \n" \
58 " .set pop \n" \ 60 " .set pop \n" \
59 " .set mips0 \n" \ 61 " .set mips0 \n" \
@@ -150,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
150 " .set mips3 \n" 152 " .set mips3 \n"
151 "2: sc $1, %1 \n" 153 "2: sc $1, %1 \n"
152 " beqzl $1, 1b \n" 154 " beqzl $1, 1b \n"
153 __FUTEX_SMP_SYNC 155 __WEAK_ORDERING_MB
154 "3: \n" 156 "3: \n"
155 " .set pop \n" 157 " .set pop \n"
156 " .section .fixup,\"ax\" \n" 158 " .section .fixup,\"ax\" \n"
@@ -177,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
177 " .set mips3 \n" 179 " .set mips3 \n"
178 "2: sc $1, %1 \n" 180 "2: sc $1, %1 \n"
179 " beqz $1, 1b \n" 181 " beqz $1, 1b \n"
180 __FUTEX_SMP_SYNC 182 __WEAK_ORDERING_MB
181 "3: \n" 183 "3: \n"
182 " .set pop \n" 184 " .set pop \n"
183 " .section .fixup,\"ax\" \n" 185 " .section .fixup,\"ax\" \n"
diff --git a/include/asm-mips/sn/klconfig.h b/include/asm-mips/sn/klconfig.h
index b63cd0655b3d..15d70ca56187 100644
--- a/include/asm-mips/sn/klconfig.h
+++ b/include/asm-mips/sn/klconfig.h
@@ -176,7 +176,7 @@ typedef struct kl_config_hdr {
176/* --- New Macros for the changed kl_config_hdr_t structure --- */ 176/* --- New Macros for the changed kl_config_hdr_t structure --- */
177 177
178#define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\ 178#define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\
179 (unsigned long)_k + (_k->ch_malloc_hdr_off))) 179 ((unsigned long)_k + (_k->ch_malloc_hdr_off)))
180 180
181#define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n)) 181#define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n))
182 182
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index c8d5587467bb..fc3217fc1118 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -3,12 +3,13 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1999, 2000 by Ralf Baechle 6 * Copyright (C) 1999, 2000, 06 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */ 8 */
9#ifndef _ASM_SPINLOCK_H 9#ifndef _ASM_SPINLOCK_H
10#define _ASM_SPINLOCK_H 10#define _ASM_SPINLOCK_H
11 11
12#include <asm/barrier.h>
12#include <asm/war.h> 13#include <asm/war.h>
13 14
14/* 15/*
@@ -40,7 +41,6 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
40 " sc %1, %0 \n" 41 " sc %1, %0 \n"
41 " beqzl %1, 1b \n" 42 " beqzl %1, 1b \n"
42 " nop \n" 43 " nop \n"
43 " sync \n"
44 " .set reorder \n" 44 " .set reorder \n"
45 : "=m" (lock->lock), "=&r" (tmp) 45 : "=m" (lock->lock), "=&r" (tmp)
46 : "m" (lock->lock) 46 : "m" (lock->lock)
@@ -53,19 +53,22 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
53 " li %1, 1 \n" 53 " li %1, 1 \n"
54 " sc %1, %0 \n" 54 " sc %1, %0 \n"
55 " beqz %1, 1b \n" 55 " beqz %1, 1b \n"
56 " sync \n" 56 " nop \n"
57 " .set reorder \n" 57 " .set reorder \n"
58 : "=m" (lock->lock), "=&r" (tmp) 58 : "=m" (lock->lock), "=&r" (tmp)
59 : "m" (lock->lock) 59 : "m" (lock->lock)
60 : "memory"); 60 : "memory");
61 } 61 }
62
63 smp_mb();
62} 64}
63 65
64static inline void __raw_spin_unlock(raw_spinlock_t *lock) 66static inline void __raw_spin_unlock(raw_spinlock_t *lock)
65{ 67{
68 smp_mb();
69
66 __asm__ __volatile__( 70 __asm__ __volatile__(
67 " .set noreorder # __raw_spin_unlock \n" 71 " .set noreorder # __raw_spin_unlock \n"
68 " sync \n"
69 " sw $0, %0 \n" 72 " sw $0, %0 \n"
70 " .set\treorder \n" 73 " .set\treorder \n"
71 : "=m" (lock->lock) 74 : "=m" (lock->lock)
@@ -86,7 +89,6 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
86 " beqzl %2, 1b \n" 89 " beqzl %2, 1b \n"
87 " nop \n" 90 " nop \n"
88 " andi %2, %0, 1 \n" 91 " andi %2, %0, 1 \n"
89 " sync \n"
90 " .set reorder" 92 " .set reorder"
91 : "=&r" (temp), "=m" (lock->lock), "=&r" (res) 93 : "=&r" (temp), "=m" (lock->lock), "=&r" (res)
92 : "m" (lock->lock) 94 : "m" (lock->lock)
@@ -99,13 +101,14 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
99 " sc %2, %1 \n" 101 " sc %2, %1 \n"
100 " beqz %2, 1b \n" 102 " beqz %2, 1b \n"
101 " andi %2, %0, 1 \n" 103 " andi %2, %0, 1 \n"
102 " sync \n"
103 " .set reorder" 104 " .set reorder"
104 : "=&r" (temp), "=m" (lock->lock), "=&r" (res) 105 : "=&r" (temp), "=m" (lock->lock), "=&r" (res)
105 : "m" (lock->lock) 106 : "m" (lock->lock)
106 : "memory"); 107 : "memory");
107 } 108 }
108 109
110 smp_mb();
111
109 return res == 0; 112 return res == 0;
110} 113}
111 114
@@ -143,7 +146,6 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
143 " sc %1, %0 \n" 146 " sc %1, %0 \n"
144 " beqzl %1, 1b \n" 147 " beqzl %1, 1b \n"
145 " nop \n" 148 " nop \n"
146 " sync \n"
147 " .set reorder \n" 149 " .set reorder \n"
148 : "=m" (rw->lock), "=&r" (tmp) 150 : "=m" (rw->lock), "=&r" (tmp)
149 : "m" (rw->lock) 151 : "m" (rw->lock)
@@ -156,12 +158,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
156 " addu %1, 1 \n" 158 " addu %1, 1 \n"
157 " sc %1, %0 \n" 159 " sc %1, %0 \n"
158 " beqz %1, 1b \n" 160 " beqz %1, 1b \n"
159 " sync \n" 161 " nop \n"
160 " .set reorder \n" 162 " .set reorder \n"
161 : "=m" (rw->lock), "=&r" (tmp) 163 : "=m" (rw->lock), "=&r" (tmp)
162 : "m" (rw->lock) 164 : "m" (rw->lock)
163 : "memory"); 165 : "memory");
164 } 166 }
167
168 smp_mb();
165} 169}
166 170
167/* Note the use of sub, not subu which will make the kernel die with an 171/* Note the use of sub, not subu which will make the kernel die with an
@@ -171,13 +175,14 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
171{ 175{
172 unsigned int tmp; 176 unsigned int tmp;
173 177
178 smp_mb();
179
174 if (R10000_LLSC_WAR) { 180 if (R10000_LLSC_WAR) {
175 __asm__ __volatile__( 181 __asm__ __volatile__(
176 "1: ll %1, %2 # __raw_read_unlock \n" 182 "1: ll %1, %2 # __raw_read_unlock \n"
177 " sub %1, 1 \n" 183 " sub %1, 1 \n"
178 " sc %1, %0 \n" 184 " sc %1, %0 \n"
179 " beqzl %1, 1b \n" 185 " beqzl %1, 1b \n"
180 " sync \n"
181 : "=m" (rw->lock), "=&r" (tmp) 186 : "=m" (rw->lock), "=&r" (tmp)
182 : "m" (rw->lock) 187 : "m" (rw->lock)
183 : "memory"); 188 : "memory");
@@ -188,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
188 " sub %1, 1 \n" 193 " sub %1, 1 \n"
189 " sc %1, %0 \n" 194 " sc %1, %0 \n"
190 " beqz %1, 1b \n" 195 " beqz %1, 1b \n"
191 " sync \n" 196 " nop \n"
192 " .set reorder \n" 197 " .set reorder \n"
193 : "=m" (rw->lock), "=&r" (tmp) 198 : "=m" (rw->lock), "=&r" (tmp)
194 : "m" (rw->lock) 199 : "m" (rw->lock)
@@ -208,7 +213,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
208 " lui %1, 0x8000 \n" 213 " lui %1, 0x8000 \n"
209 " sc %1, %0 \n" 214 " sc %1, %0 \n"
210 " beqzl %1, 1b \n" 215 " beqzl %1, 1b \n"
211 " sync \n" 216 " nop \n"
212 " .set reorder \n" 217 " .set reorder \n"
213 : "=m" (rw->lock), "=&r" (tmp) 218 : "=m" (rw->lock), "=&r" (tmp)
214 : "m" (rw->lock) 219 : "m" (rw->lock)
@@ -221,18 +226,22 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
221 " lui %1, 0x8000 \n" 226 " lui %1, 0x8000 \n"
222 " sc %1, %0 \n" 227 " sc %1, %0 \n"
223 " beqz %1, 1b \n" 228 " beqz %1, 1b \n"
224 " sync \n" 229 " nop \n"
225 " .set reorder \n" 230 " .set reorder \n"
226 : "=m" (rw->lock), "=&r" (tmp) 231 : "=m" (rw->lock), "=&r" (tmp)
227 : "m" (rw->lock) 232 : "m" (rw->lock)
228 : "memory"); 233 : "memory");
229 } 234 }
235
236 smp_mb();
230} 237}
231 238
232static inline void __raw_write_unlock(raw_rwlock_t *rw) 239static inline void __raw_write_unlock(raw_rwlock_t *rw)
233{ 240{
241 smp_mb();
242
234 __asm__ __volatile__( 243 __asm__ __volatile__(
235 " sync # __raw_write_unlock \n" 244 " # __raw_write_unlock \n"
236 " sw $0, %0 \n" 245 " sw $0, %0 \n"
237 : "=m" (rw->lock) 246 : "=m" (rw->lock)
238 : "m" (rw->lock) 247 : "m" (rw->lock)
@@ -252,11 +261,10 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
252 " bnez %1, 2f \n" 261 " bnez %1, 2f \n"
253 " addu %1, 1 \n" 262 " addu %1, 1 \n"
254 " sc %1, %0 \n" 263 " sc %1, %0 \n"
255 " beqzl %1, 1b \n"
256 " .set reorder \n" 264 " .set reorder \n"
257#ifdef CONFIG_SMP 265 " beqzl %1, 1b \n"
258 " sync \n" 266 " nop \n"
259#endif 267 __WEAK_ORDERING_MB
260 " li %2, 1 \n" 268 " li %2, 1 \n"
261 "2: \n" 269 "2: \n"
262 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) 270 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -271,10 +279,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
271 " addu %1, 1 \n" 279 " addu %1, 1 \n"
272 " sc %1, %0 \n" 280 " sc %1, %0 \n"
273 " beqz %1, 1b \n" 281 " beqz %1, 1b \n"
282 " nop \n"
274 " .set reorder \n" 283 " .set reorder \n"
275#ifdef CONFIG_SMP 284 __WEAK_ORDERING_MB
276 " sync \n"
277#endif
278 " li %2, 1 \n" 285 " li %2, 1 \n"
279 "2: \n" 286 "2: \n"
280 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) 287 : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -299,7 +306,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
299 " lui %1, 0x8000 \n" 306 " lui %1, 0x8000 \n"
300 " sc %1, %0 \n" 307 " sc %1, %0 \n"
301 " beqzl %1, 1b \n" 308 " beqzl %1, 1b \n"
302 " sync \n" 309 " nop \n"
310 __WEAK_ORDERING_MB
303 " li %2, 1 \n" 311 " li %2, 1 \n"
304 " .set reorder \n" 312 " .set reorder \n"
305 "2: \n" 313 "2: \n"
@@ -315,7 +323,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
315 " lui %1, 0x8000 \n" 323 " lui %1, 0x8000 \n"
316 " sc %1, %0 \n" 324 " sc %1, %0 \n"
317 " beqz %1, 1b \n" 325 " beqz %1, 1b \n"
318 " sync \n" 326 " nop \n"
327 __WEAK_ORDERING_MB
319 " li %2, 1 \n" 328 " li %2, 1 \n"
320 " .set reorder \n" 329 " .set reorder \n"
321 "2: \n" 330 "2: \n"
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 3056feed5a36..9428057a50cf 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle 6 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
7 * Copyright (C) 1996 by Paul M. Antoine 7 * Copyright (C) 1996 by Paul M. Antoine
8 * Copyright (C) 1999 Silicon Graphics 8 * Copyright (C) 1999 Silicon Graphics
9 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com 9 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
@@ -16,132 +16,12 @@
16#include <linux/irqflags.h> 16#include <linux/irqflags.h>
17 17
18#include <asm/addrspace.h> 18#include <asm/addrspace.h>
19#include <asm/barrier.h>
19#include <asm/cpu-features.h> 20#include <asm/cpu-features.h>
20#include <asm/dsp.h> 21#include <asm/dsp.h>
21#include <asm/ptrace.h> 22#include <asm/ptrace.h>
22#include <asm/war.h> 23#include <asm/war.h>
23 24
24/*
25 * read_barrier_depends - Flush all pending reads that subsequent reads
26 * depend on.
27 *
28 * No data-dependent reads from memory-like regions are ever reordered
29 * over this barrier. All reads preceding this primitive are guaranteed
30 * to access memory (but not necessarily other CPUs' caches) before any
31 * reads following this primitive that depend on the data returned by
32 * any of the preceding reads. This primitive is much lighter weight than
33 * rmb() on most CPUs, and is never heavier weight than is
34 * rmb().
35 *
36 * These ordering constraints are respected by both the local CPU
37 * and the compiler.
38 *
39 * Ordering is not guaranteed by anything other than these primitives,
40 * not even by data dependencies. See the documentation for
41 * memory_barrier() for examples and URLs to more information.
42 *
43 * For example, the following code would force ordering (the initial
44 * value of "a" is zero, "b" is one, and "p" is "&a"):
45 *
46 * <programlisting>
47 * CPU 0 CPU 1
48 *
49 * b = 2;
50 * memory_barrier();
51 * p = &b; q = p;
52 * read_barrier_depends();
53 * d = *q;
54 * </programlisting>
55 *
56 * because the read of "*q" depends on the read of "p" and these
57 * two reads are separated by a read_barrier_depends(). However,
58 * the following code, with the same initial values for "a" and "b":
59 *
60 * <programlisting>
61 * CPU 0 CPU 1
62 *
63 * a = 2;
64 * memory_barrier();
65 * b = 3; y = b;
66 * read_barrier_depends();
67 * x = a;
68 * </programlisting>
69 *
70 * does not enforce ordering, since there is no data dependency between
71 * the read of "a" and the read of "b". Therefore, on some CPUs, such
72 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
73 * in cases like this where there are no data dependencies.
74 */
75
76#define read_barrier_depends() do { } while(0)
77
78#ifdef CONFIG_CPU_HAS_SYNC
79#define __sync() \
80 __asm__ __volatile__( \
81 ".set push\n\t" \
82 ".set noreorder\n\t" \
83 ".set mips2\n\t" \
84 "sync\n\t" \
85 ".set pop" \
86 : /* no output */ \
87 : /* no input */ \
88 : "memory")
89#else
90#define __sync() do { } while(0)
91#endif
92
93#define __fast_iob() \
94 __asm__ __volatile__( \
95 ".set push\n\t" \
96 ".set noreorder\n\t" \
97 "lw $0,%0\n\t" \
98 "nop\n\t" \
99 ".set pop" \
100 : /* no output */ \
101 : "m" (*(int *)CKSEG1) \
102 : "memory")
103
104#define fast_wmb() __sync()
105#define fast_rmb() __sync()
106#define fast_mb() __sync()
107#define fast_iob() \
108 do { \
109 __sync(); \
110 __fast_iob(); \
111 } while (0)
112
113#ifdef CONFIG_CPU_HAS_WB
114
115#include <asm/wbflush.h>
116
117#define wmb() fast_wmb()
118#define rmb() fast_rmb()
119#define mb() wbflush()
120#define iob() wbflush()
121
122#else /* !CONFIG_CPU_HAS_WB */
123
124#define wmb() fast_wmb()
125#define rmb() fast_rmb()
126#define mb() fast_mb()
127#define iob() fast_iob()
128
129#endif /* !CONFIG_CPU_HAS_WB */
130
131#ifdef CONFIG_SMP
132#define smp_mb() mb()
133#define smp_rmb() rmb()
134#define smp_wmb() wmb()
135#define smp_read_barrier_depends() read_barrier_depends()
136#else
137#define smp_mb() barrier()
138#define smp_rmb() barrier()
139#define smp_wmb() barrier()
140#define smp_read_barrier_depends() do { } while(0)
141#endif
142
143#define set_mb(var, value) \
144do { var = value; mb(); } while (0)
145 25
146/* 26/*
147 * switch_to(n) should switch tasks to task nr n, first 27 * switch_to(n) should switch tasks to task nr n, first
@@ -217,9 +97,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
217 " .set mips3 \n" 97 " .set mips3 \n"
218 " sc %2, %1 \n" 98 " sc %2, %1 \n"
219 " beqzl %2, 1b \n" 99 " beqzl %2, 1b \n"
220#ifdef CONFIG_SMP
221 " sync \n"
222#endif
223 " .set mips0 \n" 100 " .set mips0 \n"
224 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 101 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
225 : "R" (*m), "Jr" (val) 102 : "R" (*m), "Jr" (val)
@@ -235,9 +112,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
235 " .set mips3 \n" 112 " .set mips3 \n"
236 " sc %2, %1 \n" 113 " sc %2, %1 \n"
237 " beqz %2, 1b \n" 114 " beqz %2, 1b \n"
238#ifdef CONFIG_SMP
239 " sync \n"
240#endif
241 " .set mips0 \n" 115 " .set mips0 \n"
242 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 116 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
243 : "R" (*m), "Jr" (val) 117 : "R" (*m), "Jr" (val)
@@ -251,6 +125,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
251 local_irq_restore(flags); /* implies memory barrier */ 125 local_irq_restore(flags); /* implies memory barrier */
252 } 126 }
253 127
128 smp_mb();
129
254 return retval; 130 return retval;
255} 131}
256 132
@@ -268,9 +144,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
268 " move %2, %z4 \n" 144 " move %2, %z4 \n"
269 " scd %2, %1 \n" 145 " scd %2, %1 \n"
270 " beqzl %2, 1b \n" 146 " beqzl %2, 1b \n"
271#ifdef CONFIG_SMP
272 " sync \n"
273#endif
274 " .set mips0 \n" 147 " .set mips0 \n"
275 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 148 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
276 : "R" (*m), "Jr" (val) 149 : "R" (*m), "Jr" (val)
@@ -284,9 +157,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
284 " move %2, %z4 \n" 157 " move %2, %z4 \n"
285 " scd %2, %1 \n" 158 " scd %2, %1 \n"
286 " beqz %2, 1b \n" 159 " beqz %2, 1b \n"
287#ifdef CONFIG_SMP
288 " sync \n"
289#endif
290 " .set mips0 \n" 160 " .set mips0 \n"
291 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 161 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
292 : "R" (*m), "Jr" (val) 162 : "R" (*m), "Jr" (val)
@@ -300,6 +170,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
300 local_irq_restore(flags); /* implies memory barrier */ 170 local_irq_restore(flags); /* implies memory barrier */
301 } 171 }
302 172
173 smp_mb();
174
303 return retval; 175 return retval;
304} 176}
305#else 177#else
@@ -345,9 +217,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
345 " .set mips3 \n" 217 " .set mips3 \n"
346 " sc $1, %1 \n" 218 " sc $1, %1 \n"
347 " beqzl $1, 1b \n" 219 " beqzl $1, 1b \n"
348#ifdef CONFIG_SMP
349 " sync \n"
350#endif
351 "2: \n" 220 "2: \n"
352 " .set pop \n" 221 " .set pop \n"
353 : "=&r" (retval), "=R" (*m) 222 : "=&r" (retval), "=R" (*m)
@@ -365,9 +234,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
365 " .set mips3 \n" 234 " .set mips3 \n"
366 " sc $1, %1 \n" 235 " sc $1, %1 \n"
367 " beqz $1, 1b \n" 236 " beqz $1, 1b \n"
368#ifdef CONFIG_SMP
369 " sync \n"
370#endif
371 "2: \n" 237 "2: \n"
372 " .set pop \n" 238 " .set pop \n"
373 : "=&r" (retval), "=R" (*m) 239 : "=&r" (retval), "=R" (*m)
@@ -383,6 +249,8 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
383 local_irq_restore(flags); /* implies memory barrier */ 249 local_irq_restore(flags); /* implies memory barrier */
384 } 250 }
385 251
252 smp_mb();
253
386 return retval; 254 return retval;
387} 255}
388 256
@@ -402,9 +270,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
402 " move $1, %z4 \n" 270 " move $1, %z4 \n"
403 " scd $1, %1 \n" 271 " scd $1, %1 \n"
404 " beqzl $1, 1b \n" 272 " beqzl $1, 1b \n"
405#ifdef CONFIG_SMP
406 " sync \n"
407#endif
408 "2: \n" 273 "2: \n"
409 " .set pop \n" 274 " .set pop \n"
410 : "=&r" (retval), "=R" (*m) 275 : "=&r" (retval), "=R" (*m)
@@ -420,9 +285,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
420 " move $1, %z4 \n" 285 " move $1, %z4 \n"
421 " scd $1, %1 \n" 286 " scd $1, %1 \n"
422 " beqz $1, 1b \n" 287 " beqz $1, 1b \n"
423#ifdef CONFIG_SMP
424 " sync \n"
425#endif
426 "2: \n" 288 "2: \n"
427 " .set pop \n" 289 " .set pop \n"
428 : "=&r" (retval), "=R" (*m) 290 : "=&r" (retval), "=R" (*m)
@@ -438,6 +300,8 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
438 local_irq_restore(flags); /* implies memory barrier */ 300 local_irq_restore(flags); /* implies memory barrier */
439 } 301 }
440 302
303 smp_mb();
304
441 return retval; 305 return retval;
442} 306}
443#else 307#else
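The xchg and cmpxchg helpers follow the same recipe as the atomics above: no sync inside the retry loop, with an smp_mb() on the return path supplying the ordering (the interrupt-disabling fallback already gets it from local_irq_restore(), as the comments note). A userspace stand-in for the fully ordered exchange, using a __atomic builtin in place of ll/sc:

#include <stdio.h>

int main(void)
{
	unsigned int m = 1;

	/* sequentially consistent exchange: old value out, new value in */
	unsigned int old = __atomic_exchange_n(&m, 2u, __ATOMIC_SEQ_CST);

	printf("old=%u new=%u\n", old, m);
	return 0;
}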