-rw-r--r--  Documentation/memory-barriers.txt  348
-rw-r--r--  Documentation/serial/driver  9
-rw-r--r--  MAINTAINERS  17
-rw-r--r--  Makefile  4
-rw-r--r--  arch/alpha/kernel/alpha_ksyms.c  1
-rw-r--r--  arch/alpha/kernel/process.c  6
-rw-r--r--  arch/alpha/kernel/smp.c  14
-rw-r--r--  arch/alpha/kernel/sys_titan.c  2
-rw-r--r--  arch/arm/Kconfig.debug  2
-rw-r--r--  arch/arm/mach-ep93xx/ts72xx.c  8
-rw-r--r--  arch/arm/mach-ixp23xx/core.c  18
-rw-r--r--  arch/arm/mach-ixp4xx/Kconfig  2
-rw-r--r--  arch/arm/mach-pxa/mainstone.c  1
-rw-r--r--  arch/arm/mach-s3c2410/Kconfig  2
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c  23
-rw-r--r--  arch/i386/kernel/setup.c  11
-rw-r--r--  arch/mips/au1000/common/prom.c  24
-rw-r--r--  arch/mips/au1000/common/sleeper.S  5
-rw-r--r--  arch/mips/ddb5xxx/ddb5476/dbg_io.c  2
-rw-r--r--  arch/mips/ddb5xxx/ddb5477/kgdb_io.c  2
-rw-r--r--  arch/mips/gt64120/ev64120/serialGT.c  2
-rw-r--r--  arch/mips/gt64120/momenco_ocelot/dbg_io.c  2
-rw-r--r--  arch/mips/ite-boards/generic/dbg_io.c  2
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c  8
-rw-r--r--  arch/mips/kernel/cpu-probe.c  2
-rw-r--r--  arch/mips/kernel/module.c  6
-rw-r--r--  arch/mips/kernel/scall64-o32.S  2
-rw-r--r--  arch/mips/kernel/setup.c  18
-rw-r--r--  arch/mips/kernel/smp.c  5
-rw-r--r--  arch/mips/kernel/syscall.c  3
-rw-r--r--  arch/mips/kernel/traps.c  19
-rw-r--r--  arch/mips/math-emu/dp_fint.c  4
-rw-r--r--  arch/mips/math-emu/dp_flong.c  4
-rw-r--r--  arch/mips/math-emu/sp_fint.c  4
-rw-r--r--  arch/mips/math-emu/sp_flong.c  4
-rw-r--r--  arch/mips/mm/c-r4k.c  34
-rw-r--r--  arch/mips/mm/init.c  2
-rw-r--r--  arch/mips/momentum/jaguar_atx/dbg_io.c  2
-rw-r--r--  arch/mips/momentum/ocelot_c/dbg_io.c  2
-rw-r--r--  arch/mips/momentum/ocelot_g/dbg_io.c  2
-rw-r--r--  arch/mips/oprofile/common.c  8
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c  32
-rw-r--r--  arch/mips/oprofile/op_model_rm9000.c  2
-rw-r--r--  arch/mips/sgi-ip32/ip32-irq.c  4
-rw-r--r--  arch/powerpc/kernel/prom_init.c  10
-rw-r--r--  arch/powerpc/kernel/signal_32.c  11
-rw-r--r--  arch/powerpc/kernel/signal_64.c  2
-rw-r--r--  arch/powerpc/platforms/cell/setup.c  11
-rw-r--r--  arch/powerpc/platforms/powermac/pfunc_core.c  18
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c  8
-rw-r--r--  arch/sparc/kernel/smp.c  11
-rw-r--r--  arch/sparc64/kernel/head.S  30
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c  124
-rw-r--r--  arch/sparc64/kernel/setup.c  23
-rw-r--r--  arch/sparc64/kernel/smp.c  51
-rw-r--r--  arch/sparc64/kernel/traps.c  11
-rw-r--r--  arch/sparc64/lib/checksum.S  5
-rw-r--r--  arch/sparc64/lib/csum_copy.S  5
-rw-r--r--  arch/um/Makefile-i386  4
-rw-r--r--  arch/um/include/kern_util.h  13
-rw-r--r--  arch/um/kernel/time_kern.c  10
-rw-r--r--  arch/um/os-Linux/main.c  2
-rw-r--r--  arch/um/os-Linux/time.c  10
-rw-r--r--  arch/um/sys-i386/syscalls.c  9
-rw-r--r--  arch/um/sys-x86_64/signal.c  24
-rw-r--r--  arch/um/sys-x86_64/syscalls.c  2
-rw-r--r--  arch/x86_64/kernel/io_apic.c  30
-rw-r--r--  block/as-iosched.c  13
-rw-r--r--  block/cfq-iosched.c  62
-rw-r--r--  block/deadline-iosched.c  13
-rw-r--r--  block/elevator.c  55
-rw-r--r--  block/noop-iosched.c  7
-rw-r--r--  drivers/acpi/processor_perflib.c  5
-rw-r--r--  drivers/char/Makefile  2
-rw-r--r--  drivers/char/n_tty.c  4
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c  2
-rw-r--r--  drivers/ieee1394/sbp2.c  2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c  1
-rw-r--r--  drivers/message/fusion/mptbase.c  27
-rw-r--r--  drivers/message/fusion/mptspi.c  2
-rw-r--r--  drivers/message/i2o/exec-osm.c  72
-rw-r--r--  drivers/message/i2o/iop.c  4
-rw-r--r--  drivers/mmc/Kconfig  2
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c  5
-rw-r--r--  drivers/net/e1000/e1000_main.c  8
-rw-r--r--  drivers/net/forcedeth.c  16
-rw-r--r--  drivers/net/netconsole.c  2
-rw-r--r--  drivers/net/pcmcia/nmclan_cs.c  2
-rw-r--r--  drivers/net/pppoe.c  3
-rw-r--r--  drivers/net/tg3.c  144
-rw-r--r--  drivers/net/tg3.h  3
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c  31
-rw-r--r--  drivers/pci/pci-driver.c  13
-rw-r--r--  drivers/pci/pci.c  18
-rw-r--r--  drivers/pcmcia/ds.c  6
-rw-r--r--  drivers/rtc/rtc-m48t86.c  72
-rw-r--r--  drivers/s390/cio/css.h  4
-rw-r--r--  drivers/s390/cio/device_fsm.c  2
-rw-r--r--  drivers/scsi/ppa.c  7
-rw-r--r--  drivers/scsi/sata_sil24.c  6
-rw-r--r--  drivers/scsi/scsi_devinfo.c  1
-rw-r--r--  drivers/scsi/scsi_lib.c  2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c  4
-rw-r--r--  drivers/video/console/fbcon.c  2
-rw-r--r--  fs/debugfs/inode.c  3
-rw-r--r--  fs/namei.c  19
-rw-r--r--  include/asm-alpha/smp.h  4
-rw-r--r--  include/asm-arm/arch-ixp23xx/memory.h  2
-rw-r--r--  include/asm-arm/arch-l7200/serial_l7200.h  2
-rw-r--r--  include/asm-arm/arch-l7200/uncompress.h  2
-rw-r--r--  include/asm-generic/pgtable.h  11
-rw-r--r--  include/asm-mips/addrspace.h  1
-rw-r--r--  include/asm-mips/delay.h  22
-rw-r--r--  include/asm-mips/page.h  2
-rw-r--r--  include/asm-mips/pgtable-32.h  61
-rw-r--r--  include/asm-mips/pgtable-64.h  13
-rw-r--r--  include/asm-mips/pgtable.h  15
-rw-r--r--  include/asm-mips/smp.h  5
-rw-r--r--  include/asm-mips/sparsemem.h  14
-rw-r--r--  include/asm-s390/futex.h  15
-rw-r--r--  include/asm-s390/lowcore.h  4
-rw-r--r--  include/asm-sparc64/pgtable.h  17
-rw-r--r--  include/asm-um/irqflags.h  6
-rw-r--r--  include/asm-um/uaccess.h  6
-rw-r--r--  include/linux/elevator.h  2
-rw-r--r--  include/linux/i2o.h  5
-rw-r--r--  include/linux/m48t86.h  4
-rw-r--r--  include/linux/mempolicy.h  1
-rw-r--r--  include/linux/mmzone.h  1
-rw-r--r--  include/linux/pci-acpi.h  2
-rw-r--r--  mm/slab.c  27
-rw-r--r--  mm/vmscan.c  2
-rw-r--r--  net/bridge/br_if.c  19
-rw-r--r--  net/ethernet/Makefile  1
-rw-r--r--  net/ethernet/sysctl_net_ether.c  14
-rw-r--r--  net/ipv4/tcp_highspeed.c  3
-rw-r--r--  net/ipv4/tcp_output.c  12
-rw-r--r--  net/irda/irlap.c  3
-rw-r--r--  net/sysctl_net.c  8
-rw-r--r--  security/selinux/hooks.c  6
140 files changed, 1353 insertions, 703 deletions
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index c61d8b876fdb..4710845dbac4 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -19,6 +19,7 @@ Contents:
19 - Control dependencies. 19 - Control dependencies.
20 - SMP barrier pairing. 20 - SMP barrier pairing.
21 - Examples of memory barrier sequences. 21 - Examples of memory barrier sequences.
22 - Read memory barriers vs load speculation.
22 23
23 (*) Explicit kernel barriers. 24 (*) Explicit kernel barriers.
24 25
@@ -248,7 +249,7 @@ And there are a number of things that _must_ or _must_not_ be assumed:
248 we may get either of: 249 we may get either of:
249 250
250 STORE *A = X; Y = LOAD *A; 251 STORE *A = X; Y = LOAD *A;
251 STORE *A = Y; 252 STORE *A = Y = X;
252 253
253 254
254========================= 255=========================
@@ -344,9 +345,12 @@ Memory barriers come in four basic varieties:
344 345
345 (4) General memory barriers. 346 (4) General memory barriers.
346 347
347 A general memory barrier is a combination of both a read memory barrier 348 A general memory barrier gives a guarantee that all the LOAD and STORE
348 and a write memory barrier. It is a partial ordering over both loads and 349 operations specified before the barrier will appear to happen before all
349 stores. 350 the LOAD and STORE operations specified after the barrier with respect to
351 the other components of the system.
352
353 A general memory barrier is a partial ordering over both loads and stores.
350 354
351 General memory barriers imply both read and write memory barriers, and so 355 General memory barriers imply both read and write memory barriers, and so
352 can substitute for either. 356 can substitute for either.
@@ -546,9 +550,9 @@ write barrier, though, again, a general barrier is viable:
546 =============== =============== 550 =============== ===============
547 a = 1; 551 a = 1;
548 <write barrier> 552 <write barrier>
549 b = 2; x = a; 553 b = 2; x = b;
550 <read barrier> 554 <read barrier>
551 y = b; 555 y = a;
552 556
553Or: 557Or:
554 558
@@ -563,6 +567,18 @@ Or:
563Basically, the read barrier always has to be there, even though it can be of 567Basically, the read barrier always has to be there, even though it can be of
564the "weaker" type. 568the "weaker" type.
565 569
570[!] Note that the stores before the write barrier would normally be expected to
571match the loads after the read barrier or data dependency barrier, and vice
572versa:
573
574 CPU 1 CPU 2
575 =============== ===============
576 a = 1; }---- --->{ v = c
577 b = 2; } \ / { w = d
578 <write barrier> \ <read barrier>
579 c = 3; } / \ { x = a;
580 d = 4; }---- --->{ y = b;
581
566 582
567EXAMPLES OF MEMORY BARRIER SEQUENCES 583EXAMPLES OF MEMORY BARRIER SEQUENCES
568------------------------------------ 584------------------------------------
@@ -600,8 +616,8 @@ STORE B, STORE C } all occuring before the unordered set of { STORE D, STORE E
600 | | +------+ 616 | | +------+
601 +-------+ : : 617 +-------+ : :
602 | 618 |
603 | Sequence in which stores committed to memory system 619 | Sequence in which stores are committed to the
604 | by CPU 1 620 | memory system by CPU 1
605 V 621 V
606 622
607 623
@@ -683,14 +699,12 @@ then the following will occur:
683 | : : | | 699 | : : | |
684 | : : | CPU 2 | 700 | : : | CPU 2 |
685 | +-------+ | | 701 | +-------+ | |
686 \ | X->9 |------>| | 702 | | X->9 |------>| |
687 \ +-------+ | | 703 | +-------+ | |
688 ----->| B->2 | | | 704 Makes sure all effects ---> \ ddddddddddddddddd | |
689 +-------+ | | 705 prior to the store of C \ +-------+ | |
690 Makes sure all effects ---> ddddddddddddddddd | | 706 are perceptible to ----->| B->2 |------>| |
691 prior to the store of C +-------+ | | 707 subsequent loads +-------+ | |
692 are perceptible to | B->2 |------>| |
693 successive loads +-------+ | |
694 : : +-------+ 708 : : +-------+
695 709
696 710
@@ -699,73 +713,239 @@ following sequence of events:
699 713
700 CPU 1 CPU 2 714 CPU 1 CPU 2
701 ======================= ======================= 715 ======================= =======================
716 { A = 0, B = 9 }
702 STORE A=1 717 STORE A=1
703 STORE B=2
704 STORE C=3
705 <write barrier> 718 <write barrier>
706 STORE D=4 719 STORE B=2
707 STORE E=5
708 LOAD A
709 LOAD B 720 LOAD B
710 LOAD C 721 LOAD A
711 LOAD D
712 LOAD E
713 722
714Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in 723Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
715some effectively random order, despite the write barrier issued by CPU 1: 724some effectively random order, despite the write barrier issued by CPU 1:
716 725
717 +-------+ : : 726 +-------+ : : : :
718 | | +------+ 727 | | +------+ +-------+
719 | |------>| C=3 | } 728 | |------>| A=1 |------ --->| A->0 |
720 | | : +------+ } 729 | | +------+ \ +-------+
721 | | : | A=1 | } 730 | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
722 | | : +------+ } 731 | | +------+ | +-------+
723 | CPU 1 | : | B=2 | }--- 732 | |------>| B=2 |--- | : :
724 | | +------+ } \ 733 | | +------+ \ | : : +-------+
725 | | wwwwwwwwwwwww} \ 734 +-------+ : : \ | +-------+ | |
726 | | +------+ } \ : : +-------+ 735 ---------->| B->2 |------>| |
727 | | : | E=5 | } \ +-------+ | | 736 | +-------+ | CPU 2 |
728 | | : +------+ } \ { | C->3 |------>| | 737 | | A->0 |------>| |
729 | |------>| D=4 | } \ { +-------+ : | | 738 | +-------+ | |
730 | | +------+ \ { | E->5 | : | | 739 | : : +-------+
731 +-------+ : : \ { +-------+ : | | 740 \ : :
732 Transfer -->{ | A->1 | : | CPU 2 | 741 \ +-------+
733 from CPU 1 { +-------+ : | | 742 ---->| A->1 |
734 to CPU 2 { | D->4 | : | | 743 +-------+
735 { +-------+ : | | 744 : :
736 { | B->2 |------>| |
737 +-------+ | |
738 : : +-------+
739
740
741If, however, a read barrier were to be placed between the load of C and the
742load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
743perceived correctly by CPU 2.
744 745
745 +-------+ : : 746
746 | | +------+ 747If, however, a read barrier were to be placed between the load of E and the
747 | |------>| C=3 | } 748load of A on CPU 2:
748 | | : +------+ } 749
749 | | : | A=1 | }--- 750 CPU 1 CPU 2
750 | | : +------+ } \ 751 ======================= =======================
751 | CPU 1 | : | B=2 | } \ 752 { A = 0, B = 9 }
752 | | +------+ \ 753 STORE A=1
753 | | wwwwwwwwwwwwwwww \ 754 <write barrier>
754 | | +------+ \ : : +-------+ 755 STORE B=2
755 | | : | E=5 | } \ +-------+ | | 756 LOAD B
756 | | : +------+ }--- \ { | C->3 |------>| | 757 <read barrier>
757 | |------>| D=4 | } \ \ { +-------+ : | | 758 LOAD A
758 | | +------+ \ -->{ | B->2 | : | | 759
759 +-------+ : : \ { +-------+ : | | 760then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
760 \ { | A->1 | : | CPU 2 | 7612:
761 \ +-------+ | | 762
762 At this point the read ----> \ rrrrrrrrrrrrrrrrr | | 763 +-------+ : : : :
763 barrier causes all effects \ +-------+ | | 764 | | +------+ +-------+
764 prior to the storage of C \ { | E->5 | : | | 765 | |------>| A=1 |------ --->| A->0 |
765 to be perceptible to CPU 2 -->{ +-------+ : | | 766 | | +------+ \ +-------+
766 { | D->4 |------>| | 767 | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
767 +-------+ | | 768 | | +------+ | +-------+
768 : : +-------+ 769 | |------>| B=2 |--- | : :
770 | | +------+ \ | : : +-------+
771 +-------+ : : \ | +-------+ | |
772 ---------->| B->2 |------>| |
773 | +-------+ | CPU 2 |
774 | : : | |
775 | : : | |
776 At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
777 barrier causes all effects \ +-------+ | |
778 prior to the storage of B ---->| A->1 |------>| |
779 to be perceptible to CPU 2 +-------+ | |
780 : : +-------+
781
782
783To illustrate this more completely, consider what could happen if the code
784contained a load of A either side of the read barrier:
785
786 CPU 1 CPU 2
787 ======================= =======================
788 { A = 0, B = 9 }
789 STORE A=1
790 <write barrier>
791 STORE B=2
792 LOAD B
793 LOAD A [first load of A]
794 <read barrier>
795 LOAD A [second load of A]
796
797Even though the two loads of A both occur after the load of B, they may both
798come up with different values:
799
800 +-------+ : : : :
801 | | +------+ +-------+
802 | |------>| A=1 |------ --->| A->0 |
803 | | +------+ \ +-------+
804 | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
805 | | +------+ | +-------+
806 | |------>| B=2 |--- | : :
807 | | +------+ \ | : : +-------+
808 +-------+ : : \ | +-------+ | |
809 ---------->| B->2 |------>| |
810 | +-------+ | CPU 2 |
811 | : : | |
812 | : : | |
813 | +-------+ | |
814 | | A->0 |------>| 1st |
815 | +-------+ | |
816 At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
817 barrier causes all effects \ +-------+ | |
818 prior to the storage of B ---->| A->1 |------>| 2nd |
819 to be perceptible to CPU 2 +-------+ | |
820 : : +-------+
821
822
823But it may be that the update to A from CPU 1 becomes perceptible to CPU 2
824before the read barrier completes anyway:
825
826 +-------+ : : : :
827 | | +------+ +-------+
828 | |------>| A=1 |------ --->| A->0 |
829 | | +------+ \ +-------+
830 | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
831 | | +------+ | +-------+
832 | |------>| B=2 |--- | : :
833 | | +------+ \ | : : +-------+
834 +-------+ : : \ | +-------+ | |
835 ---------->| B->2 |------>| |
836 | +-------+ | CPU 2 |
837 | : : | |
838 \ : : | |
839 \ +-------+ | |
840 ---->| A->1 |------>| 1st |
841 +-------+ | |
842 rrrrrrrrrrrrrrrrr | |
843 +-------+ | |
844 | A->1 |------>| 2nd |
845 +-------+ | |
846 : : +-------+
847
848
849The guarantee is that the second load will always come up with A == 1 if the
850load of B came up with B == 2. No such guarantee exists for the first load of
851A; that may come up with either A == 0 or A == 1.
852
853
854READ MEMORY BARRIERS VS LOAD SPECULATION
855----------------------------------------
856
857Many CPUs speculate with loads: that is they see that they will need to load an
858item from memory, and they find a time where they're not using the bus for any
859other loads, and so do the load in advance - even though they haven't actually
860got to that point in the instruction execution flow yet. This permits the
861actual load instruction to potentially complete immediately because the CPU
862already has the value to hand.
863
864It may turn out that the CPU didn't actually need the value - perhaps because a
865branch circumvented the load - in which case it can discard the value or just
866cache it for later use.
867
868Consider:
869
870 CPU 1 CPU 2
871 ======================= =======================
872 LOAD B
873 DIVIDE } Divide instructions generally
874 DIVIDE } take a long time to perform
875 LOAD A
876
877Which might appear as this:
878
879 : : +-------+
880 +-------+ | |
881 --->| B->2 |------>| |
882 +-------+ | CPU 2 |
883 : :DIVIDE | |
884 +-------+ | |
885 The CPU being busy doing a ---> --->| A->0 |~~~~ | |
886 division speculates on the +-------+ ~ | |
887 LOAD of A : : ~ | |
888 : :DIVIDE | |
889 : : ~ | |
890 Once the divisions are complete --> : : ~-->| |
891 the CPU can then perform the : : | |
892 LOAD with immediate effect : : +-------+
893
894
895Placing a read barrier or a data dependency barrier just before the second
896load:
897
898 CPU 1 CPU 2
899 ======================= =======================
900 LOAD B
901 DIVIDE
902 DIVIDE
903 <read barrier>
904 LOAD A
905
906will force any value speculatively obtained to be reconsidered to an extent
907dependent on the type of barrier used. If there was no change made to the
908speculated memory location, then the speculated value will just be used:
909
910 : : +-------+
911 +-------+ | |
912 --->| B->2 |------>| |
913 +-------+ | CPU 2 |
914 : :DIVIDE | |
915 +-------+ | |
916 The CPU being busy doing a ---> --->| A->0 |~~~~ | |
917 division speculates on the +-------+ ~ | |
918 LOAD of A : : ~ | |
919 : :DIVIDE | |
920 : : ~ | |
921 : : ~ | |
922 rrrrrrrrrrrrrrrr~ | |
923 : : ~ | |
924 : : ~-->| |
925 : : | |
926 : : +-------+
927
928
929but if there was an update or an invalidation from another CPU pending, then
930the speculation will be cancelled and the value reloaded:
931
932 : : +-------+
933 +-------+ | |
934 --->| B->2 |------>| |
935 +-------+ | CPU 2 |
936 : :DIVIDE | |
937 +-------+ | |
938 The CPU being busy doing a ---> --->| A->0 |~~~~ | |
939 division speculates on the +-------+ ~ | |
940 LOAD of A : : ~ | |
941 : :DIVIDE | |
942 : : ~ | |
943 : : ~ | |
944 rrrrrrrrrrrrrrrrr | |
945 +-------+ | |
946 The speculation is discarded ---> --->| A->1 |------>| |
947 and an updated value is +-------+ | |
948 retrieved : : +-------+
769 949
770 950
771======================== 951========================
@@ -901,7 +1081,7 @@ IMPLICIT KERNEL MEMORY BARRIERS
901=============================== 1081===============================
902 1082
903Some of the other functions in the linux kernel imply memory barriers, amongst 1083Some of the other functions in the linux kernel imply memory barriers, amongst
904which are locking, scheduling and memory allocation functions. 1084which are locking and scheduling functions.
905 1085
906This specification is a _minimum_ guarantee; any particular architecture may 1086This specification is a _minimum_ guarantee; any particular architecture may
907provide more substantial guarantees, but these may not be relied upon outside 1087provide more substantial guarantees, but these may not be relied upon outside
@@ -966,6 +1146,20 @@ equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
966 barriers is that the effects instructions outside of a critical section may 1146 barriers is that the effects instructions outside of a critical section may
967 seep into the inside of the critical section. 1147 seep into the inside of the critical section.
968 1148
1149A LOCK followed by an UNLOCK may not be assumed to be full memory barrier
1150because it is possible for an access preceding the LOCK to happen after the
1151LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
1152two accesses can themselves then cross:
1153
1154 *A = a;
1155 LOCK
1156 UNLOCK
1157 *B = b;
1158
1159may occur as:
1160
1161 LOCK, STORE *B, STORE *A, UNLOCK
1162
969Locks and semaphores may not provide any guarantee of ordering on UP compiled 1163Locks and semaphores may not provide any guarantee of ordering on UP compiled
970systems, and so cannot be counted on in such a situation to actually achieve 1164systems, and so cannot be counted on in such a situation to actually achieve
971anything at all - especially with respect to I/O accesses - unless combined 1165anything at all - especially with respect to I/O accesses - unless combined
@@ -1016,8 +1210,6 @@ Other functions that imply barriers:
1016 1210
1017 (*) schedule() and similar imply full memory barriers. 1211 (*) schedule() and similar imply full memory barriers.
1018 1212
1019 (*) Memory allocation and release functions imply full memory barriers.
1020
1021 1213
1022================================= 1214=================================
1023INTER-CPU LOCKING BARRIER EFFECTS 1215INTER-CPU LOCKING BARRIER EFFECTS
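
The barrier-pairing rule the hunks above add to memory-barriers.txt can be read as a short producer/consumer sketch (not part of the patch; a minimal illustration using the kernel's smp_wmb()/smp_rmb() primitives, with cpu_relax() and BUG_ON() used only for demonstration):

    static int data;
    static int flag;

    void producer(void)             /* runs on CPU 1 */
    {
            data = 42;              /* STORE A */
            smp_wmb();              /* write barrier: commit data before flag */
            flag = 1;               /* STORE B */
    }

    void consumer(void)             /* runs on CPU 2 */
    {
            while (!flag)           /* LOAD B */
                    cpu_relax();
            smp_rmb();              /* read barrier: pairs with the smp_wmb() above */
            BUG_ON(data != 42);     /* LOAD A now sees the value stored before the flag */
    }

Without the smp_rmb(), CPU 2 may observe flag == 1 while still seeing the stale value of data, exactly the situation shown in the load-speculation diagrams above.
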
diff --git a/Documentation/serial/driver b/Documentation/serial/driver
index df82116a9f26..88ad615dd338 100644
--- a/Documentation/serial/driver
+++ b/Documentation/serial/driver
@@ -214,12 +214,13 @@ hardware.
214 The interaction of the iflag bits is as follows (parity error 214 The interaction of the iflag bits is as follows (parity error
215 given as an example): 215 given as an example):
216 Parity error INPCK IGNPAR 216 Parity error INPCK IGNPAR
217 None n/a n/a character received 217 n/a 0 n/a character received, marked as
218 Yes n/a 0 character discarded
219 Yes 0 1 character received, marked as
220 TTY_NORMAL 218 TTY_NORMAL
221 Yes 1 1 character received, marked as 219 None 1 n/a character received, marked as
220 TTY_NORMAL
221 Yes 1 0 character received, marked as
222 TTY_PARITY 222 TTY_PARITY
223 Yes 1 1 character discarded
223 224
224 Other flags may be used (eg, xon/xoff characters) if your 225 Other flags may be used (eg, xon/xoff characters) if your
225 hardware supports hardware "soft" flow control. 226 hardware supports hardware "soft" flow control.
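
The corrected table boils down to a simple decision: with INPCK clear, parity errors are ignored entirely; with INPCK set, an erroneous character is either dropped (IGNPAR set) or passed up flagged TTY_PARITY (IGNPAR clear). A minimal sketch of that logic (illustrative only, not taken from any driver; rx_char_flag() is a made-up helper name):

    /* Returns the flag to attach to a received character, or -1 to drop it. */
    static int rx_char_flag(tcflag_t iflag, int parity_error)
    {
            if (!(iflag & INPCK))
                    return TTY_NORMAL;      /* parity checking disabled */
            if (!parity_error)
                    return TTY_NORMAL;      /* clean character */
            if (iflag & IGNPAR)
                    return -1;              /* INPCK && IGNPAR: discard */
            return TTY_PARITY;              /* INPCK && !IGNPAR: mark as parity error */
    }
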
diff --git a/MAINTAINERS b/MAINTAINERS
index 74d71cafb17c..c3c5842402df 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -568,6 +568,18 @@ L: linuxppc-dev@ozlabs.org
568W: http://www.penguinppc.org/ppc64/ 568W: http://www.penguinppc.org/ppc64/
569S: Supported 569S: Supported
570 570
571BROADCOM BNX2 GIGABIT ETHERNET DRIVER
572P: Michael Chan
573M: mchan@broadcom.com
574L: netdev@vger.kernel.org
575S: Supported
576
577BROADCOM TG3 GIGABIT ETHERNET DRIVER
578P: Michael Chan
579M: mchan@broadcom.com
580L: netdev@vger.kernel.org
581S: Supported
582
571BTTV VIDEO4LINUX DRIVER 583BTTV VIDEO4LINUX DRIVER
572P: Mauro Carvalho Chehab 584P: Mauro Carvalho Chehab
573M: mchehab@infradead.org 585M: mchehab@infradead.org
@@ -1877,6 +1889,11 @@ L: linux-kernel@vger.kernel.org
1877W: http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html 1889W: http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html
1878S: Maintained 1890S: Maintained
1879 1891
1892MULTIMEDIA CARD SUBSYSTEM
1893P: Russell King
1894M: rmk+mmc@arm.linux.org.uk
1895S: Maintained
1896
1880MULTISOUND SOUND DRIVER 1897MULTISOUND SOUND DRIVER
1881P: Andrew Veliath 1898P: Andrew Veliath
1882M: andrewtv@usa.net 1899M: andrewtv@usa.net
diff --git a/Makefile b/Makefile
index 435d209f42d8..a3a7baad8555 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 17 3SUBLEVEL = 17
4EXTRAVERSION =-rc5 4EXTRAVERSION =-rc6
5NAME=Lordi Rules 5NAME=Crazed Snow-Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index c645c5e14786..2b245ad731ee 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -182,7 +182,6 @@ EXPORT_SYMBOL(smp_num_cpus);
182EXPORT_SYMBOL(smp_call_function); 182EXPORT_SYMBOL(smp_call_function);
183EXPORT_SYMBOL(smp_call_function_on_cpu); 183EXPORT_SYMBOL(smp_call_function_on_cpu);
184EXPORT_SYMBOL(_atomic_dec_and_lock); 184EXPORT_SYMBOL(_atomic_dec_and_lock);
185EXPORT_SYMBOL(cpu_present_mask);
186#endif /* CONFIG_SMP */ 185#endif /* CONFIG_SMP */
187 186
188/* 187/*
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 9924fd07743a..c760a831fd1a 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -94,7 +94,7 @@ common_shutdown_1(void *generic_ptr)
94 if (cpuid != boot_cpuid) { 94 if (cpuid != boot_cpuid) {
95 flags |= 0x00040000UL; /* "remain halted" */ 95 flags |= 0x00040000UL; /* "remain halted" */
96 *pflags = flags; 96 *pflags = flags;
97 clear_bit(cpuid, &cpu_present_mask); 97 cpu_clear(cpuid, cpu_present_map);
98 halt(); 98 halt();
99 } 99 }
100#endif 100#endif
@@ -120,8 +120,8 @@ common_shutdown_1(void *generic_ptr)
120 120
121#ifdef CONFIG_SMP 121#ifdef CONFIG_SMP
122 /* Wait for the secondaries to halt. */ 122 /* Wait for the secondaries to halt. */
123 cpu_clear(boot_cpuid, cpu_possible_map); 123 cpu_clear(boot_cpuid, cpu_present_map);
124 while (cpus_weight(cpu_possible_map)) 124 while (cpus_weight(cpu_present_map))
125 barrier(); 125 barrier();
126#endif 126#endif
127 127
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 185255416e85..4dc273e537fd 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -68,7 +68,6 @@ enum ipi_message_type {
68static int smp_secondary_alive __initdata = 0; 68static int smp_secondary_alive __initdata = 0;
69 69
70/* Which cpus ids came online. */ 70/* Which cpus ids came online. */
71cpumask_t cpu_present_mask;
72cpumask_t cpu_online_map; 71cpumask_t cpu_online_map;
73 72
74EXPORT_SYMBOL(cpu_online_map); 73EXPORT_SYMBOL(cpu_online_map);
@@ -439,7 +438,7 @@ setup_smp(void)
439 if ((cpu->flags & 0x1cc) == 0x1cc) { 438 if ((cpu->flags & 0x1cc) == 0x1cc) {
440 smp_num_probed++; 439 smp_num_probed++;
441 /* Assume here that "whami" == index */ 440 /* Assume here that "whami" == index */
442 cpu_set(i, cpu_present_mask); 441 cpu_set(i, cpu_present_map);
443 cpu->pal_revision = boot_cpu_palrev; 442 cpu->pal_revision = boot_cpu_palrev;
444 } 443 }
445 444
@@ -450,11 +449,10 @@ setup_smp(void)
450 } 449 }
451 } else { 450 } else {
452 smp_num_probed = 1; 451 smp_num_probed = 1;
453 cpu_set(boot_cpuid, cpu_present_mask);
454 } 452 }
455 453
456 printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n", 454 printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
457 smp_num_probed, cpu_possible_map.bits[0]); 455 smp_num_probed, cpu_present_map.bits[0]);
458} 456}
459 457
460/* 458/*
@@ -473,7 +471,7 @@ smp_prepare_cpus(unsigned int max_cpus)
473 471
474 /* Nothing to do on a UP box, or when told not to. */ 472 /* Nothing to do on a UP box, or when told not to. */
475 if (smp_num_probed == 1 || max_cpus == 0) { 473 if (smp_num_probed == 1 || max_cpus == 0) {
476 cpu_present_mask = cpumask_of_cpu(boot_cpuid); 474 cpu_present_map = cpumask_of_cpu(boot_cpuid);
477 printk(KERN_INFO "SMP mode deactivated.\n"); 475 printk(KERN_INFO "SMP mode deactivated.\n");
478 return; 476 return;
479 } 477 }
@@ -486,10 +484,6 @@ smp_prepare_cpus(unsigned int max_cpus)
486void __devinit 484void __devinit
487smp_prepare_boot_cpu(void) 485smp_prepare_boot_cpu(void)
488{ 486{
489 /*
490 * Mark the boot cpu (current cpu) as online
491 */
492 cpu_set(smp_processor_id(), cpu_online_map);
493} 487}
494 488
495int __devinit 489int __devinit
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 5f84417eeb7b..2551fb49ae09 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -66,7 +66,7 @@ titan_update_irq_hw(unsigned long mask)
66 register int bcpu = boot_cpuid; 66 register int bcpu = boot_cpuid;
67 67
68#ifdef CONFIG_SMP 68#ifdef CONFIG_SMP
69 cpumask_t cpm = cpu_present_mask; 69 cpumask_t cpm = cpu_present_map;
70 volatile unsigned long *dim0, *dim1, *dim2, *dim3; 70 volatile unsigned long *dim0, *dim1, *dim2, *dim3;
71 unsigned long mask0, mask1, mask2, mask3, dummy; 71 unsigned long mask0, mask1, mask2, mask3, dummy;
72 72
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 5d3acff8c596..d22f38b957db 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -101,7 +101,7 @@ config DEBUG_S3C2410_UART
101 help 101 help
102 Choice for UART for kernel low-level using S3C2410 UARTS, 102 Choice for UART for kernel low-level using S3C2410 UARTS,
103 should be between zero and two. The port must have been 103 should be between zero and two. The port must have been
104 initalised by the boot-loader before use. 104 initialised by the boot-loader before use.
105 105
106 The uncompressor code port configuration is now handled 106 The uncompressor code port configuration is now handled
107 by CONFIG_S3C2410_LOWLEVEL_UART_PORT. 107 by CONFIG_S3C2410_LOWLEVEL_UART_PORT.
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 9be01b0c3f48..e24566b88a78 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -111,21 +111,21 @@ static void __init ts72xx_map_io(void)
111 } 111 }
112} 112}
113 113
114static unsigned char ts72xx_rtc_readb(unsigned long addr) 114static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
115{ 115{
116 __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE); 116 __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
117 return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE); 117 return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
118} 118}
119 119
120static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr) 120static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr)
121{ 121{
122 __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE); 122 __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
123 __raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE); 123 __raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
124} 124}
125 125
126static struct m48t86_ops ts72xx_rtc_ops = { 126static struct m48t86_ops ts72xx_rtc_ops = {
127 .readb = ts72xx_rtc_readb, 127 .readbyte = ts72xx_rtc_readbyte,
128 .writeb = ts72xx_rtc_writeb, 128 .writebyte = ts72xx_rtc_writebyte,
129}; 129};
130 130
131static struct platform_device ts72xx_rtc_device = { 131static struct platform_device ts72xx_rtc_device = {
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index 092ee12ced42..affd1d5d7440 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -178,8 +178,12 @@ static int ixp23xx_irq_set_type(unsigned int irq, unsigned int type)
178 178
179static void ixp23xx_irq_mask(unsigned int irq) 179static void ixp23xx_irq_mask(unsigned int irq)
180{ 180{
181 volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 181 volatile unsigned long *intr_reg;
182 182
183 if (irq >= 56)
184 irq += 8;
185
186 intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
183 *intr_reg &= ~(1 << (irq % 32)); 187 *intr_reg &= ~(1 << (irq % 32));
184} 188}
185 189
@@ -199,17 +203,25 @@ static void ixp23xx_irq_ack(unsigned int irq)
199 */ 203 */
200static void ixp23xx_irq_level_unmask(unsigned int irq) 204static void ixp23xx_irq_level_unmask(unsigned int irq)
201{ 205{
202 volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 206 volatile unsigned long *intr_reg;
203 207
204 ixp23xx_irq_ack(irq); 208 ixp23xx_irq_ack(irq);
205 209
210 if (irq >= 56)
211 irq += 8;
212
213 intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
206 *intr_reg |= (1 << (irq % 32)); 214 *intr_reg |= (1 << (irq % 32));
207} 215}
208 216
209static void ixp23xx_irq_edge_unmask(unsigned int irq) 217static void ixp23xx_irq_edge_unmask(unsigned int irq)
210{ 218{
211 volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32); 219 volatile unsigned long *intr_reg;
220
221 if (irq >= 56)
222 irq += 8;
212 223
224 intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
213 *intr_reg |= (1 << (irq % 32)); 225 *intr_reg |= (1 << (irq % 32));
214} 226}
215 227
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 2a39f9e481ad..3b23f43cb160 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -141,7 +141,7 @@ config IXP4XX_INDIRECT_PCI
141 2) If > 64MB of memory space is required, the IXP4xx can be 141 2) If > 64MB of memory space is required, the IXP4xx can be
142 configured to use indirect registers to access PCI This allows 142 configured to use indirect registers to access PCI This allows
143 for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus. 143 for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus.
144 The disadvantadge of this is that every PCI access requires 144 The disadvantage of this is that every PCI access requires
145 three local register accesses plus a spinlock, but in some 145 three local register accesses plus a spinlock, but in some
146 cases the performance hit is acceptable. In addition, you cannot 146 cases the performance hit is acceptable. In addition, you cannot
147 mmap() PCI devices in this case due to the indirect nature 147 mmap() PCI devices in this case due to the indirect nature
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 02e188d98e7d..b307f11951df 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -493,6 +493,7 @@ static void __init mainstone_map_io(void)
493MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)") 493MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
494 /* Maintainer: MontaVista Software Inc. */ 494 /* Maintainer: MontaVista Software Inc. */
495 .phys_io = 0x40000000, 495 .phys_io = 0x40000000,
496 .boot_params = 0xa0000100, /* BLOB boot parameter setting */
496 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, 497 .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
497 .map_io = mainstone_map_io, 498 .map_io = mainstone_map_io,
498 .init_irq = mainstone_init_irq, 499 .init_irq = mainstone_init_irq,
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index ce7d81000695..970f98dadffc 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -170,7 +170,7 @@ config S3C2410_PM_DEBUG
170 depends on ARCH_S3C2410 && PM 170 depends on ARCH_S3C2410 && PM
171 help 171 help
172 Say Y here if you want verbose debugging from the PM Suspend and 172 Say Y here if you want verbose debugging from the PM Suspend and
173 Resume code. See `Documentation/arm/Samsing-S3C24XX/Suspend.txt` 173 Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
174 for more information. 174 for more information.
175 175
176config S3C2410_PM_CHECK 176config S3C2410_PM_CHECK
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 2e3b643a4dc4..1649a175a206 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -5,17 +5,34 @@
5#include <linux/init.h> 5#include <linux/init.h>
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/pci.h> 7#include <linux/pci.h>
8#include <linux/acpi.h>
9
8#include <asm/pci-direct.h> 10#include <asm/pci-direct.h>
9#include <asm/acpi.h> 11#include <asm/acpi.h>
10#include <asm/apic.h> 12#include <asm/apic.h>
11 13
14#ifdef CONFIG_ACPI
15
16static int nvidia_hpet_detected __initdata;
17
18static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
19{
20 nvidia_hpet_detected = 1;
21 return 0;
22}
23#endif
24
12static int __init check_bridge(int vendor, int device) 25static int __init check_bridge(int vendor, int device)
13{ 26{
14#ifdef CONFIG_ACPI 27#ifdef CONFIG_ACPI
15 /* According to Nvidia all timer overrides are bogus. Just ignore 28 /* According to Nvidia all timer overrides are bogus unless HPET
16 them all. */ 29 is enabled. */
17 if (vendor == PCI_VENDOR_ID_NVIDIA) { 30 if (vendor == PCI_VENDOR_ID_NVIDIA) {
18 acpi_skip_timer_override = 1; 31 nvidia_hpet_detected = 0;
32 acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
33 if (nvidia_hpet_detected == 0) {
34 acpi_skip_timer_override = 1;
35 }
19 } 36 }
20#endif 37#endif
21 if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) { 38 if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 846e1639ef7c..dd6b0e3386ce 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1547,15 +1547,18 @@ void __init setup_arch(char **cmdline_p)
1547 if (efi_enabled) 1547 if (efi_enabled)
1548 efi_map_memmap(); 1548 efi_map_memmap();
1549 1549
1550#ifdef CONFIG_X86_IO_APIC
1551 check_acpi_pci(); /* Checks more than just ACPI actually */
1552#endif
1553
1554#ifdef CONFIG_ACPI 1550#ifdef CONFIG_ACPI
1555 /* 1551 /*
1556 * Parse the ACPI tables for possible boot-time SMP configuration. 1552 * Parse the ACPI tables for possible boot-time SMP configuration.
1557 */ 1553 */
1558 acpi_boot_table_init(); 1554 acpi_boot_table_init();
1555#endif
1556
1557#ifdef CONFIG_X86_IO_APIC
1558 check_acpi_pci(); /* Checks more than just ACPI actually */
1559#endif
1560
1561#ifdef CONFIG_ACPI
1559 acpi_boot_init(); 1562 acpi_boot_init();
1560 1563
1561#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC) 1564#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
diff --git a/arch/mips/au1000/common/prom.c b/arch/mips/au1000/common/prom.c
index 9c171afd9a53..ae7d8c57bf3f 100644
--- a/arch/mips/au1000/common/prom.c
+++ b/arch/mips/au1000/common/prom.c
@@ -1,10 +1,9 @@
1/* 1/*
2 * 2 *
3 * BRIEF MODULE DESCRIPTION 3 * BRIEF MODULE DESCRIPTION
4 * PROM library initialisation code, assuming a version of 4 * PROM library initialisation code, assuming YAMON is the boot loader.
5 * pmon is the boot code.
6 * 5 *
7 * Copyright 2000,2001 MontaVista Software Inc. 6 * Copyright 2000, 2001, 2006 MontaVista Software Inc.
8 * Author: MontaVista Software, Inc. 7 * Author: MontaVista Software, Inc.
9 * ppopov@mvista.com or source@mvista.com 8 * ppopov@mvista.com or source@mvista.com
10 * 9 *
@@ -49,9 +48,9 @@ extern char **prom_argv, **prom_envp;
49 48
50typedef struct 49typedef struct
51{ 50{
52 char *name; 51 char *name;
53/* char *val; */ 52 char *val;
54}t_env_var; 53} t_env_var;
55 54
56 55
57char * prom_getcmdline(void) 56char * prom_getcmdline(void)
@@ -85,21 +84,16 @@ char *prom_getenv(char *envname)
85{ 84{
86 /* 85 /*
87 * Return a pointer to the given environment variable. 86 * Return a pointer to the given environment variable.
88 * Environment variables are stored in the form of "memsize=64".
89 */ 87 */
90 88
91 t_env_var *env = (t_env_var *)prom_envp; 89 t_env_var *env = (t_env_var *)prom_envp;
92 int i;
93
94 i = strlen(envname);
95 90
96 while(env->name) { 91 while (env->name) {
97 if(strncmp(envname, env->name, i) == 0) { 92 if (strcmp(envname, env->name) == 0)
98 return(env->name + strlen(envname) + 1); 93 return env->val;
99 }
100 env++; 94 env++;
101 } 95 }
102 return(NULL); 96 return NULL;
103} 97}
104 98
105inline unsigned char str2hexnum(unsigned char c) 99inline unsigned char str2hexnum(unsigned char c)
diff --git a/arch/mips/au1000/common/sleeper.S b/arch/mips/au1000/common/sleeper.S
index 44dac3b0df3b..683d9da84b66 100644
--- a/arch/mips/au1000/common/sleeper.S
+++ b/arch/mips/au1000/common/sleeper.S
@@ -112,6 +112,11 @@ sdsleep:
112 mtc0 k0, CP0_PAGEMASK 112 mtc0 k0, CP0_PAGEMASK
113 lw k0, 0x14(sp) 113 lw k0, 0x14(sp)
114 mtc0 k0, CP0_CONFIG 114 mtc0 k0, CP0_CONFIG
115
116 /* We need to catch the ealry Alchemy SOCs with
117 * the write-only Config[OD] bit and set it back to one...
118 */
119 jal au1x00_fixup_config_od
115 lw $1, PT_R1(sp) 120 lw $1, PT_R1(sp)
116 lw $2, PT_R2(sp) 121 lw $2, PT_R2(sp)
117 lw $3, PT_R3(sp) 122 lw $3, PT_R3(sp)
diff --git a/arch/mips/ddb5xxx/ddb5476/dbg_io.c b/arch/mips/ddb5xxx/ddb5476/dbg_io.c
index 85e9e5013679..f2296a999953 100644
--- a/arch/mips/ddb5xxx/ddb5476/dbg_io.c
+++ b/arch/mips/ddb5xxx/ddb5476/dbg_io.c
@@ -86,7 +86,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
86 /* disable interrupts */ 86 /* disable interrupts */
87 UART16550_WRITE(OFS_INTR_ENABLE, 0); 87 UART16550_WRITE(OFS_INTR_ENABLE, 0);
88 88
89 /* set up buad rate */ 89 /* set up baud rate */
90 { 90 {
91 uint32 divisor; 91 uint32 divisor;
92 92
diff --git a/arch/mips/ddb5xxx/ddb5477/kgdb_io.c b/arch/mips/ddb5xxx/ddb5477/kgdb_io.c
index 1d18d590495b..385bbdb10170 100644
--- a/arch/mips/ddb5xxx/ddb5477/kgdb_io.c
+++ b/arch/mips/ddb5xxx/ddb5477/kgdb_io.c
@@ -86,7 +86,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
86 /* disable interrupts */ 86 /* disable interrupts */
87 UART16550_WRITE(OFS_INTR_ENABLE, 0); 87 UART16550_WRITE(OFS_INTR_ENABLE, 0);
88 88
89 /* set up buad rate */ 89 /* set up baud rate */
90 { 90 {
91 uint32 divisor; 91 uint32 divisor;
92 92
diff --git a/arch/mips/gt64120/ev64120/serialGT.c b/arch/mips/gt64120/ev64120/serialGT.c
index 16e34a546e54..8f0d835491ff 100644
--- a/arch/mips/gt64120/ev64120/serialGT.c
+++ b/arch/mips/gt64120/ev64120/serialGT.c
@@ -149,7 +149,7 @@ void serial_set(int channel, unsigned long baud)
149#else 149#else
150 /* 150 /*
151 * Note: Set baud rate, hardcoded here for rate of 115200 151 * Note: Set baud rate, hardcoded here for rate of 115200
152 * since became unsure of above "buad rate" algorithm (??). 152 * since became unsure of above "baud rate" algorithm (??).
153 */ 153 */
154 outreg(channel, LCR, 0x83); 154 outreg(channel, LCR, 0x83);
155 outreg(channel, DLM, 0x00); // See note above 155 outreg(channel, DLM, 0x00); // See note above
diff --git a/arch/mips/gt64120/momenco_ocelot/dbg_io.c b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
index 8720bccfdea2..f0a6a38fcf4d 100644
--- a/arch/mips/gt64120/momenco_ocelot/dbg_io.c
+++ b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
@@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
73 /* disable interrupts */ 73 /* disable interrupts */
74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 74 UART16550_WRITE(OFS_INTR_ENABLE, 0);
75 75
76 /* set up buad rate */ 76 /* set up baud rate */
77 { 77 {
78 uint32 divisor; 78 uint32 divisor;
79 79
diff --git a/arch/mips/ite-boards/generic/dbg_io.c b/arch/mips/ite-boards/generic/dbg_io.c
index c4f8530fd07e..6a7ccaf93502 100644
--- a/arch/mips/ite-boards/generic/dbg_io.c
+++ b/arch/mips/ite-boards/generic/dbg_io.c
@@ -72,7 +72,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
72 /* disable interrupts */ 72 /* disable interrupts */
73 UART16550_WRITE(OFS_INTR_ENABLE, 0); 73 UART16550_WRITE(OFS_INTR_ENABLE, 0);
74 74
75 /* set up buad rate */ 75 /* set up baud rate */
76 { 76 {
77 uint32 divisor; 77 uint32 divisor;
78 78
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 47a087b6c11b..d268827c62bd 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -206,7 +206,7 @@ static inline void check_daddi(void)
206 "daddi %0, %1, %3\n\t" 206 "daddi %0, %1, %3\n\t"
207 ".set pop" 207 ".set pop"
208 : "=r" (v), "=&r" (tmp) 208 : "=r" (v), "=&r" (tmp)
209 : "I" (0xffffffffffffdb9a), "I" (0x1234)); 209 : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
210 set_except_vector(12, handler); 210 set_except_vector(12, handler);
211 local_irq_restore(flags); 211 local_irq_restore(flags);
212 212
@@ -224,7 +224,7 @@ static inline void check_daddi(void)
224 "dsrl %1, %1, 1\n\t" 224 "dsrl %1, %1, 1\n\t"
225 "daddi %0, %1, %3" 225 "daddi %0, %1, %3"
226 : "=r" (v), "=&r" (tmp) 226 : "=r" (v), "=&r" (tmp)
227 : "I" (0xffffffffffffdb9a), "I" (0x1234)); 227 : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
228 set_except_vector(12, handler); 228 set_except_vector(12, handler);
229 local_irq_restore(flags); 229 local_irq_restore(flags);
230 230
@@ -280,7 +280,7 @@ static inline void check_daddiu(void)
280 "daddu %1, %2\n\t" 280 "daddu %1, %2\n\t"
281 ".set pop" 281 ".set pop"
282 : "=&r" (v), "=&r" (w), "=&r" (tmp) 282 : "=&r" (v), "=&r" (w), "=&r" (tmp)
283 : "I" (0xffffffffffffdb9a), "I" (0x1234)); 283 : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
284 284
285 if (v == w) { 285 if (v == w) {
286 printk("no.\n"); 286 printk("no.\n");
@@ -296,7 +296,7 @@ static inline void check_daddiu(void)
296 "addiu %1, $0, %4\n\t" 296 "addiu %1, $0, %4\n\t"
297 "daddu %1, %2" 297 "daddu %1, %2"
298 : "=&r" (v), "=&r" (w), "=&r" (tmp) 298 : "=&r" (v), "=&r" (w), "=&r" (tmp)
299 : "I" (0xffffffffffffdb9a), "I" (0x1234)); 299 : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
300 300
301 if (v == w) { 301 if (v == w) {
302 printk("yes.\n"); 302 printk("yes.\n");
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index bef3e2dc7c52..8c2c359a05f4 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -655,7 +655,7 @@ static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
655 case PRID_IMP_SB1: 655 case PRID_IMP_SB1:
656 c->cputype = CPU_SB1; 656 c->cputype = CPU_SB1;
657 /* FPU in pass1 is known to have issues. */ 657 /* FPU in pass1 is known to have issues. */
658 if ((c->processor_id & 0xff) < 0x20) 658 if ((c->processor_id & 0xff) < 0x02)
659 c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR); 659 c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
660 break; 660 break;
661 case PRID_IMP_SB1A: 661 case PRID_IMP_SB1A:
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index e54a7f442f8a..d7bf0215bc1d 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -288,6 +288,9 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
288 sym = (Elf_Sym *)sechdrs[symindex].sh_addr 288 sym = (Elf_Sym *)sechdrs[symindex].sh_addr
289 + ELF_MIPS_R_SYM(rel[i]); 289 + ELF_MIPS_R_SYM(rel[i]);
290 if (!sym->st_value) { 290 if (!sym->st_value) {
291 /* Ignore unresolved weak symbol */
292 if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
293 continue;
291 printk(KERN_WARNING "%s: Unknown symbol %s\n", 294 printk(KERN_WARNING "%s: Unknown symbol %s\n",
292 me->name, strtab + sym->st_name); 295 me->name, strtab + sym->st_name);
293 return -ENOENT; 296 return -ENOENT;
@@ -325,6 +328,9 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
325 sym = (Elf_Sym *)sechdrs[symindex].sh_addr 328 sym = (Elf_Sym *)sechdrs[symindex].sh_addr
326 + ELF_MIPS_R_SYM(rel[i]); 329 + ELF_MIPS_R_SYM(rel[i]);
327 if (!sym->st_value) { 330 if (!sym->st_value) {
331 /* Ignore unresolved weak symbol */
332 if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
333 continue;
328 printk(KERN_WARNING "%s: Unknown symbol %s\n", 334 printk(KERN_WARNING "%s: Unknown symbol %s\n",
329 me->name, strtab + sym->st_name); 335 me->name, strtab + sym->st_name);
330 return -ENOENT; 336 return -ENOENT;
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b53a9207f530..8efb23a84131 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -209,7 +209,7 @@ sys_call_table:
209 PTR sys_fork 209 PTR sys_fork
210 PTR sys_read 210 PTR sys_read
211 PTR sys_write 211 PTR sys_write
212 PTR sys_open /* 4005 */ 212 PTR compat_sys_open /* 4005 */
213 PTR sys_close 213 PTR sys_close
214 PTR sys_waitpid 214 PTR sys_waitpid
215 PTR sys_creat 215 PTR sys_creat
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index bcf1b10e518f..397a70e651b5 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -246,7 +246,7 @@ static inline int parse_rd_cmdline(unsigned long* rd_start, unsigned long* rd_en
246#ifdef CONFIG_64BIT 246#ifdef CONFIG_64BIT
247 /* HACK: Guess if the sign extension was forgotten */ 247 /* HACK: Guess if the sign extension was forgotten */
248 if (start > 0x0000000080000000 && start < 0x00000000ffffffff) 248 if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
249 start |= 0xffffffff00000000; 249 start |= 0xffffffff00000000UL;
250#endif 250#endif
251 251
252 end = start + size; 252 end = start + size;
@@ -355,8 +355,6 @@ static inline void bootmem_init(void)
355 } 355 }
356#endif 356#endif
357 357
358 memory_present(0, first_usable_pfn, max_low_pfn);
359
360 /* Initialize the boot-time allocator with low memory only. */ 358 /* Initialize the boot-time allocator with low memory only. */
361 bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn); 359 bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn);
362 360
@@ -410,6 +408,7 @@ static inline void bootmem_init(void)
410 408
411 /* Register lowmem ranges */ 409 /* Register lowmem ranges */
412 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size)); 410 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
411 memory_present(0, curr_pfn, curr_pfn + size - 1);
413 } 412 }
414 413
415 /* Reserve the bootmap memory. */ 414 /* Reserve the bootmap memory. */
@@ -419,17 +418,20 @@ static inline void bootmem_init(void)
419#ifdef CONFIG_BLK_DEV_INITRD 418#ifdef CONFIG_BLK_DEV_INITRD
420 initrd_below_start_ok = 1; 419 initrd_below_start_ok = 1;
421 if (initrd_start) { 420 if (initrd_start) {
422 unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start); 421 unsigned long initrd_size = ((unsigned char *)initrd_end) -
422 ((unsigned char *)initrd_start);
423 const int width = sizeof(long) * 2;
424
423 printk("Initial ramdisk at: 0x%p (%lu bytes)\n", 425 printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
424 (void *)initrd_start, initrd_size); 426 (void *)initrd_start, initrd_size);
425 427
426 if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) { 428 if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
427 printk("initrd extends beyond end of memory " 429 printk("initrd extends beyond end of memory "
428 "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n", 430 "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
429 sizeof(long) * 2, 431 width,
430 (unsigned long long)CPHYSADDR(initrd_end), 432 (unsigned long long) CPHYSADDR(initrd_end),
431 sizeof(long) * 2, 433 width,
432 (unsigned long long)PFN_PHYS(max_low_pfn)); 434 (unsigned long long) PFN_PHYS(max_low_pfn));
433 initrd_start = initrd_end = 0; 435 initrd_start = initrd_end = 0;
434 initrd_reserve_bootmem = 0; 436 initrd_reserve_bootmem = 0;
435 } 437 }
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d42f358754ad..298f82fe8440 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -247,6 +247,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
247 current_thread_info()->cpu = 0; 247 current_thread_info()->cpu = 0;
248 smp_tune_scheduling(); 248 smp_tune_scheduling();
249 plat_prepare_cpus(max_cpus); 249 plat_prepare_cpus(max_cpus);
250#ifndef CONFIG_HOTPLUG_CPU
251 cpu_present_map = cpu_possible_map;
252#endif
250} 253}
251 254
252/* preload SMP state for boot cpu */ 255/* preload SMP state for boot cpu */
@@ -442,7 +445,7 @@ static int __init topology_init(void)
442 int cpu; 445 int cpu;
443 int ret; 446 int ret;
444 447
445 for_each_cpu(cpu) { 448 for_each_present_cpu(cpu) {
446 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); 449 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
447 if (ret) 450 if (ret)
448 printk(KERN_WARNING "topology_init: register_cpu %d " 451 printk(KERN_WARNING "topology_init: register_cpu %d "
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 8f4fdd94dbd0..5e8a18a8e2bd 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -276,8 +276,7 @@ void sys_set_thread_area(unsigned long addr)
276 276
277asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3) 277asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
278{ 278{
279 int tmp, len; 279 int tmp;
280 char __user *name;
281 280
282 switch(cmd) { 281 switch(cmd) {
283 case MIPS_ATOMIC_SET: 282 case MIPS_ATOMIC_SET:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 35cb08da3820..a7564b08eb4d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -819,15 +819,30 @@ asmlinkage void do_watch(struct pt_regs *regs)
819 819
820asmlinkage void do_mcheck(struct pt_regs *regs) 820asmlinkage void do_mcheck(struct pt_regs *regs)
821{ 821{
822 const int field = 2 * sizeof(unsigned long);
823 int multi_match = regs->cp0_status & ST0_TS;
824
822 show_regs(regs); 825 show_regs(regs);
823 dump_tlb_all(); 826
827 if (multi_match) {
828 printk("Index : %0x\n", read_c0_index());
829 printk("Pagemask: %0x\n", read_c0_pagemask());
830 printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
831 printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
832 printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
833 printk("\n");
834 dump_tlb_all();
835 }
836
837 show_code((unsigned int *) regs->cp0_epc);
838
824 /* 839 /*
825 * Some chips may have other causes of machine check (e.g. SB1 840 * Some chips may have other causes of machine check (e.g. SB1
826 * graduation timer) 841 * graduation timer)
827 */ 842 */
828 panic("Caught Machine Check exception - %scaused by multiple " 843 panic("Caught Machine Check exception - %scaused by multiple "
829 "matching entries in the TLB.", 844 "matching entries in the TLB.",
830 (regs->cp0_status & ST0_TS) ? "" : "not "); 845 (multi_match) ? "" : "not ");
831} 846}
832 847
833asmlinkage void do_mt(struct pt_regs *regs) 848asmlinkage void do_mt(struct pt_regs *regs)
diff --git a/arch/mips/math-emu/dp_fint.c b/arch/mips/math-emu/dp_fint.c
index a1962eb460f8..39a71de16f47 100644
--- a/arch/mips/math-emu/dp_fint.c
+++ b/arch/mips/math-emu/dp_fint.c
@@ -29,7 +29,9 @@
29 29
30ieee754dp ieee754dp_fint(int x) 30ieee754dp ieee754dp_fint(int x)
31{ 31{
32 COMPXDP; 32 u64 xm;
33 int xe;
34 int xs;
33 35
34 CLEARCX; 36 CLEARCX;
35 37
diff --git a/arch/mips/math-emu/dp_flong.c b/arch/mips/math-emu/dp_flong.c
index eae90a866aa1..f08f223e488a 100644
--- a/arch/mips/math-emu/dp_flong.c
+++ b/arch/mips/math-emu/dp_flong.c
@@ -29,7 +29,9 @@
29 29
30ieee754dp ieee754dp_flong(s64 x) 30ieee754dp ieee754dp_flong(s64 x)
31{ 31{
32 COMPXDP; 32 u64 xm;
33 int xe;
34 int xs;
33 35
34 CLEARCX; 36 CLEARCX;
35 37
diff --git a/arch/mips/math-emu/sp_fint.c b/arch/mips/math-emu/sp_fint.c
index 7aac13afb09a..e88e125e01c2 100644
--- a/arch/mips/math-emu/sp_fint.c
+++ b/arch/mips/math-emu/sp_fint.c
@@ -29,7 +29,9 @@
29 29
30ieee754sp ieee754sp_fint(int x) 30ieee754sp ieee754sp_fint(int x)
31{ 31{
32 COMPXSP; 32 unsigned xm;
33 int xe;
34 int xs;
33 35
34 CLEARCX; 36 CLEARCX;
35 37
diff --git a/arch/mips/math-emu/sp_flong.c b/arch/mips/math-emu/sp_flong.c
index 3d6c1d11c178..26d6919a269a 100644
--- a/arch/mips/math-emu/sp_flong.c
+++ b/arch/mips/math-emu/sp_flong.c
@@ -29,7 +29,9 @@
29 29
30ieee754sp ieee754sp_flong(s64 x) 30ieee754sp ieee754sp_flong(s64 x)
31{ 31{
32 COMPXDP; /* <--- need 64-bit mantissa temp */ 32 u64 xm; /* <--- need 64-bit mantissa temp */
33 int xe;
34 int xs;
33 35
34 CLEARCX; 36 CLEARCX;
35 37
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6b3541769602..4a43924cd4fc 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1161,6 +1161,31 @@ static void __init setup_scache(void)
1161 c->options |= MIPS_CPU_SUBSET_CACHES; 1161 c->options |= MIPS_CPU_SUBSET_CACHES;
1162} 1162}
1163 1163
1164void au1x00_fixup_config_od(void)
1165{
1166 /*
1167 * c0_config.od (bit 19) was write only (and read as 0)
1168 * on the early revisions of Alchemy SOCs. It disables the bus
1169 * transaction overlapping and needs to be set to fix various errata.
1170 */
1171 switch (read_c0_prid()) {
1172 case 0x00030100: /* Au1000 DA */
1173 case 0x00030201: /* Au1000 HA */
1174 case 0x00030202: /* Au1000 HB */
1175 case 0x01030200: /* Au1500 AB */
1176 /*
1177 * Au1100 errata actually keeps silence about this bit, so we set it
1178 * just in case for those revisions that require it to be set according
1179 * to arch/mips/au1000/common/cputable.c
1180 */
1181 case 0x02030200: /* Au1100 AB */
1182 case 0x02030201: /* Au1100 BA */
1183 case 0x02030202: /* Au1100 BC */
1184 set_c0_config(1 << 19);
1185 break;
1186 }
1187}
1188
1164static inline void coherency_setup(void) 1189static inline void coherency_setup(void)
1165{ 1190{
1166 change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT); 1191 change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
@@ -1181,6 +1206,15 @@ static inline void coherency_setup(void)
1181 case CPU_R4400MC: 1206 case CPU_R4400MC:
1182 clear_c0_config(CONF_CU); 1207 clear_c0_config(CONF_CU);
1183 break; 1208 break;
1209 /*
1210 * We need to catch the ealry Alchemy SOCs with
1211 * the write-only co_config.od bit and set it back to one...
1212 */
1213 case CPU_AU1000: /* rev. DA, HA, HB */
1214 case CPU_AU1100: /* rev. AB, BA, BC ?? */
1215 case CPU_AU1500: /* rev. AB */
1216 au1x00_fixup_config_od();
1217 break;
1184 } 1218 }
1185} 1219}
1186 1220
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index c22308b93ff0..33f6e1cdfd5b 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -227,7 +227,7 @@ void __init mem_init(void)
227 for (tmp = 0; tmp < max_low_pfn; tmp++) 227 for (tmp = 0; tmp < max_low_pfn; tmp++)
228 if (page_is_ram(tmp)) { 228 if (page_is_ram(tmp)) {
229 ram++; 229 ram++;
230 if (PageReserved(mem_map+tmp)) 230 if (PageReserved(pfn_to_page(tmp)))
231 reservedpages++; 231 reservedpages++;
232 } 232 }
233 233
diff --git a/arch/mips/momentum/jaguar_atx/dbg_io.c b/arch/mips/momentum/jaguar_atx/dbg_io.c
index 542eac82b63c..d7dea0a136aa 100644
--- a/arch/mips/momentum/jaguar_atx/dbg_io.c
+++ b/arch/mips/momentum/jaguar_atx/dbg_io.c
@@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
73 /* disable interrupts */ 73 /* disable interrupts */
74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 74 UART16550_WRITE(OFS_INTR_ENABLE, 0);
75 75
76 /* set up buad rate */ 76 /* set up baud rate */
77 { 77 {
78 uint32 divisor; 78 uint32 divisor;
79 79
diff --git a/arch/mips/momentum/ocelot_c/dbg_io.c b/arch/mips/momentum/ocelot_c/dbg_io.c
index 8720bccfdea2..f0a6a38fcf4d 100644
--- a/arch/mips/momentum/ocelot_c/dbg_io.c
+++ b/arch/mips/momentum/ocelot_c/dbg_io.c
@@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
73 /* disable interrupts */ 73 /* disable interrupts */
74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 74 UART16550_WRITE(OFS_INTR_ENABLE, 0);
75 75
76 /* set up buad rate */ 76 /* set up baud rate */
77 { 77 {
78 uint32 divisor; 78 uint32 divisor;
79 79
diff --git a/arch/mips/momentum/ocelot_g/dbg_io.c b/arch/mips/momentum/ocelot_g/dbg_io.c
index 8720bccfdea2..f0a6a38fcf4d 100644
--- a/arch/mips/momentum/ocelot_g/dbg_io.c
+++ b/arch/mips/momentum/ocelot_g/dbg_io.c
@@ -73,7 +73,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
73 /* disable interrupts */ 73 /* disable interrupts */
74 UART16550_WRITE(OFS_INTR_ENABLE, 0); 74 UART16550_WRITE(OFS_INTR_ENABLE, 0);
75 75
76 /* set up buad rate */ 76 /* set up baud rate */
77 { 77 {
78 uint32 divisor; 78 uint32 divisor;
79 79
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 91b799d2cd88..c31e4cff64e0 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -14,8 +14,8 @@
14 14
15#include "op_impl.h" 15#include "op_impl.h"
16 16
17extern struct op_mips_model op_model_mipsxx __attribute__((weak)); 17extern struct op_mips_model op_model_mipsxx_ops __attribute__((weak));
18extern struct op_mips_model op_model_rm9000 __attribute__((weak)); 18extern struct op_mips_model op_model_rm9000_ops __attribute__((weak));
19 19
20static struct op_mips_model *model; 20static struct op_mips_model *model;
21 21
@@ -83,11 +83,11 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
83 case CPU_74K: 83 case CPU_74K:
84 case CPU_SB1: 84 case CPU_SB1:
85 case CPU_SB1A: 85 case CPU_SB1A:
86 lmodel = &op_model_mipsxx; 86 lmodel = &op_model_mipsxx_ops;
87 break; 87 break;
88 88
89 case CPU_RM9000: 89 case CPU_RM9000:
90 lmodel = &op_model_rm9000; 90 lmodel = &op_model_rm9000_ops;
91 break; 91 break;
92 }; 92 };
93 93
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index e7ce92391303..f26a00e13204 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -23,7 +23,7 @@
23 23
24#define M_COUNTER_OVERFLOW (1UL << 31) 24#define M_COUNTER_OVERFLOW (1UL << 31)
25 25
26struct op_mips_model op_model_mipsxx; 26struct op_mips_model op_model_mipsxx_ops;
27 27
28static struct mipsxx_register_config { 28static struct mipsxx_register_config {
29 unsigned int control[4]; 29 unsigned int control[4];
@@ -34,7 +34,7 @@ static struct mipsxx_register_config {
34 34
35static void mipsxx_reg_setup(struct op_counter_config *ctr) 35static void mipsxx_reg_setup(struct op_counter_config *ctr)
36{ 36{
37 unsigned int counters = op_model_mipsxx.num_counters; 37 unsigned int counters = op_model_mipsxx_ops.num_counters;
38 int i; 38 int i;
39 39
40 /* Compute the performance counter control word. */ 40 /* Compute the performance counter control word. */
@@ -62,7 +62,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
62 62
63static void mipsxx_cpu_setup (void *args) 63static void mipsxx_cpu_setup (void *args)
64{ 64{
65 unsigned int counters = op_model_mipsxx.num_counters; 65 unsigned int counters = op_model_mipsxx_ops.num_counters;
66 66
67 switch (counters) { 67 switch (counters) {
68 case 4: 68 case 4:
@@ -83,7 +83,7 @@ static void mipsxx_cpu_setup (void *args)
83/* Start all counters on current CPU */ 83/* Start all counters on current CPU */
84static void mipsxx_cpu_start(void *args) 84static void mipsxx_cpu_start(void *args)
85{ 85{
86 unsigned int counters = op_model_mipsxx.num_counters; 86 unsigned int counters = op_model_mipsxx_ops.num_counters;
87 87
88 switch (counters) { 88 switch (counters) {
89 case 4: 89 case 4:
@@ -100,7 +100,7 @@ static void mipsxx_cpu_start(void *args)
100/* Stop all counters on current CPU */ 100/* Stop all counters on current CPU */
101static void mipsxx_cpu_stop(void *args) 101static void mipsxx_cpu_stop(void *args)
102{ 102{
103 unsigned int counters = op_model_mipsxx.num_counters; 103 unsigned int counters = op_model_mipsxx_ops.num_counters;
104 104
105 switch (counters) { 105 switch (counters) {
106 case 4: 106 case 4:
@@ -116,7 +116,7 @@ static void mipsxx_cpu_stop(void *args)
116 116
117static int mipsxx_perfcount_handler(struct pt_regs *regs) 117static int mipsxx_perfcount_handler(struct pt_regs *regs)
118{ 118{
119 unsigned int counters = op_model_mipsxx.num_counters; 119 unsigned int counters = op_model_mipsxx_ops.num_counters;
120 unsigned int control; 120 unsigned int control;
121 unsigned int counter; 121 unsigned int counter;
122 int handled = 0; 122 int handled = 0;
@@ -187,37 +187,37 @@ static int __init mipsxx_init(void)
187 187
188 reset_counters(counters); 188 reset_counters(counters);
189 189
190 op_model_mipsxx.num_counters = counters; 190 op_model_mipsxx_ops.num_counters = counters;
191 switch (current_cpu_data.cputype) { 191 switch (current_cpu_data.cputype) {
192 case CPU_20KC: 192 case CPU_20KC:
193 op_model_mipsxx.cpu_type = "mips/20K"; 193 op_model_mipsxx_ops.cpu_type = "mips/20K";
194 break; 194 break;
195 195
196 case CPU_24K: 196 case CPU_24K:
197 op_model_mipsxx.cpu_type = "mips/24K"; 197 op_model_mipsxx_ops.cpu_type = "mips/24K";
198 break; 198 break;
199 199
200 case CPU_25KF: 200 case CPU_25KF:
201 op_model_mipsxx.cpu_type = "mips/25K"; 201 op_model_mipsxx_ops.cpu_type = "mips/25K";
202 break; 202 break;
203 203
204#ifndef CONFIG_SMP 204#ifndef CONFIG_SMP
205 case CPU_34K: 205 case CPU_34K:
206 op_model_mipsxx.cpu_type = "mips/34K"; 206 op_model_mipsxx_ops.cpu_type = "mips/34K";
207 break; 207 break;
208 208
209 case CPU_74K: 209 case CPU_74K:
210 op_model_mipsxx.cpu_type = "mips/74K"; 210 op_model_mipsxx_ops.cpu_type = "mips/74K";
211 break; 211 break;
212#endif 212#endif
213 213
214 case CPU_5KC: 214 case CPU_5KC:
215 op_model_mipsxx.cpu_type = "mips/5K"; 215 op_model_mipsxx_ops.cpu_type = "mips/5K";
216 break; 216 break;
217 217
218 case CPU_SB1: 218 case CPU_SB1:
219 case CPU_SB1A: 219 case CPU_SB1A:
220 op_model_mipsxx.cpu_type = "mips/sb1"; 220 op_model_mipsxx_ops.cpu_type = "mips/sb1";
221 break; 221 break;
222 222
223 default: 223 default:
@@ -233,12 +233,12 @@ static int __init mipsxx_init(void)
233 233
234static void mipsxx_exit(void) 234static void mipsxx_exit(void)
235{ 235{
236 reset_counters(op_model_mipsxx.num_counters); 236 reset_counters(op_model_mipsxx_ops.num_counters);
237 237
238 perf_irq = null_perf_irq; 238 perf_irq = null_perf_irq;
239} 239}
240 240
241struct op_mips_model op_model_mipsxx = { 241struct op_mips_model op_model_mipsxx_ops = {
242 .reg_setup = mipsxx_reg_setup, 242 .reg_setup = mipsxx_reg_setup,
243 .cpu_setup = mipsxx_cpu_setup, 243 .cpu_setup = mipsxx_cpu_setup,
244 .init = mipsxx_init, 244 .init = mipsxx_init,
diff --git a/arch/mips/oprofile/op_model_rm9000.c b/arch/mips/oprofile/op_model_rm9000.c
index 9b75e41c78ef..b7063fefa65b 100644
--- a/arch/mips/oprofile/op_model_rm9000.c
+++ b/arch/mips/oprofile/op_model_rm9000.c
@@ -126,7 +126,7 @@ static void rm9000_exit(void)
126 free_irq(rm9000_perfcount_irq, NULL); 126 free_irq(rm9000_perfcount_irq, NULL);
127} 127}
128 128
129struct op_mips_model op_model_rm9000 = { 129struct op_mips_model op_model_rm9000_ops = {
130 .reg_setup = rm9000_reg_setup, 130 .reg_setup = rm9000_reg_setup,
131 .cpu_setup = rm9000_cpu_setup, 131 .cpu_setup = rm9000_cpu_setup,
132 .init = rm9000_init, 132 .init = rm9000_init,
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index de01c9815bdd..8ba08047d164 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -31,12 +31,12 @@
31/* issue a PIO read to make sure no PIO writes are pending */ 31/* issue a PIO read to make sure no PIO writes are pending */
32static void inline flush_crime_bus(void) 32static void inline flush_crime_bus(void)
33{ 33{
34 volatile unsigned long junk = crime->control; 34 crime->control;
35} 35}
36 36
37static void inline flush_mace_bus(void) 37static void inline flush_mace_bus(void)
38{ 38{
39 volatile unsigned long junk = mace->perif.ctrl.misc; 39 mace->perif.ctrl.misc;
40} 40}
41 41
42#undef DEBUG_IRQ 42#undef DEBUG_IRQ
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5908690d0868..57d8a16438a0 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -776,6 +776,7 @@ static void __init prom_send_capabilities(void)
776 /* try calling the ibm,client-architecture-support method */ 776 /* try calling the ibm,client-architecture-support method */
777 if (call_prom_ret("call-method", 3, 2, &ret, 777 if (call_prom_ret("call-method", 3, 2, &ret,
778 ADDR("ibm,client-architecture-support"), 778 ADDR("ibm,client-architecture-support"),
779 root,
779 ADDR(ibm_architecture_vec)) == 0) { 780 ADDR(ibm_architecture_vec)) == 0) {
780 /* the call exists... */ 781 /* the call exists... */
781 if (ret) 782 if (ret)
@@ -1541,6 +1542,15 @@ static int __init prom_find_machine_type(void)
1541 if (strstr(p, RELOC("Power Macintosh")) || 1542 if (strstr(p, RELOC("Power Macintosh")) ||
1542 strstr(p, RELOC("MacRISC"))) 1543 strstr(p, RELOC("MacRISC")))
1543 return PLATFORM_POWERMAC; 1544 return PLATFORM_POWERMAC;
1545#ifdef CONFIG_PPC64
1546 /* We must make sure we don't detect the IBM Cell
1547 * blades as pSeries due to some firmware issues,
1548 * so we do it here.
1549 */
1550 if (strstr(p, RELOC("IBM,CBEA")) ||
1551 strstr(p, RELOC("IBM,CPBW-1.0")))
1552 return PLATFORM_GENERIC;
1553#endif /* CONFIG_PPC64 */
1544 i += sl + 1; 1554 i += sl + 1;
1545 } 1555 }
1546 } 1556 }
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 237faeec2ec2..d73b25e22fca 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -808,10 +808,13 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
808 if (__get_user(cmcp, &ucp->uc_regs)) 808 if (__get_user(cmcp, &ucp->uc_regs))
809 return -EFAULT; 809 return -EFAULT;
810 mcp = (struct mcontext __user *)(u64)cmcp; 810 mcp = (struct mcontext __user *)(u64)cmcp;
811 /* no need to check access_ok(mcp), since mcp < 4GB */
811 } 812 }
812#else 813#else
813 if (__get_user(mcp, &ucp->uc_regs)) 814 if (__get_user(mcp, &ucp->uc_regs))
814 return -EFAULT; 815 return -EFAULT;
816 if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
817 return -EFAULT;
815#endif 818#endif
816 restore_sigmask(&set); 819 restore_sigmask(&set);
817 if (restore_user_regs(regs, mcp, sig)) 820 if (restore_user_regs(regs, mcp, sig))
@@ -913,13 +916,14 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
913{ 916{
914 struct sig_dbg_op op; 917 struct sig_dbg_op op;
915 int i; 918 int i;
919 unsigned char tmp;
916 unsigned long new_msr = regs->msr; 920 unsigned long new_msr = regs->msr;
917#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) 921#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
918 unsigned long new_dbcr0 = current->thread.dbcr0; 922 unsigned long new_dbcr0 = current->thread.dbcr0;
919#endif 923#endif
920 924
921 for (i=0; i<ndbg; i++) { 925 for (i=0; i<ndbg; i++) {
922 if (__copy_from_user(&op, dbg, sizeof(op))) 926 if (copy_from_user(&op, dbg + i, sizeof(op)))
923 return -EFAULT; 927 return -EFAULT;
924 switch (op.dbg_type) { 928 switch (op.dbg_type) {
925 case SIG_DBG_SINGLE_STEPPING: 929 case SIG_DBG_SINGLE_STEPPING:
@@ -964,6 +968,11 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
964 current->thread.dbcr0 = new_dbcr0; 968 current->thread.dbcr0 = new_dbcr0;
965#endif 969#endif
966 970
971 if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
972 || __get_user(tmp, (u8 __user *) ctx)
973 || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
974 return -EFAULT;
975
967 /* 976 /*
968 * If we get a fault copying the context into the kernel's 977 * If we get a fault copying the context into the kernel's
 969	 * image of the user's registers, we can't just return -EFAULT 978	 * image of the user's registers, we can't just return -EFAULT
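
The sys_debug_setcontext() hunk above fixes two distinct problems: the loop copied the same descriptor on every iteration (dbg instead of dbg + i) and used the non-checking __copy_from_user(), and the user context is now probed with access_ok()/__get_user() on its first and last byte before it is touched. Below is a minimal userspace sketch of the indexing half only; memcpy stands in for copy_from_user and the access_ok side is not modelled.

	#include <stdio.h>
	#include <string.h>

	struct sig_dbg_op { int dbg_type; unsigned long dbg_value; };

	/* Buggy shape: always copies element 0, like the old __copy_from_user(&op, dbg, ...). */
	static void copy_ops_buggy(struct sig_dbg_op *dst, const struct sig_dbg_op *dbg, int ndbg)
	{
		for (int i = 0; i < ndbg; i++)
			memcpy(&dst[i], dbg, sizeof(*dst));
	}

	/* Fixed shape: indexes dbg + i, like the new copy_from_user(&op, dbg + i, ...). */
	static void copy_ops_fixed(struct sig_dbg_op *dst, const struct sig_dbg_op *dbg, int ndbg)
	{
		for (int i = 0; i < ndbg; i++)
			memcpy(&dst[i], dbg + i, sizeof(*dst));
	}

	int main(void)
	{
		struct sig_dbg_op src[3] = { {1, 10}, {2, 20}, {3, 30} };
		struct sig_dbg_op a[3], b[3];

		copy_ops_buggy(a, src, 3);
		copy_ops_fixed(b, src, 3);
		printf("buggy: %lu %lu %lu\n", a[0].dbg_value, a[1].dbg_value, a[2].dbg_value);
		printf("fixed: %lu %lu %lu\n", b[0].dbg_value, b[1].dbg_value, b[2].dbg_value);
		return 0;
	}
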
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 66a5fbe31989..6e75d7ab6d4d 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -184,6 +184,8 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
184 err |= __get_user(v_regs, &sc->v_regs); 184 err |= __get_user(v_regs, &sc->v_regs);
185 if (err) 185 if (err)
186 return err; 186 return err;
187 if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
188 return -EFAULT;
187 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ 189 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
188 if (v_regs != 0 && (msr & MSR_VEC) != 0) 190 if (v_regs != 0 && (msr & MSR_VEC) != 0)
189 err |= __copy_from_user(current->thread.vr, v_regs, 191 err |= __copy_from_user(current->thread.vr, v_regs,
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 6574b22b3cf3..fd3e5609e3e0 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -125,14 +125,13 @@ static void __init cell_init_early(void)
125 125
126static int __init cell_probe(void) 126static int __init cell_probe(void)
127{ 127{
128 /* XXX This is temporary, the Cell maintainer will come up with
129 * more appropriate detection logic
130 */
131 unsigned long root = of_get_flat_dt_root(); 128 unsigned long root = of_get_flat_dt_root();
132 if (!of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
133 return 0;
134 129
135 return 1; 130 if (of_flat_dt_is_compatible(root, "IBM,CBEA") ||
131 of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
132 return 1;
133
134 return 0;
136} 135}
137 136
138/* 137/*
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 4baa75b1d36f..f08173b0f065 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/mutex.h>
14 15
15#include <asm/semaphore.h> 16#include <asm/semaphore.h>
16#include <asm/prom.h> 17#include <asm/prom.h>
@@ -546,6 +547,7 @@ struct pmf_device {
546 547
547static LIST_HEAD(pmf_devices); 548static LIST_HEAD(pmf_devices);
548static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED; 549static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED;
550static DEFINE_MUTEX(pmf_irq_mutex);
549 551
550static void pmf_release_device(struct kref *kref) 552static void pmf_release_device(struct kref *kref)
551{ 553{
@@ -864,15 +866,17 @@ int pmf_register_irq_client(struct device_node *target,
864 866
865 spin_lock_irqsave(&pmf_lock, flags); 867 spin_lock_irqsave(&pmf_lock, flags);
866 func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN); 868 func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN);
867 if (func == NULL) { 869 if (func)
868 spin_unlock_irqrestore(&pmf_lock, flags); 870 func = pmf_get_function(func);
871 spin_unlock_irqrestore(&pmf_lock, flags);
872 if (func == NULL)
869 return -ENODEV; 873 return -ENODEV;
870 } 874 mutex_lock(&pmf_irq_mutex);
871 if (list_empty(&func->irq_clients)) 875 if (list_empty(&func->irq_clients))
872 func->dev->handlers->irq_enable(func); 876 func->dev->handlers->irq_enable(func);
873 list_add(&client->link, &func->irq_clients); 877 list_add(&client->link, &func->irq_clients);
874 client->func = func; 878 client->func = func;
875 spin_unlock_irqrestore(&pmf_lock, flags); 879 mutex_unlock(&pmf_irq_mutex);
876 880
877 return 0; 881 return 0;
878} 882}
@@ -881,16 +885,16 @@ EXPORT_SYMBOL_GPL(pmf_register_irq_client);
881void pmf_unregister_irq_client(struct pmf_irq_client *client) 885void pmf_unregister_irq_client(struct pmf_irq_client *client)
882{ 886{
883 struct pmf_function *func = client->func; 887 struct pmf_function *func = client->func;
884 unsigned long flags;
885 888
886 BUG_ON(func == NULL); 889 BUG_ON(func == NULL);
887 890
888 spin_lock_irqsave(&pmf_lock, flags); 891 mutex_lock(&pmf_irq_mutex);
889 client->func = NULL; 892 client->func = NULL;
890 list_del(&client->link); 893 list_del(&client->link);
891 if (list_empty(&func->irq_clients)) 894 if (list_empty(&func->irq_clients))
892 func->dev->handlers->irq_disable(func); 895 func->dev->handlers->irq_disable(func);
893 spin_unlock_irqrestore(&pmf_lock, flags); 896 mutex_unlock(&pmf_irq_mutex);
897 pmf_put_function(func);
894} 898}
895EXPORT_SYMBOL_GPL(pmf_unregister_irq_client); 899EXPORT_SYMBOL_GPL(pmf_unregister_irq_client);
896 900
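
The pfunc change above splits the locking in two: the function is looked up and a reference is taken while the spinlock is held, and the IRQ client list plus the irq_enable/irq_disable calls then run under a mutex, presumably so those handlers are free to sleep. A rough userspace sketch of that shape, using a C11 atomic for the refcount and a pthread mutex for the slow path; all names here are invented for illustration.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct func {
		atomic_int refcount;
		int nclients;			/* stands in for the irq_clients list */
	};

	static pthread_mutex_t irq_mutex = PTHREAD_MUTEX_INITIALIZER;

	static struct func *func_get(struct func *f)
	{
		atomic_fetch_add(&f->refcount, 1);	/* taken under the fast lookup lock */
		return f;
	}

	static void func_put(struct func *f)
	{
		if (atomic_fetch_sub(&f->refcount, 1) == 1)
			printf("last reference dropped, func can be freed\n");
	}

	static void register_client(struct func *f)
	{
		func_get(f);				/* keep the function alive */
		pthread_mutex_lock(&irq_mutex);		/* sleeping lock for the slow part */
		if (f->nclients++ == 0)
			printf("first client: irq_enable()\n");
		pthread_mutex_unlock(&irq_mutex);
	}

	static void unregister_client(struct func *f)
	{
		pthread_mutex_lock(&irq_mutex);
		if (--f->nclients == 0)
			printf("last client: irq_disable()\n");
		pthread_mutex_unlock(&irq_mutex);
		func_put(f);				/* drop the registration reference */
	}

	int main(void)
	{
		struct func f = { .refcount = 1, .nclients = 0 };

		register_client(&f);
		register_client(&f);
		unregister_client(&f);
		unregister_client(&f);
		func_put(&f);				/* drop the initial reference */
		return 0;
	}
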
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ee89d19c436a..1e28518c6121 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -389,6 +389,7 @@ static int __init pSeries_probe_hypertas(unsigned long node,
389 389
390static int __init pSeries_probe(void) 390static int __init pSeries_probe(void)
391{ 391{
392 unsigned long root = of_get_flat_dt_root();
392 char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(), 393 char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
393 "device_type", NULL); 394 "device_type", NULL);
394 if (dtype == NULL) 395 if (dtype == NULL)
@@ -396,6 +397,13 @@ static int __init pSeries_probe(void)
396 if (strcmp(dtype, "chrp")) 397 if (strcmp(dtype, "chrp"))
397 return 0; 398 return 0;
398 399
 400	/* The Cell blade firmware claims to be chrp while it is not. Until this
401 * is fixed, we need to avoid those here.
402 */
403 if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
404 of_flat_dt_is_compatible(root, "IBM,CBEA"))
405 return 0;
406
399 DBG("pSeries detected, looking for LPAR capability...\n"); 407 DBG("pSeries detected, looking for LPAR capability...\n");
400 408
401 /* Now try to figure out if we are running on LPAR */ 409 /* Now try to figure out if we are running on LPAR */
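
The prom_init.c, cell/setup.c and pseries/setup.c hunks above all hinge on the same device-tree test: the Cell blades advertise a chrp device_type, so pSeries_probe() must reject anything compatible with "IBM,CBEA" or "IBM,CPBW-1.0", while cell_probe() accepts exactly those. A small sketch of that probe ordering, with a mock compatible list standing in for of_flat_dt_is_compatible() and friends:

	#include <stdio.h>
	#include <string.h>

	struct mock_node {
		const char *device_type;
		const char *compatible[4];	/* NULL-terminated list */
	};

	static int is_compatible(const struct mock_node *n, const char *s)
	{
		for (int i = 0; n->compatible[i]; i++)
			if (!strcmp(n->compatible[i], s))
				return 1;
		return 0;
	}

	static int cell_probe(const struct mock_node *root)
	{
		return is_compatible(root, "IBM,CBEA") ||
		       is_compatible(root, "IBM,CPBW-1.0");
	}

	static int pseries_probe(const struct mock_node *root)
	{
		if (!root->device_type || strcmp(root->device_type, "chrp"))
			return 0;
		/* Cell blades claim to be chrp; reject them here. */
		if (is_compatible(root, "IBM,CPBW-1.0") || is_compatible(root, "IBM,CBEA"))
			return 0;
		return 1;
	}

	int main(void)
	{
		struct mock_node blade = { "chrp", { "IBM,CBEA", NULL } };
		struct mock_node lpar  = { "chrp", { NULL } };

		printf("blade: cell=%d pseries=%d\n", cell_probe(&blade), pseries_probe(&blade));
		printf("lpar:  cell=%d pseries=%d\n", cell_probe(&lpar), pseries_probe(&lpar));
		return 0;
	}
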
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index a93f5da6855d..40b42c88e6a7 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -69,6 +69,17 @@ void __init smp_store_cpu_info(int id)
69 "clock-frequency", 0); 69 "clock-frequency", 0);
70 cpu_data(id).prom_node = cpu_node; 70 cpu_data(id).prom_node = cpu_node;
71 cpu_data(id).mid = cpu_get_hwmid(cpu_node); 71 cpu_data(id).mid = cpu_get_hwmid(cpu_node);
72
73 /* this is required to tune the scheduler correctly */
74 /* is it possible to have CPUs with different cache sizes? */
75 if (id == boot_cpu_id) {
 76	 int cache_line, cache_nlines;
77 cache_line = 0x20;
78 cache_line = prom_getintdefault(cpu_node, "ecache-line-size", cache_line);
79 cache_nlines = 0x8000;
80 cache_nlines = prom_getintdefault(cpu_node, "ecache-nlines", cache_nlines);
81 max_cache_size = cache_line * cache_nlines;
82 }
72 if (cpu_data(id).mid < 0) 83 if (cpu_data(id).mid < 0)
73 panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); 84 panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
74} 85}
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 3eadac5e171e..31c5892f5acc 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -10,6 +10,7 @@
10#include <linux/config.h> 10#include <linux/config.h>
11#include <linux/version.h> 11#include <linux/version.h>
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/threads.h>
13#include <asm/thread_info.h> 14#include <asm/thread_info.h>
14#include <asm/asi.h> 15#include <asm/asi.h>
15#include <asm/pstate.h> 16#include <asm/pstate.h>
@@ -493,6 +494,35 @@ tlb_fixup_done:
493 call prom_init 494 call prom_init
494 mov %l7, %o0 ! OpenPROM cif handler 495 mov %l7, %o0 ! OpenPROM cif handler
495 496
497 /* Initialize current_thread_info()->cpu as early as possible.
498 * In order to do that accurately we have to patch up the get_cpuid()
499 * assembler sequences. And that, in turn, requires that we know
500 * if we are on a Starfire box or not. While we're here, patch up
501 * the sun4v sequences as well.
502 */
503 call check_if_starfire
504 nop
505 call per_cpu_patch
506 nop
507 call sun4v_patch
508 nop
509
510#ifdef CONFIG_SMP
511 call hard_smp_processor_id
512 nop
513 cmp %o0, NR_CPUS
514 blu,pt %xcc, 1f
515 nop
516 call boot_cpu_id_too_large
517 nop
518 /* Not reached... */
519
5201:
521#else
522 mov 0, %o0
523#endif
524 stb %o0, [%g6 + TI_CPU]
525
496 /* Off we go.... */ 526 /* Off we go.... */
497 call start_kernel 527 call start_kernel
498 nop 528 nop
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 2b7a1f316a93..0c0895202970 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -599,18 +599,128 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
599 599
600/* SUN4V PCI configuration space accessors. */ 600/* SUN4V PCI configuration space accessors. */
601 601
602static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) 602struct pdev_entry {
603 struct pdev_entry *next;
604 u32 devhandle;
605 unsigned int bus;
606 unsigned int device;
607 unsigned int func;
608};
609
610#define PDEV_HTAB_SIZE 16
611#define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1)
612static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
613
614static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
603{ 615{
604 if (bus == pbm->pci_first_busno) { 616 unsigned int val;
605 if (device == 0 && func == 0) 617
606 return 0; 618 val = (devhandle ^ (devhandle >> 4));
607 return 1; 619 val ^= bus;
620 val ^= device;
621 val ^= func;
622
623 return val & PDEV_HTAB_MASK;
624}
625
626static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
627{
628 struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
629 struct pdev_entry **slot;
630
631 if (!p)
632 return -ENOMEM;
633
634 slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
635 p->next = *slot;
636 *slot = p;
637
638 p->devhandle = devhandle;
639 p->bus = bus;
640 p->device = device;
641 p->func = func;
642
643 return 0;
644}
645
646/* Recursively descend into the OBP device tree, rooted at toplevel_node,
647 * looking for a PCI device matching bus and devfn.
648 */
649static int obp_find(struct linux_prom_pci_registers *pregs, int toplevel_node, unsigned int bus, unsigned int devfn)
650{
651 toplevel_node = prom_getchild(toplevel_node);
652
653 while (toplevel_node != 0) {
654 int ret = obp_find(pregs, toplevel_node, bus, devfn);
655
656 if (ret != 0)
657 return ret;
658
659 ret = prom_getproperty(toplevel_node, "reg", (char *) pregs,
660 sizeof(*pregs) * PROMREG_MAX);
661 if (ret == 0 || ret == -1)
662 goto next_sibling;
663
664 if (((pregs[0].phys_hi >> 16) & 0xff) == bus &&
665 ((pregs[0].phys_hi >> 8) & 0xff) == devfn)
666 break;
667
668 next_sibling:
669 toplevel_node = prom_getsibling(toplevel_node);
670 }
671
672 return toplevel_node;
673}
674
675static int pdev_htab_populate(struct pci_pbm_info *pbm)
676{
677 struct linux_prom_pci_registers pr[PROMREG_MAX];
678 u32 devhandle = pbm->devhandle;
679 unsigned int bus;
680
681 for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
682 unsigned int devfn;
683
684 for (devfn = 0; devfn < 256; devfn++) {
685 unsigned int device = PCI_SLOT(devfn);
686 unsigned int func = PCI_FUNC(devfn);
687
688 if (obp_find(pr, pbm->prom_node, bus, devfn)) {
689 int err = pdev_htab_add(devhandle, bus,
690 device, func);
691 if (err)
692 return err;
693 }
694 }
695 }
696
697 return 0;
698}
699
700static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
701{
702 struct pdev_entry *p;
703
704 p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
705 while (p) {
706 if (p->devhandle == devhandle &&
707 p->bus == bus &&
708 p->device == device &&
709 p->func == func)
710 break;
711
712 p = p->next;
608 } 713 }
609 714
715 return p;
716}
717
718static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
719{
610 if (bus < pbm->pci_first_busno || 720 if (bus < pbm->pci_first_busno ||
611 bus > pbm->pci_last_busno) 721 bus > pbm->pci_last_busno)
612 return 1; 722 return 1;
613 return 0; 723 return pdev_find(pbm->devhandle, bus, device, func) == NULL;
614} 724}
615 725
616static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, 726static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
@@ -1063,6 +1173,8 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32
1063 1173
1064 pci_sun4v_get_bus_range(pbm); 1174 pci_sun4v_get_bus_range(pbm);
1065 pci_sun4v_iommu_init(pbm); 1175 pci_sun4v_iommu_init(pbm);
1176
1177 pdev_htab_populate(pbm);
1066} 1178}
1067 1179
1068void sun4v_pci_init(int node, char *model_name) 1180void sun4v_pci_init(int node, char *model_name)
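
The net effect of the pdev_htab code above is a small chained hash table, built once from the OBP tree at PBM init, which pci_sun4v_out_of_range() consults so that config-space accesses only reach devices the firmware actually listed. The hash folds the device handle and xors in bus/device/function, masked down to 16 buckets. A self-contained userspace sketch of the same structure, with malloc and hard-coded entries in place of kmalloc and the PROM walk:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	struct pdev_entry {
		struct pdev_entry *next;
		uint32_t devhandle;
		unsigned int bus, device, func;
	};

	#define PDEV_HTAB_SIZE 16
	#define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1)
	static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];

	/* Same mixing as the hunk above: fold the handle, then xor in bus/dev/fn. */
	static unsigned int pdev_hashfn(uint32_t devhandle, unsigned int bus,
					unsigned int device, unsigned int func)
	{
		unsigned int val = devhandle ^ (devhandle >> 4);

		val ^= bus;
		val ^= device;
		val ^= func;
		return val & PDEV_HTAB_MASK;
	}

	static int pdev_htab_add(uint32_t devhandle, unsigned int bus,
				 unsigned int device, unsigned int func)
	{
		struct pdev_entry *p = malloc(sizeof(*p));
		struct pdev_entry **slot;

		if (!p)
			return -1;
		slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
		p->next = *slot;
		*slot = p;
		p->devhandle = devhandle;
		p->bus = bus;
		p->device = device;
		p->func = func;
		return 0;
	}

	static int pdev_present(uint32_t devhandle, unsigned int bus,
				unsigned int device, unsigned int func)
	{
		struct pdev_entry *p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];

		for (; p; p = p->next)
			if (p->devhandle == devhandle && p->bus == bus &&
			    p->device == device && p->func == func)
				return 1;
		return 0;
	}

	int main(void)
	{
		pdev_htab_add(0x100, 0, 1, 0);		/* pretend OBP listed 00:01.0 */
		printf("00:01.0 present: %d\n", pdev_present(0x100, 0, 1, 0));
		printf("00:02.0 present: %d\n", pdev_present(0x100, 0, 2, 0));
		return 0;
	}
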
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 005167f82419..9cf1c88cd774 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -220,7 +220,7 @@ char reboot_command[COMMAND_LINE_SIZE];
220 220
221static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; 221static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
222 222
223static void __init per_cpu_patch(void) 223void __init per_cpu_patch(void)
224{ 224{
225 struct cpuid_patch_entry *p; 225 struct cpuid_patch_entry *p;
226 unsigned long ver; 226 unsigned long ver;
@@ -280,7 +280,7 @@ static void __init per_cpu_patch(void)
280 } 280 }
281} 281}
282 282
283static void __init sun4v_patch(void) 283void __init sun4v_patch(void)
284{ 284{
285 struct sun4v_1insn_patch_entry *p1; 285 struct sun4v_1insn_patch_entry *p1;
286 struct sun4v_2insn_patch_entry *p2; 286 struct sun4v_2insn_patch_entry *p2;
@@ -315,6 +315,15 @@ static void __init sun4v_patch(void)
315 } 315 }
316} 316}
317 317
318#ifdef CONFIG_SMP
319void __init boot_cpu_id_too_large(int cpu)
320{
321 prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
322 cpu, NR_CPUS);
323 prom_halt();
324}
325#endif
326
318void __init setup_arch(char **cmdline_p) 327void __init setup_arch(char **cmdline_p)
319{ 328{
320 /* Initialize PROM console and command line. */ 329 /* Initialize PROM console and command line. */
@@ -332,16 +341,6 @@ void __init setup_arch(char **cmdline_p)
332 conswitchp = &prom_con; 341 conswitchp = &prom_con;
333#endif 342#endif
334 343
335 /* Work out if we are starfire early on */
336 check_if_starfire();
337
338 /* Now we know enough to patch the get_cpuid sequences
339 * used by trap code.
340 */
341 per_cpu_patch();
342
343 sun4v_patch();
344
345 boot_flags_init(*cmdline_p); 344 boot_flags_init(*cmdline_p);
346 345
347 idprom_init(); 346 idprom_init();
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 90eaca3ec9a6..f03d52d0b88d 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1264,7 +1264,6 @@ void __init smp_tick_init(void)
1264 boot_cpu_id = hard_smp_processor_id(); 1264 boot_cpu_id = hard_smp_processor_id();
1265 current_tick_offset = timer_tick_offset; 1265 current_tick_offset = timer_tick_offset;
1266 1266
1267 cpu_set(boot_cpu_id, cpu_online_map);
1268 prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1; 1267 prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
1269} 1268}
1270 1269
@@ -1288,6 +1287,40 @@ int setup_profiling_timer(unsigned int multiplier)
1288 return 0; 1287 return 0;
1289} 1288}
1290 1289
1290static void __init smp_tune_scheduling(void)
1291{
1292 int instance, node;
1293 unsigned int def, smallest = ~0U;
1294
1295 def = ((tlb_type == hypervisor) ?
1296 (3 * 1024 * 1024) :
1297 (4 * 1024 * 1024));
1298
1299 instance = 0;
1300 while (!cpu_find_by_instance(instance, &node, NULL)) {
1301 unsigned int val;
1302
1303 val = prom_getintdefault(node, "ecache-size", def);
1304 if (val < smallest)
1305 smallest = val;
1306
1307 instance++;
1308 }
1309
1310 /* Any value less than 256K is nonsense. */
1311 if (smallest < (256U * 1024U))
1312 smallest = 256 * 1024;
1313
1314 max_cache_size = smallest;
1315
1316 if (smallest < 1U * 1024U * 1024U)
1317 printk(KERN_INFO "Using max_cache_size of %uKB\n",
1318 smallest / 1024U);
1319 else
1320 printk(KERN_INFO "Using max_cache_size of %uMB\n",
1321 smallest / 1024U / 1024U);
1322}
1323
1291/* Constrain the number of cpus to max_cpus. */ 1324/* Constrain the number of cpus to max_cpus. */
1292void __init smp_prepare_cpus(unsigned int max_cpus) 1325void __init smp_prepare_cpus(unsigned int max_cpus)
1293{ 1326{
@@ -1323,6 +1356,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
1323 } 1356 }
1324 1357
1325 smp_store_cpu_info(boot_cpu_id); 1358 smp_store_cpu_info(boot_cpu_id);
1359 smp_tune_scheduling();
1326} 1360}
1327 1361
1328/* Set this up early so that things like the scheduler can init 1362/* Set this up early so that things like the scheduler can init
@@ -1345,18 +1379,6 @@ void __init smp_setup_cpu_possible_map(void)
1345 1379
1346void __devinit smp_prepare_boot_cpu(void) 1380void __devinit smp_prepare_boot_cpu(void)
1347{ 1381{
1348 int cpu = hard_smp_processor_id();
1349
1350 if (cpu >= NR_CPUS) {
1351 prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
1352 prom_halt();
1353 }
1354
1355 current_thread_info()->cpu = cpu;
1356 __local_per_cpu_offset = __per_cpu_offset(cpu);
1357
1358 cpu_set(smp_processor_id(), cpu_online_map);
1359 cpu_set(smp_processor_id(), phys_cpu_present_map);
1360} 1382}
1361 1383
1362int __devinit __cpu_up(unsigned int cpu) 1384int __devinit __cpu_up(unsigned int cpu)
@@ -1433,4 +1455,7 @@ void __init setup_per_cpu_areas(void)
1433 1455
1434 for (i = 0; i < NR_CPUS; i++, ptr += size) 1456 for (i = 0; i < NR_CPUS; i++, ptr += size)
1435 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 1457 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1458
1459 /* Setup %g5 for the boot cpu. */
1460 __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1436} 1461}
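
The smp_tune_scheduling() helper added above picks max_cache_size as the smallest "ecache-size" reported by any CPU, falling back to 3 MB on sun4v and 4 MB otherwise when the property is missing, and clamps the result to at least 256 KB. The sketch below reproduces just that arithmetic over an array of per-CPU values, with 0 standing in for a missing property; prom_getintdefault() and the hypervisor check are out of scope here.

	#include <stdio.h>

	static unsigned int tune_max_cache_size(const unsigned int *ecache_size,
						int ncpus, int hypervisor)
	{
		unsigned int def = hypervisor ? 3u * 1024 * 1024 : 4u * 1024 * 1024;
		unsigned int smallest = ~0u;

		for (int i = 0; i < ncpus; i++) {
			unsigned int val = ecache_size[i] ? ecache_size[i] : def;

			if (val < smallest)
				smallest = val;
		}

		/* Any value less than 256K is nonsense. */
		if (smallest < 256u * 1024)
			smallest = 256u * 1024;

		if (smallest < 1024u * 1024)
			printf("Using max_cache_size of %uKB\n", smallest / 1024);
		else
			printf("Using max_cache_size of %uMB\n", smallest / 1024 / 1024);
		return smallest;
	}

	int main(void)
	{
		unsigned int sizes[] = { 8u * 1024 * 1024, 512u * 1024, 0 };

		tune_max_cache_size(sizes, 3, 0);	/* -> 512KB: one CPU has a small E-cache */
		return 0;
	}
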
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 2793a5d82380..563db528e031 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1797,7 +1797,9 @@ static const char *sun4v_err_type_to_str(u32 type)
1797 }; 1797 };
1798} 1798}
1799 1799
1800static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) 1800extern void __show_regs(struct pt_regs * regs);
1801
1802static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
1801{ 1803{
1802 int cnt; 1804 int cnt;
1803 1805
@@ -1830,6 +1832,8 @@ static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *
1830 pfx, 1832 pfx,
1831 ent->err_raddr, ent->err_size, ent->err_cpu); 1833 ent->err_raddr, ent->err_size, ent->err_cpu);
1832 1834
1835 __show_regs(regs);
1836
1833 if ((cnt = atomic_read(ocnt)) != 0) { 1837 if ((cnt = atomic_read(ocnt)) != 0) {
1834 atomic_set(ocnt, 0); 1838 atomic_set(ocnt, 0);
1835 wmb(); 1839 wmb();
@@ -1862,7 +1866,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1862 1866
1863 put_cpu(); 1867 put_cpu();
1864 1868
1865 sun4v_log_error(&local_copy, cpu, 1869 sun4v_log_error(regs, &local_copy, cpu,
1866 KERN_ERR "RESUMABLE ERROR", 1870 KERN_ERR "RESUMABLE ERROR",
1867 &sun4v_resum_oflow_cnt); 1871 &sun4v_resum_oflow_cnt);
1868} 1872}
@@ -1910,7 +1914,7 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1910 } 1914 }
1911#endif 1915#endif
1912 1916
1913 sun4v_log_error(&local_copy, cpu, 1917 sun4v_log_error(regs, &local_copy, cpu,
1914 KERN_EMERG "NON-RESUMABLE ERROR", 1918 KERN_EMERG "NON-RESUMABLE ERROR",
1915 &sun4v_nonresum_oflow_cnt); 1919 &sun4v_nonresum_oflow_cnt);
1916 1920
@@ -2200,7 +2204,6 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2200void die_if_kernel(char *str, struct pt_regs *regs) 2204void die_if_kernel(char *str, struct pt_regs *regs)
2201{ 2205{
2202 static int die_counter; 2206 static int die_counter;
2203 extern void __show_regs(struct pt_regs * regs);
2204 extern void smp_report_regs(void); 2207 extern void smp_report_regs(void);
2205 int count = 0; 2208 int count = 0;
2206 2209
diff --git a/arch/sparc64/lib/checksum.S b/arch/sparc64/lib/checksum.S
index ba9cd3ccc2b2..1d230f693dc4 100644
--- a/arch/sparc64/lib/checksum.S
+++ b/arch/sparc64/lib/checksum.S
@@ -165,8 +165,9 @@ csum_partial_end_cruft:
165 sll %g1, 8, %g1 165 sll %g1, 8, %g1
166 or %o5, %g1, %o4 166 or %o5, %g1, %o4
167 167
1681: add %o2, %o4, %o2 1681: addcc %o2, %o4, %o2
169 addc %g0, %o2, %o2
169 170
170csum_partial_finish: 171csum_partial_finish:
171 retl 172 retl
172 mov %o2, %o0 173 srl %o2, 0, %o0
diff --git a/arch/sparc64/lib/csum_copy.S b/arch/sparc64/lib/csum_copy.S
index 71af48839064..e566c770a0f6 100644
--- a/arch/sparc64/lib/csum_copy.S
+++ b/arch/sparc64/lib/csum_copy.S
@@ -221,11 +221,12 @@ FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
221 sll %g1, 8, %g1 221 sll %g1, 8, %g1
222 or %o5, %g1, %o4 222 or %o5, %g1, %o4
223 223
2241: add %o3, %o4, %o3 2241: addcc %o3, %o4, %o3
225 addc %g0, %o3, %o3
225 226
22670: 22770:
227 retl 228 retl
228 mov %o3, %o0 229 srl %o3, 0, %o0
229 230
23095: mov 0, GLOBAL_SPARE 23195: mov 0, GLOBAL_SPARE
231 brlez,pn %o2, 4f 232 brlez,pn %o2, 4f
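
Both checksum fixes above have the same shape: the final odd-byte addition used a plain add, which silently drops the carry out of the 32-bit accumulator, and the return path now zero-extends the result with srl instead of handing back whatever happens to sit in the upper 32 bits of the register. In C, the end-around carry that the addcc/addc pair performs looks like the helper below; this is a generic RFC 1071-style fold for illustration, not the SPARC code itself.

	#include <stdio.h>
	#include <stdint.h>

	/* One's-complement add with end-around carry, the C analogue of addcc/addc. */
	static uint32_t csum_add(uint32_t sum, uint32_t addend)
	{
		sum += addend;
		if (sum < addend)	/* unsigned wrap-around means a carry was generated */
			sum += 1;
		return sum;
	}

	/* Fold a 32-bit partial sum down to the final 16-bit checksum. */
	static uint16_t csum_fold(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		uint32_t partial = 0xffffffff;	/* a nearly saturated partial sum */
		uint32_t tail = 0x00000041;	/* trailing odd byte, already positioned */

		uint32_t wrong = partial + tail;		/* carry lost */
		uint32_t right = csum_add(partial, tail);	/* carry folded back in */

		printf("plain add:  0x%08x -> checksum 0x%04x\n", wrong, csum_fold(wrong));
		printf("end-around: 0x%08x -> checksum 0x%04x\n", right, csum_fold(right));
		return 0;
	}
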
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index 7a0e04e34bf9..b65ca115ef77 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -33,5 +33,9 @@ include $(srctree)/arch/i386/Makefile.cpu
33# prevent gcc from keeping the stack 16 byte aligned. Taken from i386. 33# prevent gcc from keeping the stack 16 byte aligned. Taken from i386.
34cflags-y += $(call cc-option,-mpreferred-stack-boundary=2) 34cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
35 35
36# Prevent sprintf in nfsd from being converted to strcpy and resulting in
37# an unresolved reference.
38cflags-y += -ffreestanding
39
36CFLAGS += $(cflags-y) 40CFLAGS += $(cflags-y)
37USER_CFLAGS += $(cflags-y) 41USER_CFLAGS += $(cflags-y)
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index efa3d33c0be6..310980b32173 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -120,20 +120,11 @@ extern int is_syscall(unsigned long addr);
120extern void free_irq(unsigned int, void *); 120extern void free_irq(unsigned int, void *);
121extern int cpu(void); 121extern int cpu(void);
122 122
123extern void time_init_kern(void);
124
123/* Are we disallowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */ 125/* Are we disallowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */
124extern int __cant_sleep(void); 126extern int __cant_sleep(void);
125extern void segv_handler(int sig, union uml_pt_regs *regs); 127extern void segv_handler(int sig, union uml_pt_regs *regs);
126extern void sigio_handler(int sig, union uml_pt_regs *regs); 128extern void sigio_handler(int sig, union uml_pt_regs *regs);
127 129
128#endif 130#endif
129
130/*
131 * Overrides for Emacs so that we follow Linus's tabbing style.
132 * Emacs will notice this stuff at the end of the file and automatically
133 * adjust the settings for this buffer only. This must remain at the end
134 * of the file.
135 * ---------------------------------------------------------------------------
136 * Local variables:
137 * c-file-style: "linux"
138 * End:
139 */
diff --git a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c
index 528cf623f8b4..86f51d04c98d 100644
--- a/arch/um/kernel/time_kern.c
+++ b/arch/um/kernel/time_kern.c
@@ -84,6 +84,16 @@ void timer_irq(union uml_pt_regs *regs)
84 } 84 }
85} 85}
86 86
87
88void time_init_kern(void)
89{
90 unsigned long long nsecs;
91
92 nsecs = os_nsecs();
93 set_normalized_timespec(&wall_to_monotonic, -nsecs / BILLION,
94 -nsecs % BILLION);
95}
96
87void do_boot_timer_handler(struct sigcontext * sc) 97void do_boot_timer_handler(struct sigcontext * sc)
88{ 98{
89 struct pt_regs regs; 99 struct pt_regs regs;
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index 3a0ac38e978b..90912aaca7aa 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -59,7 +59,7 @@ static __init void do_uml_initcalls(void)
59 initcall_t *call; 59 initcall_t *call;
60 60
61 call = &__uml_initcall_start; 61 call = &__uml_initcall_start;
62 while (call < &__uml_initcall_end){; 62 while (call < &__uml_initcall_end){
63 (*call)(); 63 (*call)();
64 call++; 64 call++;
65 } 65 }
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index 6f7626775acb..280c4fb9b585 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -81,20 +81,12 @@ void uml_idle_timer(void)
81 set_interval(ITIMER_REAL); 81 set_interval(ITIMER_REAL);
82} 82}
83 83
84extern void ktime_get_ts(struct timespec *ts);
85#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
86
87void time_init(void) 84void time_init(void)
88{ 85{
89 struct timespec now;
90
91 if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR) 86 if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR)
92 panic("Couldn't set SIGVTALRM handler"); 87 panic("Couldn't set SIGVTALRM handler");
93 set_interval(ITIMER_VIRTUAL); 88 set_interval(ITIMER_VIRTUAL);
94 89 time_init_kern();
95 do_posix_clock_monotonic_gettime(&now);
96 wall_to_monotonic.tv_sec = -now.tv_sec;
97 wall_to_monotonic.tv_nsec = -now.tv_nsec;
98} 90}
99 91
100unsigned long long os_nsecs(void) 92unsigned long long os_nsecs(void)
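
The point of routing this through time_init_kern()/set_normalized_timespec() is that wall_to_monotonic is initialized to the negated value returned by os_nsecs(), and a naive { -sec, -nsec } pair leaves tv_nsec outside the valid [0, 1e9) range. A small sketch of the normalization arithmetic, with the helper reimplemented here for illustration and BILLION = 1e9 as in the UM code:

	#include <stdio.h>

	#define BILLION 1000000000LL

	struct ts { long long tv_sec, tv_nsec; };

	/* Wrap tv_nsec into [0, BILLION), adjusting tv_sec to compensate. */
	static void set_normalized_ts(struct ts *t, long long sec, long long nsec)
	{
		while (nsec >= BILLION) {
			nsec -= BILLION;
			sec++;
		}
		while (nsec < 0) {
			nsec += BILLION;
			sec--;
		}
		t->tv_sec = sec;
		t->tv_nsec = nsec;
	}

	int main(void)
	{
		long long nsecs = 1234567890123LL;	/* pretend os_nsecs() returned this */
		struct ts naive = { -(nsecs / BILLION), -(nsecs % BILLION) };
		struct ts wtm;

		set_normalized_ts(&wtm, -(nsecs / BILLION), -(nsecs % BILLION));
		printf("naive:      sec=%lld nsec=%lld (tv_nsec out of range)\n",
		       naive.tv_sec, naive.tv_nsec);
		printf("normalized: sec=%lld nsec=%lld\n", wtm.tv_sec, wtm.tv_nsec);
		return 0;
	}
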
diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
index 749dd1bfe60f..710d5fb807e1 100644
--- a/arch/um/sys-i386/syscalls.c
+++ b/arch/um/sys-i386/syscalls.c
@@ -99,11 +99,12 @@ long sys_ipc (uint call, int first, int second,
99 99
100 switch (call) { 100 switch (call) {
101 case SEMOP: 101 case SEMOP:
102 return sys_semtimedop(first, (struct sembuf *) ptr, second, 102 return sys_semtimedop(first, (struct sembuf __user *) ptr,
103 NULL); 103 second, NULL);
104 case SEMTIMEDOP: 104 case SEMTIMEDOP:
105 return sys_semtimedop(first, (struct sembuf *) ptr, second, 105 return sys_semtimedop(first, (struct sembuf __user *) ptr,
106 (const struct timespec *) fifth); 106 second,
107 (const struct timespec __user *) fifth);
107 case SEMGET: 108 case SEMGET:
108 return sys_semget (first, second, third); 109 return sys_semget (first, second, third);
109 case SEMCTL: { 110 case SEMCTL: {
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
index a4c46a8af008..9edf114faf79 100644
--- a/arch/um/sys-x86_64/signal.c
+++ b/arch/um/sys-x86_64/signal.c
@@ -21,7 +21,7 @@
21#include "skas.h" 21#include "skas.h"
22 22
23static int copy_sc_from_user_skas(struct pt_regs *regs, 23static int copy_sc_from_user_skas(struct pt_regs *regs,
24 struct sigcontext *from) 24 struct sigcontext __user *from)
25{ 25{
26 int err = 0; 26 int err = 0;
27 27
@@ -54,7 +54,8 @@ static int copy_sc_from_user_skas(struct pt_regs *regs,
54 return(err); 54 return(err);
55} 55}
56 56
57int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, 57int copy_sc_to_user_skas(struct sigcontext __user *to,
58 struct _fpstate __user *to_fp,
58 struct pt_regs *regs, unsigned long mask, 59 struct pt_regs *regs, unsigned long mask,
59 unsigned long sp) 60 unsigned long sp)
60{ 61{
@@ -106,10 +107,11 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
106#endif 107#endif
107 108
108#ifdef CONFIG_MODE_TT 109#ifdef CONFIG_MODE_TT
109int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from, 110int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext __user *from,
110 int fpsize) 111 int fpsize)
111{ 112{
112 struct _fpstate *to_fp, *from_fp; 113 struct _fpstate *to_fp;
114 struct _fpstate __user *from_fp;
113 unsigned long sigs; 115 unsigned long sigs;
114 int err; 116 int err;
115 117
@@ -124,13 +126,14 @@ int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
124 return(err); 126 return(err);
125} 127}
126 128
127int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp, 129int copy_sc_to_user_tt(struct sigcontext __user *to, struct _fpstate __user *fp,
128 struct sigcontext *from, int fpsize, unsigned long sp) 130 struct sigcontext *from, int fpsize, unsigned long sp)
129{ 131{
130 struct _fpstate *to_fp, *from_fp; 132 struct _fpstate __user *to_fp;
133 struct _fpstate *from_fp;
131 int err; 134 int err;
132 135
133 to_fp = (fp ? fp : (struct _fpstate *) (to + 1)); 136 to_fp = (fp ? fp : (struct _fpstate __user *) (to + 1));
134 from_fp = from->fpstate; 137 from_fp = from->fpstate;
135 err = copy_to_user(to, from, sizeof(*to)); 138 err = copy_to_user(to, from, sizeof(*to));
136 /* The SP in the sigcontext is the updated one for the signal 139 /* The SP in the sigcontext is the updated one for the signal
@@ -158,7 +161,8 @@ static int copy_sc_from_user(struct pt_regs *to, void __user *from)
158 return(ret); 161 return(ret);
159} 162}
160 163
161static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp, 164static int copy_sc_to_user(struct sigcontext __user *to,
165 struct _fpstate __user *fp,
162 struct pt_regs *from, unsigned long mask, 166 struct pt_regs *from, unsigned long mask,
163 unsigned long sp) 167 unsigned long sp)
164{ 168{
@@ -169,7 +173,7 @@ static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
169 173
170struct rt_sigframe 174struct rt_sigframe
171{ 175{
172 char *pretcode; 176 char __user *pretcode;
173 struct ucontext uc; 177 struct ucontext uc;
174 struct siginfo info; 178 struct siginfo info;
175}; 179};
@@ -188,7 +192,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
188 192
189 frame = (struct rt_sigframe __user *) 193 frame = (struct rt_sigframe __user *)
190 round_down(stack_top - sizeof(struct rt_sigframe), 16) - 8; 194 round_down(stack_top - sizeof(struct rt_sigframe), 16) - 8;
191 frame = (struct rt_sigframe *) ((unsigned long) frame - 128); 195 frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128);
192 196
193 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) 197 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
194 goto out; 198 goto out;
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
index 6acee5c4ada6..6fce9f45dfdc 100644
--- a/arch/um/sys-x86_64/syscalls.c
+++ b/arch/um/sys-x86_64/syscalls.c
@@ -45,7 +45,7 @@ static long arch_prctl_tt(int code, unsigned long addr)
45 case ARCH_GET_GS: 45 case ARCH_GET_GS:
46 ret = arch_prctl(code, (unsigned long) &tmp); 46 ret = arch_prctl(code, (unsigned long) &tmp);
47 if(!ret) 47 if(!ret)
48 ret = put_user(tmp, &addr); 48 ret = put_user(tmp, (long __user *)addr);
49 break; 49 break;
50 default: 50 default:
51 ret = -EINVAL; 51 ret = -EINVAL;
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 0de3ea938830..9cc7031b7151 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -271,6 +271,18 @@ __setup("enable_8254_timer", setup_enable_8254_timer);
271#include <linux/pci_ids.h> 271#include <linux/pci_ids.h>
272#include <linux/pci.h> 272#include <linux/pci.h>
273 273
274
275#ifdef CONFIG_ACPI
276
277static int nvidia_hpet_detected __initdata;
278
279static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
280{
281 nvidia_hpet_detected = 1;
282 return 0;
283}
284#endif
285
274/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC 286/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
275 off. Check for an Nvidia or VIA PCI bridge and turn it off. 287 off. Check for an Nvidia or VIA PCI bridge and turn it off.
276 Use pci direct infrastructure because this runs before the PCI subsystem. 288 Use pci direct infrastructure because this runs before the PCI subsystem.
@@ -317,11 +329,19 @@ void __init check_ioapic(void)
317 return; 329 return;
318 case PCI_VENDOR_ID_NVIDIA: 330 case PCI_VENDOR_ID_NVIDIA:
319#ifdef CONFIG_ACPI 331#ifdef CONFIG_ACPI
320 /* All timer overrides on Nvidia 332 /*
321 seem to be wrong. Skip them. */ 333 * All timer overrides on Nvidia are
322 acpi_skip_timer_override = 1; 334 * wrong unless HPET is enabled.
323 printk(KERN_INFO 335 */
324 "Nvidia board detected. Ignoring ACPI timer override.\n"); 336 nvidia_hpet_detected = 0;
337 acpi_table_parse(ACPI_HPET,
338 nvidia_hpet_check);
339 if (nvidia_hpet_detected == 0) {
340 acpi_skip_timer_override = 1;
341 printk(KERN_INFO "Nvidia board "
342 "detected. Ignoring ACPI "
343 "timer override.\n");
344 }
325#endif 345#endif
326 /* RED-PEN skip them on mptables too? */ 346 /* RED-PEN skip them on mptables too? */
327 return; 347 return;
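
The io_apic.c change above stops unconditionally ignoring the ACPI timer override on Nvidia chipsets: it first walks the ACPI tables looking for an HPET entry via a callback that latches a flag, and only skips the override when no HPET was found. The control flow, with a mocked-up table list standing in for acpi_table_parse() and the callback signature simplified:

	#include <stdio.h>
	#include <string.h>

	static int nvidia_hpet_detected;

	static int nvidia_hpet_check(void)
	{
		nvidia_hpet_detected = 1;	/* fires once per matching table */
		return 0;
	}

	/* Mock of the table walk: invoke the callback for every table named "HPET". */
	static void mock_table_parse(const char *const *tables, int n,
				     const char *want, int (*handler)(void))
	{
		for (int i = 0; i < n; i++)
			if (!strcmp(tables[i], want))
				handler();
	}

	static void check_nvidia(const char *const *tables, int n)
	{
		nvidia_hpet_detected = 0;
		mock_table_parse(tables, n, "HPET", nvidia_hpet_check);
		if (!nvidia_hpet_detected)
			printf("Nvidia board detected. Ignoring ACPI timer override.\n");
		else
			printf("HPET present, keeping the ACPI timer override.\n");
	}

	int main(void)
	{
		const char *with_hpet[] = { "FADT", "HPET", "MADT" };
		const char *without[]   = { "FADT", "MADT" };

		check_nvidia(with_hpet, 3);
		check_nvidia(without, 2);
		return 0;
	}
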
diff --git a/block/as-iosched.c b/block/as-iosched.c
index e25a5d79ab27..a7caf35ca0c2 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1648,17 +1648,17 @@ static void as_exit_queue(elevator_t *e)
1648 * initialize elevator private data (as_data), and alloc a arq for 1648 * initialize elevator private data (as_data), and alloc a arq for
1649 * each request on the free lists 1649 * each request on the free lists
1650 */ 1650 */
1651static int as_init_queue(request_queue_t *q, elevator_t *e) 1651static void *as_init_queue(request_queue_t *q, elevator_t *e)
1652{ 1652{
1653 struct as_data *ad; 1653 struct as_data *ad;
1654 int i; 1654 int i;
1655 1655
1656 if (!arq_pool) 1656 if (!arq_pool)
1657 return -ENOMEM; 1657 return NULL;
1658 1658
1659 ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node); 1659 ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
1660 if (!ad) 1660 if (!ad)
1661 return -ENOMEM; 1661 return NULL;
1662 memset(ad, 0, sizeof(*ad)); 1662 memset(ad, 0, sizeof(*ad));
1663 1663
1664 ad->q = q; /* Identify what queue the data belongs to */ 1664 ad->q = q; /* Identify what queue the data belongs to */
@@ -1667,7 +1667,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
1667 GFP_KERNEL, q->node); 1667 GFP_KERNEL, q->node);
1668 if (!ad->hash) { 1668 if (!ad->hash) {
1669 kfree(ad); 1669 kfree(ad);
1670 return -ENOMEM; 1670 return NULL;
1671 } 1671 }
1672 1672
1673 ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, 1673 ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -1675,7 +1675,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
1675 if (!ad->arq_pool) { 1675 if (!ad->arq_pool) {
1676 kfree(ad->hash); 1676 kfree(ad->hash);
1677 kfree(ad); 1677 kfree(ad);
1678 return -ENOMEM; 1678 return NULL;
1679 } 1679 }
1680 1680
1681 /* anticipatory scheduling helpers */ 1681 /* anticipatory scheduling helpers */
@@ -1696,14 +1696,13 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
1696 ad->antic_expire = default_antic_expire; 1696 ad->antic_expire = default_antic_expire;
1697 ad->batch_expire[REQ_SYNC] = default_read_batch_expire; 1697 ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
1698 ad->batch_expire[REQ_ASYNC] = default_write_batch_expire; 1698 ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
1699 e->elevator_data = ad;
1700 1699
1701 ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC]; 1700 ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
1702 ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10; 1701 ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
1703 if (ad->write_batch_count < 2) 1702 if (ad->write_batch_count < 2)
1704 ad->write_batch_count = 2; 1703 ad->write_batch_count = 2;
1705 1704
1706 return 0; 1705 return ad;
1707} 1706}
1708 1707
1709/* 1708/*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 11ce6aaf1bd0..a46d030e092a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -133,6 +133,7 @@ struct cfq_data {
133 mempool_t *crq_pool; 133 mempool_t *crq_pool;
134 134
135 int rq_in_driver; 135 int rq_in_driver;
136 int hw_tag;
136 137
137 /* 138 /*
138 * schedule slice state info 139 * schedule slice state info
@@ -500,10 +501,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
500 501
501 /* 502 /*
502 * if queue was preempted, just add to front to be fair. busy_rr 503 * if queue was preempted, just add to front to be fair. busy_rr
503 * isn't sorted. 504 * isn't sorted, but insert at the back for fairness.
504 */ 505 */
505 if (preempted || list == &cfqd->busy_rr) { 506 if (preempted || list == &cfqd->busy_rr) {
506 list_add(&cfqq->cfq_list, list); 507 if (preempted)
508 list = list->prev;
509
510 list_add_tail(&cfqq->cfq_list, list);
507 return; 511 return;
508 } 512 }
509 513
@@ -664,6 +668,15 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
664 struct cfq_data *cfqd = q->elevator->elevator_data; 668 struct cfq_data *cfqd = q->elevator->elevator_data;
665 669
666 cfqd->rq_in_driver++; 670 cfqd->rq_in_driver++;
671
672 /*
 673	 * If the depth is larger than 1, it really could be queueing. But let's
 674	 * make the mark a little higher - idling could still be good for
 675	 * low queueing, and a low queueing number could also just indicate
 676	 * SCSI-mid-layer-like behaviour, where limit+1 is often seen.
677 */
678 if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
679 cfqd->hw_tag = 1;
667} 680}
668 681
669static void cfq_deactivate_request(request_queue_t *q, struct request *rq) 682static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
@@ -879,6 +892,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
879 cfqq = list_entry_cfqq(cfqd->cur_rr.next); 892 cfqq = list_entry_cfqq(cfqd->cur_rr.next);
880 893
881 /* 894 /*
895 * If no new queues are available, check if the busy list has some
896 * before falling back to idle io.
897 */
898 if (!cfqq && !list_empty(&cfqd->busy_rr))
899 cfqq = list_entry_cfqq(cfqd->busy_rr.next);
900
901 /*
882 * if we have idle queues and no rt or be queues had pending 902 * if we have idle queues and no rt or be queues had pending
883 * requests, either allow immediate service if the grace period 903 * requests, either allow immediate service if the grace period
884 * has passed or arm the idle grace timer 904 * has passed or arm the idle grace timer
@@ -1458,7 +1478,8 @@ retry:
1458 * set ->slice_left to allow preemption for a new process 1478 * set ->slice_left to allow preemption for a new process
1459 */ 1479 */
1460 cfqq->slice_left = 2 * cfqd->cfq_slice_idle; 1480 cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
1461 cfq_mark_cfqq_idle_window(cfqq); 1481 if (!cfqd->hw_tag)
1482 cfq_mark_cfqq_idle_window(cfqq);
1462 cfq_mark_cfqq_prio_changed(cfqq); 1483 cfq_mark_cfqq_prio_changed(cfqq);
1463 cfq_init_prio_data(cfqq); 1484 cfq_init_prio_data(cfqq);
1464 } 1485 }
@@ -1649,7 +1670,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1649{ 1670{
1650 int enable_idle = cfq_cfqq_idle_window(cfqq); 1671 int enable_idle = cfq_cfqq_idle_window(cfqq);
1651 1672
1652 if (!cic->ioc->task || !cfqd->cfq_slice_idle) 1673 if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
1653 enable_idle = 0; 1674 enable_idle = 0;
1654 else if (sample_valid(cic->ttime_samples)) { 1675 else if (sample_valid(cic->ttime_samples)) {
1655 if (cic->ttime_mean > cfqd->cfq_slice_idle) 1676 if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1740,14 +1761,24 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1740 1761
1741 cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); 1762 cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
1742 1763
1764 cic = crq->io_context;
1765
1743 /* 1766 /*
1744 * we never wait for an async request and we don't allow preemption 1767 * we never wait for an async request and we don't allow preemption
1745 * of an async request. so just return early 1768 * of an async request. so just return early
1746 */ 1769 */
1747 if (!cfq_crq_is_sync(crq)) 1770 if (!cfq_crq_is_sync(crq)) {
1771 /*
1772 * sync process issued an async request, if it's waiting
1773 * then expire it and kick rq handling.
1774 */
1775 if (cic == cfqd->active_cic &&
1776 del_timer(&cfqd->idle_slice_timer)) {
1777 cfq_slice_expired(cfqd, 0);
1778 cfq_start_queueing(cfqd, cfqq);
1779 }
1748 return; 1780 return;
1749 1781 }
1750 cic = crq->io_context;
1751 1782
1752 cfq_update_io_thinktime(cfqd, cic); 1783 cfq_update_io_thinktime(cfqd, cic);
1753 cfq_update_io_seektime(cfqd, cic, crq); 1784 cfq_update_io_seektime(cfqd, cic, crq);
@@ -2165,10 +2196,9 @@ static void cfq_idle_class_timer(unsigned long data)
2165 * race with a non-idle queue, reset timer 2196 * race with a non-idle queue, reset timer
2166 */ 2197 */
2167 end = cfqd->last_end_request + CFQ_IDLE_GRACE; 2198 end = cfqd->last_end_request + CFQ_IDLE_GRACE;
2168 if (!time_after_eq(jiffies, end)) { 2199 if (!time_after_eq(jiffies, end))
2169 cfqd->idle_class_timer.expires = end; 2200 mod_timer(&cfqd->idle_class_timer, end);
2170 add_timer(&cfqd->idle_class_timer); 2201 else
2171 } else
2172 cfq_schedule_dispatch(cfqd); 2202 cfq_schedule_dispatch(cfqd);
2173 2203
2174 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2204 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
@@ -2221,14 +2251,14 @@ static void cfq_exit_queue(elevator_t *e)
2221 kfree(cfqd); 2251 kfree(cfqd);
2222} 2252}
2223 2253
2224static int cfq_init_queue(request_queue_t *q, elevator_t *e) 2254static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
2225{ 2255{
2226 struct cfq_data *cfqd; 2256 struct cfq_data *cfqd;
2227 int i; 2257 int i;
2228 2258
2229 cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL); 2259 cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
2230 if (!cfqd) 2260 if (!cfqd)
2231 return -ENOMEM; 2261 return NULL;
2232 2262
2233 memset(cfqd, 0, sizeof(*cfqd)); 2263 memset(cfqd, 0, sizeof(*cfqd));
2234 2264
@@ -2258,8 +2288,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2258 for (i = 0; i < CFQ_QHASH_ENTRIES; i++) 2288 for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
2259 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]); 2289 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
2260 2290
2261 e->elevator_data = cfqd;
2262
2263 cfqd->queue = q; 2291 cfqd->queue = q;
2264 2292
2265 cfqd->max_queued = q->nr_requests / 4; 2293 cfqd->max_queued = q->nr_requests / 4;
@@ -2286,14 +2314,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2286 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 2314 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2287 cfqd->cfq_slice_idle = cfq_slice_idle; 2315 cfqd->cfq_slice_idle = cfq_slice_idle;
2288 2316
2289 return 0; 2317 return cfqd;
2290out_crqpool: 2318out_crqpool:
2291 kfree(cfqd->cfq_hash); 2319 kfree(cfqd->cfq_hash);
2292out_cfqhash: 2320out_cfqhash:
2293 kfree(cfqd->crq_hash); 2321 kfree(cfqd->crq_hash);
2294out_crqhash: 2322out_crqhash:
2295 kfree(cfqd); 2323 kfree(cfqd);
2296 return -ENOMEM; 2324 return NULL;
2297} 2325}
2298 2326
2299static void cfq_slab_kill(void) 2327static void cfq_slab_kill(void)
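
The recurring hw_tag checks in the cfq hunks above come down to a one-way latch: requests handed to the driver are counted, and once more than four are outstanding at once the device is assumed to do its own command queueing, so new queues stop getting the idle window. Roughly, as a userspace sketch (field names mirror the hunk, everything else is invented for illustration):

	#include <stdio.h>

	struct cfqd_sketch {
		int rq_in_driver;
		int hw_tag;		/* latches to 1, never cleared in these hunks */
	};

	static void activate_request(struct cfqd_sketch *cfqd)
	{
		cfqd->rq_in_driver++;
		/* Depth > 4 suggests real command queueing, not just limit+1 behaviour. */
		if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
			cfqd->hw_tag = 1;
	}

	static void complete_request(struct cfqd_sketch *cfqd)
	{
		cfqd->rq_in_driver--;
	}

	static int idle_window_allowed(const struct cfqd_sketch *cfqd)
	{
		return !cfqd->hw_tag;	/* with hw_tag set, idling is not worth it */
	}

	int main(void)
	{
		struct cfqd_sketch cfqd = { 0, 0 };

		for (int i = 0; i < 6; i++)
			activate_request(&cfqd);
		printf("depth=%d hw_tag=%d idle allowed=%d\n",
		       cfqd.rq_in_driver, cfqd.hw_tag, idle_window_allowed(&cfqd));
		for (int i = 0; i < 6; i++)
			complete_request(&cfqd);
		printf("depth=%d hw_tag=%d idle allowed=%d\n",
		       cfqd.rq_in_driver, cfqd.hw_tag, idle_window_allowed(&cfqd));
		return 0;
	}
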
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 399fa1e60e1f..3bd0415a9828 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -613,24 +613,24 @@ static void deadline_exit_queue(elevator_t *e)
613 * initialize elevator private data (deadline_data), and alloc a drq for 613 * initialize elevator private data (deadline_data), and alloc a drq for
614 * each request on the free lists 614 * each request on the free lists
615 */ 615 */
616static int deadline_init_queue(request_queue_t *q, elevator_t *e) 616static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
617{ 617{
618 struct deadline_data *dd; 618 struct deadline_data *dd;
619 int i; 619 int i;
620 620
621 if (!drq_pool) 621 if (!drq_pool)
622 return -ENOMEM; 622 return NULL;
623 623
624 dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node); 624 dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
625 if (!dd) 625 if (!dd)
626 return -ENOMEM; 626 return NULL;
627 memset(dd, 0, sizeof(*dd)); 627 memset(dd, 0, sizeof(*dd));
628 628
629 dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES, 629 dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
630 GFP_KERNEL, q->node); 630 GFP_KERNEL, q->node);
631 if (!dd->hash) { 631 if (!dd->hash) {
632 kfree(dd); 632 kfree(dd);
633 return -ENOMEM; 633 return NULL;
634 } 634 }
635 635
636 dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, 636 dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -638,7 +638,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
638 if (!dd->drq_pool) { 638 if (!dd->drq_pool) {
639 kfree(dd->hash); 639 kfree(dd->hash);
640 kfree(dd); 640 kfree(dd);
641 return -ENOMEM; 641 return NULL;
642 } 642 }
643 643
644 for (i = 0; i < DL_HASH_ENTRIES; i++) 644 for (i = 0; i < DL_HASH_ENTRIES; i++)
@@ -653,8 +653,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
653 dd->writes_starved = writes_starved; 653 dd->writes_starved = writes_starved;
654 dd->front_merges = 1; 654 dd->front_merges = 1;
655 dd->fifo_batch = fifo_batch; 655 dd->fifo_batch = fifo_batch;
656 e->elevator_data = dd; 656 return dd;
657 return 0;
658} 657}
659 658
660static void deadline_put_request(request_queue_t *q, struct request *rq) 659static void deadline_put_request(request_queue_t *q, struct request *rq)
diff --git a/block/elevator.c b/block/elevator.c
index 8768a367fdde..a0afdd317cef 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -121,16 +121,16 @@ static struct elevator_type *elevator_get(const char *name)
121 return e; 121 return e;
122} 122}
123 123
124static int elevator_attach(request_queue_t *q, struct elevator_queue *eq) 124static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
125{ 125{
126 int ret = 0; 126 return eq->ops->elevator_init_fn(q, eq);
127}
127 128
129static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
130 void *data)
131{
128 q->elevator = eq; 132 q->elevator = eq;
129 133 eq->elevator_data = data;
130 if (eq->ops->elevator_init_fn)
131 ret = eq->ops->elevator_init_fn(q, eq);
132
133 return ret;
134} 134}
135 135
136static char chosen_elevator[16]; 136static char chosen_elevator[16];
@@ -181,6 +181,7 @@ int elevator_init(request_queue_t *q, char *name)
181 struct elevator_type *e = NULL; 181 struct elevator_type *e = NULL;
182 struct elevator_queue *eq; 182 struct elevator_queue *eq;
183 int ret = 0; 183 int ret = 0;
184 void *data;
184 185
185 INIT_LIST_HEAD(&q->queue_head); 186 INIT_LIST_HEAD(&q->queue_head);
186 q->last_merge = NULL; 187 q->last_merge = NULL;
@@ -202,10 +203,13 @@ int elevator_init(request_queue_t *q, char *name)
202 if (!eq) 203 if (!eq)
203 return -ENOMEM; 204 return -ENOMEM;
204 205
205 ret = elevator_attach(q, eq); 206 data = elevator_init_queue(q, eq);
206 if (ret) 207 if (!data) {
207 kobject_put(&eq->kobj); 208 kobject_put(&eq->kobj);
209 return -ENOMEM;
210 }
208 211
212 elevator_attach(q, eq, data);
209 return ret; 213 return ret;
210} 214}
211 215
@@ -722,13 +726,16 @@ int elv_register_queue(struct request_queue *q)
722 return error; 726 return error;
723} 727}
724 728
729static void __elv_unregister_queue(elevator_t *e)
730{
731 kobject_uevent(&e->kobj, KOBJ_REMOVE);
732 kobject_del(&e->kobj);
733}
734
725void elv_unregister_queue(struct request_queue *q) 735void elv_unregister_queue(struct request_queue *q)
726{ 736{
727 if (q) { 737 if (q)
728 elevator_t *e = q->elevator; 738 __elv_unregister_queue(q->elevator);
729 kobject_uevent(&e->kobj, KOBJ_REMOVE);
730 kobject_del(&e->kobj);
731 }
732} 739}
733 740
734int elv_register(struct elevator_type *e) 741int elv_register(struct elevator_type *e)
@@ -780,6 +787,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
780static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) 787static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
781{ 788{
782 elevator_t *old_elevator, *e; 789 elevator_t *old_elevator, *e;
790 void *data;
783 791
784 /* 792 /*
785 * Allocate new elevator 793 * Allocate new elevator
@@ -788,6 +796,12 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
788 if (!e) 796 if (!e)
789 return 0; 797 return 0;
790 798
799 data = elevator_init_queue(q, e);
800 if (!data) {
801 kobject_put(&e->kobj);
802 return 0;
803 }
804
791 /* 805 /*
792 * Turn on BYPASS and drain all requests w/ elevator private data 806 * Turn on BYPASS and drain all requests w/ elevator private data
793 */ 807 */
@@ -806,19 +820,19 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
806 elv_drain_elevator(q); 820 elv_drain_elevator(q);
807 } 821 }
808 822
809 spin_unlock_irq(q->queue_lock);
810
811 /* 823 /*
812 * unregister old elevator data 824 * Remember old elevator.
813 */ 825 */
814 elv_unregister_queue(q);
815 old_elevator = q->elevator; 826 old_elevator = q->elevator;
816 827
817 /* 828 /*
818 * attach and start new elevator 829 * attach and start new elevator
819 */ 830 */
820 if (elevator_attach(q, e)) 831 elevator_attach(q, e, data);
821 goto fail; 832
833 spin_unlock_irq(q->queue_lock);
834
835 __elv_unregister_queue(old_elevator);
822 836
823 if (elv_register_queue(q)) 837 if (elv_register_queue(q))
824 goto fail_register; 838 goto fail_register;
@@ -837,7 +851,6 @@ fail_register:
837 */ 851 */
838 elevator_exit(e); 852 elevator_exit(e);
839 e = NULL; 853 e = NULL;
840fail:
841 q->elevator = old_elevator; 854 q->elevator = old_elevator;
842 elv_register_queue(q); 855 elv_register_queue(q);
843 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 856 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
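The elevator.c hunks above split elevator_attach() into elevator_init_queue() plus a trivial attach, and reorder elevator_switch() so the incoming scheduler's private data is fully allocated before the running elevator is detached; a failed init now leaves the queue exactly as it was. A minimal userspace sketch of that prepare-first, swap-on-success ordering, using invented stand-in types rather than the kernel request_queue_t/elevator_t:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for request_queue_t and elevator_t, not the kernel types. */
struct elevator { const char *name; void *private_data; };
struct queue    { struct elevator *elv; };

/* Provider-side init: hand back private data, NULL on failure. */
static void *sched_init(struct queue *q, struct elevator *e)
{
	(void)q; (void)e;
	return calloc(1, 64);		/* pretend per-queue scheduler state */
}

static void attach(struct queue *q, struct elevator *e, void *data)
{
	e->private_data = data;
	q->elv = e;
}

/* Swap schedulers: prepare the new one completely before touching the old. */
static int switch_elevator(struct queue *q, struct elevator *new_e)
{
	struct elevator *old = q->elv;
	void *data = sched_init(q, new_e);

	if (!data)			/* failure leaves the old elevator live */
		return -1;

	attach(q, new_e, data);		/* only now is q->elv replaced */
	printf("switched %s -> %s\n", old ? old->name : "none", new_e->name);
	if (old)
		free(old->private_data);
	return 0;
}

int main(void)
{
	struct elevator noop = { "noop", NULL }, cfq = { "cfq", NULL };
	struct queue q = { NULL };

	attach(&q, &noop, calloc(1, 64));
	switch_elevator(&q, &cfq);
	free(q.elv->private_data);
	return 0;
}

In the real patch the attach additionally happens under the queue lock, and the old elevator's sysfs object is unregistered only afterwards, which is what lets the old 'fail:' fallback path go away.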
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index f370e4a7fe6d..56a7c620574f 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -65,16 +65,15 @@ noop_latter_request(request_queue_t *q, struct request *rq)
65 return list_entry(rq->queuelist.next, struct request, queuelist); 65 return list_entry(rq->queuelist.next, struct request, queuelist);
66} 66}
67 67
68static int noop_init_queue(request_queue_t *q, elevator_t *e) 68static void *noop_init_queue(request_queue_t *q, elevator_t *e)
69{ 69{
70 struct noop_data *nd; 70 struct noop_data *nd;
71 71
72 nd = kmalloc(sizeof(*nd), GFP_KERNEL); 72 nd = kmalloc(sizeof(*nd), GFP_KERNEL);
73 if (!nd) 73 if (!nd)
74 return -ENOMEM; 74 return NULL;
75 INIT_LIST_HEAD(&nd->queue); 75 INIT_LIST_HEAD(&nd->queue);
76 e->elevator_data = nd; 76 return nd;
77 return 0;
78} 77}
79 78
80static void noop_exit_queue(elevator_t *e) 79static void noop_exit_queue(elevator_t *e)
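The noop hunk is the smallest illustration of the interface change running through cfq, deadline and noop above: elevator_init_fn no longer returns an errno and stores the result in e->elevator_data itself, it simply returns the private data (NULL on failure) and the caller attaches it. A hedged userspace sketch of the two calling conventions, with invented names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct queue { int id; };		/* stand-in for request_queue_t    */
struct sched_data { int nr; };		/* invented per-queue private data */

/* Old shape: int return, the pointer escapes through a side channel
 * (the kernel version wrote it into e->elevator_data directly). */
static int init_old(struct queue *q, struct sched_data **out)
{
	(void)q;
	*out = calloc(1, sizeof(**out));
	return *out ? 0 : -ENOMEM;
}

/* New shape: return the data itself; NULL means failure, and attaching
 * becomes the caller's decision, made only once init has succeeded. */
static void *init_new(struct queue *q)
{
	(void)q;
	return calloc(1, sizeof(struct sched_data));
}

int main(void)
{
	struct queue q = { 0 };
	struct sched_data *a = NULL;
	void *b;

	if (init_old(&q, &a) == 0)
		printf("old style: %p\n", (void *)a);

	b = init_new(&q);
	if (b)
		printf("new style: %p\n", b);

	free(a);
	free(b);
	return 0;
}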
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index abbdb37a7f5f..f36db22ce1ae 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -577,6 +577,8 @@ acpi_processor_register_performance(struct acpi_processor_performance
577 return_VALUE(-EBUSY); 577 return_VALUE(-EBUSY);
578 } 578 }
579 579
580 WARN_ON(!performance);
581
580 pr->performance = performance; 582 pr->performance = performance;
581 583
582 if (acpi_processor_get_performance_info(pr)) { 584 if (acpi_processor_get_performance_info(pr)) {
@@ -609,7 +611,8 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
609 return_VOID; 611 return_VOID;
610 } 612 }
611 613
612 kfree(pr->performance->states); 614 if (pr->performance)
615 kfree(pr->performance->states);
613 pr->performance = NULL; 616 pr->performance = NULL;
614 617
615 acpi_cpufreq_remove_file(pr); 618 acpi_cpufreq_remove_file(pr);
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f5b01c6d498e..fb919bfb2824 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -41,9 +41,9 @@ obj-$(CONFIG_N_HDLC) += n_hdlc.o
41obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o 41obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
42obj-$(CONFIG_SX) += sx.o generic_serial.o 42obj-$(CONFIG_SX) += sx.o generic_serial.o
43obj-$(CONFIG_RIO) += rio/ generic_serial.o 43obj-$(CONFIG_RIO) += rio/ generic_serial.o
44obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
45obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o 44obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
46obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o 45obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
46obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
47obj-$(CONFIG_RAW_DRIVER) += raw.o 47obj-$(CONFIG_RAW_DRIVER) += raw.o
48obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o 48obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
49obj-$(CONFIG_MMTIMER) += mmtimer.o 49obj-$(CONFIG_MMTIMER) += mmtimer.o
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index ede365d05387..b9371d5bf790 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1384,8 +1384,10 @@ do_it_again:
1384 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode, 1384 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
1385 * we won't get any more characters. 1385 * we won't get any more characters.
1386 */ 1386 */
1387 if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) 1387 if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
1388 n_tty_set_room(tty);
1388 check_unthrottle(tty); 1389 check_unthrottle(tty);
1390 }
1389 1391
1390 if (b - buf >= minimum) 1392 if (b - buf >= minimum)
1391 break; 1393 break;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 128b2632512d..eab5394da666 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -149,7 +149,7 @@ struct cm4000_dev {
149#define ZERO_DEV(dev) \ 149#define ZERO_DEV(dev) \
150 memset(&dev->atr_csum,0, \ 150 memset(&dev->atr_csum,0, \
151 sizeof(struct cm4000_dev) - \ 151 sizeof(struct cm4000_dev) - \
152 /*link*/ sizeof(struct pcmcia_device) - \ 152 /*link*/ sizeof(struct pcmcia_device *) - \
153 /*node*/ sizeof(dev_node_t) - \ 153 /*node*/ sizeof(dev_node_t) - \
154 /*atr*/ MAX_ATR*sizeof(char) - \ 154 /*atr*/ MAX_ATR*sizeof(char) - \
155 /*rbuf*/ 512*sizeof(char) - \ 155 /*rbuf*/ 512*sizeof(char) - \
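The one-character cm4000_cs change matters because ZERO_DEV() computes how many bytes to clear by subtracting the sizes of the leading members from the structure size; the link member is a struct pcmcia_device pointer, so subtracting sizeof(struct pcmcia_device) counts the wrong amount. A standalone illustration of the gap, using invented structures with illustrative sizes:

#include <stdio.h>

struct big { char payload[200]; };	/* plays the role of struct pcmcia_device */

struct dev {
	struct big *link;		/* a pointer, not an embedded struct        */
	char        state[64];		/* the region a ZERO_DEV-style memset hits  */
};

int main(void)
{
	printf("sizeof(struct big)    = %zu\n", sizeof(struct big));
	printf("sizeof(struct big *)  = %zu\n", sizeof(struct big *));
	printf("dev size              = %zu\n", sizeof(struct dev));
	printf("bytes left to clear   = %zu\n",
	       sizeof(struct dev) - sizeof(struct big *));
	return 0;
}

Subtracting sizeof(struct big) instead would give a wildly wrong count (here it would even wrap below zero), which is the same class of bug the patch closes.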
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 8a23fb54c693..5413dc43b9f1 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -845,7 +845,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
845 &sbp2_highlevel, ud->ne->host, &sbp2_ops, 845 &sbp2_highlevel, ud->ne->host, &sbp2_ops,
846 sizeof(struct sbp2_status_block), sizeof(quadlet_t), 846 sizeof(struct sbp2_status_block), sizeof(quadlet_t),
847 0x010000000000ULL, CSR1212_ALL_SPACE_END); 847 0x010000000000ULL, CSR1212_ALL_SPACE_END);
848 if (!scsi_id->status_fifo_addr) { 848 if (scsi_id->status_fifo_addr == ~0ULL) {
849 SBP2_ERR("failed to allocate status FIFO address range"); 849 SBP2_ERR("failed to allocate status FIFO address range");
850 goto failed_alloc; 850 goto failed_alloc;
851 } 851 }
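The sbp2 check changes because the address-space allocator evidently reports failure with an all-ones value rather than 0, so testing !scsi_id->status_fifo_addr both misses real failures and rejects a legal base of 0. A tiny sketch of comparing against an explicit failure sentinel; the sentinel name and the addresses below are invented:

#include <stdint.h>
#include <stdio.h>

#define INVALID_ADDR (~0ULL)	/* invented failure marker; 0 stays a legal address */

/* Toy allocator: returns a region base, or INVALID_ADDR if nothing fits. */
static uint64_t alloc_region(int fail)
{
	return fail ? INVALID_ADDR : 0x010000000000ULL;
}

int main(void)
{
	uint64_t addr = alloc_region(0);

	if (addr == INVALID_ADDR)	/* compare against the sentinel ...      */
		printf("allocation failed\n");
	else
		printf("got region at %#llx\n", (unsigned long long)addr);

	/* ... because `if (!addr)` would flag a valid base of 0 as a failure
	 * and would never notice INVALID_ADDR at all. */
	return 0;
}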
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index a54da42849ae..8406839b91cf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -275,6 +275,7 @@ static void ipoib_ib_handle_wc(struct net_device *dev,
275 spin_lock_irqsave(&priv->tx_lock, flags); 275 spin_lock_irqsave(&priv->tx_lock, flags);
276 ++priv->tx_tail; 276 ++priv->tx_tail;
277 if (netif_queue_stopped(dev) && 277 if (netif_queue_stopped(dev) &&
278 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
278 priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1) 279 priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
279 netif_wake_queue(dev); 280 netif_wake_queue(dev);
280 spin_unlock_irqrestore(&priv->tx_lock, flags); 281 spin_unlock_irqrestore(&priv->tx_lock, flags);
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 9080853fe283..a30084076ac8 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1605,6 +1605,21 @@ mpt_resume(struct pci_dev *pdev)
1605} 1605}
1606#endif 1606#endif
1607 1607
1608static int
1609mpt_signal_reset(int index, MPT_ADAPTER *ioc, int reset_phase)
1610{
1611 if ((MptDriverClass[index] == MPTSPI_DRIVER &&
1612 ioc->bus_type != SPI) ||
1613 (MptDriverClass[index] == MPTFC_DRIVER &&
1614 ioc->bus_type != FC) ||
1615 (MptDriverClass[index] == MPTSAS_DRIVER &&
1616 ioc->bus_type != SAS))
1617 /* make sure we only call the relevant reset handler
1618 * for the bus */
1619 return 0;
1620 return (MptResetHandlers[index])(ioc, reset_phase);
1621}
1622
1608/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1623/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1609/* 1624/*
1610 * mpt_do_ioc_recovery - Initialize or recover MPT adapter. 1625 * mpt_do_ioc_recovery - Initialize or recover MPT adapter.
@@ -1885,14 +1900,14 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1885 if ((ret == 0) && MptResetHandlers[ii]) { 1900 if ((ret == 0) && MptResetHandlers[ii]) {
1886 dprintk((MYIOC_s_INFO_FMT "Calling IOC post_reset handler #%d\n", 1901 dprintk((MYIOC_s_INFO_FMT "Calling IOC post_reset handler #%d\n",
1887 ioc->name, ii)); 1902 ioc->name, ii));
1888 rc += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_POST_RESET); 1903 rc += mpt_signal_reset(ii, ioc, MPT_IOC_POST_RESET);
1889 handlers++; 1904 handlers++;
1890 } 1905 }
1891 1906
1892 if (alt_ioc_ready && MptResetHandlers[ii]) { 1907 if (alt_ioc_ready && MptResetHandlers[ii]) {
1893 drsprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n", 1908 drsprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n",
1894 ioc->name, ioc->alt_ioc->name, ii)); 1909 ioc->name, ioc->alt_ioc->name, ii));
1895 rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET); 1910 rc += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_POST_RESET);
1896 handlers++; 1911 handlers++;
1897 } 1912 }
1898 } 1913 }
@@ -3267,11 +3282,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3267 if (MptResetHandlers[ii]) { 3282 if (MptResetHandlers[ii]) {
3268 dprintk((MYIOC_s_INFO_FMT "Calling IOC pre_reset handler #%d\n", 3283 dprintk((MYIOC_s_INFO_FMT "Calling IOC pre_reset handler #%d\n",
3269 ioc->name, ii)); 3284 ioc->name, ii));
3270 r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_PRE_RESET); 3285 r += mpt_signal_reset(ii, ioc, MPT_IOC_PRE_RESET);
3271 if (ioc->alt_ioc) { 3286 if (ioc->alt_ioc) {
3272 dprintk((MYIOC_s_INFO_FMT "Calling alt-%s pre_reset handler #%d\n", 3287 dprintk((MYIOC_s_INFO_FMT "Calling alt-%s pre_reset handler #%d\n",
3273 ioc->name, ioc->alt_ioc->name, ii)); 3288 ioc->name, ioc->alt_ioc->name, ii));
3274 r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_PRE_RESET); 3289 r += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_PRE_RESET);
3275 } 3290 }
3276 } 3291 }
3277 } 3292 }
@@ -5706,11 +5721,11 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
5706 if (MptResetHandlers[ii]) { 5721 if (MptResetHandlers[ii]) {
5707 dtmprintk((MYIOC_s_INFO_FMT "Calling IOC reset_setup handler #%d\n", 5722 dtmprintk((MYIOC_s_INFO_FMT "Calling IOC reset_setup handler #%d\n",
5708 ioc->name, ii)); 5723 ioc->name, ii));
5709 r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_SETUP_RESET); 5724 r += mpt_signal_reset(ii, ioc, MPT_IOC_SETUP_RESET);
5710 if (ioc->alt_ioc) { 5725 if (ioc->alt_ioc) {
5711 dtmprintk((MYIOC_s_INFO_FMT "Calling alt-%s setup reset handler #%d\n", 5726 dtmprintk((MYIOC_s_INFO_FMT "Calling alt-%s setup reset handler #%d\n",
5712 ioc->name, ioc->alt_ioc->name, ii)); 5727 ioc->name, ioc->alt_ioc->name, ii));
5713 r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_SETUP_RESET); 5728 r += mpt_signal_reset(ii, ioc->alt_ioc, MPT_IOC_SETUP_RESET);
5714 } 5729 }
5715 } 5730 }
5716 } 5731 }
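mpt_signal_reset() is a thin filter in front of the MptResetHandlers[] table: a registered handler only runs when its driver class matches the adapter's bus type, so an SPI-only handler is no longer invoked for an FC or SAS IOC. A userspace sketch of that filter-then-dispatch shape, with invented enums and tables:

#include <stdio.h>

enum bus      { BUS_SPI, BUS_FC, BUS_SAS };
enum drvclass { DRV_SPI, DRV_FC, DRV_SAS, DRV_MAX };

struct adapter { const char *name; enum bus bus_type; };

typedef int (*reset_fn)(struct adapter *a, int phase);

static int spi_reset(struct adapter *a, int phase)
{
	printf("%s: SPI reset handler, phase %d\n", a->name, phase);
	return 1;
}

/* Per-slot class and handler tables, mirroring MptDriverClass[] and
 * MptResetHandlers[] in spirit only. */
static enum drvclass driver_class[DRV_MAX]  = { DRV_SPI, DRV_FC, DRV_SAS };
static reset_fn      reset_handler[DRV_MAX] = { spi_reset, NULL, NULL };

/* Call a slot's handler only if its class matches this adapter's bus. */
static int signal_reset(int index, struct adapter *a, int phase)
{
	if ((driver_class[index] == DRV_SPI && a->bus_type != BUS_SPI) ||
	    (driver_class[index] == DRV_FC  && a->bus_type != BUS_FC)  ||
	    (driver_class[index] == DRV_SAS && a->bus_type != BUS_SAS))
		return 0;			/* wrong class: quietly skip */
	return reset_handler[index] ? reset_handler[index](a, phase) : 0;
}

int main(void)
{
	struct adapter fc_ioc = { "ioc0", BUS_FC };
	int i, handled = 0;

	for (i = 0; i < DRV_MAX; i++)
		handled += signal_reset(i, &fc_ioc, 2);

	/* 0: the SPI handler is filtered out and no FC handler is registered. */
	printf("handlers that ran: %d\n", handled);
	return 0;
}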
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index f2a4d382ea19..3201de053943 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -831,6 +831,7 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
831 return rc; 831 return rc;
832} 832}
833 833
834#ifdef CONFIG_PM
834/* 835/*
835 * spi module resume handler 836 * spi module resume handler
836 */ 837 */
@@ -846,6 +847,7 @@ mptspi_resume(struct pci_dev *pdev)
846 847
847 return rc; 848 return rc;
848} 849}
850#endif
849 851
850/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 852/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
851/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 853/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 5ea133c59afb..7bd4d85d0b42 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -55,6 +55,7 @@ struct i2o_exec_wait {
55 u32 m; /* message id */ 55 u32 m; /* message id */
56 struct i2o_message *msg; /* pointer to the reply message */ 56 struct i2o_message *msg; /* pointer to the reply message */
57 struct list_head list; /* node in global wait list */ 57 struct list_head list; /* node in global wait list */
58 spinlock_t lock; /* lock before modifying */
58}; 59};
59 60
60/* Work struct needed to handle LCT NOTIFY replies */ 61/* Work struct needed to handle LCT NOTIFY replies */
@@ -87,6 +88,7 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
87 return NULL; 88 return NULL;
88 89
89 INIT_LIST_HEAD(&wait->list); 90 INIT_LIST_HEAD(&wait->list);
91 spin_lock_init(&wait->lock);
90 92
91 return wait; 93 return wait;
92}; 94};
@@ -125,6 +127,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
125 DECLARE_WAIT_QUEUE_HEAD(wq); 127 DECLARE_WAIT_QUEUE_HEAD(wq);
126 struct i2o_exec_wait *wait; 128 struct i2o_exec_wait *wait;
127 static u32 tcntxt = 0x80000000; 129 static u32 tcntxt = 0x80000000;
130 long flags;
128 int rc = 0; 131 int rc = 0;
129 132
130 wait = i2o_exec_wait_alloc(); 133 wait = i2o_exec_wait_alloc();
@@ -146,33 +149,28 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
146 wait->tcntxt = tcntxt++; 149 wait->tcntxt = tcntxt++;
147 msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt); 150 msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
148 151
152 wait->wq = &wq;
153 /*
154 * we add elements to the head, because if a entry in the list will
155 * never be removed, we have to iterate over it every time
156 */
157 list_add(&wait->list, &i2o_exec_wait_list);
158
149 /* 159 /*
150 * Post the message to the controller. At some point later it will 160 * Post the message to the controller. At some point later it will
151 * return. If we time out before it returns then complete will be zero. 161 * return. If we time out before it returns then complete will be zero.
152 */ 162 */
153 i2o_msg_post(c, msg); 163 i2o_msg_post(c, msg);
154 164
155 if (!wait->complete) { 165 wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);
156 wait->wq = &wq;
157 /*
158 * we add elements add the head, because if a entry in the list
159 * will never be removed, we have to iterate over it every time
160 */
161 list_add(&wait->list, &i2o_exec_wait_list);
162
163 wait_event_interruptible_timeout(wq, wait->complete,
164 timeout * HZ);
165 166
166 wait->wq = NULL; 167 spin_lock_irqsave(&wait->lock, flags);
167 }
168 168
169 barrier(); 169 wait->wq = NULL;
170 170
171 if (wait->complete) { 171 if (wait->complete)
172 rc = le32_to_cpu(wait->msg->body[0]) >> 24; 172 rc = le32_to_cpu(wait->msg->body[0]) >> 24;
173 i2o_flush_reply(c, wait->m); 173 else {
174 i2o_exec_wait_free(wait);
175 } else {
176 /* 174 /*
177 * We cannot remove it now. This is important. When it does 175 * We cannot remove it now. This is important. When it does
178 * terminate (which it must do if the controller has not 176 * terminate (which it must do if the controller has not
@@ -186,6 +184,13 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
186 rc = -ETIMEDOUT; 184 rc = -ETIMEDOUT;
187 } 185 }
188 186
187 spin_unlock_irqrestore(&wait->lock, flags);
188
189 if (rc != -ETIMEDOUT) {
190 i2o_flush_reply(c, wait->m);
191 i2o_exec_wait_free(wait);
192 }
193
189 return rc; 194 return rc;
190}; 195};
191 196
@@ -213,7 +218,6 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
213{ 218{
214 struct i2o_exec_wait *wait, *tmp; 219 struct i2o_exec_wait *wait, *tmp;
215 unsigned long flags; 220 unsigned long flags;
216 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
217 int rc = 1; 221 int rc = 1;
218 222
219 /* 223 /*
@@ -223,23 +227,24 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
223 * already expired. Not much we can do about that except log it for 227 * already expired. Not much we can do about that except log it for
224 * debug purposes, increase timeout, and recompile. 228 * debug purposes, increase timeout, and recompile.
225 */ 229 */
226 spin_lock_irqsave(&lock, flags);
227 list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { 230 list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
228 if (wait->tcntxt == context) { 231 if (wait->tcntxt == context) {
229 list_del(&wait->list); 232 spin_lock_irqsave(&wait->lock, flags);
230 233
231 spin_unlock_irqrestore(&lock, flags); 234 list_del(&wait->list);
232 235
233 wait->m = m; 236 wait->m = m;
234 wait->msg = msg; 237 wait->msg = msg;
235 wait->complete = 1; 238 wait->complete = 1;
236 239
237 barrier(); 240 if (wait->wq)
238
239 if (wait->wq) {
240 wake_up_interruptible(wait->wq);
241 rc = 0; 241 rc = 0;
242 } else { 242 else
243 rc = -1;
244
245 spin_unlock_irqrestore(&wait->lock, flags);
246
247 if (rc) {
243 struct device *dev; 248 struct device *dev;
244 249
245 dev = &c->pdev->dev; 250 dev = &c->pdev->dev;
@@ -248,15 +253,13 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
248 c->name); 253 c->name);
249 i2o_dma_free(dev, &wait->dma); 254 i2o_dma_free(dev, &wait->dma);
250 i2o_exec_wait_free(wait); 255 i2o_exec_wait_free(wait);
251 rc = -1; 256 } else
252 } 257 wake_up_interruptible(wait->wq);
253 258
254 return rc; 259 return rc;
255 } 260 }
256 } 261 }
257 262
258 spin_unlock_irqrestore(&lock, flags);
259
260 osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name, 263 osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
261 context); 264 context);
262 265
@@ -322,14 +325,9 @@ static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
322static int i2o_exec_probe(struct device *dev) 325static int i2o_exec_probe(struct device *dev)
323{ 326{
324 struct i2o_device *i2o_dev = to_i2o_device(dev); 327 struct i2o_device *i2o_dev = to_i2o_device(dev);
325 struct i2o_controller *c = i2o_dev->iop;
326 328
327 i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); 329 i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
328 330
329 c->exec = i2o_dev;
330
331 i2o_exec_lct_notify(c, c->lct->change_ind + 1);
332
333 device_create_file(dev, &dev_attr_vendor_id); 331 device_create_file(dev, &dev_attr_vendor_id);
334 device_create_file(dev, &dev_attr_product_id); 332 device_create_file(dev, &dev_attr_product_id);
335 333
@@ -523,6 +521,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
523 struct device *dev; 521 struct device *dev;
524 struct i2o_message *msg; 522 struct i2o_message *msg;
525 523
524 down(&c->lct_lock);
525
526 dev = &c->pdev->dev; 526 dev = &c->pdev->dev;
527 527
528 if (i2o_dma_realloc 528 if (i2o_dma_realloc
@@ -545,6 +545,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
545 545
546 i2o_msg_post(c, msg); 546 i2o_msg_post(c, msg);
547 547
548 up(&c->lct_lock);
549
548 return 0; 550 return 0;
549}; 551};
550 552
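The exec-osm rework drops the single static spinlock and the barrier()-guarded polling of wait->complete in favour of a lock embedded in each i2o_exec_wait, so waiter and reply handler settle, under that per-object lock, which side owns and frees the structure. The same handoff can be modelled in portable userspace C with a pthread mutex standing in for the spinlock; this is an analogy with invented names, not the kernel API (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct wait {
	pthread_mutex_t lock;	/* per-wait lock, like the new wait->lock */
	int complete;		/* set by the "reply" side                */
};

static void *completer(void *arg)
{
	struct wait *w = arg;

	usleep(1000);			/* the controller replies a bit later */
	pthread_mutex_lock(&w->lock);
	w->complete = 1;		/* publish the reply under the lock   */
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

int main(void)
{
	struct wait *w = calloc(1, sizeof(*w));
	pthread_t thr;
	int timed_out;

	if (!w)
		return 1;
	pthread_mutex_init(&w->lock, NULL);
	pthread_create(&thr, NULL, completer, w);

	usleep(5000);			/* crude stand-in for the timed wait */

	pthread_mutex_lock(&w->lock);	/* the outcome is decided under the lock */
	timed_out = !w->complete;
	pthread_mutex_unlock(&w->lock);

	pthread_join(thr, NULL);

	if (!timed_out)
		printf("reply arrived: the waiter frees the wait block\n");
	else
		printf("timed out: cleanup is left to the reply handler\n");

	free(w);			/* safe here only because the thread was joined */
	return 0;
}

The key property visible in the hunks above is that wait->wq is cleared and wait->complete examined inside the same lock the reply handler takes, so the wait structure is freed from exactly one side.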
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 492167446936..febbdd4e0605 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -804,8 +804,6 @@ void i2o_iop_remove(struct i2o_controller *c)
804 804
805 /* Ask the IOP to switch to RESET state */ 805 /* Ask the IOP to switch to RESET state */
806 i2o_iop_reset(c); 806 i2o_iop_reset(c);
807
808 put_device(&c->device);
809} 807}
810 808
811/** 809/**
@@ -1059,7 +1057,7 @@ struct i2o_controller *i2o_iop_alloc(void)
1059 1057
1060 snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name); 1058 snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
1061 if (i2o_pool_alloc 1059 if (i2o_pool_alloc
1062 (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4, 1060 (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32),
1063 I2O_MSG_INPOOL_MIN)) { 1061 I2O_MSG_INPOOL_MIN)) {
1064 kfree(c); 1062 kfree(c);
1065 return ERR_PTR(-ENOMEM); 1063 return ERR_PTR(-ENOMEM);
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 003b077c2324..45bcf098e762 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -84,7 +84,7 @@ config MMC_WBSD
84 84
85config MMC_AU1X 85config MMC_AU1X
86 tristate "Alchemy AU1XX0 MMC Card Interface support" 86 tristate "Alchemy AU1XX0 MMC Card Interface support"
87 depends on SOC_AU1X00 && MMC 87 depends on MMC && SOC_AU1200
88 help 88 help
89 This selects the AMD Alchemy(R) Multimedia card interface. 89 This selects the AMD Alchemy(R) Multimedia card interface.
90 If you have a Alchemy platform with a MMC slot, say Y or M here. 90 If you have a Alchemy platform with a MMC slot, say Y or M here.
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index ecccca35c6f4..d1c705b412c2 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -870,13 +870,16 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
870 *data = 0; 870 *data = 0;
871 871
872 /* Hook up test interrupt handler just for this test */ 872 /* Hook up test interrupt handler just for this test */
873 if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) { 873 if (!request_irq(irq, &e1000_test_intr, SA_PROBEIRQ, netdev->name,
874 netdev)) {
874 shared_int = FALSE; 875 shared_int = FALSE;
875 } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ, 876 } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
876 netdev->name, netdev)){ 877 netdev->name, netdev)){
877 *data = 1; 878 *data = 1;
878 return -1; 879 return -1;
879 } 880 }
881 DPRINTK(PROBE,INFO, "testing %s interrupt\n",
882 (shared_int ? "shared" : "unshared"));
880 883
881 /* Disable all the interrupts */ 884 /* Disable all the interrupts */
882 E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF); 885 E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ed15fcaedaf9..97e71a4fe8eb 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3519,7 +3519,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3519 buffer_info = &rx_ring->buffer_info[i]; 3519 buffer_info = &rx_ring->buffer_info[i];
3520 3520
3521 while (rx_desc->status & E1000_RXD_STAT_DD) { 3521 while (rx_desc->status & E1000_RXD_STAT_DD) {
3522 struct sk_buff *skb, *next_skb; 3522 struct sk_buff *skb;
3523 u8 status; 3523 u8 status;
3524#ifdef CONFIG_E1000_NAPI 3524#ifdef CONFIG_E1000_NAPI
3525 if (*work_done >= work_to_do) 3525 if (*work_done >= work_to_do)
@@ -3537,8 +3537,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3537 prefetch(next_rxd); 3537 prefetch(next_rxd);
3538 3538
3539 next_buffer = &rx_ring->buffer_info[i]; 3539 next_buffer = &rx_ring->buffer_info[i];
3540 next_skb = next_buffer->skb;
3541 prefetch(next_skb->data - NET_IP_ALIGN);
3542 3540
3543 cleaned = TRUE; 3541 cleaned = TRUE;
3544 cleaned_count++; 3542 cleaned_count++;
@@ -3668,7 +3666,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3668 struct e1000_buffer *buffer_info, *next_buffer; 3666 struct e1000_buffer *buffer_info, *next_buffer;
3669 struct e1000_ps_page *ps_page; 3667 struct e1000_ps_page *ps_page;
3670 struct e1000_ps_page_dma *ps_page_dma; 3668 struct e1000_ps_page_dma *ps_page_dma;
3671 struct sk_buff *skb, *next_skb; 3669 struct sk_buff *skb;
3672 unsigned int i, j; 3670 unsigned int i, j;
3673 uint32_t length, staterr; 3671 uint32_t length, staterr;
3674 int cleaned_count = 0; 3672 int cleaned_count = 0;
@@ -3697,8 +3695,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3697 prefetch(next_rxd); 3695 prefetch(next_rxd);
3698 3696
3699 next_buffer = &rx_ring->buffer_info[i]; 3697 next_buffer = &rx_ring->buffer_info[i];
3700 next_skb = next_buffer->skb;
3701 prefetch(next_skb->data - NET_IP_ALIGN);
3702 3698
3703 cleaned = TRUE; 3699 cleaned = TRUE;
3704 cleaned_count++; 3700 cleaned_count++;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 705e1229d89d..feb5b223cd60 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2615,6 +2615,18 @@ static int nv_nway_reset(struct net_device *dev)
2615 return ret; 2615 return ret;
2616} 2616}
2617 2617
2618#ifdef NETIF_F_TSO
2619static int nv_set_tso(struct net_device *dev, u32 value)
2620{
2621 struct fe_priv *np = netdev_priv(dev);
2622
2623 if ((np->driver_data & DEV_HAS_CHECKSUM))
2624 return ethtool_op_set_tso(dev, value);
2625 else
2626 return value ? -EOPNOTSUPP : 0;
2627}
2628#endif
2629
2618static struct ethtool_ops ops = { 2630static struct ethtool_ops ops = {
2619 .get_drvinfo = nv_get_drvinfo, 2631 .get_drvinfo = nv_get_drvinfo,
2620 .get_link = ethtool_op_get_link, 2632 .get_link = ethtool_op_get_link,
@@ -2626,6 +2638,10 @@ static struct ethtool_ops ops = {
2626 .get_regs = nv_get_regs, 2638 .get_regs = nv_get_regs,
2627 .nway_reset = nv_nway_reset, 2639 .nway_reset = nv_nway_reset,
2628 .get_perm_addr = ethtool_op_get_perm_addr, 2640 .get_perm_addr = ethtool_op_get_perm_addr,
2641#ifdef NETIF_F_TSO
2642 .get_tso = ethtool_op_get_tso,
2643 .set_tso = nv_set_tso
2644#endif
2629}; 2645};
2630 2646
2631static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 2647static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
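nv_set_tso() gates the generic TSO toggle on the DEV_HAS_CHECKSUM capability: hardware without checksum offload refuses to enable TSO but accepts a request to disable it. A minimal sketch of that conditional ethtool-style op, with an invented capability bit:

#include <errno.h>
#include <stdio.h>

#define DEV_HAS_CHECKSUM 0x01	/* invented capability flag */

struct nic { unsigned caps; int tso_on; };

/* Enable TSO only if the hardware can also do checksum offload. */
static int set_tso(struct nic *n, int value)
{
	if (n->caps & DEV_HAS_CHECKSUM) {
		n->tso_on = !!value;		/* generic path             */
		return 0;
	}
	return value ? -EOPNOTSUPP : 0;		/* disabling is always fine */
}

int main(void)
{
	struct nic plain = { 0, 0 }, fancy = { DEV_HAS_CHECKSUM, 0 };
	int rc;

	printf("enable on plain:  %d\n", set_tso(&plain, 1));	/* -EOPNOTSUPP */
	printf("disable on plain: %d\n", set_tso(&plain, 0));	/* 0           */

	rc = set_tso(&fancy, 1);
	printf("enable on fancy:  %d, tso=%d\n", rc, fancy.tso_on);
	return 0;
}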
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 66e74f740261..bf58db29e2ed 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -107,7 +107,7 @@ static int init_netconsole(void)
107 107
108 if(!configured) { 108 if(!configured) {
109 printk("netconsole: not configured, aborting\n"); 109 printk("netconsole: not configured, aborting\n");
110 return -EINVAL; 110 return 0;
111 } 111 }
112 112
113 if(netpoll_setup(&np)) 113 if(netpoll_setup(&np))
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 4260c2128f47..a8f6bfc96fd2 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1204,7 +1204,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1204 1204
1205 dev->last_rx = jiffies; 1205 dev->last_rx = jiffies;
1206 lp->linux_stats.rx_packets++; 1206 lp->linux_stats.rx_packets++;
1207 lp->linux_stats.rx_bytes += skb->len; 1207 lp->linux_stats.rx_bytes += pkt_len;
1208 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ 1208 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
1209 continue; 1209 continue;
1210 } else { 1210 } else {
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 475dc930380f..0d101a18026a 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -861,6 +861,9 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
861 * give dev_queue_xmit something it can free. 861 * give dev_queue_xmit something it can free.
862 */ 862 */
863 skb2 = skb_clone(skb, GFP_ATOMIC); 863 skb2 = skb_clone(skb, GFP_ATOMIC);
864
865 if (skb2 == NULL)
866 goto abort;
864 } 867 }
865 868
866 ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr)); 869 ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr));
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 49ad60b72657..862c226dbbe2 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.58" 72#define DRV_MODULE_VERSION "3.59"
73#define DRV_MODULE_RELDATE "May 22, 2006" 73#define DRV_MODULE_RELDATE "June 8, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -4485,9 +4485,8 @@ static void tg3_disable_nvram_access(struct tg3 *tp)
4485/* tp->lock is held. */ 4485/* tp->lock is held. */
4486static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) 4486static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4487{ 4487{
4488 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) 4488 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4489 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, 4489 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4490 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4491 4490
4492 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { 4491 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4493 switch (kind) { 4492 switch (kind) {
@@ -4568,13 +4567,12 @@ static int tg3_chip_reset(struct tg3 *tp)
4568 void (*write_op)(struct tg3 *, u32, u32); 4567 void (*write_op)(struct tg3 *, u32, u32);
4569 int i; 4568 int i;
4570 4569
4571 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) { 4570 tg3_nvram_lock(tp);
4572 tg3_nvram_lock(tp); 4571
4573 /* No matching tg3_nvram_unlock() after this because 4572 /* No matching tg3_nvram_unlock() after this because
4574 * chip reset below will undo the nvram lock. 4573 * chip reset below will undo the nvram lock.
4575 */ 4574 */
4576 tp->nvram_lock_cnt = 0; 4575 tp->nvram_lock_cnt = 0;
4577 }
4578 4576
4579 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 4577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 4578 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
@@ -4727,20 +4725,25 @@ static int tg3_chip_reset(struct tg3 *tp)
4727 tw32_f(MAC_MODE, 0); 4725 tw32_f(MAC_MODE, 0);
4728 udelay(40); 4726 udelay(40);
4729 4727
4730 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) { 4728 /* Wait for firmware initialization to complete. */
4731 /* Wait for firmware initialization to complete. */ 4729 for (i = 0; i < 100000; i++) {
4732 for (i = 0; i < 100000; i++) { 4730 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4733 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); 4731 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4734 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4732 break;
4735 break; 4733 udelay(10);
4736 udelay(10); 4734 }
4737 } 4735
4738 if (i >= 100000) { 4736 /* Chip might not be fitted with firmare. Some Sun onboard
4739 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, " 4737 * parts are configured like that. So don't signal the timeout
4740 "firmware will not restart magic=%08x\n", 4738 * of the above loop as an error, but do report the lack of
4741 tp->dev->name, val); 4739 * running firmware once.
4742 return -ENODEV; 4740 */
4743 } 4741 if (i >= 100000 &&
4742 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4743 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4744
4745 printk(KERN_INFO PFX "%s: No firmware running.\n",
4746 tp->dev->name);
4744 } 4747 }
4745 4748
4746 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 4749 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
@@ -9075,9 +9078,6 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
9075{ 9078{
9076 int j; 9079 int j;
9077 9080
9078 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9079 return;
9080
9081 tw32_f(GRC_EEPROM_ADDR, 9081 tw32_f(GRC_EEPROM_ADDR,
9082 (EEPROM_ADDR_FSM_RESET | 9082 (EEPROM_ADDR_FSM_RESET |
9083 (EEPROM_DEFAULT_CLOCK_PERIOD << 9083 (EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -9210,11 +9210,6 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9210{ 9210{
9211 int ret; 9211 int ret;
9212 9212
9213 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9214 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9215 return -EINVAL;
9216 }
9217
9218 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) 9213 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9219 return tg3_nvram_read_using_eeprom(tp, offset, val); 9214 return tg3_nvram_read_using_eeprom(tp, offset, val);
9220 9215
@@ -9447,11 +9442,6 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9447{ 9442{
9448 int ret; 9443 int ret;
9449 9444
9450 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9451 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9452 return -EINVAL;
9453 }
9454
9455 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { 9445 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9456 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 9446 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9457 ~GRC_LCLCTRL_GPIO_OUTPUT1); 9447 ~GRC_LCLCTRL_GPIO_OUTPUT1);
@@ -9578,15 +9568,19 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9578 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 9568 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9579 tp->misc_host_ctrl); 9569 tp->misc_host_ctrl);
9580 9570
9571 /* The memory arbiter has to be enabled in order for SRAM accesses
9572 * to succeed. Normally on powerup the tg3 chip firmware will make
9573 * sure it is enabled, but other entities such as system netboot
9574 * code might disable it.
9575 */
9576 val = tr32(MEMARB_MODE);
9577 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9578
9581 tp->phy_id = PHY_ID_INVALID; 9579 tp->phy_id = PHY_ID_INVALID;
9582 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 9580 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9583 9581
9584 /* Do not even try poking around in here on Sun parts. */ 9582 /* Assume an onboard device by default. */
9585 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) { 9583 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9586 /* All SUN chips are built-in LOMs. */
9587 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9588 return;
9589 }
9590 9584
9591 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9585 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9592 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9586 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
@@ -9686,6 +9680,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9686 9680
9687 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) 9681 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9688 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; 9682 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9683 else
9684 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9689 9685
9690 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 9686 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9691 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; 9687 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
@@ -9834,16 +9830,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
9834 int i; 9830 int i;
9835 u32 magic; 9831 u32 magic;
9836 9832
9837 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9838 /* Sun decided not to put the necessary bits in the
9839 * NVRAM of their onboard tg3 parts :(
9840 */
9841 strcpy(tp->board_part_number, "Sun 570X");
9842 return;
9843 }
9844
9845 if (tg3_nvram_read_swab(tp, 0x0, &magic)) 9833 if (tg3_nvram_read_swab(tp, 0x0, &magic))
9846 return; 9834 goto out_not_found;
9847 9835
9848 if (magic == TG3_EEPROM_MAGIC) { 9836 if (magic == TG3_EEPROM_MAGIC) {
9849 for (i = 0; i < 256; i += 4) { 9837 for (i = 0; i < 256; i += 4) {
@@ -9874,6 +9862,9 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
9874 break; 9862 break;
9875 msleep(1); 9863 msleep(1);
9876 } 9864 }
9865 if (!(tmp16 & 0x8000))
9866 goto out_not_found;
9867
9877 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, 9868 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9878 &tmp); 9869 &tmp);
9879 tmp = cpu_to_le32(tmp); 9870 tmp = cpu_to_le32(tmp);
@@ -9965,37 +9956,6 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9965 } 9956 }
9966} 9957}
9967 9958
9968#ifdef CONFIG_SPARC64
9969static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9970{
9971 struct pci_dev *pdev = tp->pdev;
9972 struct pcidev_cookie *pcp = pdev->sysdata;
9973
9974 if (pcp != NULL) {
9975 int node = pcp->prom_node;
9976 u32 venid;
9977 int err;
9978
9979 err = prom_getproperty(node, "subsystem-vendor-id",
9980 (char *) &venid, sizeof(venid));
9981 if (err == 0 || err == -1)
9982 return 0;
9983 if (venid == PCI_VENDOR_ID_SUN)
9984 return 1;
9985
9986 /* TG3 chips onboard the SunBlade-2500 don't have the
9987 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9988 * are distinguishable from non-Sun variants by being
9989 * named "network" by the firmware. Non-Sun cards will
9990 * show up as being named "ethernet".
9991 */
9992 if (!strcmp(pcp->prom_name, "network"))
9993 return 1;
9994 }
9995 return 0;
9996}
9997#endif
9998
9999static int __devinit tg3_get_invariants(struct tg3 *tp) 9959static int __devinit tg3_get_invariants(struct tg3 *tp)
10000{ 9960{
10001 static struct pci_device_id write_reorder_chipsets[] = { 9961 static struct pci_device_id write_reorder_chipsets[] = {
@@ -10012,11 +9972,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10012 u16 pci_cmd; 9972 u16 pci_cmd;
10013 int err; 9973 int err;
10014 9974
10015#ifdef CONFIG_SPARC64
10016 if (tg3_is_sun_570X(tp))
10017 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
10018#endif
10019
10020 /* Force memory write invalidate off. If we leave it on, 9975 /* Force memory write invalidate off. If we leave it on,
10021 * then on 5700_BX chips we have to enable a workaround. 9976 * then on 5700_BX chips we have to enable a workaround.
10022 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 9977 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
@@ -10312,8 +10267,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10312 if (tp->write32 == tg3_write_indirect_reg32 || 10267 if (tp->write32 == tg3_write_indirect_reg32 ||
10313 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && 10268 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10314 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 10269 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10315 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) || 10270 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10316 (tp->tg3_flags2 & TG3_FLG2_SUN_570X))
10317 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; 10271 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10318 10272
10319 /* Get eeprom hw config before calling tg3_set_power_state(). 10273 /* Get eeprom hw config before calling tg3_set_power_state().
@@ -10594,8 +10548,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
10594#endif 10548#endif
10595 10549
10596 mac_offset = 0x7c; 10550 mac_offset = 0x7c;
10597 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 10551 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10598 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
10599 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 10552 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10600 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 10553 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10601 mac_offset = 0xcc; 10554 mac_offset = 0xcc;
@@ -10622,8 +10575,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
10622 } 10575 }
10623 if (!addr_ok) { 10576 if (!addr_ok) {
10624 /* Next, try NVRAM. */ 10577 /* Next, try NVRAM. */
10625 if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) && 10578 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10626 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10627 !tg3_nvram_read(tp, mac_offset + 4, &lo)) { 10579 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10628 dev->dev_addr[0] = ((hi >> 16) & 0xff); 10580 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10629 dev->dev_addr[1] = ((hi >> 24) & 0xff); 10581 dev->dev_addr[1] = ((hi >> 24) & 0xff);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 0e29b885d449..ff0faab94bd5 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2184,7 +2184,7 @@ struct tg3 {
2184#define TG3_FLAG_INIT_COMPLETE 0x80000000 2184#define TG3_FLAG_INIT_COMPLETE 0x80000000
2185 u32 tg3_flags2; 2185 u32 tg3_flags2;
2186#define TG3_FLG2_RESTART_TIMER 0x00000001 2186#define TG3_FLG2_RESTART_TIMER 0x00000001
2187#define TG3_FLG2_SUN_570X 0x00000002 2187/* 0x00000002 available */
2188#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004 2188#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004
2189#define TG3_FLG2_IS_5788 0x00000008 2189#define TG3_FLG2_IS_5788 0x00000008
2190#define TG3_FLG2_MAX_RXPEND_64 0x00000010 2190#define TG3_FLG2_MAX_RXPEND_64 0x00000010
@@ -2216,6 +2216,7 @@ struct tg3 {
2216#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) 2216#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
2217#define TG3_FLG2_1SHOT_MSI 0x10000000 2217#define TG3_FLG2_1SHOT_MSI 0x10000000
2218#define TG3_FLG2_PHY_JITTER_BUG 0x20000000 2218#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2219#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
2219 2220
2220 u32 split_mode_max_reqs; 2221 u32 split_mode_max_reqs;
2221#define SPLIT_MODE_5704_MAX_REQ 3 2222#define SPLIT_MODE_5704_MAX_REQ 3
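With the Sun 570X special-casing gone, tg3_chip_reset() has to tolerate boards that simply carry no firmware: the mailbox poll is allowed to time out, and the new TG3_FLG2_NO_FWARE_REPORTED bit makes sure the "No firmware running" notice is printed only once per device instead of failing the reset. A compact sketch of that poll-then-report-once pattern; the flag name and loop bound below are illustrative:

#include <stdio.h>

#define FLAG_NO_FW_REPORTED 0x1	/* stands in for TG3_FLG2_NO_FWARE_REPORTED */

struct chip { unsigned flags; int has_fw; unsigned mbox; };

/* Poll a firmware mailbox; treat its silence as a condition, not an error. */
static void wait_for_firmware(struct chip *c)
{
	int i;

	for (i = 0; i < 1000; i++) {
		if (c->has_fw) {		/* pretend the magic value showed up */
			c->mbox = 0xdeadbeef;
			break;
		}
	}

	/* Some boards ship without firmware: note it once and carry on. */
	if (i >= 1000 && !(c->flags & FLAG_NO_FW_REPORTED)) {
		c->flags |= FLAG_NO_FW_REPORTED;
		printf("no firmware running\n");
	}
}

int main(void)
{
	struct chip lom = { 0, 0, 0 };

	wait_for_firmware(&lom);	/* prints the notice        */
	wait_for_firmware(&lom);	/* silent: already reported */
	return 0;
}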
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index bbecba02e697..d0318e525ba7 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -624,25 +624,28 @@ err_destroy_tx0:
624static u16 generate_cookie(struct bcm43xx_dmaring *ring, 624static u16 generate_cookie(struct bcm43xx_dmaring *ring,
625 int slot) 625 int slot)
626{ 626{
627 u16 cookie = 0x0000; 627 u16 cookie = 0xF000;
628 628
629 /* Use the upper 4 bits of the cookie as 629 /* Use the upper 4 bits of the cookie as
630 * DMA controller ID and store the slot number 630 * DMA controller ID and store the slot number
631 * in the lower 12 bits 631 * in the lower 12 bits.
632 * Note that the cookie must never be 0, as this
633 * is a special value used in RX path.
632 */ 634 */
633 switch (ring->mmio_base) { 635 switch (ring->mmio_base) {
634 default: 636 default:
635 assert(0); 637 assert(0);
636 case BCM43xx_MMIO_DMA1_BASE: 638 case BCM43xx_MMIO_DMA1_BASE:
639 cookie = 0xA000;
637 break; 640 break;
638 case BCM43xx_MMIO_DMA2_BASE: 641 case BCM43xx_MMIO_DMA2_BASE:
639 cookie = 0x1000; 642 cookie = 0xB000;
640 break; 643 break;
641 case BCM43xx_MMIO_DMA3_BASE: 644 case BCM43xx_MMIO_DMA3_BASE:
642 cookie = 0x2000; 645 cookie = 0xC000;
643 break; 646 break;
644 case BCM43xx_MMIO_DMA4_BASE: 647 case BCM43xx_MMIO_DMA4_BASE:
645 cookie = 0x3000; 648 cookie = 0xD000;
646 break; 649 break;
647 } 650 }
648 assert(((u16)slot & 0xF000) == 0x0000); 651 assert(((u16)slot & 0xF000) == 0x0000);
@@ -660,16 +663,16 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
660 struct bcm43xx_dmaring *ring = NULL; 663 struct bcm43xx_dmaring *ring = NULL;
661 664
662 switch (cookie & 0xF000) { 665 switch (cookie & 0xF000) {
663 case 0x0000: 666 case 0xA000:
664 ring = dma->tx_ring0; 667 ring = dma->tx_ring0;
665 break; 668 break;
666 case 0x1000: 669 case 0xB000:
667 ring = dma->tx_ring1; 670 ring = dma->tx_ring1;
668 break; 671 break;
669 case 0x2000: 672 case 0xC000:
670 ring = dma->tx_ring2; 673 ring = dma->tx_ring2;
671 break; 674 break;
672 case 0x3000: 675 case 0xD000:
673 ring = dma->tx_ring3; 676 ring = dma->tx_ring3;
674 break; 677 break;
675 default: 678 default:
@@ -839,8 +842,18 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
839 /* We received an xmit status. */ 842 /* We received an xmit status. */
840 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data; 843 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
841 struct bcm43xx_xmitstatus stat; 844 struct bcm43xx_xmitstatus stat;
845 int i = 0;
842 846
843 stat.cookie = le16_to_cpu(hw->cookie); 847 stat.cookie = le16_to_cpu(hw->cookie);
848 while (stat.cookie == 0) {
849 if (unlikely(++i >= 10000)) {
850 assert(0);
851 break;
852 }
853 udelay(2);
854 barrier();
855 stat.cookie = le16_to_cpu(hw->cookie);
856 }
844 stat.flags = hw->flags; 857 stat.flags = hw->flags;
845 stat.cnt1 = hw->cnt1; 858 stat.cnt1 = hw->cnt1;
846 stat.cnt2 = hw->cnt2; 859 stat.cnt2 = hw->cnt2;
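The bcm43xx cookie keeps the DMA ring identifier in the top 4 bits and the slot number in the low 12; after this change the ring codes start at 0xA, so no valid cookie can ever be 0x0000, the value the RX path reserves for "no transmit status yet". A standalone encode/decode sketch of that layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Ring codes deliberately start above zero so a whole cookie is never 0. */
enum ring_id { RING0 = 0xA, RING1 = 0xB, RING2 = 0xC, RING3 = 0xD };

static uint16_t make_cookie(enum ring_id ring, uint16_t slot)
{
	assert((slot & 0xF000) == 0);		/* slot must fit in 12 bits */
	return (uint16_t)((ring << 12) | slot);
}

static void parse_cookie(uint16_t cookie, enum ring_id *ring, uint16_t *slot)
{
	*ring = (enum ring_id)(cookie >> 12);
	*slot = cookie & 0x0FFF;
}

int main(void)
{
	enum ring_id ring;
	uint16_t slot;
	uint16_t c = make_cookie(RING2, 17);

	parse_cookie(c, &ring, &slot);
	printf("cookie %#06x -> ring %#x, slot %u\n", c, (unsigned)ring, (unsigned)slot);
	printf("zero cookie possible? %s\n", make_cookie(RING0, 0) ? "no" : "yes");
	return 0;
}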
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1456759936c5..10e1a905c144 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -285,9 +285,9 @@ static int pci_device_suspend(struct device * dev, pm_message_t state)
285 * Default resume method for devices that have no driver provided resume, 285 * Default resume method for devices that have no driver provided resume,
286 * or not even a driver at all. 286 * or not even a driver at all.
287 */ 287 */
288static void pci_default_resume(struct pci_dev *pci_dev) 288static int pci_default_resume(struct pci_dev *pci_dev)
289{ 289{
290 int retval; 290 int retval = 0;
291 291
292 /* restore the PCI config space */ 292 /* restore the PCI config space */
293 pci_restore_state(pci_dev); 293 pci_restore_state(pci_dev);
@@ -297,18 +297,21 @@ static void pci_default_resume(struct pci_dev *pci_dev)
297 /* if the device was busmaster before the suspend, make it busmaster again */ 297 /* if the device was busmaster before the suspend, make it busmaster again */
298 if (pci_dev->is_busmaster) 298 if (pci_dev->is_busmaster)
299 pci_set_master(pci_dev); 299 pci_set_master(pci_dev);
300
301 return retval;
300} 302}
301 303
302static int pci_device_resume(struct device * dev) 304static int pci_device_resume(struct device * dev)
303{ 305{
306 int error;
304 struct pci_dev * pci_dev = to_pci_dev(dev); 307 struct pci_dev * pci_dev = to_pci_dev(dev);
305 struct pci_driver * drv = pci_dev->driver; 308 struct pci_driver * drv = pci_dev->driver;
306 309
307 if (drv && drv->resume) 310 if (drv && drv->resume)
308 drv->resume(pci_dev); 311 error = drv->resume(pci_dev);
309 else 312 else
310 pci_default_resume(pci_dev); 313 error = pci_default_resume(pci_dev);
311 return 0; 314 return error;
312} 315}
313 316
314static void pci_device_shutdown(struct device *dev) 317static void pci_device_shutdown(struct device *dev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 2329f941a0dc..12286275b1c8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -461,9 +461,23 @@ int
461pci_restore_state(struct pci_dev *dev) 461pci_restore_state(struct pci_dev *dev)
462{ 462{
463 int i; 463 int i;
464 int val;
464 465
465 for (i = 0; i < 16; i++) 466 /*
466 pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]); 467 * The Base Address register should be programmed before the command
468 * register(s)
469 */
470 for (i = 15; i >= 0; i--) {
471 pci_read_config_dword(dev, i * 4, &val);
472 if (val != dev->saved_config_space[i]) {
473 printk(KERN_DEBUG "PM: Writing back config space on "
474 "device %s at offset %x (was %x, writing %x)\n",
475 pci_name(dev), i,
476 val, (int)dev->saved_config_space[i]);
477 pci_write_config_dword(dev,i * 4,
478 dev->saved_config_space[i]);
479 }
480 }
467 pci_restore_msi_state(dev); 481 pci_restore_msi_state(dev);
468 pci_restore_msix_state(dev); 482 pci_restore_msix_state(dev);
469 return 0; 483 return 0;
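pci_restore_state() now walks the sixteen saved config dwords from the highest offset down and writes back only the ones that differ, so the base address registers are reprogrammed before the command register re-enables decoding, and untouched registers are left alone. A userspace model of that loop over an in-memory "config space":

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CFG_DWORDS 16

/* Pretend device: dword 1 plays the command register, 4..9 play the BARs. */
static uint32_t live[CFG_DWORDS];
static uint32_t saved[CFG_DWORDS];

static void restore_config(void)
{
	int i;

	/* Highest offset first: addresses land before "decoding" comes back on. */
	for (i = CFG_DWORDS - 1; i >= 0; i--) {
		if (live[i] != saved[i]) {
			printf("restoring dword %2d: %#010x -> %#010x\n",
			       i, live[i], saved[i]);
			live[i] = saved[i];
		}
	}
}

int main(void)
{
	int i;

	for (i = 0; i < CFG_DWORDS; i++)
		saved[i] = 0x1000u + (uint32_t)i;
	memcpy(live, saved, sizeof(live));

	live[1] = 0;		/* command register cleared by a reset      */
	live[4] = 0xffffffff;	/* a BAR that lost its programming          */

	restore_config();	/* only dwords 4 and 1 are written, 4 first */
	return 0;
}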
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 48d3b3d30c21..74b3124e8247 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1143,6 +1143,12 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
1143{ 1143{
1144 struct pcmcia_socket *s = pcmcia_get_socket(skt); 1144 struct pcmcia_socket *s = pcmcia_get_socket(skt);
1145 1145
1146 if (!s) {
1147 printk(KERN_ERR "PCMCIA obtaining reference to socket %p " \
1148 "failed, event 0x%x lost!\n", skt, event);
1149 return -ENODEV;
1150 }
1151
1146 ds_dbg(1, "ds_event(0x%06x, %d, 0x%p)\n", 1152 ds_dbg(1, "ds_event(0x%06x, %d, 0x%p)\n",
1147 event, priority, skt); 1153 event, priority, skt);
1148 1154
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index f6e7ee04f3dc..8c0d1a6739ad 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -48,33 +48,33 @@ static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm)
48 struct platform_device *pdev = to_platform_device(dev); 48 struct platform_device *pdev = to_platform_device(dev);
49 struct m48t86_ops *ops = pdev->dev.platform_data; 49 struct m48t86_ops *ops = pdev->dev.platform_data;
50 50
51 reg = ops->readb(M48T86_REG_B); 51 reg = ops->readbyte(M48T86_REG_B);
52 52
53 if (reg & M48T86_REG_B_DM) { 53 if (reg & M48T86_REG_B_DM) {
54 /* data (binary) mode */ 54 /* data (binary) mode */
55 tm->tm_sec = ops->readb(M48T86_REG_SEC); 55 tm->tm_sec = ops->readbyte(M48T86_REG_SEC);
56 tm->tm_min = ops->readb(M48T86_REG_MIN); 56 tm->tm_min = ops->readbyte(M48T86_REG_MIN);
57 tm->tm_hour = ops->readb(M48T86_REG_HOUR) & 0x3F; 57 tm->tm_hour = ops->readbyte(M48T86_REG_HOUR) & 0x3F;
58 tm->tm_mday = ops->readb(M48T86_REG_DOM); 58 tm->tm_mday = ops->readbyte(M48T86_REG_DOM);
59 /* tm_mon is 0-11 */ 59 /* tm_mon is 0-11 */
60 tm->tm_mon = ops->readb(M48T86_REG_MONTH) - 1; 60 tm->tm_mon = ops->readbyte(M48T86_REG_MONTH) - 1;
61 tm->tm_year = ops->readb(M48T86_REG_YEAR) + 100; 61 tm->tm_year = ops->readbyte(M48T86_REG_YEAR) + 100;
62 tm->tm_wday = ops->readb(M48T86_REG_DOW); 62 tm->tm_wday = ops->readbyte(M48T86_REG_DOW);
63 } else { 63 } else {
64 /* bcd mode */ 64 /* bcd mode */
65 tm->tm_sec = BCD2BIN(ops->readb(M48T86_REG_SEC)); 65 tm->tm_sec = BCD2BIN(ops->readbyte(M48T86_REG_SEC));
66 tm->tm_min = BCD2BIN(ops->readb(M48T86_REG_MIN)); 66 tm->tm_min = BCD2BIN(ops->readbyte(M48T86_REG_MIN));
67 tm->tm_hour = BCD2BIN(ops->readb(M48T86_REG_HOUR) & 0x3F); 67 tm->tm_hour = BCD2BIN(ops->readbyte(M48T86_REG_HOUR) & 0x3F);
68 tm->tm_mday = BCD2BIN(ops->readb(M48T86_REG_DOM)); 68 tm->tm_mday = BCD2BIN(ops->readbyte(M48T86_REG_DOM));
69 /* tm_mon is 0-11 */ 69 /* tm_mon is 0-11 */
70 tm->tm_mon = BCD2BIN(ops->readb(M48T86_REG_MONTH)) - 1; 70 tm->tm_mon = BCD2BIN(ops->readbyte(M48T86_REG_MONTH)) - 1;
71 tm->tm_year = BCD2BIN(ops->readb(M48T86_REG_YEAR)) + 100; 71 tm->tm_year = BCD2BIN(ops->readbyte(M48T86_REG_YEAR)) + 100;
72 tm->tm_wday = BCD2BIN(ops->readb(M48T86_REG_DOW)); 72 tm->tm_wday = BCD2BIN(ops->readbyte(M48T86_REG_DOW));
73 } 73 }
74 74
75 /* correct the hour if the clock is in 12h mode */ 75 /* correct the hour if the clock is in 12h mode */
76 if (!(reg & M48T86_REG_B_H24)) 76 if (!(reg & M48T86_REG_B_H24))
77 if (ops->readb(M48T86_REG_HOUR) & 0x80) 77 if (ops->readbyte(M48T86_REG_HOUR) & 0x80)
78 tm->tm_hour += 12; 78 tm->tm_hour += 12;
79 79
80 return 0; 80 return 0;
@@ -86,35 +86,35 @@ static int m48t86_rtc_set_time(struct device *dev, struct rtc_time *tm)
86 struct platform_device *pdev = to_platform_device(dev); 86 struct platform_device *pdev = to_platform_device(dev);
87 struct m48t86_ops *ops = pdev->dev.platform_data; 87 struct m48t86_ops *ops = pdev->dev.platform_data;
88 88
89 reg = ops->readb(M48T86_REG_B); 89 reg = ops->readbyte(M48T86_REG_B);
90 90
91 /* update flag and 24h mode */ 91 /* update flag and 24h mode */
92 reg |= M48T86_REG_B_SET | M48T86_REG_B_H24; 92 reg |= M48T86_REG_B_SET | M48T86_REG_B_H24;
93 ops->writeb(reg, M48T86_REG_B); 93 ops->writebyte(reg, M48T86_REG_B);
94 94
95 if (reg & M48T86_REG_B_DM) { 95 if (reg & M48T86_REG_B_DM) {
96 /* data (binary) mode */ 96 /* data (binary) mode */
97 ops->writeb(tm->tm_sec, M48T86_REG_SEC); 97 ops->writebyte(tm->tm_sec, M48T86_REG_SEC);
98 ops->writeb(tm->tm_min, M48T86_REG_MIN); 98 ops->writebyte(tm->tm_min, M48T86_REG_MIN);
99 ops->writeb(tm->tm_hour, M48T86_REG_HOUR); 99 ops->writebyte(tm->tm_hour, M48T86_REG_HOUR);
100 ops->writeb(tm->tm_mday, M48T86_REG_DOM); 100 ops->writebyte(tm->tm_mday, M48T86_REG_DOM);
101 ops->writeb(tm->tm_mon + 1, M48T86_REG_MONTH); 101 ops->writebyte(tm->tm_mon + 1, M48T86_REG_MONTH);
102 ops->writeb(tm->tm_year % 100, M48T86_REG_YEAR); 102 ops->writebyte(tm->tm_year % 100, M48T86_REG_YEAR);
103 ops->writeb(tm->tm_wday, M48T86_REG_DOW); 103 ops->writebyte(tm->tm_wday, M48T86_REG_DOW);
104 } else { 104 } else {
105 /* bcd mode */ 105 /* bcd mode */
106 ops->writeb(BIN2BCD(tm->tm_sec), M48T86_REG_SEC); 106 ops->writebyte(BIN2BCD(tm->tm_sec), M48T86_REG_SEC);
107 ops->writeb(BIN2BCD(tm->tm_min), M48T86_REG_MIN); 107 ops->writebyte(BIN2BCD(tm->tm_min), M48T86_REG_MIN);
108 ops->writeb(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR); 108 ops->writebyte(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR);
109 ops->writeb(BIN2BCD(tm->tm_mday), M48T86_REG_DOM); 109 ops->writebyte(BIN2BCD(tm->tm_mday), M48T86_REG_DOM);
110 ops->writeb(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH); 110 ops->writebyte(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH);
111 ops->writeb(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR); 111 ops->writebyte(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR);
112 ops->writeb(BIN2BCD(tm->tm_wday), M48T86_REG_DOW); 112 ops->writebyte(BIN2BCD(tm->tm_wday), M48T86_REG_DOW);
113 } 113 }
114 114
115 /* update ended */ 115 /* update ended */
116 reg &= ~M48T86_REG_B_SET; 116 reg &= ~M48T86_REG_B_SET;
117 ops->writeb(reg, M48T86_REG_B); 117 ops->writebyte(reg, M48T86_REG_B);
118 118
119 return 0; 119 return 0;
120} 120}
@@ -125,12 +125,12 @@ static int m48t86_rtc_proc(struct device *dev, struct seq_file *seq)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct m48t86_ops *ops = pdev->dev.platform_data;
 
-	reg = ops->readb(M48T86_REG_B);
+	reg = ops->readbyte(M48T86_REG_B);
 
 	seq_printf(seq, "mode\t\t: %s\n",
 		   (reg & M48T86_REG_B_DM) ? "binary" : "bcd");
 
-	reg = ops->readb(M48T86_REG_D);
+	reg = ops->readbyte(M48T86_REG_D);
 
 	seq_printf(seq, "battery\t\t: %s\n",
 		   (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted");
@@ -157,7 +157,7 @@ static int __devinit m48t86_rtc_probe(struct platform_device *dev)
 	platform_set_drvdata(dev, rtc);
 
 	/* read battery status */
-	reg = ops->readb(M48T86_REG_D);
+	reg = ops->readbyte(M48T86_REG_D);
 	dev_info(&dev->dev, "battery %s\n",
 		 (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted");
 
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 74a257b23383..e210f89a2449 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -45,11 +45,11 @@ struct pgid {
 	union {
 		__u8 fc;		/* SPID function code */
 		struct path_state ps;	/* SNID path state */
-	} inf;
+	} __attribute__ ((packed)) inf;
 	union {
 		__u32 cpu_addr	: 16;	/* CPU address */
 		struct extended_cssid ext_cssid;
-	} pgid_high;
+	} __attribute__ ((packed)) pgid_high;
 	__u32 cpu_id	: 24;	/* CPU identification */
 	__u32 cpu_model : 16;	/* CPU model */
 	__u32 tod_high;		/* high word TOD clock */
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 180b3bf8b90d..49ec562d7f60 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -749,7 +749,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
 		/* Unit check but no sense data. Need basic sense. */
 		if (ccw_device_do_sense(cdev, irb) != 0)
 			goto call_handler_unsol;
-		memcpy(irb, &cdev->private->irb, sizeof(struct irb));
+		memcpy(&cdev->private->irb, irb, sizeof(struct irb));
 		cdev->private->state = DEV_STATE_W4SENSE;
 		cdev->private->intparm = 0;
 		return;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index fee843fab1c7..108910f512e4 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -982,6 +982,12 @@ static int device_check(ppa_struct *dev)
 	return -ENODEV;
 }
 
+static int ppa_adjust_queue(struct scsi_device *device)
+{
+	blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
+	return 0;
+}
+
 static struct scsi_host_template ppa_template = {
 	.module			= THIS_MODULE,
 	.proc_name		= "ppa",
@@ -997,6 +1003,7 @@ static struct scsi_host_template ppa_template = {
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.can_queue		= 1,
+	.slave_alloc		= ppa_adjust_queue,
 };
 
 /***************************************************************************
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index f7264fd611c2..cb9082fd7e2f 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -454,7 +454,7 @@ static int sil24_softreset(struct ata_port *ap, int verbose,
 	 */
 	msleep(10);
 
-	prb->ctrl = PRB_CTRL_SRST;
+	prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
 	prb->fis[1] = 0; /* no PM yet */
 
 	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
@@ -551,9 +551,9 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
 
 	if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
 		if (qc->tf.flags & ATA_TFLAG_WRITE)
-			prb->ctrl = PRB_CTRL_PACKET_WRITE;
+			prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_WRITE);
 		else
-			prb->ctrl = PRB_CTRL_PACKET_READ;
+			prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_READ);
 	} else
 		prb->ctrl = 0;
 
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 941c1e15c899..62f8cb7b3d2b 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -165,6 +165,7 @@ static struct {
 	{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
 	{"HP", "C1557A", NULL, BLIST_FORCELUN},
 	{"HP", "C3323-300", "4269", BLIST_NOTQ},
+	{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
 	{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
 	{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
 	{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 764a8b375ead..faee4757c03a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -367,7 +367,7 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
 			  int nsegs, unsigned bufflen, gfp_t gfp)
 {
 	struct request_queue *q = rq->q;
-	int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	unsigned int data_len = 0, len, bytes, off;
 	struct page *page;
 	struct bio *bio = NULL;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 8b6d65e21bae..f3b16066387c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -955,7 +955,8 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
 	list_for_each_entry(rphy, &sas_host->rphy_list, list) {
 		struct sas_phy *parent = dev_to_phy(rphy->dev.parent);
 
-		if (rphy->scsi_target_id == -1)
+		if (rphy->identify.device_type != SAS_END_DEVICE ||
+		    rphy->scsi_target_id == -1)
 			continue;
 
 		if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) &&
@@ -977,7 +978,6 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
 #define SETUP_TEMPLATE(attrb, field, perm, test)			\
 	i->private_##attrb[count] = class_device_attr_##field;		\
 	i->private_##attrb[count].attr.mode = perm;			\
-	i->private_##attrb[count].store = NULL;				\
 	i->attrb[count] = &i->private_##attrb[count];			\
 	if (test)							\
 		count++
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 953eb8c171d6..47ba1a79adcd 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1745,7 +1745,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
 			fbcon_redraw_move(vc, p, 0, t, count);
 			ypan_up_redraw(vc, t, count);
 			if (vc->vc_rows - b > 0)
-				fbcon_redraw_move(vc, p, b - count,
+				fbcon_redraw_move(vc, p, b,
 						  vc->vc_rows - b, b);
 		} else
 			fbcon_redraw_move(vc, p, t + count, b - t - count, t);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 85d166cdcae4..b55b4ea9a676 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -67,12 +67,13 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
 static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
 			 int mode, dev_t dev)
 {
-	struct inode *inode = debugfs_get_inode(dir->i_sb, mode, dev);
+	struct inode *inode;
 	int error = -EPERM;
 
 	if (dentry->d_inode)
 		return -EEXIST;
 
+	inode = debugfs_get_inode(dir->i_sb, mode, dev);
 	if (inode) {
 		d_instantiate(dentry, inode);
 		dget(dentry);
diff --git a/fs/namei.c b/fs/namei.c
index 96723ae83c89..d6e2ee251736 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1080,8 +1080,8 @@ static int fastcall do_path_lookup(int dfd, const char *name,
 	nd->flags = flags;
 	nd->depth = 0;
 
-	read_lock(&current->fs->lock);
 	if (*name=='/') {
+		read_lock(&current->fs->lock);
 		if (current->fs->altroot && !(nd->flags & LOOKUP_NOALT)) {
 			nd->mnt = mntget(current->fs->altrootmnt);
 			nd->dentry = dget(current->fs->altroot);
@@ -1092,33 +1092,35 @@ static int fastcall do_path_lookup(int dfd, const char *name,
 		}
 		nd->mnt = mntget(current->fs->rootmnt);
 		nd->dentry = dget(current->fs->root);
+		read_unlock(&current->fs->lock);
 	} else if (dfd == AT_FDCWD) {
+		read_lock(&current->fs->lock);
 		nd->mnt = mntget(current->fs->pwdmnt);
 		nd->dentry = dget(current->fs->pwd);
+		read_unlock(&current->fs->lock);
 	} else {
 		struct dentry *dentry;
 
 		file = fget_light(dfd, &fput_needed);
 		retval = -EBADF;
 		if (!file)
-			goto unlock_fail;
+			goto out_fail;
 
 		dentry = file->f_dentry;
 
 		retval = -ENOTDIR;
 		if (!S_ISDIR(dentry->d_inode->i_mode))
-			goto fput_unlock_fail;
+			goto fput_fail;
 
 		retval = file_permission(file, MAY_EXEC);
 		if (retval)
-			goto fput_unlock_fail;
+			goto fput_fail;
 
 		nd->mnt = mntget(file->f_vfsmnt);
 		nd->dentry = dget(dentry);
 
 		fput_light(file, fput_needed);
 	}
-	read_unlock(&current->fs->lock);
 	current->total_link_count = 0;
 	retval = link_path_walk(name, nd);
 out:
@@ -1127,13 +1129,12 @@ out:
 			    nd->dentry->d_inode))
 		audit_inode(name, nd->dentry->d_inode, flags);
 	}
+out_fail:
 	return retval;
 
-fput_unlock_fail:
+fput_fail:
 	fput_light(file, fput_needed);
-unlock_fail:
-	read_unlock(&current->fs->lock);
-	return retval;
+	goto out_fail;
 }
 
 int fastcall path_lookup(const char *name, unsigned int flags,
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 9950706abdf8..e1432102be05 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -45,10 +45,8 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 #define hard_smp_processor_id()	__hard_smp_processor_id()
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
-extern cpumask_t cpu_present_mask;
-extern cpumask_t cpu_online_map;
 extern int smp_num_cpus;
-#define cpu_possible_map	cpu_present_mask
+#define cpu_possible_map	cpu_present_map
 
 int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu);
 
diff --git a/include/asm-arm/arch-ixp23xx/memory.h b/include/asm-arm/arch-ixp23xx/memory.h
index 6e19f46d54d1..c85fc06a043c 100644
--- a/include/asm-arm/arch-ixp23xx/memory.h
+++ b/include/asm-arm/arch-ixp23xx/memory.h
@@ -49,7 +49,7 @@ static inline int __ixp23xx_arch_is_coherent(void)
 {
 	extern unsigned int processor_id;
 
-	if (((processor_id & 15) >= 2) || machine_is_roadrunner())
+	if (((processor_id & 15) >= 4) || machine_is_roadrunner())
 		return 1;
 
 	return 0;
diff --git a/include/asm-arm/arch-l7200/serial_l7200.h b/include/asm-arm/arch-l7200/serial_l7200.h
index 238c595d97ea..b1008a9d23e5 100644
--- a/include/asm-arm/arch-l7200/serial_l7200.h
+++ b/include/asm-arm/arch-l7200/serial_l7200.h
@@ -28,7 +28,7 @@
 #define UARTDR		0x00	/* Tx/Rx data */
 #define RXSTAT		0x04	/* Rx status */
 #define H_UBRLCR	0x08	/* mode register high */
-#define M_UBRLCR	0x0C	/* mode reg mid (MSB of buad)*/
+#define M_UBRLCR	0x0C	/* mode reg mid (MSB of baud)*/
 #define L_UBRLCR	0x10	/* mode reg low (LSB of baud)*/
 #define UARTCON		0x14	/* control register */
 #define UARTFLG		0x18	/* flag register */
diff --git a/include/asm-arm/arch-l7200/uncompress.h b/include/asm-arm/arch-l7200/uncompress.h
index 9fcd40aee3e3..04be2a088639 100644
--- a/include/asm-arm/arch-l7200/uncompress.h
+++ b/include/asm-arm/arch-l7200/uncompress.h
@@ -6,7 +6,7 @@
  * Changelog:
  *  05-01-2000	SJH	Created
  *  05-13-2000	SJH	Filled in function bodies
- *  07-26-2000	SJH	Removed hard coded buad rate
+ *  07-26-2000	SJH	Removed hard coded baud rate
  */
 
 #include <asm/hardware.h>
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 358e4d309ceb..c2059a3a0621 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -159,17 +159,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define lazy_mmu_prot_update(pte)	do { } while (0)
 #endif
 
-#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#ifndef __HAVE_ARCH_MOVE_PTE
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
-#else
-#define move_pte(pte, prot, old_addr, new_addr)				\
-({									\
-	pte_t newpte = (pte);						\
-	if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&		\
-			pte_page(pte) == ZERO_PAGE(old_addr))		\
-		newpte = mk_pte(ZERO_PAGE(new_addr), (prot));		\
-	newpte;								\
-})
 #endif
 
 /*
diff --git a/include/asm-mips/addrspace.h b/include/asm-mips/addrspace.h
index 42520cc84b0f..1386af1cb7d9 100644
--- a/include/asm-mips/addrspace.h
+++ b/include/asm-mips/addrspace.h
@@ -129,6 +129,7 @@
 #if defined (CONFIG_CPU_R4300)						\
     || defined (CONFIG_CPU_R4X00)					\
     || defined (CONFIG_CPU_R5000)					\
+    || defined (CONFIG_CPU_RM7000)					\
     || defined (CONFIG_CPU_NEVADA)					\
     || defined (CONFIG_CPU_TX49XX)					\
     || defined (CONFIG_CPU_MIPS64)
diff --git a/include/asm-mips/delay.h b/include/asm-mips/delay.h
index 64dd45150f64..928f30f8c45c 100644
--- a/include/asm-mips/delay.h
+++ b/include/asm-mips/delay.h
@@ -19,20 +19,22 @@ static inline void __delay(unsigned long loops)
 {
 	if (sizeof(long) == 4)
 		__asm__ __volatile__ (
-		".set\tnoreorder\n"
-		"1:\tbnez\t%0,1b\n\t"
-		"subu\t%0,1\n\t"
-		".set\treorder"
+		"	.set	noreorder				\n"
+		"	.align	3					\n"
+		"1:	bnez	%0, 1b					\n"
+		"	subu	%0, 1					\n"
+		"	.set	reorder					\n"
 		: "=r" (loops)
 		: "0" (loops));
 	else if (sizeof(long) == 8)
 		__asm__ __volatile__ (
-		".set\tnoreorder\n"
-		"1:\tbnez\t%0,1b\n\t"
-		"dsubu\t%0,1\n\t"
-		".set\treorder"
-		:"=r" (loops)
-		:"0" (loops));
+		"	.set	noreorder				\n"
+		"	.align	3					\n"
+		"1:	bnez	%0, 1b					\n"
+		"	dsubu	%0, 1					\n"
+		"	.set	reorder					\n"
+		: "=r" (loops)
+		: "0" (loops));
 }
 
 
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index a1eab136ff6c..4035ec79ecd4 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -139,9 +139,11 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
+#ifndef CONFIG_SPARSEMEM
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
 #endif
+#endif
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 4d6bc45df594..087c20769256 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -177,48 +177,67 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 		((swp_entry_t)	{ ((type) << 10) | ((offset) << 15) })
 
 /*
- * Bits 0, 1, 2, 9 and 10 are taken, split up the 27 bits of offset
- * into this range:
+ * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
  */
-#define PTE_FILE_MAX_BITS	27
+#define PTE_FILE_MAX_BITS	28
 
-#define pte_to_pgoff(_pte) \
-	((((_pte).pte >> 3) & 0x3f ) + (((_pte).pte >> 11) << 8 ))
+#define pte_to_pgoff(_pte)	((((_pte).pte >> 1 ) & 0x07) | \
+				 (((_pte).pte >> 2 ) & 0x38) | \
+				 (((_pte).pte >> 10) << 6 ))
 
-#define pgoff_to_pte(off) \
-	((pte_t) { (((off) & 0x3f) << 3) + (((off) >> 8) << 11) + _PAGE_FILE })
+#define pgoff_to_pte(off)	((pte_t) { (((off) & 0x07) << 1 ) | \
+					   (((off) & 0x38) << 2 ) | \
+					   (((off) >> 6 ) << 10) | \
+					   _PAGE_FILE })
 
 #else
 
 /* Swap entries must have VALID and GLOBAL bits cleared. */
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#define __swp_type(x)		(((x).val >> 2) & 0x1f)
+#define __swp_offset(x)		((x).val >> 7)
+#define __swp_entry(type,offset)	\
+		((swp_entry_t)	{ ((type) << 2) | ((offset) << 7) })
+#else
 #define __swp_type(x)		(((x).val >> 8) & 0x1f)
 #define __swp_offset(x)		((x).val >> 13)
 #define __swp_entry(type,offset)	\
 		((swp_entry_t)	{ ((type) << 8) | ((offset) << 13) })
+#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
 
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 /*
- * Bits 0, 1, 2, 7 and 8 are taken, split up the 27 bits of offset
- * into this range:
+ * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
  */
-#define PTE_FILE_MAX_BITS	27
+#define PTE_FILE_MAX_BITS	30
 
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
-	/* fixme */
-#define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f))
-#define pgoff_to_pte(off) \
-	((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)})
+#define pte_to_pgoff(_pte)	((_pte).pte_high >> 2)
+#define pgoff_to_pte(off)	((pte_t) { _PAGE_FILE, (off) << 2 })
 
 #else
-#define pte_to_pgoff(_pte) \
-	((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 ))
+/*
+ * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
+ */
+#define PTE_FILE_MAX_BITS	28
+
+#define pte_to_pgoff(_pte)	((((_pte).pte >> 1) & 0x7) | \
+				 (((_pte).pte >> 2) & 0x8) | \
+				 (((_pte).pte >> 8) << 4))
 
-#define pgoff_to_pte(off) \
-	((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE })
+#define pgoff_to_pte(off)	((pte_t) { (((off) & 0x7) << 1) | \
+					   (((off) & 0x8) << 2) | \
+					   (((off) >> 4) << 8) | \
+					   _PAGE_FILE })
 #endif
 
 #endif
 
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x)	((pte_t) { 0, (x).val })
+#else
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
+#endif
 
 #endif /* _ASM_PGTABLE_32_H */
diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h
index 82166b254b27..2faf5c9ff127 100644
--- a/include/asm-mips/pgtable-64.h
+++ b/include/asm-mips/pgtable-64.h
@@ -224,15 +224,12 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
 /*
- * Bits 0, 1, 2, 7 and 8 are taken, split up the 32 bits of offset
- * into this range:
+ * Bits 0, 4, 6, and 7 are taken.  Let's leave bits 1, 2, 3, and 5 alone to
+ * make things easier, and only use the upper 56 bits for the page offset...
  */
-#define PTE_FILE_MAX_BITS	32
+#define PTE_FILE_MAX_BITS	56
 
-#define pte_to_pgoff(_pte) \
-	((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 ))
-
-#define pgoff_to_pte(off) \
-	((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE })
+#define pte_to_pgoff(_pte)	((_pte).pte >> 8)
+#define pgoff_to_pte(off)	((pte_t) { ((off) << 8) | _PAGE_FILE })
 
 #endif /* _ASM_PGTABLE_64_H */
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 174a3cda8c26..d0af2a3b0152 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -70,7 +70,15 @@ extern unsigned long zero_page_mask;
 #define ZERO_PAGE(vaddr) \
 	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
 
-#define __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#define __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr)				\
+({									\
+	pte_t newpte = (pte);						\
+	if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&		\
+			pte_page(pte) == ZERO_PAGE(old_addr))		\
+		newpte = mk_pte(ZERO_PAGE(new_addr), (prot));		\
+	newpte;								\
+})
 
 extern void paging_init(void);
 
@@ -345,8 +353,9 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte.pte_low &= _PAGE_CHG_MASK;
-	pte.pte_low |= pgprot_val(newprot);
+	pte.pte_high &= ~0x3f;
+	pte.pte_low |= pgprot_val(newprot);
 	pte.pte_high |= pgprot_val(newprot) & 0x3f;
 	return pte;
 }
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 75c6fe7c2126..e14e4b69de21 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -48,7 +48,6 @@ extern struct call_data_struct *call_data;
 #define SMP_CALL_FUNCTION	0x2
 
 extern cpumask_t phys_cpu_present_map;
-extern cpumask_t cpu_online_map;
 #define cpu_possible_map	phys_cpu_present_map
 
 extern cpumask_t cpu_callout_map;
@@ -86,9 +85,9 @@ extern void prom_init_secondary(void);
 extern void plat_smp_setup(void);
 
 /*
- * Called after init_IRQ but before __cpu_up.
+ * Called in smp_prepare_cpus.
  */
-extern void prom_prepare_cpus(unsigned int max_cpus);
+extern void plat_prepare_cpus(unsigned int max_cpus);
 
 /*
  * Last chance for the board code to finish SMP initialization before
diff --git a/include/asm-mips/sparsemem.h b/include/asm-mips/sparsemem.h
new file mode 100644
index 000000000000..795ac6c23203
--- /dev/null
+++ b/include/asm-mips/sparsemem.h
@@ -0,0 +1,14 @@
+#ifndef _MIPS_SPARSEMEM_H
+#define _MIPS_SPARSEMEM_H
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * SECTION_SIZE_BITS		2^N: how big each section will be
+ * MAX_PHYSMEM_BITS		2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS	28
+#define MAX_PHYSMEM_BITS	35
+
+#endif /* CONFIG_SPARSEMEM */
+#endif /* _MIPS_SPARSEMEM_H */
+
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index 40c25e166a9b..1802775568b9 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -11,23 +11,24 @@
 #define __futex_atomic_fixup \
 	".section __ex_table,\"a\"\n"					\
 	"   .align 4\n"							\
-	"   .long  0b,2b,1b,2b\n"					\
+	"   .long  0b,4b,2b,4b,3b,4b\n"					\
 	".previous"
 #else /* __s390x__ */
 #define __futex_atomic_fixup \
 	".section __ex_table,\"a\"\n"					\
 	"   .align 8\n"							\
-	"   .quad  0b,2b,1b,2b\n"					\
+	"   .quad  0b,4b,2b,4b,3b,4b\n"					\
 	".previous"
 #endif /* __s390x__ */
 
 #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
-	asm volatile("   l   %1,0(%6)\n"				\
-		     "0: " insn						\
-		     "   cs  %1,%2,0(%6)\n"				\
-		     "1: jl  0b\n"					\
+	asm volatile("   sacf 256\n"					\
+		     "0: l   %1,0(%6)\n"				\
+		     "1: " insn						\
+		     "2: cs  %1,%2,0(%6)\n"				\
+		     "3: jl  1b\n"					\
 		     "   lhi %0,0\n"					\
-		     "2:\n"						\
+		     "4: sacf 0\n"					\
 		     __futex_atomic_fixup				\
 		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
 		       "=m" (*uaddr)					\
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index db0606c1abd4..bea727904287 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -98,8 +98,8 @@
 #define __LC_KERNEL_ASCE		0xD58
 #define __LC_USER_ASCE			0xD60
 #define __LC_PANIC_STACK		0xD68
-#define __LC_CPUID			0xD90
-#define __LC_CPUADDR			0xD98
+#define __LC_CPUID			0xD80
+#define __LC_CPUADDR			0xD88
 #define __LC_IPLDEV			0xDB8
 #define __LC_JIFFY_TIMER		0xDC0
 #define __LC_CURRENT			0xDD8
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index c44e7466534e..cd464f469a2c 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -689,6 +689,23 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
 #define pte_clear(mm,addr,ptep)		\
 	set_pte_at((mm), (addr), (ptep), __pte(0UL))
 
+#ifdef DCACHE_ALIASING_POSSIBLE
+#define __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr)				\
+({									\
+	pte_t newpte = (pte);						\
+	if (tlb_type != hypervisor && pte_present(pte)) {		\
+		unsigned long this_pfn = pte_pfn(pte);			\
+									\
+		if (pfn_valid(this_pfn) &&				\
+		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
+			flush_dcache_page_all(current->mm,		\
+					      pfn_to_page(this_pfn));	\
+	}								\
+	newpte;								\
+})
+#endif
+
 extern pgd_t swapper_pg_dir[2048];
 extern pmd_t swapper_low_pmd_dir[2048];
 
diff --git a/include/asm-um/irqflags.h b/include/asm-um/irqflags.h
new file mode 100644
index 000000000000..659b9abdfdba
--- /dev/null
+++ b/include/asm-um/irqflags.h
@@ -0,0 +1,6 @@
+#ifndef __UM_IRQFLAGS_H
+#define __UM_IRQFLAGS_H
+
+/* Empty for now */
+
+#endif
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
index bea5a015f667..16c734af9193 100644
--- a/include/asm-um/uaccess.h
+++ b/include/asm-um/uaccess.h
@@ -41,11 +41,11 @@
 
 #define __get_user(x, ptr) \
 ({ \
-	const __typeof__(ptr) __private_ptr = ptr; \
+	const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
 	__typeof__(x) __private_val; \
 	int __private_ret = -EFAULT; \
 	(x) = (__typeof__(*(__private_ptr)))0; \
-	if (__copy_from_user((void *) &__private_val, (__private_ptr), \
+	if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
 	    sizeof(*(__private_ptr))) == 0) { \
 		(x) = (__typeof__(*(__private_ptr))) __private_val; \
 		__private_ret = 0; \
@@ -62,7 +62,7 @@
 
 #define __put_user(x, ptr) \
 ({ \
-	__typeof__(ptr) __private_ptr = ptr; \
+	__typeof__(*(ptr)) __user *__private_ptr = ptr; \
 	__typeof__(*(__private_ptr)) __private_val; \
 	int __private_ret = -EFAULT; \
 	__private_val = (__typeof__(*(__private_ptr))) (x); \
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ad133fcfb239..1713ace808bf 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -21,7 +21,7 @@ typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
 
-typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
+typedef void *(elevator_init_fn) (request_queue_t *, elevator_t *);
 typedef void (elevator_exit_fn) (elevator_t *);
 
 struct elevator_ops
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index dd7d627bf66f..c115e9e840b4 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -1114,8 +1114,11 @@ static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
 
 	mmsg->mfa = readl(c->in_port);
 	if (unlikely(mmsg->mfa >= c->in_queue.len)) {
+		u32 mfa = mmsg->mfa;
+
 		mempool_free(mmsg, c->in_msg.mempool);
-		if(mmsg->mfa == I2O_QUEUE_EMPTY)
+
+		if (mfa == I2O_QUEUE_EMPTY)
 			return ERR_PTR(-EBUSY);
 		return ERR_PTR(-EFAULT);
 	}
diff --git a/include/linux/m48t86.h b/include/linux/m48t86.h
index 9065199319d0..915d6b4f0f89 100644
--- a/include/linux/m48t86.h
+++ b/include/linux/m48t86.h
@@ -11,6 +11,6 @@
 
 struct m48t86_ops
 {
-	void (*writeb)(unsigned char value, unsigned long addr);
-	unsigned char (*readb)(unsigned long addr);
+	void (*writebyte)(unsigned char value, unsigned long addr);
+	unsigned char (*readbyte)(unsigned long addr);
 };
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 6a7621b2b12b..f5fdca1d67e6 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -36,6 +36,7 @@
 #include <linux/nodemask.h>
 
 struct vm_area_struct;
+struct mm_struct;
 
 #ifdef CONFIG_NUMA
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 36740354d4db..2d8337150493 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -15,6 +15,7 @@
 #include <linux/seqlock.h>
 #include <linux/nodemask.h>
 #include <asm/atomic.h>
+#include <asm/page.h>
 
 /* Free memory management - zoned buddy allocator.  */
 #ifndef CONFIG_FORCE_MAX_ZONEORDER
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 4877e35ae202..936ef82ed76a 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -50,7 +50,7 @@
 extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
 extern acpi_status pci_osc_support_set(u32 flags);
 #else
-#if !defined(acpi_status)
+#if !defined(AE_ERROR)
 typedef u32		acpi_status;
 #define AE_ERROR	(acpi_status) (0x0001)
 #endif
diff --git a/mm/slab.c b/mm/slab.c
index d31a06bfbea5..f1b644eb39d8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -207,11 +207,6 @@ typedef unsigned int kmem_bufctl_t;
 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
-
 /*
  * struct slab
  *
@@ -1356,12 +1351,6 @@ void __init kmem_cache_init(void)
 					NULL, NULL);
 	}
 
-	/* Inc off-slab bufctl limit until the ceiling is hit. */
-	if (!(OFF_SLAB(sizes->cs_cachep))) {
-		offslab_limit = sizes->cs_size - sizeof(struct slab);
-		offslab_limit /= sizeof(kmem_bufctl_t);
-	}
-
 	sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -1780,6 +1769,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
 			size_t size, size_t align, unsigned long flags)
 {
+	unsigned long offslab_limit;
 	size_t left_over = 0;
 	int gfporder;
 
@@ -1791,9 +1781,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (!num)
 			continue;
 
-		/* More than offslab_limit objects will cause problems */
-		if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
-			break;
+		if (flags & CFLGS_OFF_SLAB) {
+			/*
+			 * Max number of objs-per-slab for caches which
+			 * use off-slab slabs. Needed to avoid a possible
+			 * looping condition in cache_grow().
+			 */
+			offslab_limit = size - sizeof(struct slab);
+			offslab_limit /= sizeof(kmem_bufctl_t);
+
+			if (num > offslab_limit)
+				break;
+		}
 
 		/* Found something acceptable - save it away */
 		cachep->num = num;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4649a63a8cb6..440a733fe2e9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1061,7 +1061,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
 loop_again:
 	total_scanned = 0;
 	nr_reclaimed = 0;
-	sc.may_writepage = !laptop_mode,
+	sc.may_writepage = !laptop_mode;
 	sc.nr_mapped = read_page_state(nr_mapped);
 
 	inc_page_state(pageoutrun);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ad1c7af65ec8..f5d47bf4f967 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -300,25 +300,20 @@ int br_add_bridge(const char *name)
 	rtnl_lock();
 	if (strchr(dev->name, '%')) {
 		ret = dev_alloc_name(dev, dev->name);
-		if (ret < 0)
-			goto err1;
+		if (ret < 0) {
+			free_netdev(dev);
+			goto out;
+		}
 	}
 
 	ret = register_netdevice(dev);
 	if (ret)
-		goto err2;
+		goto out;
 
 	ret = br_sysfs_addbr(dev);
 	if (ret)
-		goto err3;
-	rtnl_unlock();
-	return 0;
-
- err3:
-	unregister_netdev(dev);
- err2:
-	free_netdev(dev);
- err1:
+		unregister_netdevice(dev);
+ out:
 	rtnl_unlock();
 	return ret;
 }
diff --git a/net/ethernet/Makefile b/net/ethernet/Makefile
index 69b74a9a0fc3..7cef1d8ace27 100644
--- a/net/ethernet/Makefile
+++ b/net/ethernet/Makefile
@@ -3,6 +3,5 @@
 #
 
 obj-y					+= eth.o
-obj-$(CONFIG_SYSCTL)			+= sysctl_net_ether.o
 obj-$(subst m,y,$(CONFIG_IPX))		+= pe2.o
 obj-$(subst m,y,$(CONFIG_ATALK))	+= pe2.o
diff --git a/net/ethernet/sysctl_net_ether.c b/net/ethernet/sysctl_net_ether.c
deleted file mode 100644
index 66b39fc342d2..000000000000
--- a/net/ethernet/sysctl_net_ether.c
+++ /dev/null
@@ -1,14 +0,0 @@
-/* -*- linux-c -*-
- * sysctl_net_ether.c: sysctl interface to net Ethernet subsystem.
- *
- * Begun April 1, 1996, Mike Shaver.
- * Added /proc/sys/net/ether directory entry (empty =) ). [MS]
- */
-
-#include <linux/mm.h>
-#include <linux/sysctl.h>
-#include <linux/if_ether.h>
-
-ctl_table ether_table[] = {
-	{0}
-};
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index b72fa55dfb84..ba7c63ca5bb1 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -135,7 +135,8 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
 
 	/* Do additive increase */
 	if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
-		tp->snd_cwnd_cnt += ca->ai;
+		/* cwnd = cwnd + a(w) / cwnd */
+		tp->snd_cwnd_cnt += ca->ai + 1;
 		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
 			tp->snd_cwnd_cnt -= tp->snd_cwnd;
 			tp->snd_cwnd++;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 743016baa048..f33c9dddaa12 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -642,7 +642,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
  * eventually). The difference is that pulled data not copied, but
  * immediately discarded.
  */
-static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
+static void __pskb_trim_head(struct sk_buff *skb, int len)
 {
 	int i, k, eat;
 
@@ -667,7 +667,6 @@ static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
 	skb->tail = skb->data;
 	skb->data_len -= len;
 	skb->len = skb->data_len;
-	return skb->tail;
 }
 
 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
@@ -676,12 +675,11 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 		return -ENOMEM;
 
-	if (len <= skb_headlen(skb)) {
+	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
+	if (unlikely(len < skb_headlen(skb)))
 		__skb_pull(skb, len);
-	} else {
-		if (__pskb_trim_head(skb, len-skb_headlen(skb)) == NULL)
-			return -ENOMEM;
-	}
+	else
+		__pskb_trim_head(skb, len - skb_headlen(skb));
 
 	TCP_SKB_CB(skb)->seq += len;
 	skb->ip_summed = CHECKSUM_HW;
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 7029618f5719..a16528657b4c 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -884,7 +884,8 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now)
 	if (now) {
 		/* Send down empty frame to trigger speed change */
 		skb = dev_alloc_skb(0);
-		irlap_queue_xmit(self, skb);
+		if (skb)
+			irlap_queue_xmit(self, skb);
 	}
 }
 
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 55538f6b60ff..58a1b6b42ddd 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -37,14 +37,6 @@ struct ctl_table net_table[] = {
 		.mode		= 0555,
 		.child		= core_table,
 	},
-#ifdef CONFIG_NET
-	{
-		.ctl_name	= NET_ETHER,
-		.procname	= "ethernet",
-		.mode		= 0555,
-		.child		= ether_table,
-	},
-#endif
 #ifdef CONFIG_INET
 	{
 		.ctl_name	= NET_IPV4,
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 21dad415b896..90b4cdc0c948 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4422,6 +4422,7 @@ void selinux_complete_init(void)
 
 	/* Set up any superblocks initialized prior to the policy load. */
 	printk(KERN_INFO "SELinux: Setting up existing superblocks.\n");
+	spin_lock(&sb_lock);
 	spin_lock(&sb_security_lock);
 next_sb:
 	if (!list_empty(&superblock_security_head)) {
@@ -4430,19 +4431,20 @@ next_sb:
 					  struct superblock_security_struct,
 					  list);
 		struct super_block *sb = sbsec->sb;
-		spin_lock(&sb_lock);
 		sb->s_count++;
-		spin_unlock(&sb_lock);
 		spin_unlock(&sb_security_lock);
+		spin_unlock(&sb_lock);
 		down_read(&sb->s_umount);
 		if (sb->s_root)
 			superblock_doinit(sb, NULL);
 		drop_super(sb);
+		spin_lock(&sb_lock);
 		spin_lock(&sb_security_lock);
 		list_del_init(&sbsec->list);
 		goto next_sb;
 	}
 	spin_unlock(&sb_security_lock);
+	spin_unlock(&sb_lock);
 }
 
 /* SELinux requires early initialization in order to label