author     Steven Whitehouse <swhiteho@redhat.com>  2006-06-19 09:21:08 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>  2006-06-19 09:21:08 -0400
commit     2b3d6e2f23362b71de173649002d915d14e07622 (patch)
tree       6c1aab54bb16b4f0d06cd3b838e3e41d8488fb98
parent     7d5513d58d072cf38cae9c886653aadac38ef4a9 (diff)
parent     427abfa28afedffadfca9dd8b067eb6d36bac53f (diff)
Merge branch 'master'
-rw-r--r--  Documentation/memory-barriers.txt           348
-rw-r--r--  Makefile                                      2
-rw-r--r--  arch/alpha/Kconfig                            2
-rw-r--r--  arch/arm/mach-ep93xx/ts72xx.c                 8
-rw-r--r--  arch/arm/mach-imx/irq.c                       2
-rw-r--r--  arch/arm/mach-integrator/integrator_cp.c      5
-rw-r--r--  arch/arm/mach-pxa/spitz.c                     1
-rw-r--r--  arch/arm/mach-sa1100/neponset.c               8
-rw-r--r--  arch/arm/mach-versatile/core.c                5
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c           23
-rw-r--r--  arch/i386/kernel/setup.c                     11
-rw-r--r--  arch/powerpc/kernel/prom_init.c              10
-rw-r--r--  arch/powerpc/kernel/signal_32.c              11
-rw-r--r--  arch/powerpc/kernel/signal_64.c               2
-rw-r--r--  arch/powerpc/mm/hash_native_64.c              4
-rw-r--r--  arch/powerpc/platforms/cell/setup.c          11
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c        8
-rw-r--r--  arch/sparc/kernel/smp.c                      11
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c             124
-rw-r--r--  arch/sparc64/kernel/smp.c                    35
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c           1
-rw-r--r--  arch/sparc64/kernel/traps.c                  11
-rw-r--r--  arch/x86_64/kernel/io_apic.c                 30
-rw-r--r--  block/as-iosched.c                           13
-rw-r--r--  block/cfq-iosched.c                          21
-rw-r--r--  block/deadline-iosched.c                     13
-rw-r--r--  block/elevator.c                             55
-rw-r--r--  block/noop-iosched.c                          7
-rw-r--r--  drivers/acpi/processor_perflib.c              5
-rw-r--r--  drivers/cdrom/cdrom.c                         6
-rw-r--r--  drivers/char/Makefile                         2
-rw-r--r--  drivers/char/n_tty.c                          4
-rw-r--r--  drivers/message/fusion/mptspi.c               2
-rw-r--r--  drivers/message/i2o/exec-osm.c               72
-rw-r--r--  drivers/message/i2o/iop.c                     4
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c             5
-rw-r--r--  drivers/net/e1000/e1000_main.c                8
-rw-r--r--  drivers/net/sky2.c                           53
-rw-r--r--  drivers/net/tg3.c                           144
-rw-r--r--  drivers/net/tg3.h                             3
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c   31
-rw-r--r--  drivers/pci/pci-driver.c                     13
-rw-r--r--  drivers/pci/pci.c                            18
-rw-r--r--  drivers/scsi/sata_mv.c                        3
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c                3
-rw-r--r--  drivers/video/console/fbcon.c                 2
-rw-r--r--  fs/bio.c                                      5
-rw-r--r--  fs/debugfs/inode.c                            3
-rw-r--r--  fs/locks.c                                    2
-rw-r--r--  include/asm-arm/arch-pxa/ohci.h               2
-rw-r--r--  include/asm-powerpc/cputable.h                2
-rw-r--r--  include/asm-s390/futex.h                     15
-rw-r--r--  include/linux/elevator.h                      2
-rw-r--r--  include/linux/i2o.h                           5
-rw-r--r--  include/linux/mempolicy.h                     1
-rw-r--r--  include/linux/pci-acpi.h                      2
-rw-r--r--  kernel/exit.c                                 8
-rw-r--r--  kernel/posix-cpu-timers.c                    48
-rw-r--r--  mm/shmem.c                                    2
-rw-r--r--  mm/vmscan.c                                   2
-rw-r--r--  net/dccp/ackvec.c                             1
-rw-r--r--  net/ipv4/ip_forward.c                         1
-rw-r--r--  net/ipv4/tcp_input.c                          4
63 files changed, 856 insertions, 409 deletions
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index c61d8b876fdb..4710845dbac4 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -19,6 +19,7 @@ Contents:
      - Control dependencies.
      - SMP barrier pairing.
      - Examples of memory barrier sequences.
+     - Read memory barriers vs load speculation.
 
  (*) Explicit kernel barriers.
 
@@ -248,7 +249,7 @@ And there are a number of things that _must_ or _must_not_ be assumed:
 we may get either of:
 
         STORE *A = X; Y = LOAD *A;
-        STORE *A = Y;
+        STORE *A = Y = X;
 
 
 =========================
@@ -344,9 +345,12 @@ Memory barriers come in four basic varieties:
 
  (4) General memory barriers.
 
-     A general memory barrier is a combination of both a read memory barrier
-     and a write memory barrier.  It is a partial ordering over both loads and
-     stores.
+     A general memory barrier gives a guarantee that all the LOAD and STORE
+     operations specified before the barrier will appear to happen before all
+     the LOAD and STORE operations specified after the barrier with respect to
+     the other components of the system.
+
+     A general memory barrier is a partial ordering over both loads and stores.
 
      General memory barriers imply both read and write memory barriers, and so
      can substitute for either.
@@ -546,9 +550,9 @@ write barrier, though, again, a general barrier is viable:
         =============== ===============
         a = 1;
         <write barrier>
-        b = 2;              x = a;
+        b = 2;              x = b;
                             <read barrier>
-                            y = b;
+                            y = a;
 
 Or:
 
@@ -563,6 +567,18 @@ Or:
 Basically, the read barrier always has to be there, even though it can be of
 the "weaker" type.
 
+[!] Note that the stores before the write barrier would normally be expected to
+match the loads after the read barrier or data dependency barrier, and vice
+versa:
+
+        CPU 1                           CPU 2
+        ===============                 ===============
+        a = 1;           }----   --->{  v = c
+        b = 2;           }    \ /    {  w = d
+        <write barrier>        \        <read barrier>
+        c = 3;           }    / \    {  x = a;
+        d = 4;           }----   --->{  y = b;
+
 
 EXAMPLES OF MEMORY BARRIER SEQUENCES
 ------------------------------------
@@ -600,8 +616,8 @@ STORE B, STORE C } all occuring before the unordered set of { STORE D, STORE E
         |       |       +------+
         +-------+       :      :
                            |
-                           | Sequence in which stores committed to memory system
-                           | by CPU 1
+                           | Sequence in which stores are committed to the
+                           | memory system by CPU 1
                            V
 
 
@@ -683,14 +699,12 @@ then the following will occur:
         |       :       :       |       |
         |       :       :       | CPU 2 |
         |       +-------+       |       |
-         \      | X->9  |------>|       |
-          \     +-------+       |       |
-           ----->| B->2  |      |       |
-                 +-------+      |       |
-        Makes sure all effects --->  ddddddddddddddddd  |       |
-        prior to the store of C      +-------+          |       |
-        are perceptible to           | B->2  |--------->|       |
-        successive loads             +-------+          |       |
+        |       | X->9  |------>|       |
+        |       +-------+       |       |
+        Makes sure all effects --->  \  ddddddddddddddddd  |       |
+        prior to the store of C       \      +-------+     |       |
+        are perceptible to             ----->| B->2  |---->|       |
+        subsequent loads                     +-------+     |       |
                  :       :       +-------+
 
 
@@ -699,73 +713,239 @@ following sequence of events:
 
         CPU 1                   CPU 2
         ======================= =======================
+                { A = 0, B = 9 }
         STORE A=1
-        STORE B=2
-        STORE C=3
         <write barrier>
-        STORE D=4
-        STORE E=5
-                                LOAD A
+        STORE B=2
                                 LOAD B
-                                LOAD C
-                                LOAD D
-                                LOAD E
+                                LOAD A
 
 Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
 some effectively random order, despite the write barrier issued by CPU 1:
 
-        +-------+       :      :
-        |       |       +------+
-        |       |------>| C=3  | }
-        |       |  :    +------+ }
-        |       |  :    | A=1  | }
-        |       |  :    +------+ }
-        | CPU 1 |  :    | B=2  | }---
-        |       |       +------+ }   \
-        |       |     wwwwwwwwwwwww }  \
-        |       |       +------+ }      \       :      :       +-------+
-        |       |  :    | E=5  | }       \      +-------+      |       |
-        |       |  :    +------+ }        \   { | C->3 |------>|       |
-        |       |------>| D=4  | }         \  { +-------+  :   |       |
-        |       |       +------+            \ { | E->5 |   :   |       |
-        +-------+       :      :             \{ +-------+  :   |       |
-                   Transfer        -->{ | A->1 |   :   | CPU 2 |
-                  from CPU 1          { +-------+  :   |       |
-                   to CPU 2           { | D->4 |   :   |       |
-                                      { +-------+  :   |       |
-                                      { | B->2 |------>|       |
-                                        +-------+      |       |
-                                        :      :       +-------+
-
-
-If, however, a read barrier were to be placed between the load of C and the
-load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
-perceived correctly by CPU 2.
+        +-------+       :      :                :       :
+        |       |       +------+                +-------+
+        |       |------>| A=1  |------      --->| A->0  |
+        |       |       +------+      \         +-------+
+        | CPU 1 |       wwwwwwwwwwwwwwww   \    --->| B->9  |
+        |       |       +------+        |        +-------+
+        |       |------>| B=2  |---     |        :      :
+        |       |       +------+   \    |        :      :       +-------+
+        +-------+       :      :    \   |        +-------+      |       |
+                                     ---------->| B->2  |------>|       |
+                                        |        +-------+      | CPU 2 |
+                                        |        | A->0  |------>|       |
+                                        |        +-------+      |       |
+                                        |        :      :       +-------+
+                                         \       :      :
+                                          \      +-------+
+                                           ---->| A->1  |
+                                                 +-------+
+                                                 :      :
 
-        +-------+       :      :
-        |       |       +------+
-        |       |------>| C=3  | }
-        |       |  :    +------+ }
-        |       |  :    | A=1  | }---
-        |       |  :    +------+ }   \
-        | CPU 1 |  :    | B=2  | }    \
-        |       |       +------+       \
-        |       |       wwwwwwwwwwwwwwww \
-        |       |       +------+          \     :      :       +-------+
-        |       |  :    | E=5  | }         \    +-------+      |       |
-        |       |  :    +------+ }---       \ { | C->3 |------>|       |
-        |       |------>| D=4  | }   \       \{ +-------+  :   |       |
-        |       |       +------+      \   -->{ | B->2 |    :   |       |
-        +-------+       :      :       \     { +-------+   :   |       |
-                                        \    { | A->1 |    :   | CPU 2 |
-                                         \     +-------+       |       |
-        At this point the read ---->     \   rrrrrrrrrrrrrrrrr |       |
-        barrier causes all effects        \    +-------+       |       |
-        prior to the storage of C          \ { | E->5 |    :   |       |
-        to be perceptible to CPU 2        -->{ +-------+   :   |       |
-                                             { | D->4 |------->|       |
-                                               +-------+       |       |
-                                               :      :        +-------+
+
+If, however, a read barrier were to be placed between the load of E and the
+load of A on CPU 2:
+
+        CPU 1                   CPU 2
+        ======================= =======================
+                { A = 0, B = 9 }
+        STORE A=1
+        <write barrier>
+        STORE B=2
+                                LOAD B
+                                <read barrier>
+                                LOAD A
+
+then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
+2:
+
+        +-------+       :      :                :       :
+        |       |       +------+                +-------+
+        |       |------>| A=1  |------      --->| A->0  |
+        |       |       +------+      \         +-------+
+        | CPU 1 |       wwwwwwwwwwwwwwww   \    --->| B->9  |
+        |       |       +------+        |        +-------+
+        |       |------>| B=2  |---     |        :      :
+        |       |       +------+   \    |        :      :       +-------+
+        +-------+       :      :    \   |        +-------+      |       |
+                                     ---------->| B->2  |------>|       |
+                                        |        +-------+      | CPU 2 |
+                                        |        :      :       |       |
+                                        |        :      :       |       |
+        At this point the read ---->    \  rrrrrrrrrrrrrrrrr    |       |
+        barrier causes all effects       \      +-------+       |       |
+        prior to the storage of B ---->   \     | A->1  |------>|       |
+        to be perceptible to CPU 2               +-------+      |       |
+                                                  :      :      +-------+
+
+
+To illustrate this more completely, consider what could happen if the code
+contained a load of A either side of the read barrier:
+
+        CPU 1                   CPU 2
+        ======================= =======================
+                { A = 0, B = 9 }
+        STORE A=1
+        <write barrier>
+        STORE B=2
+                                LOAD B
+                                LOAD A [first load of A]
+                                <read barrier>
+                                LOAD A [second load of A]
+
+Even though the two loads of A both occur after the load of B, they may both
+come up with different values:
+
+        +-------+       :      :                :       :
+        |       |       +------+                +-------+
+        |       |------>| A=1  |------      --->| A->0  |
+        |       |       +------+      \         +-------+
+        | CPU 1 |       wwwwwwwwwwwwwwww   \    --->| B->9  |
+        |       |       +------+        |        +-------+
+        |       |------>| B=2  |---     |        :      :
+        |       |       +------+   \    |        :      :       +-------+
+        +-------+       :      :    \   |        +-------+      |       |
+                                     ---------->| B->2  |------>|       |
+                                        |        +-------+      | CPU 2 |
+                                        |        :      :       |       |
+                                        |        :      :       |       |
+                                        |        +-------+      |       |
+                                        |        | A->0  |------>| 1st  |
+                                        |        +-------+      |       |
+        At this point the read ---->    \  rrrrrrrrrrrrrrrrr    |       |
+        barrier causes all effects       \      +-------+       |       |
+        prior to the storage of B ---->   \     | A->1  |------>| 2nd  |
+        to be perceptible to CPU 2               +-------+      |       |
+                                                  :      :      +-------+
+
+
+But it may be that the update to A from CPU 1 becomes perceptible to CPU 2
+before the read barrier completes anyway:
+
+        +-------+       :      :                :       :
+        |       |       +------+                +-------+
+        |       |------>| A=1  |------      --->| A->0  |
+        |       |       +------+      \         +-------+
+        | CPU 1 |       wwwwwwwwwwwwwwww   \    --->| B->9  |
+        |       |       +------+        |        +-------+
+        |       |------>| B=2  |---     |        :      :
+        |       |       +------+   \    |        :      :       +-------+
+        +-------+       :      :    \   |        +-------+      |       |
+                                     ---------->| B->2  |------>|       |
+                                        |        +-------+      | CPU 2 |
+                                        |        :      :       |       |
+                                         \       :      :       |       |
+                                          \      +-------+      |       |
+                                           ---->| A->1  |------>| 1st  |
+                                                 +-------+      |       |
+                                             rrrrrrrrrrrrrrrrr  |       |
+                                                 +-------+      |       |
+                                                 | A->1  |------>| 2nd  |
+                                                 +-------+      |       |
+                                                 :      :       +-------+
+
+
+The guarantee is that the second load will always come up with A == 1 if the
+load of B came up with B == 2.  No such guarantee exists for the first load of
+A; that may come up with either A == 0 or A == 1.
+
+
+READ MEMORY BARRIERS VS LOAD SPECULATION
+----------------------------------------
+
+Many CPUs speculate with loads: that is they see that they will need to load an
+item from memory, and they find a time where they're not using the bus for any
+other loads, and so do the load in advance - even though they haven't actually
+got to that point in the instruction execution flow yet.  This permits the
+actual load instruction to potentially complete immediately because the CPU
+already has the value to hand.
+
+It may turn out that the CPU didn't actually need the value - perhaps because a
+branch circumvented the load - in which case it can discard the value or just
+cache it for later use.
+
+Consider:
+
+        CPU 1                   CPU 2
+        ======================= =======================
+                                LOAD B
+                                DIVIDE          } Divide instructions generally
+                                DIVIDE          } take a long time to perform
+                                LOAD A
+
+Which might appear as this:
+
+                                                :       :       +-------+
+                                                +-------+       |       |
+                                            --->| B->2  |------>|       |
+                                                +-------+       | CPU 2 |
+                                                :       :DIVIDE |       |
+                                                +-------+       |       |
+        The CPU being busy doing a --->     --->| A->0  |~~~~   |       |
+        division speculates on the              +-------+   ~   |       |
+        LOAD of A                               :       :   ~   |       |
+                                                :       :DIVIDE |       |
+                                                :       :   ~   |       |
+        Once the divisions are complete -->     :       :   ~-->|       |
+        the CPU can then perform the            :       :       |       |
+        LOAD with immediate effect              :       :       +-------+
+
+
+Placing a read barrier or a data dependency barrier just before the second
+load:
+
+        CPU 1                   CPU 2
+        ======================= =======================
+                                LOAD B
+                                DIVIDE
+                                DIVIDE
+                                <read barrier>
+                                LOAD A
+
+will force any value speculatively obtained to be reconsidered to an extent
+dependent on the type of barrier used.  If there was no change made to the
+speculated memory location, then the speculated value will just be used:
+
+                                                :       :       +-------+
+                                                +-------+       |       |
+                                            --->| B->2  |------>|       |
+                                                +-------+       | CPU 2 |
+                                                :       :DIVIDE |       |
+                                                +-------+       |       |
+        The CPU being busy doing a --->     --->| A->0  |~~~~   |       |
+        division speculates on the              +-------+   ~   |       |
+        LOAD of A                               :       :   ~   |       |
+                                                :       :DIVIDE |       |
+                                                :       :   ~   |       |
+                                                :       :   ~   |       |
+                                            rrrrrrrrrrrrrrrr~   |       |
+                                                :       :   ~   |       |
+                                                :       :   ~-->|       |
+                                                :       :       |       |
+                                                :       :       +-------+
+
+
+but if there was an update or an invalidation from another CPU pending, then
+the speculation will be cancelled and the value reloaded:
+
+                                                :       :       +-------+
+                                                +-------+       |       |
+                                            --->| B->2  |------>|       |
+                                                +-------+       | CPU 2 |
+                                                :       :DIVIDE |       |
+                                                +-------+       |       |
+        The CPU being busy doing a --->     --->| A->0  |~~~~   |       |
+        division speculates on the              +-------+   ~   |       |
+        LOAD of A                               :       :   ~   |       |
+                                                :       :DIVIDE |       |
+                                                :       :   ~   |       |
+                                                :       :   ~   |       |
+                                            rrrrrrrrrrrrrrrrr   |       |
+                                                +-------+       |       |
+        The speculation is discarded --->   --->| A->1  |------>|       |
+        and an updated value is                 +-------+       |       |
+        retrieved                               :       :       +-------+
 
 
 ========================
@@ -901,7 +1081,7 @@ IMPLICIT KERNEL MEMORY BARRIERS
 ===============================
 
 Some of the other functions in the linux kernel imply memory barriers, amongst
-which are locking, scheduling and memory allocation functions.
+which are locking and scheduling functions.
 
 This specification is a _minimum_ guarantee; any particular architecture may
 provide more substantial guarantees, but these may not be relied upon outside
@@ -966,6 +1146,20 @@ equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
     barriers is that the effects instructions outside of a critical section may
     seep into the inside of the critical section.
 
+A LOCK followed by an UNLOCK may not be assumed to be full memory barrier
+because it is possible for an access preceding the LOCK to happen after the
+LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
+two accesses can themselves then cross:
+
+        *A = a;
+        LOCK
+        UNLOCK
+        *B = b;
+
+may occur as:
+
+        LOCK, STORE *B, STORE *A, UNLOCK
+
 Locks and semaphores may not provide any guarantee of ordering on UP compiled
 systems, and so cannot be counted on in such a situation to actually achieve
 anything at all - especially with respect to I/O accesses - unless combined
@@ -1016,8 +1210,6 @@ Other functions that imply barriers:
 
  (*) schedule() and similar imply full memory barriers.
 
- (*) Memory allocation and release functions imply full memory barriers.
-
 
 =================================
 INTER-CPU LOCKING BARRIER EFFECTS
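
The barrier-pairing rule described in the memory-barriers.txt hunks above can
be illustrated with a small producer/consumer sketch. This is illustrative
only, not part of the commit; it assumes the stock smp_wmb()/smp_rmb()
primitives that the document itself covers:

        /* CPU 1 (producer): make the data visible before the flag. */
        static int data;
        static int flag;

        static void producer(void)
        {
                data = 42;
                smp_wmb();              /* order STORE data before STORE flag */
                flag = 1;
        }

        /* CPU 2 (consumer): the read barrier pairs with the producer's
         * write barrier; without it, flag == 1 may be observed together
         * with a stale value of data, exactly as in the A/B examples in
         * the updated text. */
        static int consumer(void)
        {
                if (flag) {
                        smp_rmb();      /* pairs with smp_wmb() above */
                        return data;
                }
                return -1;
        }

As the new "[!] Note" hunk stresses, the stores before the write barrier are
expected to match the loads after the read barrier, and vice versa.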
diff --git a/Makefile b/Makefile
index a3a7baad8555..1700d3f6ea22 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 17
-EXTRAVERSION =-rc6
+EXTRAVERSION =
 NAME=Crazed Snow-Weasel
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 8290b69da202..213c7850d5fb 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -453,7 +453,7 @@ config ALPHA_IRONGATE
 
 config GENERIC_HWEIGHT
         bool
-        default y if !ALPHA_EV6 && !ALPHA_EV67
+        default y if !ALPHA_EV67
 
 config ALPHA_AVANTI
         bool
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 9be01b0c3f48..e24566b88a78 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -111,21 +111,21 @@ static void __init ts72xx_map_io(void)
         }
 }
 
-static unsigned char ts72xx_rtc_readb(unsigned long addr)
+static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
 {
         __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
         return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
 }
 
-static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr)
+static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr)
 {
         __raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
         __raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
 }
 
 static struct m48t86_ops ts72xx_rtc_ops = {
-        .readb          = ts72xx_rtc_readb,
-        .writeb         = ts72xx_rtc_writeb,
+        .readbyte       = ts72xx_rtc_readbyte,
+        .writebyte      = ts72xx_rtc_writebyte,
 };
 
 static struct platform_device ts72xx_rtc_device = {
diff --git a/arch/arm/mach-imx/irq.c b/arch/arm/mach-imx/irq.c
index eeb8a6d4a399..a5de5f1da9f2 100644
--- a/arch/arm/mach-imx/irq.c
+++ b/arch/arm/mach-imx/irq.c
@@ -127,7 +127,7 @@ static void
 imx_gpio_ack_irq(unsigned int irq)
 {
         DEBUG_IRQ("%s: irq %d\n", __FUNCTION__, irq);
-        ISR(IRQ_TO_REG(irq)) |= 1 << ((irq - IRQ_GPIOA(0)) % 32);
+        ISR(IRQ_TO_REG(irq)) = 1 << ((irq - IRQ_GPIOA(0)) % 32);
 }
 
 static void
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index a0724f2b24ce..9f55f5ae1044 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -232,8 +232,6 @@ static void __init intcp_init_irq(void)
         for (i = IRQ_PIC_START; i <= IRQ_PIC_END; i++) {
                 if (i == 11)
                         i = 22;
-                if (i == IRQ_CP_CPPLDINT)
-                        i++;
                 if (i == 29)
                         break;
                 set_irq_chip(i, &pic_chip);
@@ -259,8 +257,7 @@ static void __init intcp_init_irq(void)
                 set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
         }
 
-        set_irq_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
-        pic_unmask_irq(IRQ_CP_CPPLDINT);
+        set_irq_chained_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
 }
 
 /*
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 19b372df544a..44bcb8097c7a 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -371,6 +371,7 @@ static int spitz_ohci_init(struct device *dev)
 static struct pxaohci_platform_data spitz_ohci_platform_data = {
         .port_mode      = PMM_NPS_MODE,
         .init           = spitz_ohci_init,
+        .power_budget   = 150,
 };
 
 
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 9e02bc3712a0..af6d2775cf82 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -59,6 +59,14 @@ neponset_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *reg
         if (irr & (IRR_ETHERNET | IRR_USAR)) {
                 desc->chip->mask(irq);
 
+                /*
+                 * Ack the interrupt now to prevent re-entering
+                 * this neponset handler.  Again, this is safe
+                 * since we'll check the IRR register prior to
+                 * leaving.
+                 */
+                desc->chip->ack(irq);
+
                 if (irr & IRR_ETHERNET) {
                         d = irq_desc + IRQ_NEPONSET_SMC9196;
                         desc_handle_irq(IRQ_NEPONSET_SMC9196, d, regs);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 799697d32dec..cebd48a3dae4 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -112,10 +112,9 @@ void __init versatile_init_irq(void)
 {
         unsigned int i;
 
-        vic_init(VA_VIC_BASE, IRQ_VIC_START, ~(1 << 31));
+        vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0);
 
-        set_irq_handler(IRQ_VICSOURCE31, sic_handle_irq);
-        enable_irq(IRQ_VICSOURCE31);
+        set_irq_chained_handler(IRQ_VICSOURCE31, sic_handle_irq);
 
         /* Do second interrupt controller */
         writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 2e3b643a4dc4..1649a175a206 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -5,17 +5,34 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
+
 #include <asm/pci-direct.h>
 #include <asm/acpi.h>
 #include <asm/apic.h>
 
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+        nvidia_hpet_detected = 1;
+        return 0;
+}
+#endif
+
 static int __init check_bridge(int vendor, int device)
 {
 #ifdef CONFIG_ACPI
-        /* According to Nvidia all timer overrides are bogus. Just ignore
-           them all. */
+        /* According to Nvidia all timer overrides are bogus unless HPET
+           is enabled. */
         if (vendor == PCI_VENDOR_ID_NVIDIA) {
-                acpi_skip_timer_override = 1;
+                nvidia_hpet_detected = 0;
+                acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+                if (nvidia_hpet_detected == 0) {
+                        acpi_skip_timer_override = 1;
+                }
         }
 #endif
         if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 846e1639ef7c..dd6b0e3386ce 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1547,15 +1547,18 @@ void __init setup_arch(char **cmdline_p)
         if (efi_enabled)
                 efi_map_memmap();
 
-#ifdef CONFIG_X86_IO_APIC
-        check_acpi_pci();       /* Checks more than just ACPI actually */
-#endif
-
 #ifdef CONFIG_ACPI
         /*
          * Parse the ACPI tables for possible boot-time SMP configuration.
          */
         acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+        check_acpi_pci();       /* Checks more than just ACPI actually */
+#endif
+
+#ifdef CONFIG_ACPI
         acpi_boot_init();
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 41e9ab40cd54..f70bd090dacd 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -822,6 +822,7 @@ static void __init prom_send_capabilities(void)
         /* try calling the ibm,client-architecture-support method */
         if (call_prom_ret("call-method", 3, 2, &ret,
                           ADDR("ibm,client-architecture-support"),
+                          root,
                           ADDR(ibm_architecture_vec)) == 0) {
                 /* the call exists... */
                 if (ret)
@@ -1622,6 +1623,15 @@ static int __init prom_find_machine_type(void)
                         if (strstr(p, RELOC("Power Macintosh")) ||
                             strstr(p, RELOC("MacRISC")))
                                 return PLATFORM_POWERMAC;
+#ifdef CONFIG_PPC64
+                        /* We must make sure we don't detect the IBM Cell
+                         * blades as pSeries due to some firmware issues,
+                         * so we do it here.
+                         */
+                        if (strstr(p, RELOC("IBM,CBEA")) ||
+                            strstr(p, RELOC("IBM,CPBW-1.0")))
+                                return PLATFORM_GENERIC;
+#endif /* CONFIG_PPC64 */
                         i += sl + 1;
                 }
         }
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 01e3c08cb550..8fdeca2d4597 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -803,10 +803,13 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
                 if (__get_user(cmcp, &ucp->uc_regs))
                         return -EFAULT;
                 mcp = (struct mcontext __user *)(u64)cmcp;
+                /* no need to check access_ok(mcp), since mcp < 4GB */
         }
 #else
         if (__get_user(mcp, &ucp->uc_regs))
                 return -EFAULT;
+        if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
+                return -EFAULT;
 #endif
         restore_sigmask(&set);
         if (restore_user_regs(regs, mcp, sig))
@@ -908,13 +911,14 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 {
         struct sig_dbg_op op;
         int i;
+        unsigned char tmp;
         unsigned long new_msr = regs->msr;
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
         unsigned long new_dbcr0 = current->thread.dbcr0;
 #endif
 
         for (i=0; i<ndbg; i++) {
-                if (__copy_from_user(&op, dbg, sizeof(op)))
+                if (copy_from_user(&op, dbg + i, sizeof(op)))
                         return -EFAULT;
                 switch (op.dbg_type) {
                 case SIG_DBG_SINGLE_STEPPING:
@@ -959,6 +963,11 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
         current->thread.dbcr0 = new_dbcr0;
 #endif
 
+        if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
+            || __get_user(tmp, (u8 __user *) ctx)
+            || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
+                return -EFAULT;
+
         /*
          * If we get a fault copying the context into the kernel's
          * image of the user's registers, we can't just return -EFAULT
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 27f65b95184d..c2db642f4cdd 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -182,6 +182,8 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
         err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
         if (err)
                 return err;
+        if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
+                return -EFAULT;
         /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
         if (v_regs != 0 && (msr & MSR_VEC) != 0)
                 err |= __copy_from_user(current->thread.vr, v_regs,
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 33654d1b1b43..994856e55b7c 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -52,7 +52,7 @@ static inline void __tlbie(unsigned long va, unsigned int psize)
         default:
                 penc = mmu_psize_defs[psize].penc;
                 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
-                va |= (0x7f >> (8 - penc)) << 12;
+                va |= penc << 12;
                 asm volatile("tlbie %0,1" : : "r" (va) : "memory");
                 break;
         }
@@ -74,7 +74,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
         default:
                 penc = mmu_psize_defs[psize].penc;
                 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
-                va |= (0x7f >> (8 - penc)) << 12;
+                va |= penc << 12;
                 asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
                              : : "r"(va) : "memory");
                 break;
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 6574b22b3cf3..fd3e5609e3e0 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -125,14 +125,13 @@ static void __init cell_init_early(void)
 
 static int __init cell_probe(void)
 {
-        /* XXX This is temporary, the Cell maintainer will come up with
-         * more appropriate detection logic
-         */
         unsigned long root = of_get_flat_dt_root();
-        if (!of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
-                return 0;
 
-        return 1;
+        if (of_flat_dt_is_compatible(root, "IBM,CBEA") ||
+            of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+                return 1;
+
+        return 0;
 }
 
 /*
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 5f79f01c44f2..3ba87835757e 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -389,6 +389,7 @@ static int __init pSeries_probe_hypertas(unsigned long node,
 
 static int __init pSeries_probe(void)
 {
+        unsigned long root = of_get_flat_dt_root();
         char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
                                           "device_type", NULL);
         if (dtype == NULL)
@@ -396,6 +397,13 @@ static int __init pSeries_probe(void)
         if (strcmp(dtype, "chrp"))
                 return 0;
 
+        /* Cell blades firmware claims to be chrp while it's not. Until this
+         * is fixed, we need to avoid those here.
+         */
+        if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
+            of_flat_dt_is_compatible(root, "IBM,CBEA"))
+                return 0;
+
         DBG("pSeries detected, looking for LPAR capability...\n");
 
         /* Now try to figure out if we are running on LPAR */
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index a93f5da6855d..40b42c88e6a7 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -69,6 +69,17 @@ void __init smp_store_cpu_info(int id)
69 "clock-frequency", 0); 69 "clock-frequency", 0);
70 cpu_data(id).prom_node = cpu_node; 70 cpu_data(id).prom_node = cpu_node;
71 cpu_data(id).mid = cpu_get_hwmid(cpu_node); 71 cpu_data(id).mid = cpu_get_hwmid(cpu_node);
72
73 /* this is required to tune the scheduler correctly */
74 /* is it possible to have CPUs with different cache sizes? */
75 if (id == boot_cpu_id) {
76 int cache_line,cache_nlines;
77 cache_line = 0x20;
78 cache_line = prom_getintdefault(cpu_node, "ecache-line-size", cache_line);
79 cache_nlines = 0x8000;
80 cache_nlines = prom_getintdefault(cpu_node, "ecache-nlines", cache_nlines);
81 max_cache_size = cache_line * cache_nlines;
82 }
72 if (cpu_data(id).mid < 0) 83 if (cpu_data(id).mid < 0)
73 panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); 84 panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
74} 85}
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 2b7a1f316a93..0c0895202970 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -599,18 +599,128 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
 
 /* SUN4V PCI configuration space accessors. */
 
-static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
+struct pdev_entry {
+        struct pdev_entry       *next;
+        u32                     devhandle;
+        unsigned int            bus;
+        unsigned int            device;
+        unsigned int            func;
+};
+
+#define PDEV_HTAB_SIZE  16
+#define PDEV_HTAB_MASK  (PDEV_HTAB_SIZE - 1)
+static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
+
+static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
 {
-        if (bus == pbm->pci_first_busno) {
-                if (device == 0 && func == 0)
-                        return 0;
-                return 1;
+        unsigned int val;
+
+        val = (devhandle ^ (devhandle >> 4));
+        val ^= bus;
+        val ^= device;
+        val ^= func;
+
+        return val & PDEV_HTAB_MASK;
+}
+
+static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+        struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
+        struct pdev_entry **slot;
+
+        if (!p)
+                return -ENOMEM;
+
+        slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+        p->next = *slot;
+        *slot = p;
+
+        p->devhandle = devhandle;
+        p->bus = bus;
+        p->device = device;
+        p->func = func;
+
+        return 0;
+}
+
+/* Recursively descend into the OBP device tree, rooted at toplevel_node,
+ * looking for a PCI device matching bus and devfn.
+ */
+static int obp_find(struct linux_prom_pci_registers *pregs, int toplevel_node, unsigned int bus, unsigned int devfn)
+{
+        toplevel_node = prom_getchild(toplevel_node);
+
+        while (toplevel_node != 0) {
+                int ret = obp_find(pregs, toplevel_node, bus, devfn);
+
+                if (ret != 0)
+                        return ret;
+
+                ret = prom_getproperty(toplevel_node, "reg", (char *) pregs,
+                                       sizeof(*pregs) * PROMREG_MAX);
+                if (ret == 0 || ret == -1)
+                        goto next_sibling;
+
+                if (((pregs[0].phys_hi >> 16) & 0xff) == bus &&
+                    ((pregs[0].phys_hi >> 8) & 0xff) == devfn)
+                        break;
+
+        next_sibling:
+                toplevel_node = prom_getsibling(toplevel_node);
+        }
+
+        return toplevel_node;
+}
+
+static int pdev_htab_populate(struct pci_pbm_info *pbm)
+{
+        struct linux_prom_pci_registers pr[PROMREG_MAX];
+        u32 devhandle = pbm->devhandle;
+        unsigned int bus;
+
+        for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
+                unsigned int devfn;
+
+                for (devfn = 0; devfn < 256; devfn++) {
+                        unsigned int device = PCI_SLOT(devfn);
+                        unsigned int func = PCI_FUNC(devfn);
+
+                        if (obp_find(pr, pbm->prom_node, bus, devfn)) {
+                                int err = pdev_htab_add(devhandle, bus,
+                                                        device, func);
+                                if (err)
+                                        return err;
+                        }
+                }
+        }
+
+        return 0;
+}
+
+static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+        struct pdev_entry *p;
+
+        p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+        while (p) {
+                if (p->devhandle == devhandle &&
+                    p->bus == bus &&
+                    p->device == device &&
+                    p->func == func)
+                        break;
+
+                p = p->next;
         }
 
+        return p;
+}
+
+static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
+{
         if (bus < pbm->pci_first_busno ||
             bus > pbm->pci_last_busno)
                 return 1;
-        return 0;
+        return pdev_find(pbm->devhandle, bus, device, func) == NULL;
 }
 
 static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
@@ -1063,6 +1173,8 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32
 
         pci_sun4v_get_bus_range(pbm);
         pci_sun4v_iommu_init(pbm);
+
+        pdev_htab_populate(pbm);
 }
 
 void sun4v_pci_init(int node, char *model_name)
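
As a quick worked example of the hashing introduced above (the device numbers
are hypothetical, chosen only to exercise pdev_hashfn() as defined in this
hunk):

        /*
         * devhandle = 0x80, bus = 2, device = 3, func = 1:
         *
         *      val  = 0x80 ^ (0x80 >> 4)       -> 0x88
         *      val ^= 2                        -> 0x8a
         *      val ^= 3                        -> 0x89
         *      val ^= 1                        -> 0x88
         *      val & PDEV_HTAB_MASK            -> 0x88 & 15 = slot 8
         *
         * pdev_htab_add() chains new entries at the head of slot 8, and
         * pdev_find() walks that chain on each config space access.
         */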
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 4e8cd79156e0..f03d52d0b88d 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1287,6 +1287,40 @@ int setup_profiling_timer(unsigned int multiplier)
         return 0;
 }
 
+static void __init smp_tune_scheduling(void)
+{
+        int instance, node;
+        unsigned int def, smallest = ~0U;
+
+        def = ((tlb_type == hypervisor) ?
+               (3 * 1024 * 1024) :
+               (4 * 1024 * 1024));
+
+        instance = 0;
+        while (!cpu_find_by_instance(instance, &node, NULL)) {
+                unsigned int val;
+
+                val = prom_getintdefault(node, "ecache-size", def);
+                if (val < smallest)
+                        smallest = val;
+
+                instance++;
+        }
+
+        /* Any value less than 256K is nonsense.  */
+        if (smallest < (256U * 1024U))
+                smallest = 256 * 1024;
+
+        max_cache_size = smallest;
+
+        if (smallest < 1U * 1024U * 1024U)
+                printk(KERN_INFO "Using max_cache_size of %uKB\n",
+                       smallest / 1024U);
+        else
+                printk(KERN_INFO "Using max_cache_size of %uMB\n",
+                       smallest / 1024U / 1024U);
+}
+
 /* Constrain the number of cpus to max_cpus.  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
@@ -1322,6 +1356,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
         }
 
         smp_store_cpu_info(boot_cpu_id);
+        smp_tune_scheduling();
 }
 
 /* Set this up early so that things like the scheduler can init
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 62d8a99271ea..38e569f786dd 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -297,7 +297,6 @@ EXPORT_SYMBOL(svr4_getcontext);
 EXPORT_SYMBOL(svr4_setcontext);
 EXPORT_SYMBOL(compat_sys_ioctl);
 EXPORT_SYMBOL(sparc32_open);
-EXPORT_SYMBOL(sys_close);
 #endif
 
 /* Special internal versions of library functions. */
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 2793a5d82380..563db528e031 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1797,7 +1797,9 @@ static const char *sun4v_err_type_to_str(u32 type)
         };
 }
 
-static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+extern void __show_regs(struct pt_regs * regs);
+
+static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
 {
         int cnt;
 
@@ -1830,6 +1832,8 @@ static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *
                pfx,
                ent->err_raddr, ent->err_size, ent->err_cpu);
 
+        __show_regs(regs);
+
         if ((cnt = atomic_read(ocnt)) != 0) {
                 atomic_set(ocnt, 0);
                 wmb();
@@ -1862,7 +1866,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
 
         put_cpu();
 
-        sun4v_log_error(&local_copy, cpu,
+        sun4v_log_error(regs, &local_copy, cpu,
                         KERN_ERR "RESUMABLE ERROR",
                         &sun4v_resum_oflow_cnt);
 }
@@ -1910,7 +1914,7 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
         }
 #endif
 
-        sun4v_log_error(&local_copy, cpu,
+        sun4v_log_error(regs, &local_copy, cpu,
                         KERN_EMERG "NON-RESUMABLE ERROR",
                         &sun4v_nonresum_oflow_cnt);
 
@@ -2200,7 +2204,6 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 void die_if_kernel(char *str, struct pt_regs *regs)
 {
         static int die_counter;
-        extern void __show_regs(struct pt_regs * regs);
         extern void smp_report_regs(void);
         int count = 0;
 
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 0de3ea938830..9cc7031b7151 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -271,6 +271,18 @@ __setup("enable_8254_timer", setup_enable_8254_timer);
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
 
+
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+        nvidia_hpet_detected = 1;
+        return 0;
+}
+#endif
+
 /* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
    off. Check for an Nvidia or VIA PCI bridge and turn it off.
    Use pci direct infrastructure because this runs before the PCI subsystem.
@@ -317,11 +329,19 @@ void __init check_ioapic(void)
                         return;
                 case PCI_VENDOR_ID_NVIDIA:
 #ifdef CONFIG_ACPI
-                        /* All timer overrides on Nvidia
-                           seem to be wrong. Skip them. */
-                        acpi_skip_timer_override = 1;
-                        printk(KERN_INFO
-             "Nvidia board detected. Ignoring ACPI timer override.\n");
+                        /*
+                         * All timer overrides on Nvidia are
+                         * wrong unless HPET is enabled.
+                         */
+                        nvidia_hpet_detected = 0;
+                        acpi_table_parse(ACPI_HPET,
+                                        nvidia_hpet_check);
+                        if (nvidia_hpet_detected == 0) {
+                                acpi_skip_timer_override = 1;
+                                printk(KERN_INFO "Nvidia board "
+                                       "detected. Ignoring ACPI "
+                                       "timer override.\n");
+                        }
 #endif
                         /* RED-PEN skip them on mptables too? */
                         return;
diff --git a/block/as-iosched.c b/block/as-iosched.c
index e25a5d79ab27..a7caf35ca0c2 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1648,17 +1648,17 @@ static void as_exit_queue(elevator_t *e)
  * initialize elevator private data (as_data), and alloc a arq for
  * each request on the free lists
  */
-static int as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q, elevator_t *e)
 {
         struct as_data *ad;
         int i;
 
         if (!arq_pool)
-                return -ENOMEM;
+                return NULL;
 
         ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
         if (!ad)
-                return -ENOMEM;
+                return NULL;
         memset(ad, 0, sizeof(*ad));
 
         ad->q = q; /* Identify what queue the data belongs to */
@@ -1667,7 +1667,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
                                 GFP_KERNEL, q->node);
         if (!ad->hash) {
                 kfree(ad);
-                return -ENOMEM;
+                return NULL;
         }
 
         ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -1675,7 +1675,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
         if (!ad->arq_pool) {
                 kfree(ad->hash);
                 kfree(ad);
-                return -ENOMEM;
+                return NULL;
         }
 
         /* anticipatory scheduling helpers */
@@ -1696,14 +1696,13 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
         ad->antic_expire = default_antic_expire;
         ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
         ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
-        e->elevator_data = ad;
 
         ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
         ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
         if (ad->write_batch_count < 2)
                 ad->write_batch_count = 2;
 
-        return 0;
+        return ad;
 }
 
 /*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 8e9d84825e1c..052b17487625 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1323,17 +1323,12 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
         struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
 
         if (cic) {
-                RB_CLEAR(&cic->rb_node);
-                cic->key = NULL;
-                cic->cfqq[ASYNC] = NULL;
-                cic->cfqq[SYNC] = NULL;
+                memset(cic, 0, sizeof(*cic));
+                RB_CLEAR_COLOR(&cic->rb_node);
                 cic->last_end_request = jiffies;
-                cic->ttime_total = 0;
-                cic->ttime_samples = 0;
-                cic->ttime_mean = 0;
+                INIT_LIST_HEAD(&cic->queue_list);
                 cic->dtor = cfq_free_io_context;
                 cic->exit = cfq_exit_io_context;
-                INIT_LIST_HEAD(&cic->queue_list);
                 atomic_inc(&ioc_count);
         }
 
@@ -2251,14 +2246,14 @@ static void cfq_exit_queue(elevator_t *e)
         kfree(cfqd);
 }
 
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 {
         struct cfq_data *cfqd;
         int i;
 
         cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
         if (!cfqd)
-                return -ENOMEM;
+                return NULL;
 
         memset(cfqd, 0, sizeof(*cfqd));
 
@@ -2288,8 +2283,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
         for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
                 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
 
-        e->elevator_data = cfqd;
-
         cfqd->queue = q;
 
         cfqd->max_queued = q->nr_requests / 4;
@@ -2316,14 +2309,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
         cfqd->cfq_slice_idle = cfq_slice_idle;
 
-        return 0;
+        return cfqd;
 out_crqpool:
         kfree(cfqd->cfq_hash);
 out_cfqhash:
         kfree(cfqd->crq_hash);
 out_crqhash:
         kfree(cfqd);
-        return -ENOMEM;
+        return NULL;
 }
 
 static void cfq_slab_kill(void)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 399fa1e60e1f..3bd0415a9828 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -613,24 +613,24 @@ static void deadline_exit_queue(elevator_t *e)
  * initialize elevator private data (deadline_data), and alloc a drq for
  * each request on the free lists
  */
-static int deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 {
         struct deadline_data *dd;
         int i;
 
         if (!drq_pool)
-                return -ENOMEM;
+                return NULL;
 
         dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
         if (!dd)
-                return -ENOMEM;
+                return NULL;
         memset(dd, 0, sizeof(*dd));
 
         dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
                                 GFP_KERNEL, q->node);
         if (!dd->hash) {
                 kfree(dd);
-                return -ENOMEM;
+                return NULL;
         }
 
         dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -638,7 +638,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
         if (!dd->drq_pool) {
                 kfree(dd->hash);
                 kfree(dd);
-                return -ENOMEM;
+                return NULL;
         }
 
         for (i = 0; i < DL_HASH_ENTRIES; i++)
@@ -653,8 +653,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
         dd->writes_starved = writes_starved;
         dd->front_merges = 1;
         dd->fifo_batch = fifo_batch;
-        e->elevator_data = dd;
-        return 0;
+        return dd;
 }
 
 static void deadline_put_request(request_queue_t *q, struct request *rq)
diff --git a/block/elevator.c b/block/elevator.c
index 8768a367fdde..a0afdd317cef 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -121,16 +121,16 @@ static struct elevator_type *elevator_get(const char *name)
         return e;
 }
 
-static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
 {
-        int ret = 0;
+        return eq->ops->elevator_init_fn(q, eq);
+}
 
+static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+                            void *data)
+{
         q->elevator = eq;
-
-        if (eq->ops->elevator_init_fn)
-                ret = eq->ops->elevator_init_fn(q, eq);
-
-        return ret;
+        eq->elevator_data = data;
 }
 
 static char chosen_elevator[16];
@@ -181,6 +181,7 @@ int elevator_init(request_queue_t *q, char *name)
         struct elevator_type *e = NULL;
         struct elevator_queue *eq;
         int ret = 0;
+        void *data;
 
         INIT_LIST_HEAD(&q->queue_head);
         q->last_merge = NULL;
@@ -202,10 +203,13 @@ int elevator_init(request_queue_t *q, char *name)
         if (!eq)
                 return -ENOMEM;
 
-        ret = elevator_attach(q, eq);
-        if (ret)
+        data = elevator_init_queue(q, eq);
+        if (!data) {
                 kobject_put(&eq->kobj);
+                return -ENOMEM;
+        }
 
+        elevator_attach(q, eq, data);
         return ret;
 }
 
@@ -722,13 +726,16 @@ int elv_register_queue(struct request_queue *q)
         return error;
 }
 
+static void __elv_unregister_queue(elevator_t *e)
+{
+        kobject_uevent(&e->kobj, KOBJ_REMOVE);
+        kobject_del(&e->kobj);
+}
+
 void elv_unregister_queue(struct request_queue *q)
 {
-        if (q) {
-                elevator_t *e = q->elevator;
-                kobject_uevent(&e->kobj, KOBJ_REMOVE);
-                kobject_del(&e->kobj);
-        }
+        if (q)
+                __elv_unregister_queue(q->elevator);
 }
 
 int elv_register(struct elevator_type *e)
@@ -780,6 +787,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
780static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) 787static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
781{ 788{
782 elevator_t *old_elevator, *e; 789 elevator_t *old_elevator, *e;
790 void *data;
783 791
784 /* 792 /*
785 * Allocate new elevator 793 * Allocate new elevator
@@ -788,6 +796,12 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
788 if (!e) 796 if (!e)
789 return 0; 797 return 0;
790 798
799 data = elevator_init_queue(q, e);
800 if (!data) {
801 kobject_put(&e->kobj);
802 return 0;
803 }
804
791 /* 805 /*
792 * Turn on BYPASS and drain all requests w/ elevator private data 806 * Turn on BYPASS and drain all requests w/ elevator private data
793 */ 807 */
@@ -806,19 +820,19 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
806 elv_drain_elevator(q); 820 elv_drain_elevator(q);
807 } 821 }
808 822
809 spin_unlock_irq(q->queue_lock);
810
811 /* 823 /*
812 * unregister old elevator data 824 * Remember old elevator.
813 */ 825 */
814 elv_unregister_queue(q);
815 old_elevator = q->elevator; 826 old_elevator = q->elevator;
816 827
817 /* 828 /*
818 * attach and start new elevator 829 * attach and start new elevator
819 */ 830 */
820 if (elevator_attach(q, e)) 831 elevator_attach(q, e, data);
821 goto fail; 832
833 spin_unlock_irq(q->queue_lock);
834
835 __elv_unregister_queue(old_elevator);
822 836
823 if (elv_register_queue(q)) 837 if (elv_register_queue(q))
824 goto fail_register; 838 goto fail_register;
@@ -837,7 +851,6 @@ fail_register:
837 */ 851 */
838 elevator_exit(e); 852 elevator_exit(e);
839 e = NULL; 853 e = NULL;
840fail:
841 q->elevator = old_elevator; 854 q->elevator = old_elevator;
842 elv_register_queue(q); 855 elv_register_queue(q);
843 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 856 clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
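
Note: taken together, the elevator.c hunks reorder elevator_switch() so the only step that can fail (allocating the new scheduler's data) happens before anything is torn down; the attach then happens under the queue lock and the old elevator's sysfs object is unregistered only after the swap, which is why the fail: label disappears. A compressed sketch of the resulting flow, reusing the helpers introduced above, with the bypass/ELVSWITCH bookkeeping and failure paths elided:

        static int switch_elevator(request_queue_t *q, elevator_t *new_e)
        {
                elevator_t *old;
                void *data;

                data = elevator_init_queue(q, new_e);   /* 1: the only fallible step */
                if (!data)
                        return 0;                       /* nothing to undo yet */

                spin_lock_irq(q->queue_lock);
                elv_drain_elevator(q);                  /* 2: drain the old one */
                old = q->elevator;                      /* 3: remember it */
                elevator_attach(q, new_e, data);        /* 4: swap in the new one */
                spin_unlock_irq(q->queue_lock);

                __elv_unregister_queue(old);            /* 5: tear down old sysfs */
                return elv_register_queue(q);           /* 6: expose the new one */
        }
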
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index f370e4a7fe6d..56a7c620574f 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -65,16 +65,15 @@ noop_latter_request(request_queue_t *q, struct request *rq)
65 return list_entry(rq->queuelist.next, struct request, queuelist); 65 return list_entry(rq->queuelist.next, struct request, queuelist);
66} 66}
67 67
68static int noop_init_queue(request_queue_t *q, elevator_t *e) 68static void *noop_init_queue(request_queue_t *q, elevator_t *e)
69{ 69{
70 struct noop_data *nd; 70 struct noop_data *nd;
71 71
72 nd = kmalloc(sizeof(*nd), GFP_KERNEL); 72 nd = kmalloc(sizeof(*nd), GFP_KERNEL);
73 if (!nd) 73 if (!nd)
74 return -ENOMEM; 74 return NULL;
75 INIT_LIST_HEAD(&nd->queue); 75 INIT_LIST_HEAD(&nd->queue);
76 e->elevator_data = nd; 76 return nd;
77 return 0;
78} 77}
79 78
80static void noop_exit_queue(elevator_t *e) 79static void noop_exit_queue(elevator_t *e)
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index abbdb37a7f5f..f36db22ce1ae 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -577,6 +577,8 @@ acpi_processor_register_performance(struct acpi_processor_performance
577 return_VALUE(-EBUSY); 577 return_VALUE(-EBUSY);
578 } 578 }
579 579
580 WARN_ON(!performance);
581
580 pr->performance = performance; 582 pr->performance = performance;
581 583
582 if (acpi_processor_get_performance_info(pr)) { 584 if (acpi_processor_get_performance_info(pr)) {
@@ -609,7 +611,8 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
609 return_VOID; 611 return_VOID;
610 } 612 }
611 613
612 kfree(pr->performance->states); 614 if (pr->performance)
615 kfree(pr->performance->states);
613 pr->performance = NULL; 616 pr->performance = NULL;
614 617
615 acpi_cpufreq_remove_file(pr); 618 acpi_cpufreq_remove_file(pr);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a59876a0bfa1..3170eaa25087 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1009,9 +1009,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
1009 if (fp->f_mode & FMODE_WRITE) { 1009 if (fp->f_mode & FMODE_WRITE) {
1010 ret = -EROFS; 1010 ret = -EROFS;
1011 if (cdrom_open_write(cdi)) 1011 if (cdrom_open_write(cdi))
1012 goto err; 1012 goto err_release;
1013 if (!CDROM_CAN(CDC_RAM)) 1013 if (!CDROM_CAN(CDC_RAM))
1014 goto err; 1014 goto err_release;
1015 ret = 0; 1015 ret = 0;
1016 cdi->media_written = 0; 1016 cdi->media_written = 0;
1017 } 1017 }
@@ -1026,6 +1026,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
1026 not be mounting, but opening with O_NONBLOCK */ 1026 not be mounting, but opening with O_NONBLOCK */
1027 check_disk_change(ip->i_bdev); 1027 check_disk_change(ip->i_bdev);
1028 return 0; 1028 return 0;
1029err_release:
1030 cdi->ops->release(cdi);
1029err: 1031err:
1030 cdi->use_count--; 1032 cdi->use_count--;
1031 return ret; 1033 return ret;
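
Note: the new err_release label in cdrom_open() is the usual kernel unwind ladder: a failure after ->release() has a counterpart to undo jumps to a label that reverses that one step and then falls through into the shared cleanup. The idiom in isolation, as compilable C with stub acquire/release functions standing in for the cdrom ops:

        static int acquire_a(void) { return 0; }    /* e.g. the device open step */
        static int acquire_b(void) { return -1; }   /* e.g. cdrom_open_write() */
        static void release_a(void) { }             /* e.g. cdi->ops->release() */

        static int do_open(void)
        {
                int ret = -1;

                if (acquire_a() < 0)
                        goto err;                    /* nothing to undo yet */
                if (acquire_b() < 0)
                        goto err_release_a;          /* undo A, then shared cleanup */
                return 0;

        err_release_a:
                release_a();
        err:                                         /* fall through: common cleanup */
                return ret;
        }
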
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f5b01c6d498e..fb919bfb2824 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -41,9 +41,9 @@ obj-$(CONFIG_N_HDLC) += n_hdlc.o
41obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o 41obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
42obj-$(CONFIG_SX) += sx.o generic_serial.o 42obj-$(CONFIG_SX) += sx.o generic_serial.o
43obj-$(CONFIG_RIO) += rio/ generic_serial.o 43obj-$(CONFIG_RIO) += rio/ generic_serial.o
44obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
45obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o 44obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
46obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o 45obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
46obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
47obj-$(CONFIG_RAW_DRIVER) += raw.o 47obj-$(CONFIG_RAW_DRIVER) += raw.o
48obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o 48obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
49obj-$(CONFIG_MMTIMER) += mmtimer.o 49obj-$(CONFIG_MMTIMER) += mmtimer.o
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index ede365d05387..b9371d5bf790 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1384,8 +1384,10 @@ do_it_again:
1384 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode, 1384 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
1385 * we won't get any more characters. 1385 * we won't get any more characters.
1386 */ 1386 */
1387 if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) 1387 if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
1388 n_tty_set_room(tty);
1388 check_unthrottle(tty); 1389 check_unthrottle(tty);
1390 }
1389 1391
1390 if (b - buf >= minimum) 1392 if (b - buf >= minimum)
1391 break; 1393 break;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index f2a4d382ea19..3201de053943 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -831,6 +831,7 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
831 return rc; 831 return rc;
832} 832}
833 833
834#ifdef CONFIG_PM
834/* 835/*
835 * spi module resume handler 836 * spi module resume handler
836 */ 837 */
@@ -846,6 +847,7 @@ mptspi_resume(struct pci_dev *pdev)
846 847
847 return rc; 848 return rc;
848} 849}
850#endif
849 851
850/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 852/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
851/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 853/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 5ea133c59afb..7bd4d85d0b42 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -55,6 +55,7 @@ struct i2o_exec_wait {
55 u32 m; /* message id */ 55 u32 m; /* message id */
56 struct i2o_message *msg; /* pointer to the reply message */ 56 struct i2o_message *msg; /* pointer to the reply message */
57 struct list_head list; /* node in global wait list */ 57 struct list_head list; /* node in global wait list */
58 spinlock_t lock; /* lock before modifying */
58}; 59};
59 60
60/* Work struct needed to handle LCT NOTIFY replies */ 61/* Work struct needed to handle LCT NOTIFY replies */
@@ -87,6 +88,7 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
87 return NULL; 88 return NULL;
88 89
89 INIT_LIST_HEAD(&wait->list); 90 INIT_LIST_HEAD(&wait->list);
91 spin_lock_init(&wait->lock);
90 92
91 return wait; 93 return wait;
92}; 94};
@@ -125,6 +127,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
125 DECLARE_WAIT_QUEUE_HEAD(wq); 127 DECLARE_WAIT_QUEUE_HEAD(wq);
126 struct i2o_exec_wait *wait; 128 struct i2o_exec_wait *wait;
127 static u32 tcntxt = 0x80000000; 129 static u32 tcntxt = 0x80000000;
 130 unsigned long flags;
128 int rc = 0; 131 int rc = 0;
129 132
130 wait = i2o_exec_wait_alloc(); 133 wait = i2o_exec_wait_alloc();
@@ -146,33 +149,28 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
146 wait->tcntxt = tcntxt++; 149 wait->tcntxt = tcntxt++;
147 msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt); 150 msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
148 151
152 wait->wq = &wq;
153 /*
154 * we add elements to the head, because if a entry in the list will
155 * never be removed, we have to iterate over it every time
156 */
157 list_add(&wait->list, &i2o_exec_wait_list);
158
149 /* 159 /*
150 * Post the message to the controller. At some point later it will 160 * Post the message to the controller. At some point later it will
151 * return. If we time out before it returns then complete will be zero. 161 * return. If we time out before it returns then complete will be zero.
152 */ 162 */
153 i2o_msg_post(c, msg); 163 i2o_msg_post(c, msg);
154 164
155 if (!wait->complete) { 165 wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);
156 wait->wq = &wq;
157 /*
158 * we add elements add the head, because if a entry in the list
159 * will never be removed, we have to iterate over it every time
160 */
161 list_add(&wait->list, &i2o_exec_wait_list);
162
163 wait_event_interruptible_timeout(wq, wait->complete,
164 timeout * HZ);
165 166
166 wait->wq = NULL; 167 spin_lock_irqsave(&wait->lock, flags);
167 }
168 168
169 barrier(); 169 wait->wq = NULL;
170 170
171 if (wait->complete) { 171 if (wait->complete)
172 rc = le32_to_cpu(wait->msg->body[0]) >> 24; 172 rc = le32_to_cpu(wait->msg->body[0]) >> 24;
173 i2o_flush_reply(c, wait->m); 173 else {
174 i2o_exec_wait_free(wait);
175 } else {
176 /* 174 /*
177 * We cannot remove it now. This is important. When it does 175 * We cannot remove it now. This is important. When it does
178 * terminate (which it must do if the controller has not 176 * terminate (which it must do if the controller has not
@@ -186,6 +184,13 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
186 rc = -ETIMEDOUT; 184 rc = -ETIMEDOUT;
187 } 185 }
188 186
187 spin_unlock_irqrestore(&wait->lock, flags);
188
189 if (rc != -ETIMEDOUT) {
190 i2o_flush_reply(c, wait->m);
191 i2o_exec_wait_free(wait);
192 }
193
189 return rc; 194 return rc;
190}; 195};
191 196
@@ -213,7 +218,6 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
213{ 218{
214 struct i2o_exec_wait *wait, *tmp; 219 struct i2o_exec_wait *wait, *tmp;
215 unsigned long flags; 220 unsigned long flags;
216 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
217 int rc = 1; 221 int rc = 1;
218 222
219 /* 223 /*
@@ -223,23 +227,24 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
223 * already expired. Not much we can do about that except log it for 227 * already expired. Not much we can do about that except log it for
224 * debug purposes, increase timeout, and recompile. 228 * debug purposes, increase timeout, and recompile.
225 */ 229 */
226 spin_lock_irqsave(&lock, flags);
227 list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { 230 list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
228 if (wait->tcntxt == context) { 231 if (wait->tcntxt == context) {
229 list_del(&wait->list); 232 spin_lock_irqsave(&wait->lock, flags);
230 233
231 spin_unlock_irqrestore(&lock, flags); 234 list_del(&wait->list);
232 235
233 wait->m = m; 236 wait->m = m;
234 wait->msg = msg; 237 wait->msg = msg;
235 wait->complete = 1; 238 wait->complete = 1;
236 239
237 barrier(); 240 if (wait->wq)
238
239 if (wait->wq) {
240 wake_up_interruptible(wait->wq);
241 rc = 0; 241 rc = 0;
242 } else { 242 else
243 rc = -1;
244
245 spin_unlock_irqrestore(&wait->lock, flags);
246
247 if (rc) {
243 struct device *dev; 248 struct device *dev;
244 249
245 dev = &c->pdev->dev; 250 dev = &c->pdev->dev;
@@ -248,15 +253,13 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
248 c->name); 253 c->name);
249 i2o_dma_free(dev, &wait->dma); 254 i2o_dma_free(dev, &wait->dma);
250 i2o_exec_wait_free(wait); 255 i2o_exec_wait_free(wait);
251 rc = -1; 256 } else
252 } 257 wake_up_interruptible(wait->wq);
253 258
254 return rc; 259 return rc;
255 } 260 }
256 } 261 }
257 262
258 spin_unlock_irqrestore(&lock, flags);
259
260 osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name, 263 osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
261 context); 264 context);
262 265
@@ -322,14 +325,9 @@ static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
322static int i2o_exec_probe(struct device *dev) 325static int i2o_exec_probe(struct device *dev)
323{ 326{
324 struct i2o_device *i2o_dev = to_i2o_device(dev); 327 struct i2o_device *i2o_dev = to_i2o_device(dev);
325 struct i2o_controller *c = i2o_dev->iop;
326 328
327 i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); 329 i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
328 330
329 c->exec = i2o_dev;
330
331 i2o_exec_lct_notify(c, c->lct->change_ind + 1);
332
333 device_create_file(dev, &dev_attr_vendor_id); 331 device_create_file(dev, &dev_attr_vendor_id);
334 device_create_file(dev, &dev_attr_product_id); 332 device_create_file(dev, &dev_attr_product_id);
335 333
@@ -523,6 +521,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
523 struct device *dev; 521 struct device *dev;
524 struct i2o_message *msg; 522 struct i2o_message *msg;
525 523
524 down(&c->lct_lock);
525
526 dev = &c->pdev->dev; 526 dev = &c->pdev->dev;
527 527
528 if (i2o_dma_realloc 528 if (i2o_dma_realloc
@@ -545,6 +545,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
545 545
546 i2o_msg_post(c, msg); 546 i2o_msg_post(c, msg);
547 547
548 up(&c->lct_lock);
549
548 return 0; 550 return 0;
549}; 551};
550 552
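
Note: the exec-osm rework replaces a single global lock plus barrier() tricks with a spinlock embedded in each i2o_exec_wait, so the timed-out waiter and the reply handler serialize on the same lock when they test and update wq/complete; whichever side loses the race can tell, and exactly one of them frees the wait block. A reduced userspace model of that handshake, with a pthread mutex standing in for the spinlock and made-up names:

        #include <pthread.h>
        #include <stdlib.h>

        struct waiter {
                pthread_mutex_t lock;            /* models the per-wait spinlock */
                int complete;                    /* set by the reply side */
                int waiter_gone;                 /* set by the timed-out waiter */
        };

        /* Timeout path: decide under the lock whether the reply already landed. */
        static int waiter_timeout(struct waiter *w)
        {
                int rc;

                pthread_mutex_lock(&w->lock);
                rc = w->complete ? 0 : -1;       /* -1 models -ETIMEDOUT */
                if (rc)
                        w->waiter_gone = 1;      /* reply side must clean up */
                pthread_mutex_unlock(&w->lock);

                if (rc == 0)
                        free(w);                 /* completed: the waiter owns it */
                return rc;
        }

        /* Reply path: the same lock makes the check-and-set atomic. */
        static void reply_arrived(struct waiter *w)
        {
                int orphaned;

                pthread_mutex_lock(&w->lock);
                w->complete = 1;
                orphaned = w->waiter_gone;
                pthread_mutex_unlock(&w->lock);

                if (orphaned)
                        free(w);                 /* waiter gave up: we clean up */
                /* else: wake the waiter, which consumes and frees it */
        }
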
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 492167446936..febbdd4e0605 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -804,8 +804,6 @@ void i2o_iop_remove(struct i2o_controller *c)
804 804
805 /* Ask the IOP to switch to RESET state */ 805 /* Ask the IOP to switch to RESET state */
806 i2o_iop_reset(c); 806 i2o_iop_reset(c);
807
808 put_device(&c->device);
809} 807}
810 808
811/** 809/**
@@ -1059,7 +1057,7 @@ struct i2o_controller *i2o_iop_alloc(void)
1059 1057
1060 snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name); 1058 snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
1061 if (i2o_pool_alloc 1059 if (i2o_pool_alloc
1062 (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4, 1060 (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32),
1063 I2O_MSG_INPOOL_MIN)) { 1061 I2O_MSG_INPOOL_MIN)) {
1064 kfree(c); 1062 kfree(c);
1065 return ERR_PTR(-ENOMEM); 1063 return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index ecccca35c6f4..d1c705b412c2 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -870,13 +870,16 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
870 *data = 0; 870 *data = 0;
871 871
872 /* Hook up test interrupt handler just for this test */ 872 /* Hook up test interrupt handler just for this test */
873 if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) { 873 if (!request_irq(irq, &e1000_test_intr, SA_PROBEIRQ, netdev->name,
874 netdev)) {
874 shared_int = FALSE; 875 shared_int = FALSE;
875 } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ, 876 } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
876 netdev->name, netdev)){ 877 netdev->name, netdev)){
877 *data = 1; 878 *data = 1;
878 return -1; 879 return -1;
879 } 880 }
881 DPRINTK(PROBE,INFO, "testing %s interrupt\n",
882 (shared_int ? "shared" : "unshared"));
880 883
881 /* Disable all the interrupts */ 884 /* Disable all the interrupts */
882 E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF); 885 E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ed15fcaedaf9..97e71a4fe8eb 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3519,7 +3519,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3519 buffer_info = &rx_ring->buffer_info[i]; 3519 buffer_info = &rx_ring->buffer_info[i];
3520 3520
3521 while (rx_desc->status & E1000_RXD_STAT_DD) { 3521 while (rx_desc->status & E1000_RXD_STAT_DD) {
3522 struct sk_buff *skb, *next_skb; 3522 struct sk_buff *skb;
3523 u8 status; 3523 u8 status;
3524#ifdef CONFIG_E1000_NAPI 3524#ifdef CONFIG_E1000_NAPI
3525 if (*work_done >= work_to_do) 3525 if (*work_done >= work_to_do)
@@ -3537,8 +3537,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
3537 prefetch(next_rxd); 3537 prefetch(next_rxd);
3538 3538
3539 next_buffer = &rx_ring->buffer_info[i]; 3539 next_buffer = &rx_ring->buffer_info[i];
3540 next_skb = next_buffer->skb;
3541 prefetch(next_skb->data - NET_IP_ALIGN);
3542 3540
3543 cleaned = TRUE; 3541 cleaned = TRUE;
3544 cleaned_count++; 3542 cleaned_count++;
@@ -3668,7 +3666,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3668 struct e1000_buffer *buffer_info, *next_buffer; 3666 struct e1000_buffer *buffer_info, *next_buffer;
3669 struct e1000_ps_page *ps_page; 3667 struct e1000_ps_page *ps_page;
3670 struct e1000_ps_page_dma *ps_page_dma; 3668 struct e1000_ps_page_dma *ps_page_dma;
3671 struct sk_buff *skb, *next_skb; 3669 struct sk_buff *skb;
3672 unsigned int i, j; 3670 unsigned int i, j;
3673 uint32_t length, staterr; 3671 uint32_t length, staterr;
3674 int cleaned_count = 0; 3672 int cleaned_count = 0;
@@ -3697,8 +3695,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3697 prefetch(next_rxd); 3695 prefetch(next_rxd);
3698 3696
3699 next_buffer = &rx_ring->buffer_info[i]; 3697 next_buffer = &rx_ring->buffer_info[i];
3700 next_skb = next_buffer->skb;
3701 prefetch(next_skb->data - NET_IP_ALIGN);
3702 3698
3703 cleaned = TRUE; 3699 cleaned = TRUE;
3704 cleaned_count++; 3700 cleaned_count++;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 959109609d85..fba1e4d4d83d 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -187,12 +187,11 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
187 return v; 187 return v;
188} 188}
189 189
190static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) 190static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
191{ 191{
192 u16 power_control; 192 u16 power_control;
193 u32 reg1; 193 u32 reg1;
194 int vaux; 194 int vaux;
195 int ret = 0;
196 195
197 pr_debug("sky2_set_power_state %d\n", state); 196 pr_debug("sky2_set_power_state %d\n", state);
198 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 197 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -275,12 +274,10 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
275 break; 274 break;
276 default: 275 default:
277 printk(KERN_ERR PFX "Unknown power state %d\n", state); 276 printk(KERN_ERR PFX "Unknown power state %d\n", state);
278 ret = -1;
279 } 277 }
280 278
281 sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control); 279 sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
282 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 280 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
283 return ret;
284} 281}
285 282
286static void sky2_phy_reset(struct sky2_hw *hw, unsigned port) 283static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
@@ -2164,6 +2161,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
2164/* If idle then force a fake soft NAPI poll once a second 2161/* If idle then force a fake soft NAPI poll once a second
2165 * to work around cases of sharing an edge-triggered interrupt. 2162 * to work around cases of sharing an edge-triggered interrupt.
2166 */ 2163 */
2164static inline void sky2_idle_start(struct sky2_hw *hw)
2165{
2166 if (idle_timeout > 0)
2167 mod_timer(&hw->idle_timer,
2168 jiffies + msecs_to_jiffies(idle_timeout));
2169}
2170
2167static void sky2_idle(unsigned long arg) 2171static void sky2_idle(unsigned long arg)
2168{ 2172{
2169 struct sky2_hw *hw = (struct sky2_hw *) arg; 2173 struct sky2_hw *hw = (struct sky2_hw *) arg;
@@ -2183,6 +2187,9 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2183 int work_done = 0; 2187 int work_done = 0;
2184 u32 status = sky2_read32(hw, B0_Y2_SP_EISR); 2188 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2185 2189
2190 if (!~status)
2191 goto out;
2192
2186 if (status & Y2_IS_HW_ERR) 2193 if (status & Y2_IS_HW_ERR)
2187 sky2_hw_intr(hw); 2194 sky2_hw_intr(hw);
2188 2195
@@ -2219,7 +2226,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2219 2226
2220 if (sky2_more_work(hw)) 2227 if (sky2_more_work(hw))
2221 return 1; 2228 return 1;
2222 2229out:
2223 netif_rx_complete(dev0); 2230 netif_rx_complete(dev0);
2224 2231
2225 sky2_read32(hw, B0_Y2_SP_LISR); 2232 sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2248,8 +2255,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
2248static void sky2_netpoll(struct net_device *dev) 2255static void sky2_netpoll(struct net_device *dev)
2249{ 2256{
2250 struct sky2_port *sky2 = netdev_priv(dev); 2257 struct sky2_port *sky2 = netdev_priv(dev);
2258 struct net_device *dev0 = sky2->hw->dev[0];
2251 2259
2252 sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL); 2260 if (netif_running(dev) && __netif_rx_schedule_prep(dev0))
2261 __netif_rx_schedule(dev0);
2253} 2262}
2254#endif 2263#endif
2255 2264
@@ -3350,9 +3359,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3350 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3359 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3351 3360
3352 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw); 3361 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
3353 if (idle_timeout > 0) 3362 sky2_idle_start(hw);
3354 mod_timer(&hw->idle_timer,
3355 jiffies + msecs_to_jiffies(idle_timeout));
3356 3363
3357 pci_set_drvdata(pdev, hw); 3364 pci_set_drvdata(pdev, hw);
3358 3365
@@ -3425,8 +3432,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
3425{ 3432{
3426 struct sky2_hw *hw = pci_get_drvdata(pdev); 3433 struct sky2_hw *hw = pci_get_drvdata(pdev);
3427 int i; 3434 int i;
3435 pci_power_t pstate = pci_choose_state(pdev, state);
3436
3437 if (!(pstate == PCI_D3hot || pstate == PCI_D3cold))
3438 return -EINVAL;
3439
3440 del_timer_sync(&hw->idle_timer);
3428 3441
3429 for (i = 0; i < 2; i++) { 3442 for (i = 0; i < hw->ports; i++) {
3430 struct net_device *dev = hw->dev[i]; 3443 struct net_device *dev = hw->dev[i];
3431 3444
3432 if (dev) { 3445 if (dev) {
@@ -3435,10 +3448,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
3435 3448
3436 sky2_down(dev); 3449 sky2_down(dev);
3437 netif_device_detach(dev); 3450 netif_device_detach(dev);
3451 netif_poll_disable(dev);
3438 } 3452 }
3439 } 3453 }
3440 3454
3441 return sky2_set_power_state(hw, pci_choose_state(pdev, state)); 3455 sky2_write32(hw, B0_IMSK, 0);
3456 pci_save_state(pdev);
3457 sky2_set_power_state(hw, pstate);
3458 return 0;
3442} 3459}
3443 3460
3444static int sky2_resume(struct pci_dev *pdev) 3461static int sky2_resume(struct pci_dev *pdev)
@@ -3448,27 +3465,31 @@ static int sky2_resume(struct pci_dev *pdev)
3448 3465
3449 pci_restore_state(pdev); 3466 pci_restore_state(pdev);
3450 pci_enable_wake(pdev, PCI_D0, 0); 3467 pci_enable_wake(pdev, PCI_D0, 0);
3451 err = sky2_set_power_state(hw, PCI_D0); 3468 sky2_set_power_state(hw, PCI_D0);
3452 if (err)
3453 goto out;
3454 3469
3455 err = sky2_reset(hw); 3470 err = sky2_reset(hw);
3456 if (err) 3471 if (err)
3457 goto out; 3472 goto out;
3458 3473
3459 for (i = 0; i < 2; i++) { 3474 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3475
3476 for (i = 0; i < hw->ports; i++) {
3460 struct net_device *dev = hw->dev[i]; 3477 struct net_device *dev = hw->dev[i];
3461 if (dev && netif_running(dev)) { 3478 if (dev && netif_running(dev)) {
3462 netif_device_attach(dev); 3479 netif_device_attach(dev);
3480 netif_poll_enable(dev);
3481
3463 err = sky2_up(dev); 3482 err = sky2_up(dev);
3464 if (err) { 3483 if (err) {
3465 printk(KERN_ERR PFX "%s: could not up: %d\n", 3484 printk(KERN_ERR PFX "%s: could not up: %d\n",
3466 dev->name, err); 3485 dev->name, err);
3467 dev_close(dev); 3486 dev_close(dev);
3468 break; 3487 goto out;
3469 } 3488 }
3470 } 3489 }
3471 } 3490 }
3491
3492 sky2_idle_start(hw);
3472out: 3493out:
3473 return err; 3494 return err;
3474} 3495}
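
Note: the `if (!~status)` added to sky2_poll() is the standard vanished-hardware check: reads from a surprise-removed or powered-off PCI device return all ones, so a status word of 0xffffffff means "stop polling", not "every interrupt source fired". The test in plain C:

        #include <stdint.h>

        /* Reads from a missing PCI device float to all ones. */
        static int device_gone(uint32_t status)
        {
                return status == 0xffffffffu;    /* the same test as !~status */
        }
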
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 49ad60b72657..862c226dbbe2 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.58" 72#define DRV_MODULE_VERSION "3.59"
73#define DRV_MODULE_RELDATE "May 22, 2006" 73#define DRV_MODULE_RELDATE "June 8, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -4485,9 +4485,8 @@ static void tg3_disable_nvram_access(struct tg3 *tp)
4485/* tp->lock is held. */ 4485/* tp->lock is held. */
4486static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) 4486static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4487{ 4487{
4488 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) 4488 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4489 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, 4489 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4490 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4491 4490
4492 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { 4491 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4493 switch (kind) { 4492 switch (kind) {
@@ -4568,13 +4567,12 @@ static int tg3_chip_reset(struct tg3 *tp)
4568 void (*write_op)(struct tg3 *, u32, u32); 4567 void (*write_op)(struct tg3 *, u32, u32);
4569 int i; 4568 int i;
4570 4569
4571 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) { 4570 tg3_nvram_lock(tp);
4572 tg3_nvram_lock(tp); 4571
4573 /* No matching tg3_nvram_unlock() after this because 4572 /* No matching tg3_nvram_unlock() after this because
4574 * chip reset below will undo the nvram lock. 4573 * chip reset below will undo the nvram lock.
4575 */ 4574 */
4576 tp->nvram_lock_cnt = 0; 4575 tp->nvram_lock_cnt = 0;
4577 }
4578 4576
4579 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 4577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 4578 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
@@ -4727,20 +4725,25 @@ static int tg3_chip_reset(struct tg3 *tp)
4727 tw32_f(MAC_MODE, 0); 4725 tw32_f(MAC_MODE, 0);
4728 udelay(40); 4726 udelay(40);
4729 4727
4730 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) { 4728 /* Wait for firmware initialization to complete. */
4731 /* Wait for firmware initialization to complete. */ 4729 for (i = 0; i < 100000; i++) {
4732 for (i = 0; i < 100000; i++) { 4730 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4733 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); 4731 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4734 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4732 break;
4735 break; 4733 udelay(10);
4736 udelay(10); 4734 }
4737 } 4735
4738 if (i >= 100000) { 4736 /* Chip might not be fitted with firmware. Some Sun onboard
4739 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, " 4737 * parts are configured like that. So don't signal the timeout
4740 "firmware will not restart magic=%08x\n", 4738 * of the above loop as an error, but do report the lack of
4741 tp->dev->name, val); 4739 * running firmware once.
4742 return -ENODEV; 4740 */
4743 } 4741 if (i >= 100000 &&
4742 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4743 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4744
4745 printk(KERN_INFO PFX "%s: No firmware running.\n",
4746 tp->dev->name);
4744 } 4747 }
4745 4748
4746 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 4749 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
@@ -9075,9 +9078,6 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
9075{ 9078{
9076 int j; 9079 int j;
9077 9080
9078 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9079 return;
9080
9081 tw32_f(GRC_EEPROM_ADDR, 9081 tw32_f(GRC_EEPROM_ADDR,
9082 (EEPROM_ADDR_FSM_RESET | 9082 (EEPROM_ADDR_FSM_RESET |
9083 (EEPROM_DEFAULT_CLOCK_PERIOD << 9083 (EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -9210,11 +9210,6 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9210{ 9210{
9211 int ret; 9211 int ret;
9212 9212
9213 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9214 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9215 return -EINVAL;
9216 }
9217
9218 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) 9213 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9219 return tg3_nvram_read_using_eeprom(tp, offset, val); 9214 return tg3_nvram_read_using_eeprom(tp, offset, val);
9220 9215
@@ -9447,11 +9442,6 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9447{ 9442{
9448 int ret; 9443 int ret;
9449 9444
9450 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9451 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9452 return -EINVAL;
9453 }
9454
9455 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { 9445 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9456 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 9446 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9457 ~GRC_LCLCTRL_GPIO_OUTPUT1); 9447 ~GRC_LCLCTRL_GPIO_OUTPUT1);
@@ -9578,15 +9568,19 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9578 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 9568 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9579 tp->misc_host_ctrl); 9569 tp->misc_host_ctrl);
9580 9570
9571 /* The memory arbiter has to be enabled in order for SRAM accesses
9572 * to succeed. Normally on powerup the tg3 chip firmware will make
9573 * sure it is enabled, but other entities such as system netboot
9574 * code might disable it.
9575 */
9576 val = tr32(MEMARB_MODE);
9577 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9578
9581 tp->phy_id = PHY_ID_INVALID; 9579 tp->phy_id = PHY_ID_INVALID;
9582 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 9580 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9583 9581
9584 /* Do not even try poking around in here on Sun parts. */ 9582 /* Assume an onboard device by default. */
9585 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) { 9583 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9586 /* All SUN chips are built-in LOMs. */
9587 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9588 return;
9589 }
9590 9584
9591 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 9585 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9592 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 9586 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
@@ -9686,6 +9680,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9686 9680
9687 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) 9681 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9688 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; 9682 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9683 else
9684 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9689 9685
9690 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 9686 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9691 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; 9687 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
@@ -9834,16 +9830,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
9834 int i; 9830 int i;
9835 u32 magic; 9831 u32 magic;
9836 9832
9837 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9838 /* Sun decided not to put the necessary bits in the
9839 * NVRAM of their onboard tg3 parts :(
9840 */
9841 strcpy(tp->board_part_number, "Sun 570X");
9842 return;
9843 }
9844
9845 if (tg3_nvram_read_swab(tp, 0x0, &magic)) 9833 if (tg3_nvram_read_swab(tp, 0x0, &magic))
9846 return; 9834 goto out_not_found;
9847 9835
9848 if (magic == TG3_EEPROM_MAGIC) { 9836 if (magic == TG3_EEPROM_MAGIC) {
9849 for (i = 0; i < 256; i += 4) { 9837 for (i = 0; i < 256; i += 4) {
@@ -9874,6 +9862,9 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
9874 break; 9862 break;
9875 msleep(1); 9863 msleep(1);
9876 } 9864 }
9865 if (!(tmp16 & 0x8000))
9866 goto out_not_found;
9867
9877 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, 9868 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9878 &tmp); 9869 &tmp);
9879 tmp = cpu_to_le32(tmp); 9870 tmp = cpu_to_le32(tmp);
@@ -9965,37 +9956,6 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9965 } 9956 }
9966} 9957}
9967 9958
9968#ifdef CONFIG_SPARC64
9969static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9970{
9971 struct pci_dev *pdev = tp->pdev;
9972 struct pcidev_cookie *pcp = pdev->sysdata;
9973
9974 if (pcp != NULL) {
9975 int node = pcp->prom_node;
9976 u32 venid;
9977 int err;
9978
9979 err = prom_getproperty(node, "subsystem-vendor-id",
9980 (char *) &venid, sizeof(venid));
9981 if (err == 0 || err == -1)
9982 return 0;
9983 if (venid == PCI_VENDOR_ID_SUN)
9984 return 1;
9985
9986 /* TG3 chips onboard the SunBlade-2500 don't have the
9987 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9988 * are distinguishable from non-Sun variants by being
9989 * named "network" by the firmware. Non-Sun cards will
9990 * show up as being named "ethernet".
9991 */
9992 if (!strcmp(pcp->prom_name, "network"))
9993 return 1;
9994 }
9995 return 0;
9996}
9997#endif
9998
9999static int __devinit tg3_get_invariants(struct tg3 *tp) 9959static int __devinit tg3_get_invariants(struct tg3 *tp)
10000{ 9960{
10001 static struct pci_device_id write_reorder_chipsets[] = { 9961 static struct pci_device_id write_reorder_chipsets[] = {
@@ -10012,11 +9972,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10012 u16 pci_cmd; 9972 u16 pci_cmd;
10013 int err; 9973 int err;
10014 9974
10015#ifdef CONFIG_SPARC64
10016 if (tg3_is_sun_570X(tp))
10017 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
10018#endif
10019
10020 /* Force memory write invalidate off. If we leave it on, 9975 /* Force memory write invalidate off. If we leave it on,
10021 * then on 5700_BX chips we have to enable a workaround. 9976 * then on 5700_BX chips we have to enable a workaround.
10022 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 9977 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
@@ -10312,8 +10267,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10312 if (tp->write32 == tg3_write_indirect_reg32 || 10267 if (tp->write32 == tg3_write_indirect_reg32 ||
10313 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && 10268 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10314 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 10269 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10315 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) || 10270 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10316 (tp->tg3_flags2 & TG3_FLG2_SUN_570X))
10317 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; 10271 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10318 10272
10319 /* Get eeprom hw config before calling tg3_set_power_state(). 10273 /* Get eeprom hw config before calling tg3_set_power_state().
@@ -10594,8 +10548,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
10594#endif 10548#endif
10595 10549
10596 mac_offset = 0x7c; 10550 mac_offset = 0x7c;
10597 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 10551 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10598 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
10599 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 10552 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10600 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 10553 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10601 mac_offset = 0xcc; 10554 mac_offset = 0xcc;
@@ -10622,8 +10575,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
10622 } 10575 }
10623 if (!addr_ok) { 10576 if (!addr_ok) {
10624 /* Next, try NVRAM. */ 10577 /* Next, try NVRAM. */
10625 if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) && 10578 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10626 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10627 !tg3_nvram_read(tp, mac_offset + 4, &lo)) { 10579 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10628 dev->dev_addr[0] = ((hi >> 16) & 0xff); 10580 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10629 dev->dev_addr[1] = ((hi >> 24) & 0xff); 10581 dev->dev_addr[1] = ((hi >> 24) & 0xff);
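
Note: with the Sun 570X special cases removed, the firmware-wait loop in tg3_chip_reset() has to tolerate parts that legitimately ship without firmware, so the timeout is demoted from a hard -ENODEV to a once-per-device informational message, latched through the new TG3_FLG2_NO_FWARE_REPORTED bit. The report-once pattern on its own, as a compilable sketch (names are illustrative):

        #include <stdio.h>

        #define FLG_NO_FW_REPORTED 0x40000000u   /* models TG3_FLG2_NO_FWARE_REPORTED */

        static unsigned int tg3_flags2;

        static void note_missing_firmware(const char *name)
        {
                if (tg3_flags2 & FLG_NO_FW_REPORTED)
                        return;                  /* already reported once */
                tg3_flags2 |= FLG_NO_FW_REPORTED;
                printf("%s: No firmware running.\n", name);
        }
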
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 0e29b885d449..ff0faab94bd5 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2184,7 +2184,7 @@ struct tg3 {
2184#define TG3_FLAG_INIT_COMPLETE 0x80000000 2184#define TG3_FLAG_INIT_COMPLETE 0x80000000
2185 u32 tg3_flags2; 2185 u32 tg3_flags2;
2186#define TG3_FLG2_RESTART_TIMER 0x00000001 2186#define TG3_FLG2_RESTART_TIMER 0x00000001
2187#define TG3_FLG2_SUN_570X 0x00000002 2187/* 0x00000002 available */
2188#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004 2188#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004
2189#define TG3_FLG2_IS_5788 0x00000008 2189#define TG3_FLG2_IS_5788 0x00000008
2190#define TG3_FLG2_MAX_RXPEND_64 0x00000010 2190#define TG3_FLG2_MAX_RXPEND_64 0x00000010
@@ -2216,6 +2216,7 @@ struct tg3 {
2216#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) 2216#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
2217#define TG3_FLG2_1SHOT_MSI 0x10000000 2217#define TG3_FLG2_1SHOT_MSI 0x10000000
2218#define TG3_FLG2_PHY_JITTER_BUG 0x20000000 2218#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2219#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
2219 2220
2220 u32 split_mode_max_reqs; 2221 u32 split_mode_max_reqs;
2221#define SPLIT_MODE_5704_MAX_REQ 3 2222#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index bbecba02e697..d0318e525ba7 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -624,25 +624,28 @@ err_destroy_tx0:
624static u16 generate_cookie(struct bcm43xx_dmaring *ring, 624static u16 generate_cookie(struct bcm43xx_dmaring *ring,
625 int slot) 625 int slot)
626{ 626{
627 u16 cookie = 0x0000; 627 u16 cookie = 0xF000;
628 628
629 /* Use the upper 4 bits of the cookie as 629 /* Use the upper 4 bits of the cookie as
630 * DMA controller ID and store the slot number 630 * DMA controller ID and store the slot number
631 * in the lower 12 bits 631 * in the lower 12 bits.
632 * Note that the cookie must never be 0, as this
633 * is a special value used in RX path.
632 */ 634 */
633 switch (ring->mmio_base) { 635 switch (ring->mmio_base) {
634 default: 636 default:
635 assert(0); 637 assert(0);
636 case BCM43xx_MMIO_DMA1_BASE: 638 case BCM43xx_MMIO_DMA1_BASE:
639 cookie = 0xA000;
637 break; 640 break;
638 case BCM43xx_MMIO_DMA2_BASE: 641 case BCM43xx_MMIO_DMA2_BASE:
639 cookie = 0x1000; 642 cookie = 0xB000;
640 break; 643 break;
641 case BCM43xx_MMIO_DMA3_BASE: 644 case BCM43xx_MMIO_DMA3_BASE:
642 cookie = 0x2000; 645 cookie = 0xC000;
643 break; 646 break;
644 case BCM43xx_MMIO_DMA4_BASE: 647 case BCM43xx_MMIO_DMA4_BASE:
645 cookie = 0x3000; 648 cookie = 0xD000;
646 break; 649 break;
647 } 650 }
648 assert(((u16)slot & 0xF000) == 0x0000); 651 assert(((u16)slot & 0xF000) == 0x0000);
@@ -660,16 +663,16 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
660 struct bcm43xx_dmaring *ring = NULL; 663 struct bcm43xx_dmaring *ring = NULL;
661 664
662 switch (cookie & 0xF000) { 665 switch (cookie & 0xF000) {
663 case 0x0000: 666 case 0xA000:
664 ring = dma->tx_ring0; 667 ring = dma->tx_ring0;
665 break; 668 break;
666 case 0x1000: 669 case 0xB000:
667 ring = dma->tx_ring1; 670 ring = dma->tx_ring1;
668 break; 671 break;
669 case 0x2000: 672 case 0xC000:
670 ring = dma->tx_ring2; 673 ring = dma->tx_ring2;
671 break; 674 break;
672 case 0x3000: 675 case 0xD000:
673 ring = dma->tx_ring3; 676 ring = dma->tx_ring3;
674 break; 677 break;
675 default: 678 default:
@@ -839,8 +842,18 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
839 /* We received an xmit status. */ 842 /* We received an xmit status. */
840 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data; 843 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
841 struct bcm43xx_xmitstatus stat; 844 struct bcm43xx_xmitstatus stat;
845 int i = 0;
842 846
843 stat.cookie = le16_to_cpu(hw->cookie); 847 stat.cookie = le16_to_cpu(hw->cookie);
848 while (stat.cookie == 0) {
849 if (unlikely(++i >= 10000)) {
850 assert(0);
851 break;
852 }
853 udelay(2);
854 barrier();
855 stat.cookie = le16_to_cpu(hw->cookie);
856 }
844 stat.flags = hw->flags; 857 stat.flags = hw->flags;
845 stat.cnt1 = hw->cnt1; 858 stat.cnt1 = hw->cnt1;
846 stat.cnt2 = hw->cnt2; 859 stat.cnt2 = hw->cnt2;
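
Note: the bcm43xx cookie remap exists because 0x0000, previously tx_ring0's controller ID, is a reserved "status not written yet" sentinel on the RX path (which the new spin-wait in dma_rx() relies on); ring IDs now occupy 0xA000-0xD000 and the slot still lives in the low 12 bits. Encode/decode as standalone C, with the ring IDs taken from the hunk above:

        #include <assert.h>
        #include <stdint.h>

        /* Upper 4 bits: controller ID (0xA..0xD); lower 12 bits: slot.
         * A zero cookie stays reserved for the RX-path sentinel. */
        static uint16_t make_cookie(uint16_t ring_id, uint16_t slot)
        {
                assert((slot & 0xF000) == 0);    /* slot must fit in 12 bits */
                assert((ring_id & 0x0FFF) == 0 &&
                       ring_id >= 0xA000 && ring_id <= 0xD000);
                return ring_id | slot;
        }

        static uint16_t cookie_ring(uint16_t c) { return c & 0xF000; }
        static uint16_t cookie_slot(uint16_t c) { return c & 0x0FFF; }
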
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1456759936c5..10e1a905c144 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -285,9 +285,9 @@ static int pci_device_suspend(struct device * dev, pm_message_t state)
285 * Default resume method for devices that have no driver provided resume, 285 * Default resume method for devices that have no driver provided resume,
286 * or not even a driver at all. 286 * or not even a driver at all.
287 */ 287 */
288static void pci_default_resume(struct pci_dev *pci_dev) 288static int pci_default_resume(struct pci_dev *pci_dev)
289{ 289{
290 int retval; 290 int retval = 0;
291 291
292 /* restore the PCI config space */ 292 /* restore the PCI config space */
293 pci_restore_state(pci_dev); 293 pci_restore_state(pci_dev);
@@ -297,18 +297,21 @@ static void pci_default_resume(struct pci_dev *pci_dev)
297 /* if the device was busmaster before the suspend, make it busmaster again */ 297 /* if the device was busmaster before the suspend, make it busmaster again */
298 if (pci_dev->is_busmaster) 298 if (pci_dev->is_busmaster)
299 pci_set_master(pci_dev); 299 pci_set_master(pci_dev);
300
301 return retval;
300} 302}
301 303
302static int pci_device_resume(struct device * dev) 304static int pci_device_resume(struct device * dev)
303{ 305{
306 int error;
304 struct pci_dev * pci_dev = to_pci_dev(dev); 307 struct pci_dev * pci_dev = to_pci_dev(dev);
305 struct pci_driver * drv = pci_dev->driver; 308 struct pci_driver * drv = pci_dev->driver;
306 309
307 if (drv && drv->resume) 310 if (drv && drv->resume)
308 drv->resume(pci_dev); 311 error = drv->resume(pci_dev);
309 else 312 else
310 pci_default_resume(pci_dev); 313 error = pci_default_resume(pci_dev);
311 return 0; 314 return error;
312} 315}
313 316
314static void pci_device_shutdown(struct device *dev) 317static void pci_device_shutdown(struct device *dev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 2329f941a0dc..12286275b1c8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -461,9 +461,23 @@ int
461pci_restore_state(struct pci_dev *dev) 461pci_restore_state(struct pci_dev *dev)
462{ 462{
463 int i; 463 int i;
 464 u32 val;
464 465
465 for (i = 0; i < 16; i++) 466 /*
466 pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]); 467 * The Base Address register should be programmed before the command
468 * register(s)
469 */
470 for (i = 15; i >= 0; i--) {
471 pci_read_config_dword(dev, i * 4, &val);
472 if (val != dev->saved_config_space[i]) {
473 printk(KERN_DEBUG "PM: Writing back config space on "
474 "device %s at offset %x (was %x, writing %x)\n",
475 pci_name(dev), i,
476 val, (int)dev->saved_config_space[i]);
477 pci_write_config_dword(dev,i * 4,
478 dev->saved_config_space[i]);
479 }
480 }
467 pci_restore_msi_state(dev); 481 pci_restore_msi_state(dev);
468 pci_restore_msix_state(dev); 482 pci_restore_msix_state(dev);
469 return 0; 483 return 0;
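
Note: pci_restore_state() now walks the saved config space from the highest dword down so the BARs (offsets 0x10-0x24, dwords 4-9) are reprogrammed before the command register at offset 0x04 can re-enable memory and I/O decode, and it skips dwords that still match to avoid gratuitous config writes. A userspace sketch of the loop shape, with arrays standing in for the config-space accessors:

        #include <stdint.h>
        #include <stdio.h>

        #define CFG_DWORDS 16

        static uint32_t live[CFG_DWORDS];        /* models pci_read_config_dword */
        static uint32_t saved[CFG_DWORDS];       /* snapshot taken at save time */

        static void restore_config_space(void)
        {
                /* Walk backwards so the BARs (dwords 4-9) are in place before
                 * the command register (dword 1) turns decode back on. */
                for (int i = CFG_DWORDS - 1; i >= 0; i--) {
                        if (live[i] == saved[i])
                                continue;        /* skip dwords that never changed */
                        printf("writing back offset %#x\n", i * 4);
                        live[i] = saved[i];      /* models pci_write_config_dword */
                }
        }
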
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 9b8bca1ac1f0..f16f92a6ec0f 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -2035,6 +2035,7 @@ static void mv_phy_reset(struct ata_port *ap)
2035static void mv_eng_timeout(struct ata_port *ap) 2035static void mv_eng_timeout(struct ata_port *ap)
2036{ 2036{
2037 struct ata_queued_cmd *qc; 2037 struct ata_queued_cmd *qc;
2038 unsigned long flags;
2038 2039
2039 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); 2040 printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
2040 DPRINTK("All regs @ start of eng_timeout\n"); 2041 DPRINTK("All regs @ start of eng_timeout\n");
@@ -2046,8 +2047,10 @@ static void mv_eng_timeout(struct ata_port *ap)
2046 ap->host_set->mmio_base, ap, qc, qc->scsicmd, 2047 ap->host_set->mmio_base, ap, qc, qc->scsicmd,
2047 &qc->scsicmd->cmnd); 2048 &qc->scsicmd->cmnd);
2048 2049
2050 spin_lock_irqsave(&ap->host_set->lock, flags);
2049 mv_err_intr(ap, 0); 2051 mv_err_intr(ap, 0);
2050 mv_stop_and_reset(ap); 2052 mv_stop_and_reset(ap);
2053 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2051 2054
2052 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 2055 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2053 if (qc->flags & ATA_QCFLAG_ACTIVE) { 2056 if (qc->flags & ATA_QCFLAG_ACTIVE) {
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index acde8868da21..fafe7c1265b3 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -185,6 +185,9 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
185 /* Select Power Management Mode */ 185 /* Select Power Management Mode */
186 pxa27x_ohci_select_pmm(inf->port_mode); 186 pxa27x_ohci_select_pmm(inf->port_mode);
187 187
188 if (inf->power_budget)
189 hcd->power_budget = inf->power_budget;
190
188 ohci_hcd_init(hcd_to_ohci(hcd)); 191 ohci_hcd_init(hcd_to_ohci(hcd));
189 192
190 retval = usb_add_hcd(hcd, pdev->resource[1].start, SA_INTERRUPT); 193 retval = usb_add_hcd(hcd, pdev->resource[1].start, SA_INTERRUPT);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 953eb8c171d6..47ba1a79adcd 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1745,7 +1745,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
1745 fbcon_redraw_move(vc, p, 0, t, count); 1745 fbcon_redraw_move(vc, p, 0, t, count);
1746 ypan_up_redraw(vc, t, count); 1746 ypan_up_redraw(vc, t, count);
1747 if (vc->vc_rows - b > 0) 1747 if (vc->vc_rows - b > 0)
1748 fbcon_redraw_move(vc, p, b - count, 1748 fbcon_redraw_move(vc, p, b,
1749 vc->vc_rows - b, b); 1749 vc->vc_rows - b, b);
1750 } else 1750 } else
1751 fbcon_redraw_move(vc, p, t + count, b - t - count, t); 1751 fbcon_redraw_move(vc, p, t + count, b - t - count, t);
diff --git a/fs/bio.c b/fs/bio.c
index 098c12b2d60a..6a0b9ad8f8c9 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -654,9 +654,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
654 write_to_vm, 0, &pages[cur_page], NULL); 654 write_to_vm, 0, &pages[cur_page], NULL);
655 up_read(&current->mm->mmap_sem); 655 up_read(&current->mm->mmap_sem);
656 656
657 if (ret < local_nr_pages) 657 if (ret < local_nr_pages) {
658 ret = -EFAULT;
658 goto out_unmap; 659 goto out_unmap;
659 660 }
660 661
661 offset = uaddr & ~PAGE_MASK; 662 offset = uaddr & ~PAGE_MASK;
662 for (j = cur_page; j < page_limit; j++) { 663 for (j = cur_page; j < page_limit; j++) {
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 85d166cdcae4..b55b4ea9a676 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -67,12 +67,13 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
67static int debugfs_mknod(struct inode *dir, struct dentry *dentry, 67static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
68 int mode, dev_t dev) 68 int mode, dev_t dev)
69{ 69{
70 struct inode *inode = debugfs_get_inode(dir->i_sb, mode, dev); 70 struct inode *inode;
71 int error = -EPERM; 71 int error = -EPERM;
72 72
73 if (dentry->d_inode) 73 if (dentry->d_inode)
74 return -EEXIST; 74 return -EEXIST;
75 75
76 inode = debugfs_get_inode(dir->i_sb, mode, dev);
76 if (inode) { 77 if (inode) {
77 d_instantiate(dentry, inode); 78 d_instantiate(dentry, inode);
78 dget(dentry); 79 dget(dentry);
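
Note: the debugfs change moves the allocation below the -EEXIST check; before it, debugfs_get_inode() ran in the declaration's initializer, so an inode was allocated and then leaked whenever the dentry already existed. The check-then-allocate shape in miniature, with made-up names:

        #include <stdlib.h>

        struct slot { void *payload; };

        static int fill_slot(struct slot *s)
        {
                void *p;

                if (s->payload)
                        return -1;               /* -EEXIST: bail before allocating */

                p = malloc(32);                  /* allocate only once the slot is
                                                  * known to be free */
                if (!p)
                        return -2;               /* -ENOMEM */
                s->payload = p;
                return 0;
        }
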
diff --git a/fs/locks.c b/fs/locks.c
index 6f99c0a6f836..ab61a8b54829 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -755,6 +755,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
755 if (request->fl_type == F_UNLCK) 755 if (request->fl_type == F_UNLCK)
756 goto out; 756 goto out;
757 757
758 error = -ENOMEM;
758 new_fl = locks_alloc_lock(); 759 new_fl = locks_alloc_lock();
759 if (new_fl == NULL) 760 if (new_fl == NULL)
760 goto out; 761 goto out;
@@ -781,6 +782,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
781 locks_copy_lock(new_fl, request); 782 locks_copy_lock(new_fl, request);
782 locks_insert_lock(&inode->i_flock, new_fl); 783 locks_insert_lock(&inode->i_flock, new_fl);
783 new_fl = NULL; 784 new_fl = NULL;
785 error = 0;
784 786
785out: 787out:
786 unlock_kernel(); 788 unlock_kernel();
diff --git a/include/asm-arm/arch-pxa/ohci.h b/include/asm-arm/arch-pxa/ohci.h
index 7da89569061e..e848a47128cd 100644
--- a/include/asm-arm/arch-pxa/ohci.h
+++ b/include/asm-arm/arch-pxa/ohci.h
@@ -11,6 +11,8 @@ struct pxaohci_platform_data {
11#define PMM_NPS_MODE 1 11#define PMM_NPS_MODE 1
12#define PMM_GLOBAL_MODE 2 12#define PMM_GLOBAL_MODE 2
13#define PMM_PERPORT_MODE 3 13#define PMM_PERPORT_MODE 3
14
15 int power_budget;
14}; 16};
15 17
16extern void pxa_set_ohci_info(struct pxaohci_platform_data *info); 18extern void pxa_set_ohci_info(struct pxaohci_platform_data *info);
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 9fcf0162d859..f6265c2a0dd2 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -329,7 +329,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
329#define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ 329#define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
330 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ 330 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
331 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ 331 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
332 CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO) 332 CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
333#define CPU_FTRS_COMPATIBLE (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \ 333#define CPU_FTRS_COMPATIBLE (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
334 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2) 334 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
335#endif 335#endif
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index 40c25e166a9b..1802775568b9 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -11,23 +11,24 @@
11#define __futex_atomic_fixup \ 11#define __futex_atomic_fixup \
12 ".section __ex_table,\"a\"\n" \ 12 ".section __ex_table,\"a\"\n" \
13 " .align 4\n" \ 13 " .align 4\n" \
14 " .long 0b,2b,1b,2b\n" \ 14 " .long 0b,4b,2b,4b,3b,4b\n" \
15 ".previous" 15 ".previous"
16#else /* __s390x__ */ 16#else /* __s390x__ */
17#define __futex_atomic_fixup \ 17#define __futex_atomic_fixup \
18 ".section __ex_table,\"a\"\n" \ 18 ".section __ex_table,\"a\"\n" \
19 " .align 8\n" \ 19 " .align 8\n" \
20 " .quad 0b,2b,1b,2b\n" \ 20 " .quad 0b,4b,2b,4b,3b,4b\n" \
21 ".previous" 21 ".previous"
22#endif /* __s390x__ */ 22#endif /* __s390x__ */
23 23
24#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ 24#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
25 asm volatile(" l %1,0(%6)\n" \ 25 asm volatile(" sacf 256\n" \
26 "0: " insn \ 26 "0: l %1,0(%6)\n" \
27 " cs %1,%2,0(%6)\n" \ 27 "1: " insn \
28 "1: jl 0b\n" \ 28 "2: cs %1,%2,0(%6)\n" \
29 "3: jl 1b\n" \
29 " lhi %0,0\n" \ 30 " lhi %0,0\n" \
30 "2:\n" \ 31 "4: sacf 0\n" \
31 __futex_atomic_fixup \ 32 __futex_atomic_fixup \
32 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ 33 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
33 "=m" (*uaddr) \ 34 "=m" (*uaddr) \
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ad133fcfb239..1713ace808bf 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -21,7 +21,7 @@ typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
21typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *); 21typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
22typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *); 22typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
23 23
24typedef int (elevator_init_fn) (request_queue_t *, elevator_t *); 24typedef void *(elevator_init_fn) (request_queue_t *, elevator_t *);
25typedef void (elevator_exit_fn) (elevator_t *); 25typedef void (elevator_exit_fn) (elevator_t *);
26 26
27struct elevator_ops 27struct elevator_ops
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index dd7d627bf66f..c115e9e840b4 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -1114,8 +1114,11 @@ static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
1114 1114
1115 mmsg->mfa = readl(c->in_port); 1115 mmsg->mfa = readl(c->in_port);
1116 if (unlikely(mmsg->mfa >= c->in_queue.len)) { 1116 if (unlikely(mmsg->mfa >= c->in_queue.len)) {
1117 u32 mfa = mmsg->mfa;
1118
1117 mempool_free(mmsg, c->in_msg.mempool); 1119 mempool_free(mmsg, c->in_msg.mempool);
1118 if(mmsg->mfa == I2O_QUEUE_EMPTY) 1120
1121 if (mfa == I2O_QUEUE_EMPTY)
1119 return ERR_PTR(-EBUSY); 1122 return ERR_PTR(-EBUSY);
1120 return ERR_PTR(-EFAULT); 1123 return ERR_PTR(-EFAULT);
1121 } 1124 }
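
Note: the i2o_msg_get() hunk is a use-after-free repair: mmsg->mfa was read after mempool_free() had already returned the message to the pool, so the value is now copied to a local first. The general rule, sketched with malloc/free:

        #include <stdlib.h>

        struct msg { unsigned int mfa; };

        static int classify_and_free(struct msg *m)
        {
                unsigned int mfa = m->mfa;       /* copy out what we need ... */
                free(m);                         /* ... before the storage dies */
                return mfa == 0 ? -1 : -2;       /* no touch of *m after free() */
        }
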
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 6a7621b2b12b..f5fdca1d67e6 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -36,6 +36,7 @@
36#include <linux/nodemask.h> 36#include <linux/nodemask.h>
37 37
38struct vm_area_struct; 38struct vm_area_struct;
39struct mm_struct;
39 40
40#ifdef CONFIG_NUMA 41#ifdef CONFIG_NUMA
41 42
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 4877e35ae202..936ef82ed76a 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -50,7 +50,7 @@
50extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags); 50extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
51extern acpi_status pci_osc_support_set(u32 flags); 51extern acpi_status pci_osc_support_set(u32 flags);
52#else 52#else
53#if !defined(acpi_status) 53#if !defined(AE_ERROR)
54typedef u32 acpi_status; 54typedef u32 acpi_status;
55#define AE_ERROR (acpi_status) (0x0001) 55#define AE_ERROR (acpi_status) (0x0001)
56#endif 56#endif
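
Note: the pci-acpi.h guard changes because the preprocessor only sees macros: acpi_status is a typedef, so `#if !defined(acpi_status)` was always true and the fallback typedef could collide with the real one. Keying the guard on AE_ERROR, a macro that travels with the typedef, makes it effective. In miniature:

        /* Preprocessor conditionals test macros, never C-level names:
         * a typedef is invisible to #if defined(). */
        #if !defined(AE_ERROR)
        typedef unsigned int acpi_status;        /* the guarded definitions */
        #define AE_ERROR ((acpi_status)0x0001)
        #endif
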
diff --git a/kernel/exit.c b/kernel/exit.c
index e95b93282210..e06d0c10a24e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -881,14 +881,6 @@ fastcall NORET_TYPE void do_exit(long code)
881 881
882 tsk->flags |= PF_EXITING; 882 tsk->flags |= PF_EXITING;
883 883
884 /*
885 * Make sure we don't try to process any timer firings
886 * while we are already exiting.
887 */
888 tsk->it_virt_expires = cputime_zero;
889 tsk->it_prof_expires = cputime_zero;
890 tsk->it_sched_expires = 0;
891
892 if (unlikely(in_atomic())) 884 if (unlikely(in_atomic()))
893 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", 885 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
894 current->comm, current->pid, 886 current->comm, current->pid,
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 520f6c59948d..d38d9ec3276c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -555,9 +555,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
555 struct cpu_timer_list *next; 555 struct cpu_timer_list *next;
556 unsigned long i; 556 unsigned long i;
557 557
558 if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
559 return;
560
561 head = (CPUCLOCK_PERTHREAD(timer->it_clock) ? 558 head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
562 p->cpu_timers : p->signal->cpu_timers); 559 p->cpu_timers : p->signal->cpu_timers);
563 head += CPUCLOCK_WHICH(timer->it_clock); 560 head += CPUCLOCK_WHICH(timer->it_clock);
@@ -1173,6 +1170,9 @@ static void check_process_timers(struct task_struct *tsk,
1173 } 1170 }
1174 t = tsk; 1171 t = tsk;
1175 do { 1172 do {
1173 if (unlikely(t->flags & PF_EXITING))
1174 continue;
1175
1176 ticks = cputime_add(cputime_add(t->utime, t->stime), 1176 ticks = cputime_add(cputime_add(t->utime, t->stime),
1177 prof_left); 1177 prof_left);
1178 if (!cputime_eq(prof_expires, cputime_zero) && 1178 if (!cputime_eq(prof_expires, cputime_zero) &&
@@ -1193,11 +1193,7 @@ static void check_process_timers(struct task_struct *tsk,
1193 t->it_sched_expires > sched)) { 1193 t->it_sched_expires > sched)) {
1194 t->it_sched_expires = sched; 1194 t->it_sched_expires = sched;
1195 } 1195 }
1196 1196 } while ((t = next_thread(t)) != tsk);
1197 do {
1198 t = next_thread(t);
1199 } while (unlikely(t->flags & PF_EXITING));
1200 } while (t != tsk);
1201 } 1197 }
1202} 1198}
1203 1199
@@ -1289,30 +1285,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
1289 1285
1290#undef UNEXPIRED 1286#undef UNEXPIRED
1291 1287
1292 BUG_ON(tsk->exit_state);
1293
1294 /* 1288 /*
1295 * Double-check with locks held. 1289 * Double-check with locks held.
1296 */ 1290 */
1297 read_lock(&tasklist_lock); 1291 read_lock(&tasklist_lock);
1298 spin_lock(&tsk->sighand->siglock); 1292 if (likely(tsk->signal != NULL)) {
1293 spin_lock(&tsk->sighand->siglock);
1299 1294
1300 /* 1295 /*
1301 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N] 1296 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
1302 * all the timers that are firing, and put them on the firing list. 1297 * all the timers that are firing, and put them on the firing list.
1303 */ 1298 */
1304 check_thread_timers(tsk, &firing); 1299 check_thread_timers(tsk, &firing);
1305 check_process_timers(tsk, &firing); 1300 check_process_timers(tsk, &firing);
1306 1301
1307 /* 1302 /*
1308 * We must release these locks before taking any timer's lock. 1303 * We must release these locks before taking any timer's lock.
1309 * There is a potential race with timer deletion here, as the 1304 * There is a potential race with timer deletion here, as the
1310 * siglock now protects our private firing list. We have set 1305 * siglock now protects our private firing list. We have set
1311 * the firing flag in each timer, so that a deletion attempt 1306 * the firing flag in each timer, so that a deletion attempt
1312 * that gets the timer lock before we do will give it up and 1307 * that gets the timer lock before we do will give it up and
1313 * spin until we've taken care of that timer below. 1308 * spin until we've taken care of that timer below.
1314 */ 1309 */
1315 spin_unlock(&tsk->sighand->siglock); 1310 spin_unlock(&tsk->sighand->siglock);
1311 }
1316 read_unlock(&tasklist_lock); 1312 read_unlock(&tasklist_lock);
1317 1313
1318 /* 1314 /*
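
Two details in these posix-cpu-timers hunks are easy to miss. First, the thread-group walk now skips exiting threads with a plain continue inside the single do/while, replacing the nested advance loop; continue in a do/while jumps to the controlling expression, so the (t = next_thread(t)) != tsk advance still runs. Second, the hard BUG_ON(tsk->exit_state) becomes a tolerant if (tsk->signal != NULL) around the locked section. The loop idiom, with hypothetical types:

    #include <stdbool.h>

    struct thread { bool exiting; long utime; struct thread *next; };

    /* Circular thread-group walk that skips exiting members.  The
     * continue still advances the cursor, because in a do/while it
     * re-evaluates the condition, which is where the advance lives. */
    static long group_utime(struct thread *first)
    {
        long total = 0;
        struct thread *t = first;

        do {
            if (t->exiting)
                continue;        /* advance happens in the condition */
            total += t->utime;
        } while ((t = t->next) != first);

        return total;
    }
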
diff --git a/mm/shmem.c b/mm/shmem.c
index 4c5e68e4e9ae..1e43c8a865ba 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1780,6 +1780,7 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1780 if (!simple_empty(dentry)) 1780 if (!simple_empty(dentry))
1781 return -ENOTEMPTY; 1781 return -ENOTEMPTY;
1782 1782
1783 dentry->d_inode->i_nlink--;
1783 dir->i_nlink--; 1784 dir->i_nlink--;
1784 return shmem_unlink(dir, dentry); 1785 return shmem_unlink(dir, dentry);
1785} 1786}
@@ -2102,6 +2103,7 @@ static int shmem_fill_super(struct super_block *sb,
2102 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 2103 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2103 sb->s_magic = TMPFS_MAGIC; 2104 sb->s_magic = TMPFS_MAGIC;
2104 sb->s_op = &shmem_ops; 2105 sb->s_op = &shmem_ops;
2106 sb->s_time_gran = 1;
2105 2107
2106 inode = shmem_get_inode(sb, S_IFDIR | mode, 0); 2108 inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2107 if (!inode) 2109 if (!inode)
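
Two independent tmpfs fixes. The rmdir one restores the victim directory's own link-count decrement: a directory holds a link on itself through its "." entry, so without this the inode's link count never reaches zero and the inode is never freed. The other line sets s_time_gran to 1, declaring nanosecond timestamp granularity for the superblock. The link bookkeeping, sketched with hypothetical types:

    struct inode { unsigned int i_nlink; };

    /* rmdir link accounting (sketch).  The victim loses the link it
     * holds on itself ("."); the parent loses the link the victim's
     * ".." entry held on it.  The follow-up unlink then drops the
     * named entry's link on the victim. */
    static void rmdir_drop_links(struct inode *parent, struct inode *victim)
    {
        victim->i_nlink--;
        parent->i_nlink--;
    }
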
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4649a63a8cb6..440a733fe2e9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1061,7 +1061,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
1061loop_again: 1061loop_again:
1062 total_scanned = 0; 1062 total_scanned = 0;
1063 nr_reclaimed = 0; 1063 nr_reclaimed = 0;
1064 sc.may_writepage = !laptop_mode, 1064 sc.may_writepage = !laptop_mode;
1065 sc.nr_mapped = read_page_state(nr_mapped); 1065 sc.nr_mapped = read_page_state(nr_mapped);
1066 1066
1067 inc_page_state(pageoutrun); 1067 inc_page_state(pageoutrun);
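
The vmscan fix is a one-character bug that C compiles without complaint: with the comma operator, "sc.may_writepage = !laptop_mode, sc.nr_mapped = ..." is a single legal expression statement. Here it even behaved the same as two statements, which is why it lingered; it was only misleading to read. A demonstration:

    #include <stdio.h>

    int main(void)
    {
        int a = 0, b = 0;

        /* Comma operator: both assignments run, left to right, as
         * one expression.  Legal, silent, and easy to misread. */
        a = 1, b = 2;

        printf("%d %d\n", a, b);   /* prints: 1 2 */
        return 0;
    }
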
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index b5981e5f6b00..8c211c58893b 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -452,6 +452,7 @@ found:
452 (unsigned long long) 452 (unsigned long long)
453 avr->dccpavr_ack_ackno); 453 avr->dccpavr_ack_ackno);
454 dccp_ackvec_throw_record(av, avr); 454 dccp_ackvec_throw_record(av, avr);
455 break;
455 } 456 }
456 /* 457 /*
457 * If it wasn't received, continue scanning... we might 458 * If it wasn't received, continue scanning... we might
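
The dccp fix adds a break once the matching ack vector record has been handled: dccp_ackvec_throw_record() discards records, so continuing the scan from an element that has just been thrown away risks walking freed memory, besides being pointless. The shape of the pattern, with made-up types:

    struct rec { unsigned long ackno; struct rec *next; };

    static void discard(struct rec *r) { (void)r; /* unlink + free here */ }

    static void handle_ack(struct rec *head, unsigned long ackno)
    {
        struct rec *r;

        for (r = head; r != NULL; r = r->next) {
            if (r->ackno == ackno) {
                discard(r);
                break;    /* r is gone; do not iterate from it */
            }
        }
    }
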
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 0923add122b4..9f0bb529ab70 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -116,6 +116,7 @@ sr_failed:
116 116
117too_many_hops: 117too_many_hops:
118 /* Tell the sender its packet died... */ 118 /* Tell the sender its packet died... */
119 IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
119 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); 120 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
120drop: 121drop:
121 kfree_skb(skb); 122 kfree_skb(skb);
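
The ip_forward change is pure accounting: when the TTL check fails, bump the InHdrErrors counter before emitting the ICMP time-exceeded, so the SNMP statistics match the errors the stack actually generates. Trivial shape, with a plain counter standing in for IP_INC_STATS_BH():

    static unsigned long in_hdr_errors;   /* IPSTATS_MIB_INHDRERRORS analogue */

    static void too_many_hops(void)
    {
        in_hdr_errors++;                  /* account the error first... */
        /* ...then send ICMP_TIME_EXCEEDED/ICMP_EXC_TTL and drop. */
    }
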
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4a538bc1683d..b5521a9d3dc1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1649,7 +1649,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
1649 * Hence, we can detect timed out packets during fast 1649 * Hence, we can detect timed out packets during fast
1650 * retransmit without falling to slow start. 1650 * retransmit without falling to slow start.
1651 */ 1651 */
1652 if (tcp_head_timedout(sk, tp)) { 1652 if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
1653 struct sk_buff *skb; 1653 struct sk_buff *skb;
1654 1654
1655 skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint 1655 skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1662,8 +1662,6 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
1662 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { 1662 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
1663 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1663 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1664 tp->lost_out += tcp_skb_pcount(skb); 1664 tp->lost_out += tcp_skb_pcount(skb);
1665 if (IsReno(tp))
1666 tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
1667 1665
1668 /* clear xmit_retrans hint */ 1666 /* clear xmit_retrans hint */
1669 if (tp->retransmit_skb_hint && 1667 if (tp->retransmit_skb_hint &&
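
Finally, the tcp_input.c hunks gate the head-timeout loss marking on SACK being in use (!IsReno(tp)); with Reno flows excluded from the loop, the Reno-specific tcp_remove_reno_sacks() call inside it became unreachable and is dropped. The guard in sketch form:

    #include <stdbool.h>

    struct tcp_state { bool is_reno; bool head_timed_out; };

    /* Only SACK connections get the "head timed out, mark it lost"
     * heuristic; the hunk disables it for (New)Reno.  (Sketch.) */
    static bool mark_head_lost(const struct tcp_state *tp)
    {
        return !tp->is_reno && tp->head_timed_out;
    }
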