-rw-r--r-- Documentation/DocBook/kernel-hacking.tmpl | 4
-rw-r--r-- Documentation/laptops/thinkpad-acpi.txt | 127
-rw-r--r-- MAINTAINERS | 4
-rw-r--r-- arch/parisc/kernel/entry.S | 2
-rw-r--r-- arch/parisc/kernel/module.c | 50
-rw-r--r-- arch/x86/include/asm/efi.h | 5
-rw-r--r-- arch/x86/include/asm/irqflags.h | 8
-rw-r--r-- arch/x86/include/asm/uv/uv_hub.h | 9
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 3
-rw-r--r-- arch/x86/kernel/apic/x2apic_cluster.c | 2
-rw-r--r-- arch/x86/kernel/apic/x2apic_phys.c | 2
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 38
-rw-r--r-- arch/x86/kernel/apm_32.c | 2
-rw-r--r-- arch/x86/kernel/efi.c | 2
-rw-r--r-- arch/x86/kernel/efi_64.c | 6
-rw-r--r-- arch/x86/kernel/head_32.S | 6
-rw-r--r-- arch/x86/kernel/reboot.c | 34
-rw-r--r-- arch/x86/kernel/vmlinux.lds.S | 16
-rw-r--r-- arch/x86/lib/msr.c | 26
-rw-r--r-- arch/x86/mm/pageattr.c | 39
-rw-r--r-- arch/x86/mm/pgtable.c | 1
-rw-r--r-- drivers/acpi/acpi_memhotplug.c | 34
-rw-r--r-- drivers/acpi/acpica/acobject.h | 1
-rw-r--r-- drivers/acpi/acpica/dsopcode.c | 24
-rw-r--r-- drivers/acpi/acpica/exfldio.c | 6
-rw-r--r-- drivers/acpi/osl.c | 25
-rw-r--r-- drivers/acpi/system.c | 2
-rw-r--r-- drivers/char/agp/parisc-agp.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq.c | 27
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 6
-rw-r--r-- drivers/edac/amd64_edac.c | 7
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 2
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 1
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 7
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 8
-rw-r--r-- drivers/input/serio/hp_sdc_mlc.c | 2
-rw-r--r-- drivers/isdn/mISDN/l1oip_core.c | 2
-rw-r--r-- drivers/md/linear.c | 2
-rw-r--r-- drivers/md/md.c | 144
-rw-r--r-- drivers/md/md.h | 2
-rw-r--r-- drivers/md/multipath.c | 5
-rw-r--r-- drivers/md/raid0.c | 1
-rw-r--r-- drivers/md/raid1.c | 7
-rw-r--r-- drivers/md/raid10.c | 4
-rw-r--r-- drivers/md/raid5.c | 51
-rw-r--r-- drivers/mfd/twl4030-irq.c | 55
-rw-r--r-- drivers/net/3c515.c | 4
-rw-r--r-- drivers/net/3c59x.c | 10
-rw-r--r-- drivers/net/eexpress.c | 6
-rw-r--r-- drivers/net/ehea/ehea.h | 2
-rw-r--r-- drivers/net/ehea/ehea_main.c | 3
-rw-r--r-- drivers/net/gianfar_ethtool.c | 10
-rw-r--r-- drivers/net/igbvf/vf.c | 4
-rw-r--r-- drivers/net/ixgbe/ixgbe.h | 2
-rw-r--r-- drivers/net/ixgbe/ixgbe_82598.c | 67
-rw-r--r-- drivers/net/ixgbe/ixgbe_ethtool.c | 11
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 25
-rw-r--r-- drivers/net/ixgbe/ixgbe_type.h | 8
-rw-r--r-- drivers/net/mlx4/en_tx.c | 1
-rw-r--r-- drivers/net/netxen/netxen_nic_main.c | 37
-rw-r--r-- drivers/net/pcnet32.c | 30
-rw-r--r-- drivers/net/ppp_generic.c | 34
-rw-r--r-- drivers/net/pppoe.c | 1
-rw-r--r-- drivers/net/pppol2tp.c | 1
-rw-r--r-- drivers/net/s6gmac.c | 2
-rw-r--r-- drivers/net/sky2.c | 14
-rw-r--r-- drivers/net/sky2.h | 1
-rw-r--r-- drivers/net/tulip/de4x5.c | 6
-rw-r--r-- drivers/net/wireless/airo.c | 13
-rw-r--r-- drivers/net/wireless/ath/ath9k/eeprom.c | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-3945.h | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-core.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-debugfs.c | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-dev.h | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-sta.c | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-tx.c | 14
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl3945-base.c | 7
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/commands.c | 1
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/netdev.c | 6
-rw-r--r-- drivers/net/wireless/libertas/11d.c | 2
-rw-r--r-- drivers/net/wireless/libertas/assoc.c | 18
-rw-r--r-- drivers/net/wireless/libertas/scan.c | 3
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_mac.c | 2
-rw-r--r-- drivers/parisc/ccio-dma.c | 1
-rw-r--r-- drivers/parisc/dino.c | 2
-rw-r--r-- drivers/parisc/eisa_eeprom.c | 2
-rw-r--r-- drivers/parisc/hppb.c | 9
-rw-r--r-- drivers/parisc/lba_pci.c | 2
-rw-r--r-- drivers/parisc/pdc_stable.c | 2
-rw-r--r-- drivers/pci/setup-res.c | 4
-rw-r--r-- drivers/platform/x86/Kconfig | 25
-rw-r--r-- drivers/platform/x86/eeepc-laptop.c | 9
-rw-r--r-- drivers/platform/x86/hp-wmi.c | 12
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 390
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 68
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 8
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 56
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 25
-rw-r--r-- drivers/s390/scsi/zfcp_sysfs.c | 7
-rw-r--r-- drivers/scsi/libfc/fc_exch.c | 23
-rw-r--r-- drivers/scsi/libiscsi.c | 4
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 147
-rw-r--r-- drivers/scsi/libsas/sas_port.c | 19
-rw-r--r-- drivers/scsi/qla4xxx/ql4_dbg.c | 15
-rw-r--r-- drivers/scsi/qla4xxx/ql4_def.h | 9
-rw-r--r-- drivers/scsi/qla4xxx/ql4_fw.h | 7
-rw-r--r-- drivers/scsi/qla4xxx/ql4_iocb.c | 133
-rw-r--r-- drivers/scsi/qla4xxx/ql4_isr.c | 145
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 10
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 40
-rw-r--r-- drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 4
-rw-r--r-- drivers/scsi/sd.c | 20
-rw-r--r-- drivers/video/console/sticore.c | 9
-rw-r--r-- drivers/watchdog/coh901327_wdt.c | 11
-rw-r--r-- fs/cifs/CHANGES | 7
-rw-r--r-- fs/cifs/README | 25
-rw-r--r-- fs/cifs/cifs_dfs_ref.c | 12
-rw-r--r-- fs/cifs/cifs_unicode.c | 2
-rw-r--r-- fs/cifs/cifsfs.c | 4
-rw-r--r-- fs/cifs/connect.c | 55
-rw-r--r-- fs/nilfs2/mdt.c | 4
-rw-r--r-- fs/nilfs2/segment.c | 16
-rw-r--r-- include/acpi/acpiosxf.h | 4
-rw-r--r-- include/drm/drm_pciids.h | 5
-rw-r--r-- include/linux/inetdevice.h | 2
-rw-r--r-- include/linux/perf_counter.h | 13
-rw-r--r-- include/net/bluetooth/rfcomm.h | 12
-rw-r--r-- include/net/cfg80211.h | 5
-rw-r--r-- init/Kconfig | 11
-rw-r--r-- kernel/fork.c | 4
-rw-r--r-- kernel/panic.c | 1
-rw-r--r-- kernel/perf_counter.c | 101
-rw-r--r-- kernel/posix-timers.c | 7
-rw-r--r-- kernel/sched_cpupri.c | 15
-rw-r--r-- kernel/sched_fair.c | 32
-rw-r--r-- kernel/signal.c | 25
-rw-r--r-- kernel/trace/ftrace.c | 4
-rw-r--r-- kernel/trace/trace.c | 12
-rw-r--r-- kernel/trace/trace_events.c | 2
-rw-r--r-- kernel/trace/trace_functions_graph.c | 11
-rw-r--r-- kernel/trace/trace_printk.c | 2
-rw-r--r-- lib/flex_array.c | 2
-rw-r--r-- net/bluetooth/rfcomm/core.c | 27
-rw-r--r-- net/bluetooth/rfcomm/sock.c | 2
-rw-r--r-- net/core/dev.c | 25
-rw-r--r-- net/core/net_namespace.c | 2
-rw-r--r-- net/ipv4/arp.c | 4
-rw-r--r-- net/mac80211/mlme.c | 2
-rw-r--r-- net/mac80211/pm.c | 24
-rw-r--r-- net/mac80211/rx.c | 12
-rw-r--r-- net/netlabel/netlabel_kapi.c | 2
-rw-r--r-- net/wireless/reg.c | 9
-rw-r--r-- net/wireless/reg.h | 3
-rw-r--r-- net/wireless/scan.c | 4
-rwxr-xr-x scripts/recordmcount.pl | 5
-rw-r--r-- tools/perf/Makefile | 2
-rw-r--r-- tools/perf/builtin-report.c | 24
-rw-r--r-- tools/perf/builtin-top.c | 1
-rw-r--r-- tools/perf/util/quote.c | 2
-rw-r--r-- tools/perf/util/symbol.c | 2
165 files changed, 1691 insertions, 1301 deletions
diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl
index a50d6cd58573..992e67e6be7f 100644
--- a/Documentation/DocBook/kernel-hacking.tmpl
+++ b/Documentation/DocBook/kernel-hacking.tmpl
@@ -449,8 +449,8 @@ printk(KERN_INFO "i = %u\n", i);
449 </para> 449 </para>
450 450
451 <programlisting> 451 <programlisting>
452__u32 ipaddress; 452__be32 ipaddress;
453printk(KERN_INFO "my ip: %d.%d.%d.%d\n", NIPQUAD(ipaddress)); 453printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
454 </programlisting> 454 </programlisting>
455 455
456 <para> 456 <para>
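
For reference, a minimal sketch of the %pI4 printk extension the updated documentation describes. This is illustrative and not part of the patch; it assumes an in-kernel caller holding an IPv4 address in a big-endian __be32, exactly as the revised example requires.

#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>

static void example_print_ipv4(void)
{
	/* 192.168.0.1 stored in network byte order, as the docs now require */
	__be32 ipaddress = cpu_to_be32(0xc0a80001);

	/* %pI4 takes a pointer to the __be32 and prints dotted-quad form */
	printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
}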
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index f2296ecedb89..e2ddcdeb61b6 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -36,8 +36,6 @@ detailed description):
36 - Bluetooth enable and disable 36 - Bluetooth enable and disable
37 - video output switching, expansion control 37 - video output switching, expansion control
38 - ThinkLight on and off 38 - ThinkLight on and off
39 - limited docking and undocking
40 - UltraBay eject
41 - CMOS/UCMS control 39 - CMOS/UCMS control
42 - LED control 40 - LED control
43 - ACPI sounds 41 - ACPI sounds
@@ -729,131 +727,6 @@ cannot be read or if it is unknown, thinkpad-acpi will report it as "off".
729It is impossible to know if the status returned through sysfs is valid. 727It is impossible to know if the status returned through sysfs is valid.
730 728
731 729
732Docking / undocking -- /proc/acpi/ibm/dock
733------------------------------------------
734
735Docking and undocking (e.g. with the X4 UltraBase) requires some
736actions to be taken by the operating system to safely make or break
737the electrical connections with the dock.
738
739The docking feature of this driver generates the following ACPI events:
740
741 ibm/dock GDCK 00000003 00000001 -- eject request
742 ibm/dock GDCK 00000003 00000002 -- undocked
743 ibm/dock GDCK 00000000 00000003 -- docked
744
745NOTE: These events will only be generated if the laptop was docked
746when originally booted. This is due to the current lack of support for
747hot plugging of devices in the Linux ACPI framework. If the laptop was
748booted while not in the dock, the following message is shown in the
749logs:
750
751 Mar 17 01:42:34 aero kernel: thinkpad_acpi: dock device not present
752
753In this case, no dock-related events are generated but the dock and
754undock commands described below still work. They can be executed
755manually or triggered by Fn key combinations (see the example acpid
756configuration files included in the driver tarball package available
757on the web site).
758
759When the eject request button on the dock is pressed, the first event
760above is generated. The handler for this event should issue the
761following command:
762
763 echo undock > /proc/acpi/ibm/dock
764
765After the LED on the dock goes off, it is safe to eject the laptop.
766Note: if you pressed this key by mistake, go ahead and eject the
767laptop, then dock it back in. Otherwise, the dock may not function as
768expected.
769
770When the laptop is docked, the third event above is generated. The
771handler for this event should issue the following command to fully
772enable the dock:
773
774 echo dock > /proc/acpi/ibm/dock
775
776The contents of the /proc/acpi/ibm/dock file shows the current status
777of the dock, as provided by the ACPI framework.
778
779The docking support in this driver does not take care of enabling or
780disabling any other devices you may have attached to the dock. For
781example, a CD drive plugged into the UltraBase needs to be disabled or
782enabled separately. See the provided example acpid configuration files
783for how this can be accomplished.
784
785There is no support yet for PCI devices that may be attached to a
786docking station, e.g. in the ThinkPad Dock II. The driver currently
787does not recognize, enable or disable such devices. This means that
788the only docking stations currently supported are the X-series
789UltraBase docks and "dumb" port replicators like the Mini Dock (the
790latter don't need any ACPI support, actually).
791
792
793UltraBay eject -- /proc/acpi/ibm/bay
794------------------------------------
795
796Inserting or ejecting an UltraBay device requires some actions to be
797taken by the operating system to safely make or break the electrical
798connections with the device.
799
800This feature generates the following ACPI events:
801
802 ibm/bay MSTR 00000003 00000000 -- eject request
803 ibm/bay MSTR 00000001 00000000 -- eject lever inserted
804
805NOTE: These events will only be generated if the UltraBay was present
806when the laptop was originally booted (on the X series, the UltraBay
807is in the dock, so it may not be present if the laptop was undocked).
808This is due to the current lack of support for hot plugging of devices
809in the Linux ACPI framework. If the laptop was booted without the
810UltraBay, the following message is shown in the logs:
811
812 Mar 17 01:42:34 aero kernel: thinkpad_acpi: bay device not present
813
814In this case, no bay-related events are generated but the eject
815command described below still works. It can be executed manually or
816triggered by a hot key combination.
817
818Sliding the eject lever generates the first event shown above. The
819handler for this event should take whatever actions are necessary to
820shut down the device in the UltraBay (e.g. call idectl), then issue
821the following command:
822
823 echo eject > /proc/acpi/ibm/bay
824
825After the LED on the UltraBay goes off, it is safe to pull out the
826device.
827
828When the eject lever is inserted, the second event above is
829generated. The handler for this event should take whatever actions are
830necessary to enable the UltraBay device (e.g. call idectl).
831
832The contents of the /proc/acpi/ibm/bay file shows the current status
833of the UltraBay, as provided by the ACPI framework.
834
835EXPERIMENTAL warm eject support on the 600e/x, A22p and A3x (To use
836this feature, you need to supply the experimental=1 parameter when
837loading the module):
838
839These models do not have a button near the UltraBay device to request
840a hot eject but rather require the laptop to be put to sleep
841(suspend-to-ram) before the bay device is ejected or inserted).
842The sequence of steps to eject the device is as follows:
843
844 echo eject > /proc/acpi/ibm/bay
845 put the ThinkPad to sleep
846 remove the drive
847 resume from sleep
848 cat /proc/acpi/ibm/bay should show that the drive was removed
849
850On the A3x, both the UltraBay 2000 and UltraBay Plus devices are
851supported. Use "eject2" instead of "eject" for the second bay.
852
853Note: the UltraBay eject support on the 600e/x, A22p and A3x is
854EXPERIMENTAL and may not work as expected. USE WITH CAUTION!
855
856
857CMOS/UCMS control 730CMOS/UCMS control
858----------------- 731-----------------
859 732
diff --git a/MAINTAINERS b/MAINTAINERS
index d6befb2c470f..b1114cfac6bf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4995,7 +4995,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
4995S: Maintained 4995S: Maintained
4996 4996
4997TTY LAYER 4997TTY LAYER
4998S: Orphan 4998M: Greg Kroah-Hartman <gregkh@suse.de>
4999S: Maintained
5000T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
4999F: drivers/char/tty_* 5001F: drivers/char/tty_*
5000F: drivers/serial/serial_core.c 5002F: drivers/serial/serial_core.c
5001F: include/linux/serial_core.h 5003F: include/linux/serial_core.h
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ae3e70cd1e14..e552e547cb93 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -553,7 +553,7 @@
553 * on most of those machines only handles cache transactions. 553 * on most of those machines only handles cache transactions.
554 */ 554 */
555 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0 555 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
556 depi 1,12,1,\prot 556 depdi 1,12,1,\prot
557 557
558 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 558 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
559 convert_for_tlb_insert20 \pte 559 convert_for_tlb_insert20 \pte
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index ef5caf2e6ed0..61ee0eec4e69 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -86,8 +86,12 @@
86 * the bottom of the table, which has a maximum signed displacement of 86 * the bottom of the table, which has a maximum signed displacement of
87 * 0x3fff; however, since we're only going forward, this becomes 87 * 0x3fff; however, since we're only going forward, this becomes
88 * 0x1fff, and thus, since each GOT entry is 8 bytes long we can have 88 * 0x1fff, and thus, since each GOT entry is 8 bytes long we can have
89 * at most 1023 entries */ 89 * at most 1023 entries.
90#define MAX_GOTS 1023 90 * To overcome this 14bit displacement with some kernel modules, we'll
 91 * use instead the unusual 16bit displacement method (see reassemble_16a)
92 * which gives us a maximum positive displacement of 0x7fff, and as such
93 * allows us to allocate up to 4095 GOT entries. */
94#define MAX_GOTS 4095
91 95
92/* three functions to determine where in the module core 96/* three functions to determine where in the module core
93 * or init pieces the location is */ 97 * or init pieces the location is */
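
The new limit follows directly from the displacement arithmetic in the comment above. A small stand-alone check (illustrative only, meant to be compiled outside the kernel) makes the two numbers explicit: 8-byte GOT entries reached through a forward-only displacement.

#include <stdio.h>

#define GOT_ENTRY_SIZE	8	/* each 64-bit GOT entry is 8 bytes */
#define MAX_DISP_14	0x1fff	/* forward reach of the 14-bit form */
#define MAX_DISP_16	0x7fff	/* forward reach of the 16-bit "16a" form */

int main(void)
{
	/* 0x1fff / 8 = 1023 entries with the old encoding */
	printf("14-bit: %d GOT entries\n", MAX_DISP_14 / GOT_ENTRY_SIZE);
	/* 0x7fff / 8 = 4095 entries with the new encoding, hence MAX_GOTS */
	printf("16-bit: %d GOT entries\n", MAX_DISP_16 / GOT_ENTRY_SIZE);
	return 0;
}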
@@ -145,12 +149,40 @@ struct stub_entry {
145/* The reassemble_* functions prepare an immediate value for 149/* The reassemble_* functions prepare an immediate value for
146 insertion into an opcode. pa-risc uses all sorts of weird bitfields 150 insertion into an opcode. pa-risc uses all sorts of weird bitfields
147 in the instruction to hold the value. */ 151 in the instruction to hold the value. */
152static inline int sign_unext(int x, int len)
153{
154 int len_ones;
155
156 len_ones = (1 << len) - 1;
157 return x & len_ones;
158}
159
160static inline int low_sign_unext(int x, int len)
161{
162 int sign, temp;
163
164 sign = (x >> (len-1)) & 1;
165 temp = sign_unext(x, len-1);
166 return (temp << 1) | sign;
167}
168
148static inline int reassemble_14(int as14) 169static inline int reassemble_14(int as14)
149{ 170{
150 return (((as14 & 0x1fff) << 1) | 171 return (((as14 & 0x1fff) << 1) |
151 ((as14 & 0x2000) >> 13)); 172 ((as14 & 0x2000) >> 13));
152} 173}
153 174
175static inline int reassemble_16a(int as16)
176{
177 int s, t;
178
179 /* Unusual 16-bit encoding, for wide mode only. */
180 t = (as16 << 1) & 0xffff;
181 s = (as16 & 0x8000);
182 return (t ^ s ^ (s >> 1)) | (s >> 15);
183}
184
185
154static inline int reassemble_17(int as17) 186static inline int reassemble_17(int as17)
155{ 187{
156 return (((as17 & 0x10000) >> 16) | 188 return (((as17 & 0x10000) >> 16) |
@@ -407,6 +439,7 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
407 enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec) 439 enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
408{ 440{
409 struct stub_entry *stub; 441 struct stub_entry *stub;
442 int __maybe_unused d;
410 443
411 /* initialize stub_offset to point in front of the section */ 444 /* initialize stub_offset to point in front of the section */
412 if (!me->arch.section[targetsec].stub_offset) { 445 if (!me->arch.section[targetsec].stub_offset) {
@@ -460,12 +493,19 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
460 */ 493 */
461 switch (stub_type) { 494 switch (stub_type) {
462 case ELF_STUB_GOT: 495 case ELF_STUB_GOT:
463 stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */ 496 d = get_got(me, value, addend);
497 if (d <= 15) {
498 /* Format 5 */
499 stub->insns[0] = 0x0f6010db; /* ldd 0(%dp),%dp */
500 stub->insns[0] |= low_sign_unext(d, 5) << 16;
501 } else {
502 /* Format 3 */
503 stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */
504 stub->insns[0] |= reassemble_16a(d);
505 }
464 stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */ 506 stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */
465 stub->insns[2] = 0xe820d000; /* bve (%r1) */ 507 stub->insns[2] = 0xe820d000; /* bve (%r1) */
466 stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */ 508 stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */
467
468 stub->insns[0] |= reassemble_14(get_got(me, value, addend) & 0x3fff);
469 break; 509 break;
470 case ELF_STUB_MILLI: 510 case ELF_STUB_MILLI:
471 stub->insns[0] = 0x20200000; /* ldil 0,%r1 */ 511 stub->insns[0] = 0x20200000; /* ldil 0,%r1 */
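
To see what the two ldd encodings above produce, here is a small user-space harness, illustrative only, that copies the bit-packing helpers added by this patch and the two opcode templates from the GOT stub, then prints the resulting instruction words for a few displacements.

#include <stdio.h>

static int sign_unext(int x, int len)
{
	return x & ((1 << len) - 1);
}

static int low_sign_unext(int x, int len)
{
	int sign = (x >> (len - 1)) & 1;

	return (sign_unext(x, len - 1) << 1) | sign;
}

static int reassemble_16a(int as16)
{
	int t = (as16 << 1) & 0xffff;	/* unusual 16-bit encoding, wide mode */
	int s = as16 & 0x8000;

	return (t ^ s ^ (s >> 1)) | (s >> 15);
}

int main(void)
{
	int d;

	for (d = 8; d <= 1024; d *= 4) {
		unsigned int insn;

		if (d <= 15)	/* Format 5: 5-bit low-sign immediate */
			insn = 0x0f6010dbu | (low_sign_unext(d, 5) << 16);
		else		/* Format 3: 16-bit "16a" immediate */
			insn = 0x537b0000u | reassemble_16a(d);
		printf("d = %4d  ->  ldd insn = 0x%08x\n", d, insn);
	}
	return 0;
}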
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index edc90f23e708..8406ed7f9926 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -33,7 +33,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
33#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ 33#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
34 efi_call_virt(f, a1, a2, a3, a4, a5, a6) 34 efi_call_virt(f, a1, a2, a3, a4, a5, a6)
35 35
36#define efi_ioremap(addr, size) ioremap_cache(addr, size) 36#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)
37 37
38#else /* !CONFIG_X86_32 */ 38#else /* !CONFIG_X86_32 */
39 39
@@ -84,7 +84,8 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
84 efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ 84 efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
85 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) 85 (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
86 86
87extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size); 87extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
88 u32 type);
88 89
89#endif /* CONFIG_X86_32 */ 90#endif /* CONFIG_X86_32 */
90 91
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 2bdab21f0898..c6ccbe7e81ad 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -12,9 +12,15 @@ static inline unsigned long native_save_fl(void)
12{ 12{
13 unsigned long flags; 13 unsigned long flags;
14 14
15 /*
16 * Note: this needs to be "=r" not "=rm", because we have the
17 * stack offset from what gcc expects at the time the "pop" is
18 * executed, and so a memory reference with respect to the stack
19 * would end up using the wrong address.
20 */
15 asm volatile("# __raw_save_flags\n\t" 21 asm volatile("# __raw_save_flags\n\t"
16 "pushf ; pop %0" 22 "pushf ; pop %0"
17 : "=g" (flags) 23 : "=r" (flags)
18 : /* no input */ 24 : /* no input */
19 : "memory"); 25 : "memory");
20 26
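
The constraint change is easy to reproduce outside the kernel. A user-space analogue (illustrative only, x86 host assumed) of the same pushf/pop sequence with the safer "=r" output constraint:

#include <stdio.h>

static unsigned long save_flags(void)
{
	unsigned long flags;

	/* same pattern as native_save_fl(): the output must live in a
	 * register, since a memory operand could be addressed relative
	 * to a stack pointer that the pushf has just moved */
	asm volatile("# save_flags\n\t"
		     "pushf ; pop %0"
		     : "=r" (flags)
		     : /* no input */
		     : "memory");
	return flags;
}

int main(void)
{
	printf("eflags = 0x%lx\n", save_flags());
	return 0;
}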
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 341070f7ad5c..77a68505419a 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -175,7 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
175#define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT)) 175#define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
176 176
177#define UV_GLOBAL_MMR64_PNODE_BITS(p) \ 177#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
178 ((unsigned long)(UV_PNODE_TO_GNODE(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT) 178 (((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
179 179
180#define UV_APIC_PNODE_SHIFT 6 180#define UV_APIC_PNODE_SHIFT 6
181 181
@@ -327,6 +327,7 @@ struct uv_blade_info {
327 unsigned short nr_possible_cpus; 327 unsigned short nr_possible_cpus;
328 unsigned short nr_online_cpus; 328 unsigned short nr_online_cpus;
329 unsigned short pnode; 329 unsigned short pnode;
330 short memory_nid;
330}; 331};
331extern struct uv_blade_info *uv_blade_info; 332extern struct uv_blade_info *uv_blade_info;
332extern short *uv_node_to_blade; 333extern short *uv_node_to_blade;
@@ -363,6 +364,12 @@ static inline int uv_blade_to_pnode(int bid)
363 return uv_blade_info[bid].pnode; 364 return uv_blade_info[bid].pnode;
364} 365}
365 366
367/* Nid of memory node on blade. -1 if no blade-local memory */
368static inline int uv_blade_to_memory_nid(int bid)
369{
370 return uv_blade_info[bid].memory_nid;
371}
372
366/* Determine the number of possible cpus on a blade */ 373/* Determine the number of possible cpus on a blade */
367static inline int uv_blade_nr_possible_cpus(int bid) 374static inline int uv_blade_nr_possible_cpus(int bid)
368{ 375{
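
A hedged usage sketch of the new accessor, not part of the patch (alloc_on_blade() is a made-up helper): allocate from a blade's local memory node when it has one, otherwise fall back to any node.

#include <linux/slab.h>
#include <asm/uv/uv_hub.h>

static void *alloc_on_blade(int bid, size_t size)
{
	int nid = uv_blade_to_memory_nid(bid);

	if (nid < 0)		/* blade has no local memory node */
		return kmalloc(size, GFP_KERNEL);

	return kmalloc_node(size, GFP_KERNEL, nid);
}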
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 2284a4812b68..d2ed6c5ddc80 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3793,6 +3793,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3793 mmr_pnode = uv_blade_to_pnode(mmr_blade); 3793 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3794 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 3794 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3795 3795
3796 if (cfg->move_in_progress)
3797 send_cleanup_vector(cfg);
3798
3796 return irq; 3799 return irq;
3797} 3800}
3798 3801
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 8e4cbb255c38..2ed4e2bb3b32 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -170,7 +170,7 @@ static unsigned long set_apic_id(unsigned int id)
170 170
171static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb) 171static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
172{ 172{
173 return current_cpu_data.initial_apicid >> index_msb; 173 return initial_apicid >> index_msb;
174} 174}
175 175
176static void x2apic_send_IPI_self(int vector) 176static void x2apic_send_IPI_self(int vector)
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index a284359627e7..0b631c6a2e00 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -162,7 +162,7 @@ static unsigned long set_apic_id(unsigned int id)
162 162
163static int x2apic_phys_pkg_id(int initial_apicid, int index_msb) 163static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
164{ 164{
165 return current_cpu_data.initial_apicid >> index_msb; 165 return initial_apicid >> index_msb;
166} 166}
167 167
168static void x2apic_send_IPI_self(int vector) 168static void x2apic_send_IPI_self(int vector)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 096d19aea2f7..832e908adcb5 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -261,7 +261,7 @@ struct apic apic_x2apic_uv_x = {
261 .apic_id_registered = uv_apic_id_registered, 261 .apic_id_registered = uv_apic_id_registered,
262 262
263 .irq_delivery_mode = dest_Fixed, 263 .irq_delivery_mode = dest_Fixed,
264 .irq_dest_mode = 1, /* logical */ 264 .irq_dest_mode = 0, /* physical */
265 265
266 .target_cpus = uv_target_cpus, 266 .target_cpus = uv_target_cpus,
267 .disable_esr = 0, 267 .disable_esr = 0,
@@ -362,12 +362,6 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
362 BUG(); 362 BUG();
363} 363}
364 364
365static __init void map_low_mmrs(void)
366{
367 init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
368 init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
369}
370
371enum map_type {map_wb, map_uc}; 365enum map_type {map_wb, map_uc};
372 366
373static __init void map_high(char *id, unsigned long base, int shift, 367static __init void map_high(char *id, unsigned long base, int shift,
@@ -395,26 +389,6 @@ static __init void map_gru_high(int max_pnode)
395 map_high("GRU", gru.s.base, shift, max_pnode, map_wb); 389 map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
396} 390}
397 391
398static __init void map_config_high(int max_pnode)
399{
400 union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
401 int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;
402
403 cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
404 if (cfg.s.enable)
405 map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
406}
407
408static __init void map_mmr_high(int max_pnode)
409{
410 union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
411 int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
412
413 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
414 if (mmr.s.enable)
415 map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
416}
417
418static __init void map_mmioh_high(int max_pnode) 392static __init void map_mmioh_high(int max_pnode)
419{ 393{
420 union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; 394 union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
@@ -566,8 +540,6 @@ void __init uv_system_init(void)
566 unsigned long mmr_base, present, paddr; 540 unsigned long mmr_base, present, paddr;
567 unsigned short pnode_mask; 541 unsigned short pnode_mask;
568 542
569 map_low_mmrs();
570
571 m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); 543 m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
572 m_val = m_n_config.s.m_skt; 544 m_val = m_n_config.s.m_skt;
573 n_val = m_n_config.s.n_skt; 545 n_val = m_n_config.s.n_skt;
@@ -591,6 +563,8 @@ void __init uv_system_init(void)
591 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); 563 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
592 uv_blade_info = kmalloc(bytes, GFP_KERNEL); 564 uv_blade_info = kmalloc(bytes, GFP_KERNEL);
593 BUG_ON(!uv_blade_info); 565 BUG_ON(!uv_blade_info);
566 for (blade = 0; blade < uv_num_possible_blades(); blade++)
567 uv_blade_info[blade].memory_nid = -1;
594 568
595 get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); 569 get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
596 570
@@ -629,6 +603,9 @@ void __init uv_system_init(void)
629 lcpu = uv_blade_info[blade].nr_possible_cpus; 603 lcpu = uv_blade_info[blade].nr_possible_cpus;
630 uv_blade_info[blade].nr_possible_cpus++; 604 uv_blade_info[blade].nr_possible_cpus++;
631 605
606 /* Any node on the blade, else will contain -1. */
607 uv_blade_info[blade].memory_nid = nid;
608
632 uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; 609 uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
633 uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size; 610 uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
634 uv_cpu_hub_info(cpu)->m_val = m_val; 611 uv_cpu_hub_info(cpu)->m_val = m_val;
@@ -662,11 +639,10 @@ void __init uv_system_init(void)
662 pnode = (paddr >> m_val) & pnode_mask; 639 pnode = (paddr >> m_val) & pnode_mask;
663 blade = boot_pnode_to_blade(pnode); 640 blade = boot_pnode_to_blade(pnode);
664 uv_node_to_blade[nid] = blade; 641 uv_node_to_blade[nid] = blade;
642 max_pnode = max(pnode, max_pnode);
665 } 643 }
666 644
667 map_gru_high(max_pnode); 645 map_gru_high(max_pnode);
668 map_mmr_high(max_pnode);
669 map_config_high(max_pnode);
670 map_mmioh_high(max_pnode); 646 map_mmioh_high(max_pnode);
671 647
672 uv_cpu_init(); 648 uv_cpu_init();
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 79302e9a33a4..442b5508893f 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -811,7 +811,7 @@ static int apm_do_idle(void)
811 u8 ret = 0; 811 u8 ret = 0;
812 int idled = 0; 812 int idled = 0;
813 int polling; 813 int polling;
814 int err; 814 int err = 0;
815 815
816 polling = !!(current_thread_info()->status & TS_POLLING); 816 polling = !!(current_thread_info()->status & TS_POLLING);
817 if (polling) { 817 if (polling) {
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 96f7ac0bbf01..19ccf6d0dccf 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -512,7 +512,7 @@ void __init efi_enter_virtual_mode(void)
512 && end_pfn <= max_pfn_mapped)) 512 && end_pfn <= max_pfn_mapped))
513 va = __va(md->phys_addr); 513 va = __va(md->phys_addr);
514 else 514 else
515 va = efi_ioremap(md->phys_addr, size); 515 va = efi_ioremap(md->phys_addr, size, md->type);
516 516
517 md->virt_addr = (u64) (unsigned long) va; 517 md->virt_addr = (u64) (unsigned long) va;
518 518
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 22c3b7828c50..ac0621a7ac3d 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -98,10 +98,14 @@ void __init efi_call_phys_epilog(void)
98 early_runtime_code_mapping_set_exec(0); 98 early_runtime_code_mapping_set_exec(0);
99} 99}
100 100
101void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size) 101void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
102 u32 type)
102{ 103{
103 unsigned long last_map_pfn; 104 unsigned long last_map_pfn;
104 105
106 if (type == EFI_MEMORY_MAPPED_IO)
107 return ioremap(phys_addr, size);
108
105 last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); 109 last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
106 if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) 110 if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
107 return NULL; 111 return NULL;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 8663afb56535..0d98a01cbdb2 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -602,7 +602,11 @@ ignore_int:
602#endif 602#endif
603 iret 603 iret
604 604
605.section .cpuinit.data,"wa" 605#ifndef CONFIG_HOTPLUG_CPU
606 __CPUINITDATA
607#else
608 __REFDATA
609#endif
606.align 4 610.align 4
607ENTRY(initial_code) 611ENTRY(initial_code)
608 .long i386_start_kernel 612 .long i386_start_kernel
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 508e982dd072..834c9da8bf9d 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -3,6 +3,7 @@
3#include <linux/init.h> 3#include <linux/init.h>
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <linux/efi.h> 5#include <linux/efi.h>
6#include <linux/dmi.h>
6#include <acpi/reboot.h> 7#include <acpi/reboot.h>
7#include <asm/io.h> 8#include <asm/io.h>
8#include <asm/apic.h> 9#include <asm/apic.h>
@@ -17,7 +18,6 @@
17#include <asm/cpu.h> 18#include <asm/cpu.h>
18 19
19#ifdef CONFIG_X86_32 20#ifdef CONFIG_X86_32
20# include <linux/dmi.h>
21# include <linux/ctype.h> 21# include <linux/ctype.h>
22# include <linux/mc146818rtc.h> 22# include <linux/mc146818rtc.h>
23#else 23#else
@@ -404,6 +404,38 @@ EXPORT_SYMBOL(machine_real_restart);
404 404
405#endif /* CONFIG_X86_32 */ 405#endif /* CONFIG_X86_32 */
406 406
407/*
408 * Apple MacBook5,2 (2009 MacBook) needs reboot=p
409 */
410static int __init set_pci_reboot(const struct dmi_system_id *d)
411{
412 if (reboot_type != BOOT_CF9) {
413 reboot_type = BOOT_CF9;
414 printk(KERN_INFO "%s series board detected. "
415 "Selecting PCI-method for reboots.\n", d->ident);
416 }
417 return 0;
418}
419
420static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
421 { /* Handle problems with rebooting on Apple MacBook5,2 */
422 .callback = set_pci_reboot,
423 .ident = "Apple MacBook",
424 .matches = {
425 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
426 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
427 },
428 },
429 { }
430};
431
432static int __init pci_reboot_init(void)
433{
434 dmi_check_system(pci_reboot_dmi_table);
435 return 0;
436}
437core_initcall(pci_reboot_init);
438
407static inline void kb_wait(void) 439static inline void kb_wait(void)
408{ 440{
409 int i; 441 int i;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 59f31d2dd435..78d185d797de 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -393,8 +393,8 @@ SECTIONS
393 393
394 394
395#ifdef CONFIG_X86_32 395#ifdef CONFIG_X86_32
396ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), 396. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
397 "kernel image bigger than KERNEL_IMAGE_SIZE") 397 "kernel image bigger than KERNEL_IMAGE_SIZE");
398#else 398#else
399/* 399/*
400 * Per-cpu symbols which need to be offset from __per_cpu_load 400 * Per-cpu symbols which need to be offset from __per_cpu_load
@@ -407,12 +407,12 @@ INIT_PER_CPU(irq_stack_union);
407/* 407/*
408 * Build-time check on the image size: 408 * Build-time check on the image size:
409 */ 409 */
410ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), 410. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
411 "kernel image bigger than KERNEL_IMAGE_SIZE") 411 "kernel image bigger than KERNEL_IMAGE_SIZE");
412 412
413#ifdef CONFIG_SMP 413#ifdef CONFIG_SMP
414ASSERT((per_cpu__irq_stack_union == 0), 414. = ASSERT((per_cpu__irq_stack_union == 0),
415 "irq_stack_union is not at start of per-cpu area"); 415 "irq_stack_union is not at start of per-cpu area");
416#endif 416#endif
417 417
418#endif /* CONFIG_X86_32 */ 418#endif /* CONFIG_X86_32 */
@@ -420,7 +420,7 @@ ASSERT((per_cpu__irq_stack_union == 0),
420#ifdef CONFIG_KEXEC 420#ifdef CONFIG_KEXEC
421#include <asm/kexec.h> 421#include <asm/kexec.h>
422 422
423ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, 423. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
424 "kexec control code size is too big") 424 "kexec control code size is too big");
425#endif 425#endif
426 426
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 1440b9c0547e..caa24aca8115 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -89,16 +89,13 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
89 rv.msrs = msrs; 89 rv.msrs = msrs;
90 rv.msr_no = msr_no; 90 rv.msr_no = msr_no;
91 91
92 preempt_disable(); 92 this_cpu = get_cpu();
93 /* 93
94 * FIXME: handle the CPU we're executing on separately for now until 94 if (cpumask_test_cpu(this_cpu, mask))
95 * smp_call_function_many has been fixed to not skip it. 95 __rdmsr_on_cpu(&rv);
96 */
97 this_cpu = raw_smp_processor_id();
98 smp_call_function_single(this_cpu, __rdmsr_on_cpu, &rv, 1);
99 96
100 smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1); 97 smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
101 preempt_enable(); 98 put_cpu();
102} 99}
103EXPORT_SYMBOL(rdmsr_on_cpus); 100EXPORT_SYMBOL(rdmsr_on_cpus);
104 101
@@ -121,16 +118,13 @@ void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
121 rv.msrs = msrs; 118 rv.msrs = msrs;
122 rv.msr_no = msr_no; 119 rv.msr_no = msr_no;
123 120
124 preempt_disable(); 121 this_cpu = get_cpu();
125 /* 122
126 * FIXME: handle the CPU we're executing on separately for now until 123 if (cpumask_test_cpu(this_cpu, mask))
127 * smp_call_function_many has been fixed to not skip it. 124 __wrmsr_on_cpu(&rv);
128 */
129 this_cpu = raw_smp_processor_id();
130 smp_call_function_single(this_cpu, __wrmsr_on_cpu, &rv, 1);
131 125
132 smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1); 126 smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
133 preempt_enable(); 127 put_cpu();
134} 128}
135EXPORT_SYMBOL(wrmsr_on_cpus); 129EXPORT_SYMBOL(wrmsr_on_cpus);
136 130
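
The shape of the fix generalizes. A hedged sketch of the same pattern in isolation (example_func() and run_on_mask() are illustrative names, not kernel API): smp_call_function_many() skips the calling CPU, so that CPU is handled with a direct call, while get_cpu()/put_cpu() keep the caller from migrating in between.

#include <linux/smp.h>
#include <linux/cpumask.h>

static void example_func(void *info)
{
	/* per-CPU work would go here */
}

static void run_on_mask(const struct cpumask *mask, void *info)
{
	int this_cpu = get_cpu();	/* disables preemption */

	if (cpumask_test_cpu(this_cpu, mask))
		example_func(info);

	smp_call_function_many(mask, example_func, info, 1);
	put_cpu();
}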
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1b734d7a8966..7e600c1962db 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -591,9 +591,12 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
591 unsigned int level; 591 unsigned int level;
592 pte_t *kpte, old_pte; 592 pte_t *kpte, old_pte;
593 593
594 if (cpa->flags & CPA_PAGES_ARRAY) 594 if (cpa->flags & CPA_PAGES_ARRAY) {
595 address = (unsigned long)page_address(cpa->pages[cpa->curpage]); 595 struct page *page = cpa->pages[cpa->curpage];
596 else if (cpa->flags & CPA_ARRAY) 596 if (unlikely(PageHighMem(page)))
597 return 0;
598 address = (unsigned long)page_address(page);
599 } else if (cpa->flags & CPA_ARRAY)
597 address = cpa->vaddr[cpa->curpage]; 600 address = cpa->vaddr[cpa->curpage];
598 else 601 else
599 address = *cpa->vaddr; 602 address = *cpa->vaddr;
@@ -697,9 +700,12 @@ static int cpa_process_alias(struct cpa_data *cpa)
697 * No need to redo, when the primary call touched the direct 700 * No need to redo, when the primary call touched the direct
698 * mapping already: 701 * mapping already:
699 */ 702 */
700 if (cpa->flags & CPA_PAGES_ARRAY) 703 if (cpa->flags & CPA_PAGES_ARRAY) {
701 vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]); 704 struct page *page = cpa->pages[cpa->curpage];
702 else if (cpa->flags & CPA_ARRAY) 705 if (unlikely(PageHighMem(page)))
706 return 0;
707 vaddr = (unsigned long)page_address(page);
708 } else if (cpa->flags & CPA_ARRAY)
703 vaddr = cpa->vaddr[cpa->curpage]; 709 vaddr = cpa->vaddr[cpa->curpage];
704 else 710 else
705 vaddr = *cpa->vaddr; 711 vaddr = *cpa->vaddr;
@@ -997,12 +1003,15 @@ EXPORT_SYMBOL(set_memory_array_uc);
997int _set_memory_wc(unsigned long addr, int numpages) 1003int _set_memory_wc(unsigned long addr, int numpages)
998{ 1004{
999 int ret; 1005 int ret;
1006 unsigned long addr_copy = addr;
1007
1000 ret = change_page_attr_set(&addr, numpages, 1008 ret = change_page_attr_set(&addr, numpages,
1001 __pgprot(_PAGE_CACHE_UC_MINUS), 0); 1009 __pgprot(_PAGE_CACHE_UC_MINUS), 0);
1002
1003 if (!ret) { 1010 if (!ret) {
1004 ret = change_page_attr_set(&addr, numpages, 1011 ret = change_page_attr_set_clr(&addr_copy, numpages,
1005 __pgprot(_PAGE_CACHE_WC), 0); 1012 __pgprot(_PAGE_CACHE_WC),
1013 __pgprot(_PAGE_CACHE_MASK),
1014 0, 0, NULL);
1006 } 1015 }
1007 return ret; 1016 return ret;
1008} 1017}
@@ -1119,7 +1128,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
1119 int free_idx; 1128 int free_idx;
1120 1129
1121 for (i = 0; i < addrinarray; i++) { 1130 for (i = 0; i < addrinarray; i++) {
1122 start = (unsigned long)page_address(pages[i]); 1131 if (PageHighMem(pages[i]))
1132 continue;
1133 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
1123 end = start + PAGE_SIZE; 1134 end = start + PAGE_SIZE;
1124 if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL)) 1135 if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
1125 goto err_out; 1136 goto err_out;
@@ -1132,7 +1143,9 @@ int set_pages_array_uc(struct page **pages, int addrinarray)
1132err_out: 1143err_out:
1133 free_idx = i; 1144 free_idx = i;
1134 for (i = 0; i < free_idx; i++) { 1145 for (i = 0; i < free_idx; i++) {
1135 start = (unsigned long)page_address(pages[i]); 1146 if (PageHighMem(pages[i]))
1147 continue;
1148 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
1136 end = start + PAGE_SIZE; 1149 end = start + PAGE_SIZE;
1137 free_memtype(start, end); 1150 free_memtype(start, end);
1138 } 1151 }
@@ -1161,7 +1174,9 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
1161 return retval; 1174 return retval;
1162 1175
1163 for (i = 0; i < addrinarray; i++) { 1176 for (i = 0; i < addrinarray; i++) {
1164 start = (unsigned long)page_address(pages[i]); 1177 if (PageHighMem(pages[i]))
1178 continue;
1179 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
1165 end = start + PAGE_SIZE; 1180 end = start + PAGE_SIZE;
1166 free_memtype(start, end); 1181 free_memtype(start, end);
1167 } 1182 }
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index af8f9650058c..ed34f5e35999 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -329,7 +329,6 @@ void __init reserve_top_address(unsigned long reserve)
329 printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", 329 printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
330 (int)-reserve); 330 (int)-reserve);
331 __FIXADDR_TOP = -reserve - PAGE_SIZE; 331 __FIXADDR_TOP = -reserve - PAGE_SIZE;
332 __VMALLOC_RESERVE += reserve;
333#endif 332#endif
334} 333}
335 334
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 7a0f4aa4fa1e..9a62224cc278 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -38,6 +38,9 @@
38 38
39#define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT 39#define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT
40 40
41#undef PREFIX
42#define PREFIX "ACPI:memory_hp:"
43
41ACPI_MODULE_NAME("acpi_memhotplug"); 44ACPI_MODULE_NAME("acpi_memhotplug");
42MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>"); 45MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>");
43MODULE_DESCRIPTION("Hotplug Mem Driver"); 46MODULE_DESCRIPTION("Hotplug Mem Driver");
@@ -153,6 +156,7 @@ acpi_memory_get_device(acpi_handle handle,
153 acpi_handle phandle; 156 acpi_handle phandle;
154 struct acpi_device *device = NULL; 157 struct acpi_device *device = NULL;
155 struct acpi_device *pdevice = NULL; 158 struct acpi_device *pdevice = NULL;
159 int result;
156 160
157 161
158 if (!acpi_bus_get_device(handle, &device) && device) 162 if (!acpi_bus_get_device(handle, &device) && device)
@@ -165,9 +169,9 @@ acpi_memory_get_device(acpi_handle handle,
165 } 169 }
166 170
167 /* Get the parent device */ 171 /* Get the parent device */
168 status = acpi_bus_get_device(phandle, &pdevice); 172 result = acpi_bus_get_device(phandle, &pdevice);
169 if (ACPI_FAILURE(status)) { 173 if (result) {
170 ACPI_EXCEPTION((AE_INFO, status, "Cannot get acpi bus device")); 174 printk(KERN_WARNING PREFIX "Cannot get acpi bus device");
171 return -EINVAL; 175 return -EINVAL;
172 } 176 }
173 177
@@ -175,9 +179,9 @@ acpi_memory_get_device(acpi_handle handle,
175 * Now add the notified device. This creates the acpi_device 179 * Now add the notified device. This creates the acpi_device
176 * and invokes .add function 180 * and invokes .add function
177 */ 181 */
178 status = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE); 182 result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE);
179 if (ACPI_FAILURE(status)) { 183 if (result) {
180 ACPI_EXCEPTION((AE_INFO, status, "Cannot add acpi bus")); 184 printk(KERN_WARNING PREFIX "Cannot add acpi bus");
181 return -EINVAL; 185 return -EINVAL;
182 } 186 }
183 187
@@ -238,7 +242,12 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
238 num_enabled++; 242 num_enabled++;
239 continue; 243 continue;
240 } 244 }
241 245 /*
246 * If the memory block size is zero, please ignore it.
247 * Don't try to do the following memory hotplug flowchart.
248 */
249 if (!info->length)
250 continue;
242 if (node < 0) 251 if (node < 0)
243 node = memory_add_physaddr_to_nid(info->start_addr); 252 node = memory_add_physaddr_to_nid(info->start_addr);
244 253
@@ -253,8 +262,15 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
253 mem_device->state = MEMORY_INVALID_STATE; 262 mem_device->state = MEMORY_INVALID_STATE;
254 return -EINVAL; 263 return -EINVAL;
255 } 264 }
256 265 /*
257 return result; 266 * Sometimes the memory device will contain several memory blocks.
267 * When one memory block is hot-added to the system memory, it will
268 * be regarded as a success.
269 * Otherwise if the last memory block can't be hot-added to the system
270 * memory, it will be failure and the memory device can't be bound with
271 * driver.
272 */
273 return 0;
258} 274}
259 275
260static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device) 276static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 544dcf834922..eb6f038b03d9 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -97,6 +97,7 @@
97#define AOPOBJ_OBJECT_INITIALIZED 0x08 97#define AOPOBJ_OBJECT_INITIALIZED 0x08
98#define AOPOBJ_SETUP_COMPLETE 0x10 98#define AOPOBJ_SETUP_COMPLETE 0x10
99#define AOPOBJ_SINGLE_DATUM 0x20 99#define AOPOBJ_SINGLE_DATUM 0x20
100#define AOPOBJ_INVALID 0x40 /* Used if host OS won't allow an op_region address */
100 101
101/****************************************************************************** 102/******************************************************************************
102 * 103 *
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 584d766e6f12..b79978f7bc71 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -397,6 +397,30 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
397 status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node), 397 status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
398 extra_desc->extra.aml_length, 398 extra_desc->extra.aml_length,
399 extra_desc->extra.aml_start); 399 extra_desc->extra.aml_start);
400 if (ACPI_FAILURE(status)) {
401 return_ACPI_STATUS(status);
402 }
403
404 /* Validate the region address/length via the host OS */
405
406 status = acpi_os_validate_address(obj_desc->region.space_id,
407 obj_desc->region.address,
408 (acpi_size) obj_desc->region.length,
409 acpi_ut_get_node_name(node));
410
411 if (ACPI_FAILURE(status)) {
412 /*
413 * Invalid address/length. We will emit an error message and mark
414 * the region as invalid, so that it will cause an additional error if
415 * it is ever used. Then return AE_OK.
416 */
417 ACPI_EXCEPTION((AE_INFO, status,
418 "During address validation of OpRegion [%4.4s]",
419 node->name.ascii));
420 obj_desc->common.flags |= AOPOBJ_INVALID;
421 status = AE_OK;
422 }
423
400 return_ACPI_STATUS(status); 424 return_ACPI_STATUS(status);
401} 425}
402 426
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index d4075b821021..6687be167f5f 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -113,6 +113,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
113 } 113 }
114 } 114 }
115 115
116 /* Exit if Address/Length have been disallowed by the host OS */
117
118 if (rgn_desc->common.flags & AOPOBJ_INVALID) {
119 return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
120 }
121
116 /* 122 /*
117 * Exit now for SMBus address space, it has a non-linear address space 123 * Exit now for SMBus address space, it has a non-linear address space
118 * and the request cannot be directly validated 124 * and the request cannot be directly validated
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 71670719d61a..5691f165a952 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -189,11 +189,36 @@ acpi_status __init acpi_os_initialize(void)
189 return AE_OK; 189 return AE_OK;
190} 190}
191 191
192static void bind_to_cpu0(struct work_struct *work)
193{
194 set_cpus_allowed(current, cpumask_of_cpu(0));
195 kfree(work);
196}
197
198static void bind_workqueue(struct workqueue_struct *wq)
199{
200 struct work_struct *work;
201
202 work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
203 INIT_WORK(work, bind_to_cpu0);
204 queue_work(wq, work);
205}
206
192acpi_status acpi_os_initialize1(void) 207acpi_status acpi_os_initialize1(void)
193{ 208{
209 /*
210 * On some machines, a software-initiated SMI causes corruption unless
211 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
212 * typically it's done in GPE-related methods that are run via
213 * workqueues, so we can avoid the known corruption cases by binding
214 * the workqueues to CPU 0.
215 */
194 kacpid_wq = create_singlethread_workqueue("kacpid"); 216 kacpid_wq = create_singlethread_workqueue("kacpid");
217 bind_workqueue(kacpid_wq);
195 kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify"); 218 kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
219 bind_workqueue(kacpi_notify_wq);
196 kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug"); 220 kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
221 bind_workqueue(kacpi_hotplug_wq);
197 BUG_ON(!kacpid_wq); 222 BUG_ON(!kacpid_wq);
198 BUG_ON(!kacpi_notify_wq); 223 BUG_ON(!kacpi_notify_wq);
199 BUG_ON(!kacpi_hotplug_wq); 224 BUG_ON(!kacpi_hotplug_wq);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 0944daec064f..9c61ab2177cf 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -121,7 +121,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
121 table_attr->attr.size = 0; 121 table_attr->attr.size = 0;
122 table_attr->attr.read = acpi_table_show; 122 table_attr->attr.read = acpi_table_show;
123 table_attr->attr.attr.name = table_attr->name; 123 table_attr->attr.attr.name = table_attr->name;
124 table_attr->attr.attr.mode = 0444; 124 table_attr->attr.attr.mode = 0400;
125 125
126 return; 126 return;
127} 127}
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index f4bb43fb8016..e077701ae3d9 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -225,7 +225,7 @@ static const struct agp_bridge_driver parisc_agp_driver = {
225 .configure = parisc_agp_configure, 225 .configure = parisc_agp_configure,
226 .fetch_size = parisc_agp_fetch_size, 226 .fetch_size = parisc_agp_fetch_size,
227 .tlb_flush = parisc_agp_tlbflush, 227 .tlb_flush = parisc_agp_tlbflush,
228 .mask_memory = parisc_agp_mask_memory, 228 .mask_memory = parisc_agp_page_mask_memory,
229 .masks = parisc_agp_masks, 229 .masks = parisc_agp_masks,
230 .agp_enable = parisc_agp_enable, 230 .agp_enable = parisc_agp_enable,
231 .cache_flush = global_cache_flush, 231 .cache_flush = global_cache_flush,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b90eda8b3440..fd69086d08d5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -858,6 +858,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
858 858
859 /* Check for existing affected CPUs. 859 /* Check for existing affected CPUs.
860 * They may not be aware of it due to CPU Hotplug. 860 * They may not be aware of it due to CPU Hotplug.
861 * cpufreq_cpu_put is called when the device is removed
862 * in __cpufreq_remove_dev()
861 */ 863 */
862 managed_policy = cpufreq_cpu_get(j); 864 managed_policy = cpufreq_cpu_get(j);
863 if (unlikely(managed_policy)) { 865 if (unlikely(managed_policy)) {
@@ -884,7 +886,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
884 ret = sysfs_create_link(&sys_dev->kobj, 886 ret = sysfs_create_link(&sys_dev->kobj,
885 &managed_policy->kobj, 887 &managed_policy->kobj,
886 "cpufreq"); 888 "cpufreq");
887 if (!ret) 889 if (ret)
888 cpufreq_cpu_put(managed_policy); 890 cpufreq_cpu_put(managed_policy);
889 /* 891 /*
890 * Success. We only needed to be added to the mask. 892 * Success. We only needed to be added to the mask.
@@ -924,6 +926,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
924 926
925 spin_lock_irqsave(&cpufreq_driver_lock, flags); 927 spin_lock_irqsave(&cpufreq_driver_lock, flags);
926 for_each_cpu(j, policy->cpus) { 928 for_each_cpu(j, policy->cpus) {
929 if (!cpu_online(j))
930 continue;
927 per_cpu(cpufreq_cpu_data, j) = policy; 931 per_cpu(cpufreq_cpu_data, j) = policy;
928 per_cpu(policy_cpu, j) = policy->cpu; 932 per_cpu(policy_cpu, j) = policy->cpu;
929 } 933 }
@@ -1244,13 +1248,22 @@ EXPORT_SYMBOL(cpufreq_get);
1244 1248
1245static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) 1249static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1246{ 1250{
1247 int cpu = sysdev->id;
1248 int ret = 0; 1251 int ret = 0;
1252
1253#ifdef __powerpc__
1254 int cpu = sysdev->id;
1249 unsigned int cur_freq = 0; 1255 unsigned int cur_freq = 0;
1250 struct cpufreq_policy *cpu_policy; 1256 struct cpufreq_policy *cpu_policy;
1251 1257
1252 dprintk("suspending cpu %u\n", cpu); 1258 dprintk("suspending cpu %u\n", cpu);
1253 1259
1260 /*
1261 * This whole bogosity is here because Powerbooks are made of fail.
1262 * No sane platform should need any of the code below to be run.
1263 * (it's entirely the wrong thing to do, as driver->get may
1264 * reenable interrupts on some architectures).
1265 */
1266
1254 if (!cpu_online(cpu)) 1267 if (!cpu_online(cpu))
1255 return 0; 1268 return 0;
1256 1269
@@ -1309,6 +1322,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1309 1322
1310out: 1323out:
1311 cpufreq_cpu_put(cpu_policy); 1324 cpufreq_cpu_put(cpu_policy);
1325#endif /* __powerpc__ */
1312 return ret; 1326 return ret;
1313} 1327}
1314 1328
@@ -1322,12 +1336,18 @@ out:
1322 */ 1336 */
1323static int cpufreq_resume(struct sys_device *sysdev) 1337static int cpufreq_resume(struct sys_device *sysdev)
1324{ 1338{
1325 int cpu = sysdev->id;
1326 int ret = 0; 1339 int ret = 0;
1340
1341#ifdef __powerpc__
1342 int cpu = sysdev->id;
1327 struct cpufreq_policy *cpu_policy; 1343 struct cpufreq_policy *cpu_policy;
1328 1344
1329 dprintk("resuming cpu %u\n", cpu); 1345 dprintk("resuming cpu %u\n", cpu);
1330 1346
1347 /* As with the ->suspend method, all the code below is
1348 * only necessary because Powerbooks suck.
1349 * See commit 42d4dc3f4e1e for jokes. */
1350
1331 if (!cpu_online(cpu)) 1351 if (!cpu_online(cpu))
1332 return 0; 1352 return 0;
1333 1353
@@ -1391,6 +1411,7 @@ out:
1391 schedule_work(&cpu_policy->update); 1411 schedule_work(&cpu_policy->update);
1392fail: 1412fail:
1393 cpufreq_cpu_put(cpu_policy); 1413 cpufreq_cpu_put(cpu_policy);
1414#endif /* __powerpc__ */
1394 return ret; 1415 return ret;
1395} 1416}
1396 1417
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 57490502b21c..bdea7e2f94ba 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -63,6 +63,7 @@ struct cpu_dbs_info_s {
63 unsigned int down_skip; 63 unsigned int down_skip;
64 unsigned int requested_freq; 64 unsigned int requested_freq;
65 int cpu; 65 int cpu;
66 unsigned int enable:1;
66 /* 67 /*
67 * percpu mutex that serializes governor limit change with 68 * percpu mutex that serializes governor limit change with
68 * do_dbs_timer invocation. We do not want do_dbs_timer to run 69 * do_dbs_timer invocation. We do not want do_dbs_timer to run
@@ -141,6 +142,9 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
141 142
142 struct cpufreq_policy *policy; 143 struct cpufreq_policy *policy;
143 144
145 if (!this_dbs_info->enable)
146 return 0;
147
144 policy = this_dbs_info->cur_policy; 148 policy = this_dbs_info->cur_policy;
145 149
146 /* 150 /*
@@ -497,6 +501,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
497 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 501 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
498 delay -= jiffies % delay; 502 delay -= jiffies % delay;
499 503
504 dbs_info->enable = 1;
500 INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); 505 INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
501 queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, 506 queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
502 delay); 507 delay);
@@ -504,6 +509,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
504 509
505static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 510static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
506{ 511{
512 dbs_info->enable = 0;
507 cancel_delayed_work_sync(&dbs_info->work); 513 cancel_delayed_work_sync(&dbs_info->work);
508} 514}
509 515
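The new enable bit closes a race between the governor's cpufreq transition notifier and dbs_timer_exit(): the flag is set just before the deferrable work is queued and cleared before cancel_delayed_work_sync(), and the notifier returns early once it is clear. A rough userspace analogue of that ordering, with invented names (dbs_info, timer_init/timer_exit, freq_notifier) that only mirror the shape of the driver code:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative state mirroring cpu_dbs_info_s::enable */
struct dbs_info {
    bool enable;
};

static void timer_init(struct dbs_info *d)
{
    d->enable = true;               /* set before the periodic work is queued */
    /* queue_delayed_work_on(...) would follow here */
}

static void timer_exit(struct dbs_info *d)
{
    d->enable = false;              /* clear before cancelling the work */
    /* cancel_delayed_work_sync(...) would follow here */
}

/* The notifier ignores events once the governor is shutting down */
static int freq_notifier(struct dbs_info *d)
{
    if (!d->enable)
        return 0;
    printf("governor active, handling transition\n");
    return 0;
}

int main(void)
{
    struct dbs_info d;

    timer_init(&d);
    freq_notifier(&d);
    timer_exit(&d);
    freq_notifier(&d);              /* now a no-op */
    return 0;
}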
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 24964c1d0af9..e2a10bcba7a1 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -868,6 +868,8 @@ static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
868 goto err_reg; 868 goto err_reg;
869 } 869 }
870 870
871 return;
872
871err_reg: 873err_reg:
872 debugf0("Error reading F2x%03x.\n", reg); 874 debugf0("Error reading F2x%03x.\n", reg);
873} 875}
@@ -2634,6 +2636,8 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
2634 2636
2635 amd64_dump_misc_regs(pvt); 2637 amd64_dump_misc_regs(pvt);
2636 2638
2639 return;
2640
2637err_reg: 2641err_reg:
2638 debugf0("Reading an MC register failed\n"); 2642 debugf0("Reading an MC register failed\n");
2639 2643
@@ -2977,6 +2981,9 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2977 "ECC is enabled by BIOS, Proceeding " 2981 "ECC is enabled by BIOS, Proceeding "
2978 "with EDAC module initialization\n"); 2982 "with EDAC module initialization\n");
2979 2983
2984 /* Signal good ECC status */
2985 ret = 0;
2986
2980 /* CLEAR the override, since BIOS controlled it */ 2987 /* CLEAR the override, since BIOS controlled it */
2981 ecc_enable_override = 0; 2988 ecc_enable_override = 0;
2982 } 2989 }
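Both amd64_edac hunks add an explicit return so the success path no longer falls through into the err_reg label and logs a spurious failure; the third hunk likewise sets ret = 0 on the good path. The goto-error-label idiom only works when every success path exits before the label, as in this small sketch built around a hypothetical read_register():

#include <stdio.h>

static int read_register(int reg, unsigned *val)
{
    if (reg < 0)
        goto err_reg;               /* failure path */

    *val = 0x1234u;                 /* pretend the read succeeded */
    return 0;                       /* without this, we'd fall into err_reg */

err_reg:
    fprintf(stderr, "Error reading F2x%03x.\n", reg);
    return -1;
}

int main(void)
{
    unsigned v;

    return read_register(0x10, &v) ? 1 : 0;
}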
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8fab7890a363..33be210d6723 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1461,7 +1461,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
1461 goto out; 1461 goto out;
1462 } 1462 }
1463 1463
1464 if (crtc_req->count_connectors > 0 && !mode && !fb) { 1464 if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
1465 DRM_DEBUG("Count connectors is %d but no mode or fb set\n", 1465 DRM_DEBUG("Count connectors is %d but no mode or fb set\n",
1466 crtc_req->count_connectors); 1466 crtc_req->count_connectors);
1467 ret = -EINVAL; 1467 ret = -EINVAL;
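The corrected condition rejects a SETCRTC request that names connectors but lacks either the mode or the framebuffer; the old `!mode && !fb` only caught the case where both were missing. A tiny illustration of the same predicate (setcrtc_args_valid() is invented, not the DRM API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative check: connectors require both a mode and a framebuffer. */
static bool setcrtc_args_valid(int count_connectors, const void *mode,
                               const void *fb)
{
    if (count_connectors > 0 && (!mode || !fb))
        return false;               /* the old "&&" missed mode-only / fb-only */
    return true;
}

int main(void)
{
    int m = 1;                      /* stand-ins for real mode/fb objects */

    printf("%d\n", setcrtc_args_valid(1, &m, NULL));    /* 0: rejected */
    printf("%d\n", setcrtc_args_valid(1, &m, &m));      /* 1: accepted */
    return 0;
}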
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3da9cfa31848..6aaa2cb23365 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -706,8 +706,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
706 struct drm_encoder **save_encoders, *new_encoder; 706 struct drm_encoder **save_encoders, *new_encoder;
707 struct drm_framebuffer *old_fb = NULL; 707 struct drm_framebuffer *old_fb = NULL;
708 bool save_enabled; 708 bool save_enabled;
709 bool mode_changed = false; 709 bool mode_changed = false; /* if true do a full mode set */
710 bool fb_changed = false; 710 bool fb_changed = false; /* if true and !mode_changed just do a flip */
711 struct drm_connector *connector; 711 struct drm_connector *connector;
712 int count = 0, ro, fail = 0; 712 int count = 0, ro, fail = 0;
713 struct drm_crtc_helper_funcs *crtc_funcs; 713 struct drm_crtc_helper_funcs *crtc_funcs;
@@ -758,6 +758,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
758 if (set->crtc->fb == NULL) { 758 if (set->crtc->fb == NULL) {
759 DRM_DEBUG("crtc has no fb, full mode set\n"); 759 DRM_DEBUG("crtc has no fb, full mode set\n");
760 mode_changed = true; 760 mode_changed = true;
761 } else if (set->fb == NULL) {
762 mode_changed = true;
761 } else if ((set->fb->bits_per_pixel != 763 } else if ((set->fb->bits_per_pixel !=
762 set->crtc->fb->bits_per_pixel) || 764 set->crtc->fb->bits_per_pixel) ||
763 set->fb->depth != set->crtc->fb->depth) 765 set->fb->depth != set->crtc->fb->depth)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 05a44896dffb..f1ba8ff41130 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -722,13 +722,14 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
722 unsigned idx) 722 unsigned idx)
723{ 723{
724 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 724 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
725 uint32_t header = ib_chunk->kdata[idx]; 725 uint32_t header;
726 726
727 if (idx >= ib_chunk->length_dw) { 727 if (idx >= ib_chunk->length_dw) {
728 DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 728 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
729 idx, ib_chunk->length_dw); 729 idx, ib_chunk->length_dw);
730 return -EINVAL; 730 return -EINVAL;
731 } 731 }
732 header = ib_chunk->kdata[idx];
732 pkt->idx = idx; 733 pkt->idx = idx;
733 pkt->type = CP_PACKET_GET_TYPE(header); 734 pkt->type = CP_PACKET_GET_TYPE(header);
734 pkt->count = CP_PACKET_GET_COUNT(header); 735 pkt->count = CP_PACKET_GET_COUNT(header);
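The r100 hunk (like the l1oip loop-condition change further down) applies the usual rule: validate the index against the chunk length before reading, so an out-of-range packet never touches kdata[]. A minimal standalone sketch with invented names:

#include <stdio.h>

#define CHUNK_LEN 4

static int parse_packet(const unsigned *kdata, unsigned length_dw,
                        unsigned idx, unsigned *header)
{
    if (idx >= length_dw) {         /* bound check first ... */
        fprintf(stderr, "packet at %u after CS end %u\n", idx, length_dw);
        return -1;
    }
    *header = kdata[idx];           /* ... then the read is known to be safe */
    return 0;
}

int main(void)
{
    unsigned chunk[CHUNK_LEN] = { 0xdeadbeef, 1, 2, 3 };
    unsigned hdr;

    if (parse_packet(chunk, CHUNK_LEN, 0, &hdr) == 0)
        printf("header %#x\n", hdr);
    parse_packet(chunk, CHUNK_LEN, 9, &hdr);    /* rejected before any read */
    return 0;
}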
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 3cfcee17dc56..0bd5879a4957 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -318,6 +318,14 @@ static int __init radeon_init(void)
318 driver = &driver_old; 318 driver = &driver_old;
319 driver->num_ioctls = radeon_max_ioctl; 319 driver->num_ioctls = radeon_max_ioctl;
320#if defined(CONFIG_DRM_RADEON_KMS) 320#if defined(CONFIG_DRM_RADEON_KMS)
321#ifdef CONFIG_VGA_CONSOLE
322 if (vgacon_text_force() && radeon_modeset == -1) {
323 DRM_INFO("VGACON disable radeon kernel modesetting.\n");
324 driver = &driver_old;
325 driver->driver_features &= ~DRIVER_MODESET;
326 radeon_modeset = 0;
327 }
328#endif
321 /* if enabled by default */ 329 /* if enabled by default */
322 if (radeon_modeset == -1) { 330 if (radeon_modeset == -1) {
323 DRM_INFO("radeon default to kernel modesetting.\n"); 331 DRM_INFO("radeon default to kernel modesetting.\n");
@@ -329,17 +337,8 @@ static int __init radeon_init(void)
329 driver->driver_features |= DRIVER_MODESET; 337 driver->driver_features |= DRIVER_MODESET;
330 driver->num_ioctls = radeon_max_kms_ioctl; 338 driver->num_ioctls = radeon_max_kms_ioctl;
331 } 339 }
332
333 /* if the vga console setting is enabled still 340 /* if the vga console setting is enabled still
334 * let modprobe override it */ 341 * let modprobe override it */
335#ifdef CONFIG_VGA_CONSOLE
336 if (vgacon_text_force() && radeon_modeset == -1) {
337 DRM_INFO("VGACON disable radeon kernel modesetting.\n");
338 driver = &driver_old;
339 driver->driver_features &= ~DRIVER_MODESET;
340 radeon_modeset = 0;
341 }
342#endif
343#endif 342#endif
344 return drm_init(driver); 343 return drm_init(driver);
345} 344}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 937a2f1cdb46..3357110e30ce 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -58,6 +58,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
58 if (r) { 58 if (r) {
59 DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n"); 59 DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n");
60 radeon_device_fini(rdev); 60 radeon_device_fini(rdev);
61 kfree(rdev);
62 dev->dev_private = NULL;
61 return r; 63 return r;
62 } 64 }
63 return 0; 65 return 0;
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 551e608702e4..fd8f3ca716ea 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -370,6 +370,7 @@ void rv515_vram_info(struct radeon_device *rdev)
370 370
371 rv515_vram_get_type(rdev); 371 rv515_vram_get_type(rdev);
372 372
373 r100_vram_init_sizes(rdev);
373 /* FIXME: we should enforce default clock in case GPU is not in 374 /* FIXME: we should enforce default clock in case GPU is not in
374 * default setup 375 * default setup
375 */ 376 */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6538d4236989..c2b0d710d10f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1182,13 +1182,14 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1182 1182
1183int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) 1183int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1184{ 1184{
1185 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 1185 struct ttm_mem_type_manager *man;
1186 int ret = -EINVAL; 1186 int ret = -EINVAL;
1187 1187
1188 if (mem_type >= TTM_NUM_MEM_TYPES) { 1188 if (mem_type >= TTM_NUM_MEM_TYPES) {
1189 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); 1189 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1190 return ret; 1190 return ret;
1191 } 1191 }
1192 man = &bdev->man[mem_type];
1192 1193
1193 if (!man->has_type) { 1194 if (!man->has_type) {
1194 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " 1195 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
@@ -1575,6 +1576,10 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1575 driver->sync_obj_unref(&sync_obj); 1576 driver->sync_obj_unref(&sync_obj);
1576 driver->sync_obj_unref(&tmp_obj); 1577 driver->sync_obj_unref(&tmp_obj);
1577 spin_lock(&bo->lock); 1578 spin_lock(&bo->lock);
1579 } else {
1580 spin_unlock(&bo->lock);
1581 driver->sync_obj_unref(&sync_obj);
1582 spin_lock(&bo->lock);
1578 } 1583 }
1579 } 1584 }
1580 return 0; 1585 return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ce2e6f38ea01..ad4ada07c6cf 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -150,7 +150,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
150#ifdef CONFIG_X86 150#ifdef CONFIG_X86
151 dst = kmap_atomic_prot(d, KM_USER0, prot); 151 dst = kmap_atomic_prot(d, KM_USER0, prot);
152#else 152#else
153 if (prot != PAGE_KERNEL) 153 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
154 dst = vmap(&d, 1, 0, prot); 154 dst = vmap(&d, 1, 0, prot);
155 else 155 else
156 dst = kmap(d); 156 dst = kmap(d);
@@ -163,7 +163,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
163#ifdef CONFIG_X86 163#ifdef CONFIG_X86
164 kunmap_atomic(dst, KM_USER0); 164 kunmap_atomic(dst, KM_USER0);
165#else 165#else
166 if (prot != PAGE_KERNEL) 166 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
167 vunmap(dst); 167 vunmap(dst);
168 else 168 else
169 kunmap(d); 169 kunmap(d);
@@ -186,7 +186,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
186#ifdef CONFIG_X86 186#ifdef CONFIG_X86
187 src = kmap_atomic_prot(s, KM_USER0, prot); 187 src = kmap_atomic_prot(s, KM_USER0, prot);
188#else 188#else
189 if (prot != PAGE_KERNEL) 189 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
190 src = vmap(&s, 1, 0, prot); 190 src = vmap(&s, 1, 0, prot);
191 else 191 else
192 src = kmap(s); 192 src = kmap(s);
@@ -199,7 +199,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
199#ifdef CONFIG_X86 199#ifdef CONFIG_X86
200 kunmap_atomic(src, KM_USER0); 200 kunmap_atomic(src, KM_USER0);
201#else 201#else
202 if (prot != PAGE_KERNEL) 202 if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
203 vunmap(src); 203 vunmap(src);
204 else 204 else
205 kunmap(s); 205 kunmap(s);
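pgprot_t is an opaque type that can be a one-member struct depending on configuration, so the non-x86 path now compares the underlying values with pgprot_val() rather than comparing the wrappers directly. The same compare-through-an-accessor shape in a standalone sketch; the pgprot_t/__pgprot/PAGE_KERNEL definitions below are simplified stand-ins, not the kernel's:

#include <stdio.h>

/* Stand-in for an opaque type that wraps its value in a struct */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)   ((x).pgprot)
#define __pgprot(x)     ((pgprot_t){ (x) })

#define PAGE_KERNEL     __pgprot(0x063)

static const char *pick_mapping(pgprot_t prot)
{
    /* writing "prot != PAGE_KERNEL" would not compile for a struct type */
    if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
        return "vmap with custom protection";
    return "plain kmap";
}

int main(void)
{
    printf("%s\n", pick_mapping(PAGE_KERNEL));
    printf("%s\n", pick_mapping(__pgprot(0x163)));
    return 0;
}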
diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c
index b587e2d576ac..820e51673b26 100644
--- a/drivers/input/serio/hp_sdc_mlc.c
+++ b/drivers/input/serio/hp_sdc_mlc.c
@@ -296,7 +296,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc)
296 priv->tseq[3] = 0; 296 priv->tseq[3] = 0;
297 if (mlc->opacket & HIL_CTRL_APE) { 297 if (mlc->opacket & HIL_CTRL_APE) {
298 priv->tseq[3] |= HP_SDC_LPC_APE_IPF; 298 priv->tseq[3] |= HP_SDC_LPC_APE_IPF;
299 down_trylock(&mlc->csem); 299 BUG_ON(down_trylock(&mlc->csem));
300 } 300 }
301 enqueue: 301 enqueue:
302 hp_sdc_enqueue_transaction(&priv->trans); 302 hp_sdc_enqueue_transaction(&priv->trans);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index c3b661a666cb..7e5f30dbc0a0 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -1480,7 +1480,7 @@ l1oip_init(void)
1480 return -ENOMEM; 1480 return -ENOMEM;
1481 1481
1482 l1oip_cnt = 0; 1482 l1oip_cnt = 0;
1483 while (type[l1oip_cnt] && l1oip_cnt < MAX_CARDS) { 1483 while (l1oip_cnt < MAX_CARDS && type[l1oip_cnt]) {
1484 switch (type[l1oip_cnt] & 0xff) { 1484 switch (type[l1oip_cnt] & 0xff) {
1485 case 1: 1485 case 1:
1486 pri = 0; 1486 pri = 0;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5810fa906af0..5fe39c2a3d2b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -220,6 +220,7 @@ static int linear_run (mddev_t *mddev)
220 mddev->queue->unplug_fn = linear_unplug; 220 mddev->queue->unplug_fn = linear_unplug;
221 mddev->queue->backing_dev_info.congested_fn = linear_congested; 221 mddev->queue->backing_dev_info.congested_fn = linear_congested;
222 mddev->queue->backing_dev_info.congested_data = mddev; 222 mddev->queue->backing_dev_info.congested_data = mddev;
223 md_integrity_register(mddev);
223 return 0; 224 return 0;
224} 225}
225 226
@@ -256,6 +257,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
256 rcu_assign_pointer(mddev->private, newconf); 257 rcu_assign_pointer(mddev->private, newconf);
257 md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); 258 md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
258 set_capacity(mddev->gendisk, mddev->array_sectors); 259 set_capacity(mddev->gendisk, mddev->array_sectors);
260 revalidate_disk(mddev->gendisk);
259 call_rcu(&oldconf->rcu, free_conf); 261 call_rcu(&oldconf->rcu, free_conf);
260 return 0; 262 return 0;
261} 263}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d4351ff0849f..5b98bea4ff9b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1308,7 +1308,12 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1308 } 1308 }
1309 if (mddev->level != LEVEL_MULTIPATH) { 1309 if (mddev->level != LEVEL_MULTIPATH) {
1310 int role; 1310 int role;
1311 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1311 if (rdev->desc_nr < 0 ||
1312 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1313 role = 0xffff;
1314 rdev->desc_nr = -1;
1315 } else
1316 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1312 switch(role) { 1317 switch(role) {
1313 case 0xffff: /* spare */ 1318 case 0xffff: /* spare */
1314 break; 1319 break;
@@ -1394,8 +1399,14 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1394 if (rdev2->desc_nr+1 > max_dev) 1399 if (rdev2->desc_nr+1 > max_dev)
1395 max_dev = rdev2->desc_nr+1; 1400 max_dev = rdev2->desc_nr+1;
1396 1401
1397 if (max_dev > le32_to_cpu(sb->max_dev)) 1402 if (max_dev > le32_to_cpu(sb->max_dev)) {
1403 int bmask;
1398 sb->max_dev = cpu_to_le32(max_dev); 1404 sb->max_dev = cpu_to_le32(max_dev);
1405 rdev->sb_size = max_dev * 2 + 256;
1406 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1407 if (rdev->sb_size & bmask)
1408 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1409 }
1399 for (i=0; i<max_dev;i++) 1410 for (i=0; i<max_dev;i++)
1400 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1411 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1401 1412
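When max_dev grows, super_1_sync() now grows the on-disk superblock with it (two role bytes per device plus a 256-byte header) and rounds the result up to the device's logical block size: with bmask = block_size - 1 and a power-of-two block size, (size | bmask) + 1 is the next multiple. A tiny standalone check of that arithmetic, assuming a 512-byte block:

#include <stdio.h>

static unsigned round_up_to_block(unsigned size, unsigned block_size)
{
    unsigned bmask = block_size - 1;    /* block_size must be a power of two */

    if (size & bmask)                   /* not already aligned */
        size = (size | bmask) + 1;      /* next multiple of block_size */
    return size;
}

int main(void)
{
    /* e.g. max_dev = 130 devices: 130*2 + 256 = 516 bytes -> 1024 */
    printf("%u\n", round_up_to_block(130 * 2 + 256, 512));
    printf("%u\n", round_up_to_block(512, 512));    /* already aligned */
    return 0;
}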
@@ -1487,37 +1498,76 @@ static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1487 1498
1488static LIST_HEAD(pending_raid_disks); 1499static LIST_HEAD(pending_raid_disks);
1489 1500
1490static void md_integrity_check(mdk_rdev_t *rdev, mddev_t *mddev) 1501/*
1502 * Try to register data integrity profile for an mddev
1503 *
1504 * This is called when an array is started and after a disk has been kicked
1505 * from the array. It only succeeds if all working and active component devices
1506 * are integrity capable with matching profiles.
1507 */
1508int md_integrity_register(mddev_t *mddev)
1509{
1510 mdk_rdev_t *rdev, *reference = NULL;
1511
1512 if (list_empty(&mddev->disks))
1513 return 0; /* nothing to do */
1514 if (blk_get_integrity(mddev->gendisk))
1515 return 0; /* already registered */
1516 list_for_each_entry(rdev, &mddev->disks, same_set) {
1517 /* skip spares and non-functional disks */
1518 if (test_bit(Faulty, &rdev->flags))
1519 continue;
1520 if (rdev->raid_disk < 0)
1521 continue;
1522 /*
1523 * If at least one rdev is not integrity capable, we can not
1524 * enable data integrity for the md device.
1525 */
1526 if (!bdev_get_integrity(rdev->bdev))
1527 return -EINVAL;
1528 if (!reference) {
1529 /* Use the first rdev as the reference */
1530 reference = rdev;
1531 continue;
1532 }
1533 /* does this rdev's profile match the reference profile? */
1534 if (blk_integrity_compare(reference->bdev->bd_disk,
1535 rdev->bdev->bd_disk) < 0)
1536 return -EINVAL;
1537 }
1538 /*
1539 * All component devices are integrity capable and have matching
1540 * profiles, register the common profile for the md device.
1541 */
1542 if (blk_integrity_register(mddev->gendisk,
1543 bdev_get_integrity(reference->bdev)) != 0) {
1544 printk(KERN_ERR "md: failed to register integrity for %s\n",
1545 mdname(mddev));
1546 return -EINVAL;
1547 }
1548 printk(KERN_NOTICE "md: data integrity on %s enabled\n",
1549 mdname(mddev));
1550 return 0;
1551}
1552EXPORT_SYMBOL(md_integrity_register);
1553
1554/* Disable data integrity if non-capable/non-matching disk is being added */
1555void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
1491{ 1556{
1492 struct mdk_personality *pers = mddev->pers;
1493 struct gendisk *disk = mddev->gendisk;
1494 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); 1557 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
1495 struct blk_integrity *bi_mddev = blk_get_integrity(disk); 1558 struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
1496 1559
1497 /* Data integrity passthrough not supported on RAID 4, 5 and 6 */ 1560 if (!bi_mddev) /* nothing to do */
1498 if (pers && pers->level >= 4 && pers->level <= 6)
1499 return; 1561 return;
1500 1562 if (rdev->raid_disk < 0) /* skip spares */
1501 /* If rdev is integrity capable, register profile for mddev */
1502 if (!bi_mddev && bi_rdev) {
1503 if (blk_integrity_register(disk, bi_rdev))
1504 printk(KERN_ERR "%s: %s Could not register integrity!\n",
1505 __func__, disk->disk_name);
1506 else
1507 printk(KERN_NOTICE "Enabling data integrity on %s\n",
1508 disk->disk_name);
1509 return; 1563 return;
1510 } 1564 if (bi_rdev && blk_integrity_compare(mddev->gendisk,
1511 1565 rdev->bdev->bd_disk) >= 0)
1512 /* Check that mddev and rdev have matching profiles */ 1566 return;
1513 if (blk_integrity_compare(disk, rdev->bdev->bd_disk) < 0) { 1567 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
1514 printk(KERN_ERR "%s: %s/%s integrity mismatch!\n", __func__, 1568 blk_integrity_unregister(mddev->gendisk);
1515 disk->disk_name, rdev->bdev->bd_disk->disk_name);
1516 printk(KERN_NOTICE "Disabling data integrity on %s\n",
1517 disk->disk_name);
1518 blk_integrity_unregister(disk);
1519 }
1520} 1569}
1570EXPORT_SYMBOL(md_integrity_add_rdev);
1521 1571
1522static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) 1572static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1523{ 1573{
@@ -1591,7 +1641,6 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1591 /* May as well allow recovery to be retried once */ 1641 /* May as well allow recovery to be retried once */
1592 mddev->recovery_disabled = 0; 1642 mddev->recovery_disabled = 0;
1593 1643
1594 md_integrity_check(rdev, mddev);
1595 return 0; 1644 return 0;
1596 1645
1597 fail: 1646 fail:
@@ -2657,6 +2706,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2657 ssize_t rv = len; 2706 ssize_t rv = len;
2658 struct mdk_personality *pers; 2707 struct mdk_personality *pers;
2659 void *priv; 2708 void *priv;
2709 mdk_rdev_t *rdev;
2660 2710
2661 if (mddev->pers == NULL) { 2711 if (mddev->pers == NULL) {
2662 if (len == 0) 2712 if (len == 0)
@@ -2736,6 +2786,12 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2736 mddev_suspend(mddev); 2786 mddev_suspend(mddev);
2737 mddev->pers->stop(mddev); 2787 mddev->pers->stop(mddev);
2738 module_put(mddev->pers->owner); 2788 module_put(mddev->pers->owner);
2789 /* Invalidate devices that are now superfluous */
2790 list_for_each_entry(rdev, &mddev->disks, same_set)
2791 if (rdev->raid_disk >= mddev->raid_disks) {
2792 rdev->raid_disk = -1;
2793 clear_bit(In_sync, &rdev->flags);
2794 }
2739 mddev->pers = pers; 2795 mddev->pers = pers;
2740 mddev->private = priv; 2796 mddev->private = priv;
2741 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2797 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
@@ -3685,17 +3741,8 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
3685 3741
3686 mddev->array_sectors = sectors; 3742 mddev->array_sectors = sectors;
3687 set_capacity(mddev->gendisk, mddev->array_sectors); 3743 set_capacity(mddev->gendisk, mddev->array_sectors);
3688 if (mddev->pers) { 3744 if (mddev->pers)
3689 struct block_device *bdev = bdget_disk(mddev->gendisk, 0); 3745 revalidate_disk(mddev->gendisk);
3690
3691 if (bdev) {
3692 mutex_lock(&bdev->bd_inode->i_mutex);
3693 i_size_write(bdev->bd_inode,
3694 (loff_t)mddev->array_sectors << 9);
3695 mutex_unlock(&bdev->bd_inode->i_mutex);
3696 bdput(bdev);
3697 }
3698 }
3699 3746
3700 return len; 3747 return len;
3701} 3748}
@@ -4048,10 +4095,6 @@ static int do_md_run(mddev_t * mddev)
4048 } 4095 }
4049 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4096 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4050 4097
4051 if (pers->level >= 4 && pers->level <= 6)
4052 /* Cannot support integrity (yet) */
4053 blk_integrity_unregister(mddev->gendisk);
4054
4055 if (mddev->reshape_position != MaxSector && 4098 if (mddev->reshape_position != MaxSector &&
4056 pers->start_reshape == NULL) { 4099 pers->start_reshape == NULL) {
4057 /* This personality cannot handle reshaping... */ 4100 /* This personality cannot handle reshaping... */
@@ -4189,6 +4232,7 @@ static int do_md_run(mddev_t * mddev)
4189 md_wakeup_thread(mddev->thread); 4232 md_wakeup_thread(mddev->thread);
4190 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4233 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4191 4234
4235 revalidate_disk(mddev->gendisk);
4192 mddev->changed = 1; 4236 mddev->changed = 1;
4193 md_new_event(mddev); 4237 md_new_event(mddev);
4194 sysfs_notify_dirent(mddev->sysfs_state); 4238 sysfs_notify_dirent(mddev->sysfs_state);
@@ -5087,18 +5131,8 @@ static int update_size(mddev_t *mddev, sector_t num_sectors)
5087 return -ENOSPC; 5131 return -ENOSPC;
5088 } 5132 }
5089 rv = mddev->pers->resize(mddev, num_sectors); 5133 rv = mddev->pers->resize(mddev, num_sectors);
5090 if (!rv) { 5134 if (!rv)
5091 struct block_device *bdev; 5135 revalidate_disk(mddev->gendisk);
5092
5093 bdev = bdget_disk(mddev->gendisk, 0);
5094 if (bdev) {
5095 mutex_lock(&bdev->bd_inode->i_mutex);
5096 i_size_write(bdev->bd_inode,
5097 (loff_t)mddev->array_sectors << 9);
5098 mutex_unlock(&bdev->bd_inode->i_mutex);
5099 bdput(bdev);
5100 }
5101 }
5102 return rv; 5136 return rv;
5103} 5137}
5104 5138
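md_integrity_register() above replaces the old per-rdev check with a whole-array pass: skip faulty members and spares, take the first remaining member as the reference profile, refuse if any member lacks integrity support or differs from the reference, and only then register the common profile on the md gendisk. Reduced to its control flow, it is a compare-everything-against-the-first-good-element loop, sketched here over plain integers (purely illustrative, not the block-layer API):

#include <stdio.h>

/* 0 means "no integrity profile"; equal nonzero values mean matching profiles */
static int register_common_profile(const int *profiles, int n)
{
    int reference = 0;

    if (n == 0)
        return 0;                       /* nothing to do */
    for (int i = 0; i < n; i++) {
        if (profiles[i] == 0)
            return -1;                  /* one member is not capable: give up */
        if (reference == 0) {
            reference = profiles[i];    /* first usable member is the reference */
            continue;
        }
        if (profiles[i] != reference)
            return -1;                  /* mismatching profile */
    }
    printf("registering common profile %d\n", reference);
    return 0;
}

int main(void)
{
    int ok[]  = { 3, 3, 3 };
    int bad[] = { 3, 0, 3 };

    register_common_profile(ok, 3);                 /* succeeds */
    if (register_common_profile(bad, 3) < 0)
        printf("mixed profiles: integrity left disabled\n");
    return 0;
}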
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 9430a110db93..78f03168baf9 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -431,5 +431,7 @@ extern int md_allow_write(mddev_t *mddev);
431extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); 431extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
432extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); 432extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
433extern int md_check_no_bitmap(mddev_t *mddev); 433extern int md_check_no_bitmap(mddev_t *mddev);
434extern int md_integrity_register(mddev_t *mddev);
435void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
434 436
435#endif /* _MD_MD_H */ 437#endif /* _MD_MD_H */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 237fe3fd235c..7140909f6662 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -313,6 +313,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
313 set_bit(In_sync, &rdev->flags); 313 set_bit(In_sync, &rdev->flags);
314 rcu_assign_pointer(p->rdev, rdev); 314 rcu_assign_pointer(p->rdev, rdev);
315 err = 0; 315 err = 0;
316 md_integrity_add_rdev(rdev, mddev);
316 break; 317 break;
317 } 318 }
318 319
@@ -345,7 +346,9 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
345 /* lost the race, try later */ 346 /* lost the race, try later */
346 err = -EBUSY; 347 err = -EBUSY;
347 p->rdev = rdev; 348 p->rdev = rdev;
349 goto abort;
348 } 350 }
351 md_integrity_register(mddev);
349 } 352 }
350abort: 353abort:
351 354
@@ -519,7 +522,7 @@ static int multipath_run (mddev_t *mddev)
519 mddev->queue->unplug_fn = multipath_unplug; 522 mddev->queue->unplug_fn = multipath_unplug;
520 mddev->queue->backing_dev_info.congested_fn = multipath_congested; 523 mddev->queue->backing_dev_info.congested_fn = multipath_congested;
521 mddev->queue->backing_dev_info.congested_data = mddev; 524 mddev->queue->backing_dev_info.congested_data = mddev;
522 525 md_integrity_register(mddev);
523 return 0; 526 return 0;
524 527
525out_free_conf: 528out_free_conf:
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 335f490dcad6..898e2bdfee47 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -351,6 +351,7 @@ static int raid0_run(mddev_t *mddev)
351 351
352 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); 352 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
353 dump_zones(mddev); 353 dump_zones(mddev);
354 md_integrity_register(mddev);
354 return 0; 355 return 0;
355} 356}
356 357
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0569efba0c02..8726fd7ebce5 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1144,7 +1144,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1144 rcu_assign_pointer(p->rdev, rdev); 1144 rcu_assign_pointer(p->rdev, rdev);
1145 break; 1145 break;
1146 } 1146 }
1147 1147 md_integrity_add_rdev(rdev, mddev);
1148 print_conf(conf); 1148 print_conf(conf);
1149 return err; 1149 return err;
1150} 1150}
@@ -1178,7 +1178,9 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
1178 /* lost the race, try later */ 1178 /* lost the race, try later */
1179 err = -EBUSY; 1179 err = -EBUSY;
1180 p->rdev = rdev; 1180 p->rdev = rdev;
1181 goto abort;
1181 } 1182 }
1183 md_integrity_register(mddev);
1182 } 1184 }
1183abort: 1185abort:
1184 1186
@@ -2067,7 +2069,7 @@ static int run(mddev_t *mddev)
2067 mddev->queue->unplug_fn = raid1_unplug; 2069 mddev->queue->unplug_fn = raid1_unplug;
2068 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2070 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2069 mddev->queue->backing_dev_info.congested_data = mddev; 2071 mddev->queue->backing_dev_info.congested_data = mddev;
2070 2072 md_integrity_register(mddev);
2071 return 0; 2073 return 0;
2072 2074
2073out_no_mem: 2075out_no_mem:
@@ -2132,6 +2134,7 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
2132 return -EINVAL; 2134 return -EINVAL;
2133 set_capacity(mddev->gendisk, mddev->array_sectors); 2135 set_capacity(mddev->gendisk, mddev->array_sectors);
2134 mddev->changed = 1; 2136 mddev->changed = 1;
2137 revalidate_disk(mddev->gendisk);
2135 if (sectors > mddev->dev_sectors && 2138 if (sectors > mddev->dev_sectors &&
2136 mddev->recovery_cp == MaxSector) { 2139 mddev->recovery_cp == MaxSector) {
2137 mddev->recovery_cp = mddev->dev_sectors; 2140 mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7298a5e5a183..3d9020cf6f6e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1170,6 +1170,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1170 break; 1170 break;
1171 } 1171 }
1172 1172
1173 md_integrity_add_rdev(rdev, mddev);
1173 print_conf(conf); 1174 print_conf(conf);
1174 return err; 1175 return err;
1175} 1176}
@@ -1203,7 +1204,9 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
1203 /* lost the race, try later */ 1204 /* lost the race, try later */
1204 err = -EBUSY; 1205 err = -EBUSY;
1205 p->rdev = rdev; 1206 p->rdev = rdev;
1207 goto abort;
1206 } 1208 }
1209 md_integrity_register(mddev);
1207 } 1210 }
1208abort: 1211abort:
1209 1212
@@ -2225,6 +2228,7 @@ static int run(mddev_t *mddev)
2225 2228
2226 if (conf->near_copies < mddev->raid_disks) 2229 if (conf->near_copies < mddev->raid_disks)
2227 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); 2230 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2231 md_integrity_register(mddev);
2228 return 0; 2232 return 0;
2229 2233
2230out_free_conf: 2234out_free_conf:
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 37835538b58e..2b521ee67dfa 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3999,6 +3999,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
3999 return 0; 3999 return 0;
4000 } 4000 }
4001 4001
4002 /* Allow raid5_quiesce to complete */
4003 wait_event(conf->wait_for_overlap, conf->quiesce != 2);
4004
4002 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4005 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4003 return reshape_request(mddev, sector_nr, skipped); 4006 return reshape_request(mddev, sector_nr, skipped);
4004 4007
@@ -4316,6 +4319,15 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4316 return sectors * (raid_disks - conf->max_degraded); 4319 return sectors * (raid_disks - conf->max_degraded);
4317} 4320}
4318 4321
4322static void free_conf(raid5_conf_t *conf)
4323{
4324 shrink_stripes(conf);
4325 safe_put_page(conf->spare_page);
4326 kfree(conf->disks);
4327 kfree(conf->stripe_hashtbl);
4328 kfree(conf);
4329}
4330
4319static raid5_conf_t *setup_conf(mddev_t *mddev) 4331static raid5_conf_t *setup_conf(mddev_t *mddev)
4320{ 4332{
4321 raid5_conf_t *conf; 4333 raid5_conf_t *conf;
@@ -4447,11 +4459,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4447 4459
4448 abort: 4460 abort:
4449 if (conf) { 4461 if (conf) {
4450 shrink_stripes(conf); 4462 free_conf(conf);
4451 safe_put_page(conf->spare_page);
4452 kfree(conf->disks);
4453 kfree(conf->stripe_hashtbl);
4454 kfree(conf);
4455 return ERR_PTR(-EIO); 4463 return ERR_PTR(-EIO);
4456 } else 4464 } else
4457 return ERR_PTR(-ENOMEM); 4465 return ERR_PTR(-ENOMEM);
@@ -4629,12 +4637,8 @@ abort:
4629 md_unregister_thread(mddev->thread); 4637 md_unregister_thread(mddev->thread);
4630 mddev->thread = NULL; 4638 mddev->thread = NULL;
4631 if (conf) { 4639 if (conf) {
4632 shrink_stripes(conf);
4633 print_raid5_conf(conf); 4640 print_raid5_conf(conf);
4634 safe_put_page(conf->spare_page); 4641 free_conf(conf);
4635 kfree(conf->disks);
4636 kfree(conf->stripe_hashtbl);
4637 kfree(conf);
4638 } 4642 }
4639 mddev->private = NULL; 4643 mddev->private = NULL;
4640 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 4644 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
@@ -4649,13 +4653,10 @@ static int stop(mddev_t *mddev)
4649 4653
4650 md_unregister_thread(mddev->thread); 4654 md_unregister_thread(mddev->thread);
4651 mddev->thread = NULL; 4655 mddev->thread = NULL;
4652 shrink_stripes(conf);
4653 kfree(conf->stripe_hashtbl);
4654 mddev->queue->backing_dev_info.congested_fn = NULL; 4656 mddev->queue->backing_dev_info.congested_fn = NULL;
4655 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 4657 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
4656 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 4658 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
4657 kfree(conf->disks); 4659 free_conf(conf);
4658 kfree(conf);
4659 mddev->private = NULL; 4660 mddev->private = NULL;
4660 return 0; 4661 return 0;
4661} 4662}
@@ -4857,6 +4858,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
4857 return -EINVAL; 4858 return -EINVAL;
4858 set_capacity(mddev->gendisk, mddev->array_sectors); 4859 set_capacity(mddev->gendisk, mddev->array_sectors);
4859 mddev->changed = 1; 4860 mddev->changed = 1;
4861 revalidate_disk(mddev->gendisk);
4860 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { 4862 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
4861 mddev->recovery_cp = mddev->dev_sectors; 4863 mddev->recovery_cp = mddev->dev_sectors;
4862 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4864 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -5002,7 +5004,7 @@ static int raid5_start_reshape(mddev_t *mddev)
5002 spin_unlock_irqrestore(&conf->device_lock, flags); 5004 spin_unlock_irqrestore(&conf->device_lock, flags);
5003 } 5005 }
5004 mddev->raid_disks = conf->raid_disks; 5006 mddev->raid_disks = conf->raid_disks;
5005 mddev->reshape_position = 0; 5007 mddev->reshape_position = conf->reshape_progress;
5006 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5008 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5007 5009
5008 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 5010 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -5057,7 +5059,6 @@ static void end_reshape(raid5_conf_t *conf)
5057 */ 5059 */
5058static void raid5_finish_reshape(mddev_t *mddev) 5060static void raid5_finish_reshape(mddev_t *mddev)
5059{ 5061{
5060 struct block_device *bdev;
5061 raid5_conf_t *conf = mddev->private; 5062 raid5_conf_t *conf = mddev->private;
5062 5063
5063 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5064 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -5066,15 +5067,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
5066 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5067 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5067 set_capacity(mddev->gendisk, mddev->array_sectors); 5068 set_capacity(mddev->gendisk, mddev->array_sectors);
5068 mddev->changed = 1; 5069 mddev->changed = 1;
5069 5070 revalidate_disk(mddev->gendisk);
5070 bdev = bdget_disk(mddev->gendisk, 0);
5071 if (bdev) {
5072 mutex_lock(&bdev->bd_inode->i_mutex);
5073 i_size_write(bdev->bd_inode,
5074 (loff_t)mddev->array_sectors << 9);
5075 mutex_unlock(&bdev->bd_inode->i_mutex);
5076 bdput(bdev);
5077 }
5078 } else { 5071 } else {
5079 int d; 5072 int d;
5080 mddev->degraded = conf->raid_disks; 5073 mddev->degraded = conf->raid_disks;
@@ -5106,12 +5099,18 @@ static void raid5_quiesce(mddev_t *mddev, int state)
5106 5099
5107 case 1: /* stop all writes */ 5100 case 1: /* stop all writes */
5108 spin_lock_irq(&conf->device_lock); 5101 spin_lock_irq(&conf->device_lock);
5109 conf->quiesce = 1; 5102 /* '2' tells resync/reshape to pause so that all
5103 * active stripes can drain
5104 */
5105 conf->quiesce = 2;
5110 wait_event_lock_irq(conf->wait_for_stripe, 5106 wait_event_lock_irq(conf->wait_for_stripe,
5111 atomic_read(&conf->active_stripes) == 0 && 5107 atomic_read(&conf->active_stripes) == 0 &&
5112 atomic_read(&conf->active_aligned_reads) == 0, 5108 atomic_read(&conf->active_aligned_reads) == 0,
5113 conf->device_lock, /* nothing */); 5109 conf->device_lock, /* nothing */);
5110 conf->quiesce = 1;
5114 spin_unlock_irq(&conf->device_lock); 5111 spin_unlock_irq(&conf->device_lock);
5112 /* allow reshape to continue */
5113 wake_up(&conf->wait_for_overlap);
5115 break; 5114 break;
5116 5115
5117 case 0: /* re-enable writes */ 5116 case 0: /* re-enable writes */
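The quiesce rework adds an intermediate state: conf->quiesce goes to 2 first, which makes sync_request() pause (it now waits for quiesce != 2) while outstanding stripes drain, and only then does the value settle at 1 and the reshape waiters get woken. A compressed, single-threaded sketch of that state machine with invented helpers; the real code does the waiting on wait queues under conf->device_lock:

#include <stdio.h>

/* conf->quiesce: 0 = running, 2 = draining (resync pauses), 1 = quiesced */
static int quiesce;
static int active_stripes = 3;

/* models the new check at the top of sync_request() */
static int resync_may_proceed(void)
{
    return quiesce != 2;
}

static void quiesce_writes(void)
{
    quiesce = 2;                    /* phase 1: refuse new stripes, pause resync */
    while (active_stripes > 0)
        active_stripes--;           /* stands in for wait_event_lock_irq() */
    quiesce = 1;                    /* phase 2: fully quiesced */
    /* wake_up(&conf->wait_for_overlap) would release the resync thread here */
}

int main(void)
{
    quiesce_writes();
    printf("quiesce=%d active=%d resync_may_proceed=%d\n",
           quiesce, active_stripes, resync_may_proceed());
    return 0;
}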
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index bae61b22501c..7d430835655f 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -180,14 +180,9 @@ static struct completion irq_event;
180static int twl4030_irq_thread(void *data) 180static int twl4030_irq_thread(void *data)
181{ 181{
182 long irq = (long)data; 182 long irq = (long)data;
183 struct irq_desc *desc = irq_to_desc(irq);
184 static unsigned i2c_errors; 183 static unsigned i2c_errors;
185 static const unsigned max_i2c_errors = 100; 184 static const unsigned max_i2c_errors = 100;
186 185
187 if (!desc) {
188 pr_err("twl4030: Invalid IRQ: %ld\n", irq);
189 return -EINVAL;
190 }
191 186
192 current->flags |= PF_NOFREEZE; 187 current->flags |= PF_NOFREEZE;
193 188
@@ -240,7 +235,7 @@ static int twl4030_irq_thread(void *data)
240 } 235 }
241 local_irq_enable(); 236 local_irq_enable();
242 237
243 desc->chip->unmask(irq); 238 enable_irq(irq);
244 } 239 }
245 240
246 return 0; 241 return 0;
@@ -255,25 +250,13 @@ static int twl4030_irq_thread(void *data)
255 * thread. All we do here is acknowledge and mask the interrupt and wakeup 250 * thread. All we do here is acknowledge and mask the interrupt and wakeup
256 * the kernel thread. 251 * the kernel thread.
257 */ 252 */
258static void handle_twl4030_pih(unsigned int irq, struct irq_desc *desc) 253static irqreturn_t handle_twl4030_pih(int irq, void *devid)
259{ 254{
260 /* Acknowledge, clear *AND* mask the interrupt... */ 255 /* Acknowledge, clear *AND* mask the interrupt... */
261 desc->chip->ack(irq); 256 disable_irq_nosync(irq);
262 complete(&irq_event); 257 complete(devid);
263} 258 return IRQ_HANDLED;
264
265static struct task_struct *start_twl4030_irq_thread(long irq)
266{
267 struct task_struct *thread;
268
269 init_completion(&irq_event);
270 thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq");
271 if (!thread)
272 pr_err("twl4030: could not create irq %ld thread!\n", irq);
273
274 return thread;
275} 259}
276
277/*----------------------------------------------------------------------*/ 260/*----------------------------------------------------------------------*/
278 261
279/* 262/*
@@ -734,18 +717,28 @@ int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
734 } 717 }
735 718
736 /* install an irq handler to demultiplex the TWL4030 interrupt */ 719 /* install an irq handler to demultiplex the TWL4030 interrupt */
737 task = start_twl4030_irq_thread(irq_num);
738 if (!task) {
739 pr_err("twl4030: irq thread FAIL\n");
740 status = -ESRCH;
741 goto fail;
742 }
743 720
744 set_irq_data(irq_num, task);
745 set_irq_chained_handler(irq_num, handle_twl4030_pih);
746 721
747 return status; 722 init_completion(&irq_event);
748 723
724 status = request_irq(irq_num, handle_twl4030_pih, IRQF_DISABLED,
725 "TWL4030-PIH", &irq_event);
726 if (status < 0) {
727 pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
728 goto fail_rqirq;
729 }
730
731 task = kthread_run(twl4030_irq_thread, (void *)irq_num, "twl4030-irq");
732 if (IS_ERR(task)) {
733 pr_err("twl4030: could not create irq %d thread!\n", irq_num);
734 status = PTR_ERR(task);
735 goto fail_kthread;
736 }
737 return status;
738fail_kthread:
739 free_irq(irq_num, &irq_event);
740fail_rqirq:
741 /* clean up twl4030_sih_setup */
749fail: 742fail:
750 for (i = irq_base; i < irq_end; i++) 743 for (i = irq_base; i < irq_end; i++)
751 set_irq_chip_and_handler(i, NULL, NULL); 744 set_irq_chip_and_handler(i, NULL, NULL);
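The twl4030 rework drops the chained handler and the direct irq_desc poking in favour of the standard split: a primary handler that only masks the line (disable_irq_nosync()) and signals a completion, and a kernel thread that does the slow I2C work and re-enables the line with enable_irq(). A userspace analogue of that fast-signal/slow-worker split using POSIX threads; fake_hard_irq(), irq_thread() and the semaphore are stand-ins for the interrupt, the kthread and the completion:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t irq_event;                 /* stands in for the completion */
static int shutting_down;

/* "hard" handler: cheap work only - mask the source and signal the thread */
static void fake_hard_irq(void)
{
    /* disable_irq_nosync() would go here in the driver */
    sem_post(&irq_event);               /* complete(&irq_event) */
}

/* the kthread: waits for the completion, does the slow (I2C) work,
 * then re-enables the interrupt line */
static void *irq_thread(void *arg)
{
    (void)arg;
    for (;;) {
        sem_wait(&irq_event);           /* wait_for_completion() */
        if (shutting_down)
            break;
        printf("handling slow work outside hard-irq context\n");
        /* enable_irq() would go here */
    }
    return NULL;
}

int main(void)
{
    pthread_t t;

    sem_init(&irq_event, 0, 0);
    pthread_create(&t, NULL, irq_thread, NULL);

    fake_hard_irq();                    /* simulate two interrupts */
    fake_hard_irq();

    shutting_down = 1;                  /* tear down, like free_irq + stop */
    sem_post(&irq_event);
    pthread_join(t, NULL);
    sem_destroy(&irq_event);
    return 0;
}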
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 3e00fa8ea65f..4a7c32895be5 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -832,7 +832,9 @@ static int corkscrew_open(struct net_device *dev)
832 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 832 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
833 vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); 833 vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
834 } 834 }
835 vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ 835 if (i != 0)
836 vp->rx_ring[i - 1].next =
837 isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
836 outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); 838 outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
837 } 839 }
838 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ 840 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c34aee91250b..c20416850948 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2721,13 +2721,15 @@ dump_tx_ring(struct net_device *dev)
2721 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); 2721 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2722 issue_and_wait(dev, DownStall); 2722 issue_and_wait(dev, DownStall);
2723 for (i = 0; i < TX_RING_SIZE; i++) { 2723 for (i = 0; i < TX_RING_SIZE; i++) {
2724 pr_err(" %d: @%p length %8.8x status %8.8x\n", i, 2724 unsigned int length;
2725 &vp->tx_ring[i], 2725
2726#if DO_ZEROCOPY 2726#if DO_ZEROCOPY
2727 le32_to_cpu(vp->tx_ring[i].frag[0].length), 2727 length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
2728#else 2728#else
2729 le32_to_cpu(vp->tx_ring[i].length), 2729 length = le32_to_cpu(vp->tx_ring[i].length);
2730#endif 2730#endif
2731 pr_err(" %d: @%p length %8.8x status %8.8x\n",
2732 i, &vp->tx_ring[i], length,
2731 le32_to_cpu(vp->tx_ring[i].status)); 2733 le32_to_cpu(vp->tx_ring[i].status));
2732 } 2734 }
2733 if (!stalled) 2735 if (!stalled)
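The dump_tx_ring() change hoists the DO_ZEROCOPY #if out of the middle of the pr_err() argument list into a local variable; preprocessor conditionals inside the argument list of a function-like macro are undefined behaviour, and they are hard to read even where they happen to work. The same restructuring in miniature (struct desc and dump_one() are invented for the example):

#include <stdio.h>

#define DO_ZEROCOPY 1

struct desc {
    unsigned frag0_len;
    unsigned len;
    unsigned status;
};

static void dump_one(int i, const struct desc *d)
{
    unsigned length;

    /* pick the field outside the call, then pass a plain variable */
#if DO_ZEROCOPY
    length = d->frag0_len;
#else
    length = d->len;
#endif
    printf(" %d: @%p length %8.8x status %8.8x\n",
           i, (const void *)d, length, d->status);
}

int main(void)
{
    struct desc d = { .frag0_len = 0x40, .len = 0x60, .status = 0x1 };

    dump_one(0, &d);
    return 0;
}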
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 1686dca28748..1f016d66684a 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1474,13 +1474,13 @@ static void eexp_hw_init586(struct net_device *dev)
1474 outw(0x0000, ioaddr + 0x800c); 1474 outw(0x0000, ioaddr + 0x800c);
1475 outw(0x0000, ioaddr + 0x800e); 1475 outw(0x0000, ioaddr + 0x800e);
1476 1476
1477 for (i = 0; i < (sizeof(start_code)); i+=32) { 1477 for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) {
1478 int j; 1478 int j;
1479 outw(i, ioaddr + SM_PTR); 1479 outw(i, ioaddr + SM_PTR);
1480 for (j = 0; j < 16; j+=2) 1480 for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2)
1481 outw(start_code[(i+j)/2], 1481 outw(start_code[(i+j)/2],
1482 ioaddr+0x4000+j); 1482 ioaddr+0x4000+j);
1483 for (j = 0; j < 16; j+=2) 1483 for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2)
1484 outw(start_code[(i+j+16)/2], 1484 outw(start_code[(i+j+16)/2],
1485 ioaddr+0x8000+j); 1485 ioaddr+0x8000+j);
1486 } 1486 }
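The eexpress loop copies start_code, an array of 16-bit words, in 32-byte windows split across two I/O apertures; the new bounds are expressed with ARRAY_SIZE() and each inner loop is guarded so the final partial window cannot index past the end of the array. A standalone sketch of the same guarded windowed copy, with plain buffers in place of the outw() port writes:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned short start_code[] = {
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19
};                                      /* 19 words: not a multiple of 16 */

int main(void)
{
    unsigned short low[32] = { 0 }, high[32] = { 0 };
    unsigned i, j;

    /* i counts bytes; each 32-byte window is split 16/16 across two regions */
    for (i = 0; i < ARRAY_SIZE(start_code) * 2; i += 32) {
        for (j = 0; j < 16 && (i + j) / 2 < ARRAY_SIZE(start_code); j += 2)
            low[(i + j) / 2] = start_code[(i + j) / 2];
        for (j = 0; j < 16 && (i + j + 16) / 2 < ARRAY_SIZE(start_code); j += 2)
            high[(i + j + 16) / 2] = start_code[(i + j + 16) / 2];
    }

    printf("copied %u words (first=%u) without overrunning the source\n",
           (unsigned)ARRAY_SIZE(start_code), (unsigned)low[0]);
    return 0;
}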
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 78952f8324e2..fa311a950996 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0101" 43#define DRV_VERSION "EHEA_0102"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index e8d46cc1bec2..977c3d358279 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1545,6 +1545,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1545{ 1545{
1546 int ret, i; 1546 int ret, i;
1547 1547
1548 if (pr->qp)
1549 netif_napi_del(&pr->napi);
1550
1548 ret = ehea_destroy_qp(pr->qp); 1551 ret = ehea_destroy_qp(pr->qp);
1549 1552
1550 if (!ret) { 1553 if (!ret) {
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index dbf06e9313cc..2234118eedbb 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -366,9 +366,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
366 return -EINVAL; 366 return -EINVAL;
367 } 367 }
368 368
369 priv->rxic = mk_ic_value( 369 priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
370 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs), 370 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
371 cvals->rx_max_coalesced_frames);
372 371
373 /* Set up tx coalescing */ 372 /* Set up tx coalescing */
374 if ((cvals->tx_coalesce_usecs == 0) || 373 if ((cvals->tx_coalesce_usecs == 0) ||
@@ -390,9 +389,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
390 return -EINVAL; 389 return -EINVAL;
391 } 390 }
392 391
393 priv->txic = mk_ic_value( 392 priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
394 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs), 393 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
395 cvals->tx_max_coalesced_frames);
396 394
397 gfar_write(&priv->regs->rxic, 0); 395 gfar_write(&priv->regs->rxic, 0);
398 if (priv->rxcoalescing) 396 if (priv->rxcoalescing)
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 2a4faf9ade69..a9a61efa964c 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -274,6 +274,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
274 274
275 err = mbx->ops.read_posted(hw, msgbuf, 2); 275 err = mbx->ops.read_posted(hw, msgbuf, 2);
276 276
277 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
278
277 /* if nacked the vlan was rejected */ 279 /* if nacked the vlan was rejected */
278 if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) 280 if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)))
279 err = -E1000_ERR_MAC_INIT; 281 err = -E1000_ERR_MAC_INIT;
@@ -317,6 +319,8 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
317 if (!ret_val) 319 if (!ret_val)
318 ret_val = mbx->ops.read_posted(hw, msgbuf, 3); 320 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
319 321
322 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
323
320 /* if nacked the address was rejected, use "perm_addr" */ 324 /* if nacked the address was rejected, use "perm_addr" */
321 if (!ret_val && 325 if (!ret_val &&
322 (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) 326 (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 1b12c7ba275f..e11d83d5852b 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -96,6 +96,8 @@
96#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 96#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
97#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 97#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
98 98
99#define IXGBE_MAX_RSC_INT_RATE 162760
100
99/* wrapper around a pointer to a socket buffer, 101/* wrapper around a pointer to a socket buffer,
100 * so a DMA handle can be stored along with the buffer */ 102 * so a DMA handle can be stored along with the buffer */
101struct ixgbe_tx_buffer { 103struct ixgbe_tx_buffer {
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index b9923047ce11..522c03bc1dad 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -50,6 +50,51 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
50 u8 *eeprom_data); 50 u8 *eeprom_data);
51 51
52/** 52/**
53 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
54 * @hw: pointer to the HW structure
55 *
56 * The defaults for 82598 should be in the range of 50us to 50ms,
57 * however the hardware default for these parts is 500us to 1ms which is less
58 * than the 10ms recommended by the pci-e spec. To address this we need to
59 * increase the value to either 10ms to 250ms for capability version 1 config,
60 * or 16ms to 55ms for version 2.
61 **/
62void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
63{
64 struct ixgbe_adapter *adapter = hw->back;
65 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
66 u16 pcie_devctl2;
67
68 /* only take action if timeout value is defaulted to 0 */
69 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
70 goto out;
71
72 /*
 73 * if capabilities version is type 1 we can write the
74 * timeout of 10ms to 250ms through the GCR register
75 */
76 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
77 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
78 goto out;
79 }
80
81 /*
82 * for version 2 capabilities we need to write the config space
83 * directly in order to set the completion timeout value for
84 * 16ms to 55ms
85 */
86 pci_read_config_word(adapter->pdev,
87 IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
88 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
89 pci_write_config_word(adapter->pdev,
90 IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
91out:
92 /* disable completion timeout resend */
93 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
94 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
95}
96
97/**
53 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count 98 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
54 * @hw: pointer to hardware structure 99 * @hw: pointer to hardware structure
55 * 100 *
@@ -153,6 +198,26 @@ out:
153} 198}
154 199
155/** 200/**
201 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
202 * @hw: pointer to hardware structure
203 *
204 * Starts the hardware using the generic start_hw function.
205 * Then set pcie completion timeout
206 **/
207s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
208{
209 s32 ret_val = 0;
210
211 ret_val = ixgbe_start_hw_generic(hw);
212
213 /* set the completion timeout for interface */
214 if (ret_val == 0)
215 ixgbe_set_pcie_completion_timeout(hw);
216
217 return ret_val;
218}
219
220/**
156 * ixgbe_get_link_capabilities_82598 - Determines link capabilities 221 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
157 * @hw: pointer to hardware structure 222 * @hw: pointer to hardware structure
158 * @speed: pointer to link speed 223 * @speed: pointer to link speed
@@ -1085,7 +1150,7 @@ out:
1085static struct ixgbe_mac_operations mac_ops_82598 = { 1150static struct ixgbe_mac_operations mac_ops_82598 = {
1086 .init_hw = &ixgbe_init_hw_generic, 1151 .init_hw = &ixgbe_init_hw_generic,
1087 .reset_hw = &ixgbe_reset_hw_82598, 1152 .reset_hw = &ixgbe_reset_hw_82598,
1088 .start_hw = &ixgbe_start_hw_generic, 1153 .start_hw = &ixgbe_start_hw_82598,
1089 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 1154 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
1090 .get_media_type = &ixgbe_get_media_type_82598, 1155 .get_media_type = &ixgbe_get_media_type_82598,
1091 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, 1156 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 2a978008fd6e..79144e950a34 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1975,7 +1975,10 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
1975 * any other value means disable eitr, which is best 1975 * any other value means disable eitr, which is best
1976 * served by setting the interrupt rate very high 1976 * served by setting the interrupt rate very high
1977 */ 1977 */
1978 adapter->eitr_param = IXGBE_MAX_INT_RATE; 1978 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
1979 adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE;
1980 else
1981 adapter->eitr_param = IXGBE_MAX_INT_RATE;
1979 adapter->itr_setting = 0; 1982 adapter->itr_setting = 0;
1980 } 1983 }
1981 1984
@@ -1999,13 +2002,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
1999 2002
2000 ethtool_op_set_flags(netdev, data); 2003 ethtool_op_set_flags(netdev, data);
2001 2004
2002 if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)) 2005 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
2003 return 0; 2006 return 0;
2004 2007
2005 /* if state changes we need to update adapter->flags and reset */ 2008 /* if state changes we need to update adapter->flags and reset */
2006 if ((!!(data & ETH_FLAG_LRO)) != 2009 if ((!!(data & ETH_FLAG_LRO)) !=
2007 (!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) { 2010 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
2008 adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED; 2011 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2009 if (netif_running(netdev)) 2012 if (netif_running(netdev))
2010 ixgbe_reinit_locked(adapter); 2013 ixgbe_reinit_locked(adapter);
2011 else 2014 else
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 200454f30f6a..110c65ab5cb5 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -780,7 +780,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
780 prefetch(next_rxd); 780 prefetch(next_rxd);
781 cleaned_count++; 781 cleaned_count++;
782 782
783 if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE) 783 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
784 rsc_count = ixgbe_get_rsc_count(rx_desc); 784 rsc_count = ixgbe_get_rsc_count(rx_desc);
785 785
786 if (rsc_count) { 786 if (rsc_count) {
@@ -2036,7 +2036,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2036 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 2036 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2037 } 2037 }
2038 } else { 2038 } else {
2039 if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) && 2039 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
2040 (netdev->mtu <= ETH_DATA_LEN)) 2040 (netdev->mtu <= ETH_DATA_LEN))
2041 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 2041 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
2042 else 2042 else
@@ -2165,7 +2165,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2165 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2165 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2166 } 2166 }
2167 2167
2168 if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) { 2168 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2169 /* Enable 82599 HW-RSC */ 2169 /* Enable 82599 HW-RSC */
2170 for (i = 0; i < adapter->num_rx_queues; i++) { 2170 for (i = 0; i < adapter->num_rx_queues; i++) {
2171 j = adapter->rx_ring[i].reg_idx; 2171 j = adapter->rx_ring[i].reg_idx;
@@ -3812,8 +3812,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
3812 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 3812 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
3813 } else if (hw->mac.type == ixgbe_mac_82599EB) { 3813 } else if (hw->mac.type == ixgbe_mac_82599EB) {
3814 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 3814 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
3815 adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE; 3815 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
3816 adapter->flags |= IXGBE_FLAG2_RSC_ENABLED; 3816 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
3817 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 3817 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
3818 adapter->ring_feature[RING_F_FDIR].indices = 3818 adapter->ring_feature[RING_F_FDIR].indices =
3819 IXGBE_MAX_FDIR_INDICES; 3819 IXGBE_MAX_FDIR_INDICES;
@@ -5360,12 +5360,19 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
5360static void ixgbe_netpoll(struct net_device *netdev) 5360static void ixgbe_netpoll(struct net_device *netdev)
5361{ 5361{
5362 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5362 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5363 int i;
5363 5364
5364 disable_irq(adapter->pdev->irq);
5365 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 5365 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
5366 ixgbe_intr(adapter->pdev->irq, netdev); 5366 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5367 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
5368 for (i = 0; i < num_q_vectors; i++) {
5369 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
5370 ixgbe_msix_clean_many(0, q_vector);
5371 }
5372 } else {
5373 ixgbe_intr(adapter->pdev->irq, netdev);
5374 }
5367 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 5375 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
5368 enable_irq(adapter->pdev->irq);
5369} 5376}
5370#endif 5377#endif
5371 5378
@@ -5611,7 +5618,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5611 if (pci_using_dac) 5618 if (pci_using_dac)
5612 netdev->features |= NETIF_F_HIGHDMA; 5619 netdev->features |= NETIF_F_HIGHDMA;
5613 5620
5614 if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) 5621 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
5615 netdev->features |= NETIF_F_LRO; 5622 netdev->features |= NETIF_F_LRO;
5616 5623
5617 /* make sure the EEPROM is good */ 5624 /* make sure the EEPROM is good */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index fa87309dc087..be90eb4575f6 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -718,6 +718,12 @@
718#define IXGBE_ECC_STATUS_82599 0x110E0 718#define IXGBE_ECC_STATUS_82599 0x110E0
719#define IXGBE_BAR_CTRL_82599 0x110F4 719#define IXGBE_BAR_CTRL_82599 0x110F4
720 720
721/* PCI Express Control */
722#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
723#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
724#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
725#define IXGBE_GCR_CAP_VER2 0x00040000
726
721/* Time Sync Registers */ 727/* Time Sync Registers */
722#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ 728#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
723#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ 729#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1521,6 +1527,7 @@
1521 1527
1522/* PCI Bus Info */ 1528/* PCI Bus Info */
1523#define IXGBE_PCI_LINK_STATUS 0xB2 1529#define IXGBE_PCI_LINK_STATUS 0xB2
1530#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
1524#define IXGBE_PCI_LINK_WIDTH 0x3F0 1531#define IXGBE_PCI_LINK_WIDTH 0x3F0
1525#define IXGBE_PCI_LINK_WIDTH_1 0x10 1532#define IXGBE_PCI_LINK_WIDTH_1 0x10
1526#define IXGBE_PCI_LINK_WIDTH_2 0x20 1533#define IXGBE_PCI_LINK_WIDTH_2 0x20
@@ -1531,6 +1538,7 @@
1531#define IXGBE_PCI_LINK_SPEED_5000 0x2 1538#define IXGBE_PCI_LINK_SPEED_5000 0x2
1532#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E 1539#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
1533#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 1540#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
1541#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
1534 1542
1535/* Number of 100 microseconds we wait for PCI Express master disable */ 1543/* Number of 100 microseconds we wait for PCI Express master disable */
1536#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 1544#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 08c43f2ae72b..5a88b3f57693 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -249,6 +249,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
249 pci_unmap_page(mdev->pdev, 249 pci_unmap_page(mdev->pdev,
250 (dma_addr_t) be64_to_cpu(data->addr), 250 (dma_addr_t) be64_to_cpu(data->addr),
251 frag->size, PCI_DMA_TODEVICE); 251 frag->size, PCI_DMA_TODEVICE);
252 ++data;
252 } 253 }
253 } 254 }
254 /* Stamp the freed descriptor */ 255 /* Stamp the freed descriptor */
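The ++data added in the hunk above advances the unmap cursor so each fragment releases its own descriptor entry; without it the loop would act on the first entry nr_frags times. A minimal standalone sketch of the pattern, with made-up types and values rather than the driver's structures:

#include <stdio.h>

struct frag_desc {
        unsigned long addr;
        unsigned int size;
};

/* Release every descriptor exactly once.  Forgetting to advance the
 * cursor would "unmap" desc[0] repeatedly and leak the others. */
static void free_tx_frags(struct frag_desc *desc, int nr_frags)
{
        struct frag_desc *data = desc;
        int i;

        for (i = 0; i < nr_frags; i++) {
                printf("unmap addr=0x%lx size=%u\n", data->addr, data->size);
                ++data;                 /* move on to the next fragment */
        }
}

int main(void)
{
        struct frag_desc d[3] = { { 0x1000, 256 }, { 0x2000, 512 }, { 0x3000, 128 } };

        free_tx_frags(d, 3);
        return 0;
}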
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 637ac8b89bac..3cd8cfcf627b 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -221,7 +221,7 @@ netxen_napi_disable(struct netxen_adapter *adapter)
221 } 221 }
222} 222}
223 223
224static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) 224static int nx_set_dma_mask(struct netxen_adapter *adapter)
225{ 225{
226 struct pci_dev *pdev = adapter->pdev; 226 struct pci_dev *pdev = adapter->pdev;
227 uint64_t mask, cmask; 227 uint64_t mask, cmask;
@@ -229,19 +229,17 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
229 adapter->pci_using_dac = 0; 229 adapter->pci_using_dac = 0;
230 230
231 mask = DMA_BIT_MASK(32); 231 mask = DMA_BIT_MASK(32);
232 /*
233 * Consistent DMA mask is set to 32 bit because it cannot be set to
234 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
235 * come off this pool.
236 */
237 cmask = DMA_BIT_MASK(32); 232 cmask = DMA_BIT_MASK(32);
238 233
234 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
239#ifndef CONFIG_IA64 235#ifndef CONFIG_IA64
240 if (revision_id >= NX_P3_B0)
241 mask = DMA_BIT_MASK(39);
242 else if (revision_id == NX_P2_C1)
243 mask = DMA_BIT_MASK(35); 236 mask = DMA_BIT_MASK(35);
244#endif 237#endif
238 } else {
239 mask = DMA_BIT_MASK(39);
240 cmask = mask;
241 }
242
245 if (pci_set_dma_mask(pdev, mask) == 0 && 243 if (pci_set_dma_mask(pdev, mask) == 0 &&
246 pci_set_consistent_dma_mask(pdev, cmask) == 0) { 244 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
247 adapter->pci_using_dac = 1; 245 adapter->pci_using_dac = 1;
@@ -256,7 +254,7 @@ static int
256nx_update_dma_mask(struct netxen_adapter *adapter) 254nx_update_dma_mask(struct netxen_adapter *adapter)
257{ 255{
258 int change, shift, err; 256 int change, shift, err;
259 uint64_t mask, old_mask; 257 uint64_t mask, old_mask, old_cmask;
260 struct pci_dev *pdev = adapter->pdev; 258 struct pci_dev *pdev = adapter->pdev;
261 259
262 change = 0; 260 change = 0;
@@ -272,14 +270,29 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
272 270
273 if (change) { 271 if (change) {
274 old_mask = pdev->dma_mask; 272 old_mask = pdev->dma_mask;
273 old_cmask = pdev->dev.coherent_dma_mask;
274
275 mask = (1ULL<<(32+shift)) - 1; 275 mask = (1ULL<<(32+shift)) - 1;
276 276
277 err = pci_set_dma_mask(pdev, mask); 277 err = pci_set_dma_mask(pdev, mask);
278 if (err) 278 if (err)
279 return pci_set_dma_mask(pdev, old_mask); 279 goto err_out;
280
281 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
282
283 err = pci_set_consistent_dma_mask(pdev, mask);
284 if (err)
285 goto err_out;
286 }
287 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
280 } 288 }
281 289
282 return 0; 290 return 0;
291
292err_out:
293 pci_set_dma_mask(pdev, old_mask);
294 pci_set_consistent_dma_mask(pdev, old_cmask);
295 return err;
283} 296}
284 297
285static void netxen_check_options(struct netxen_adapter *adapter) 298static void netxen_check_options(struct netxen_adapter *adapter)
@@ -1006,7 +1019,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1006 revision_id = pdev->revision; 1019 revision_id = pdev->revision;
1007 adapter->ahw.revision_id = revision_id; 1020 adapter->ahw.revision_id = revision_id;
1008 1021
1009 err = nx_set_dma_mask(adapter, revision_id); 1022 err = nx_set_dma_mask(adapter);
1010 if (err) 1023 if (err)
1011 goto err_out_free_netdev; 1024 goto err_out_free_netdev;
1012 1025
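The reworked nx_update_dma_mask() above records both the streaming and the coherent mask before touching anything and restores the pair on any failure. A standalone sketch of that save-then-roll-back pattern; the setter below is hypothetical and only mimics a device that tops out at 39 bits:

#include <stdint.h>
#include <stdio.h>

struct dev_state {
        uint64_t dma_mask;
        uint64_t coherent_mask;
};

/* Hypothetical setter: pretend the hardware rejects masks above 39 bits. */
static int set_mask(uint64_t *slot, uint64_t mask)
{
        if (mask > ((1ULL << 39) - 1))
                return -1;
        *slot = mask;
        return 0;
}

/* Try to widen both masks; on any error restore the previous pair so the
 * device is never left with a half-updated configuration. */
static int update_masks(struct dev_state *st, uint64_t new_mask)
{
        uint64_t old_mask = st->dma_mask;
        uint64_t old_cmask = st->coherent_mask;

        if (set_mask(&st->dma_mask, new_mask))
                goto err_out;
        if (set_mask(&st->coherent_mask, new_mask))
                goto err_out;
        return 0;

err_out:
        set_mask(&st->dma_mask, old_mask);
        set_mask(&st->coherent_mask, old_cmask);
        return -1;
}

int main(void)
{
        struct dev_state st = { (1ULL << 32) - 1, (1ULL << 32) - 1 };
        int err;

        printf("widen to 35 bits: %d\n", update_masks(&st, (1ULL << 35) - 1));
        err = update_masks(&st, (1ULL << 48) - 1);
        printf("widen to 48 bits: %d, mask still %#llx\n",
               err, (unsigned long long)st.dma_mask);
        return 0;
}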
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 28368157dac4..a646a445fda9 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1611,8 +1611,11 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1611 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 1611 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
1612 && pcnet32_dwio_check(ioaddr)) { 1612 && pcnet32_dwio_check(ioaddr)) {
1613 a = &pcnet32_dwio; 1613 a = &pcnet32_dwio;
1614 } else 1614 } else {
1615 if (pcnet32_debug & NETIF_MSG_PROBE)
1616 printk(KERN_ERR PFX "No access methods\n");
1615 goto err_release_region; 1617 goto err_release_region;
1618 }
1616 } 1619 }
1617 1620
1618 chip_version = 1621 chip_version =
@@ -1719,7 +1722,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1719 ret = -ENOMEM; 1722 ret = -ENOMEM;
1720 goto err_release_region; 1723 goto err_release_region;
1721 } 1724 }
1722 SET_NETDEV_DEV(dev, &pdev->dev); 1725
1726 if (pdev)
1727 SET_NETDEV_DEV(dev, &pdev->dev);
1723 1728
1724 if (pcnet32_debug & NETIF_MSG_PROBE) 1729 if (pcnet32_debug & NETIF_MSG_PROBE)
1725 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); 1730 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
@@ -1818,7 +1823,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1818 1823
1819 spin_lock_init(&lp->lock); 1824 spin_lock_init(&lp->lock);
1820 1825
1821 SET_NETDEV_DEV(dev, &pdev->dev);
1822 lp->name = chipname; 1826 lp->name = chipname;
1823 lp->shared_irq = shared; 1827 lp->shared_irq = shared;
1824 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ 1828 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
@@ -1852,12 +1856,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1852 ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) 1856 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
1853 lp->options |= PCNET32_PORT_FD; 1857 lp->options |= PCNET32_PORT_FD;
1854 1858
1855 if (!a) {
1856 if (pcnet32_debug & NETIF_MSG_PROBE)
1857 printk(KERN_ERR PFX "No access methods\n");
1858 ret = -ENODEV;
1859 goto err_free_consistent;
1860 }
1861 lp->a = *a; 1859 lp->a = *a;
1862 1860
1863 /* prior to register_netdev, dev->name is not yet correct */ 1861 /* prior to register_netdev, dev->name is not yet correct */
@@ -1973,14 +1971,13 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1973 1971
1974 return 0; 1972 return 0;
1975 1973
1976 err_free_ring: 1974err_free_ring:
1977 pcnet32_free_ring(dev); 1975 pcnet32_free_ring(dev);
1978 err_free_consistent:
1979 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), 1976 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
1980 lp->init_block, lp->init_dma_addr); 1977 lp->init_block, lp->init_dma_addr);
1981 err_free_netdev: 1978err_free_netdev:
1982 free_netdev(dev); 1979 free_netdev(dev);
1983 err_release_region: 1980err_release_region:
1984 release_region(ioaddr, PCNET32_TOTAL_SIZE); 1981 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1985 return ret; 1982 return ret;
1986} 1983}
@@ -2089,6 +2086,7 @@ static void pcnet32_free_ring(struct net_device *dev)
2089static int pcnet32_open(struct net_device *dev) 2086static int pcnet32_open(struct net_device *dev)
2090{ 2087{
2091 struct pcnet32_private *lp = netdev_priv(dev); 2088 struct pcnet32_private *lp = netdev_priv(dev);
2089 struct pci_dev *pdev = lp->pci_dev;
2092 unsigned long ioaddr = dev->base_addr; 2090 unsigned long ioaddr = dev->base_addr;
2093 u16 val; 2091 u16 val;
2094 int i; 2092 int i;
@@ -2149,9 +2147,9 @@ static int pcnet32_open(struct net_device *dev)
2149 lp->a.write_csr(ioaddr, 124, val); 2147 lp->a.write_csr(ioaddr, 124, val);
2150 2148
2151 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ 2149 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
2152 if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && 2150 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
2153 (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || 2151 (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
2154 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { 2152 pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
2155 if (lp->options & PCNET32_PORT_ASEL) { 2153 if (lp->options & PCNET32_PORT_ASEL) {
2156 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; 2154 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
2157 if (netif_msg_link(lp)) 2155 if (netif_msg_link(lp))
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 639d11bc444e..cd37d739ac74 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1384,7 +1384,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1384 1384
1385 /* create a fragment for each channel */ 1385 /* create a fragment for each channel */
1386 bits = B; 1386 bits = B;
1387 while (nfree > 0 && len > 0) { 1387 while (len > 0) {
1388 list = list->next; 1388 list = list->next;
1389 if (list == &ppp->channels) { 1389 if (list == &ppp->channels) {
1390 i = 0; 1390 i = 0;
@@ -1431,29 +1431,31 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1431 *otherwise divide it according to the speed 1431 *otherwise divide it according to the speed
1432 *of the channel we are going to transmit on 1432 *of the channel we are going to transmit on
1433 */ 1433 */
1434 if (pch->speed == 0) { 1434 if (nfree > 0) {
1435 flen = totlen/nfree ; 1435 if (pch->speed == 0) {
1436 if (nbigger > 0) { 1436 flen = totlen/nfree ;
1437 flen++; 1437 if (nbigger > 0) {
1438 nbigger--; 1438 flen++;
1439 } 1439 nbigger--;
1440 } else { 1440 }
1441 flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / 1441 } else {
1442 ((totspeed*totfree)/pch->speed)) - hdrlen; 1442 flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
1443 if (nbigger > 0) { 1443 ((totspeed*totfree)/pch->speed)) - hdrlen;
1444 flen += ((totfree - nzero)*pch->speed)/totspeed; 1444 if (nbigger > 0) {
1445 nbigger -= ((totfree - nzero)*pch->speed)/ 1445 flen += ((totfree - nzero)*pch->speed)/totspeed;
1446 nbigger -= ((totfree - nzero)*pch->speed)/
1446 totspeed; 1447 totspeed;
1448 }
1447 } 1449 }
1450 nfree--;
1448 } 1451 }
1449 nfree--;
1450 1452
1451 /* 1453 /*
1452 *check if we are on the last channel or 1454 *check if we are on the last channel or
1453 *we exceded the lenght of the data to 1455 *we exceded the lenght of the data to
1454 *fragment 1456 *fragment
1455 */ 1457 */
1456 if ((nfree == 0) || (flen > len)) 1458 if ((nfree <= 0) || (flen > len))
1457 flen = len; 1459 flen = len;
1458 /* 1460 /*
1459 *it is not worth to tx on slow channels: 1461 *it is not worth to tx on slow channels:
@@ -1467,7 +1469,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1467 continue; 1469 continue;
1468 } 1470 }
1469 1471
1470 mtu = pch->chan->mtu + 2 - hdrlen; 1472 mtu = pch->chan->mtu - hdrlen;
1471 if (mtu < 4) 1473 if (mtu < 4)
1472 mtu = 4; 1474 mtu = 4;
1473 if (flen > mtu) 1475 if (flen > mtu)
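The rewritten loop above sizes each fragment in proportion to the channel's share of the total speed, then clamps the result to the remaining data and the channel MTU a few lines further down. A standalone sketch that evaluates the same integer expression on two made-up channels (all numbers are illustrative, not taken from any real link):

#include <stdio.h>

/* Speed-proportional fragment length, following the integer expression
 * used in the hunk above.  The caller is expected to clamp the result
 * to the remaining payload and to the channel MTU afterwards. */
static int frag_len(int totlen, int hdrlen, int totfree, int nzero,
                    int totspeed, int speed)
{
        return (((totfree - nzero) * (totlen + hdrlen * totfree)) /
                ((totspeed * totfree) / speed)) - hdrlen;
}

int main(void)
{
        int speeds[2] = { 100000, 300000 };     /* two channels, 1:3 ratio */
        int totspeed = 400000, totfree = 2, nzero = 0;
        int totlen = 1000, hdrlen = 4, i;

        for (i = 0; i < 2; i++)
                printf("channel %d: flen %d\n", i,
                       frag_len(totlen, hdrlen, totfree, nzero,
                                totspeed, speeds[i]));
        return 0;
}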
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index f0031f1f97e5..5f2090233d7b 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -1063,6 +1063,7 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1063 else { 1063 else {
1064 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); 1064 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
1065 1065
1066 po = NULL;
1066 while (++hash < PPPOE_HASH_SIZE) { 1067 while (++hash < PPPOE_HASH_SIZE) {
1067 po = pn->hash_table[hash]; 1068 po = pn->hash_table[hash];
1068 if (po) 1069 if (po)
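The po = NULL added above makes the /proc sequence walk forget the previous session before scanning the remaining hash buckets, so an empty tail of the table ends the iteration instead of yielding the old entry again. A small standalone sketch of that bucket walk, using a made-up string table in place of the session hash:

#include <stdio.h>

#define NBUCKETS 8

/* Return the first entry found after bucket 'hash', or NULL when the
 * rest of the table is empty.  Resetting 'po' first is what prevents a
 * stale pointer from being returned a second time. */
static const char *next_after(const char *table[NBUCKETS], int hash,
                              const char *po)
{
        po = NULL;                      /* forget the previous entry */
        while (++hash < NBUCKETS) {
                po = table[hash];
                if (po)
                        break;
        }
        return po;
}

int main(void)
{
        const char *table[NBUCKETS] = {
                "sess-a", NULL, "sess-b", NULL, NULL, NULL, NULL, NULL
        };
        const char *next = next_after(table, 2, table[2]);

        printf("next: %s\n", next ? next : "(end of table)");
        return 0;
}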
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index e7935d09c896..e0f9219a0aea 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2680,6 +2680,7 @@ out_unregister_pppol2tp_proto:
2680static void __exit pppol2tp_exit(void) 2680static void __exit pppol2tp_exit(void)
2681{ 2681{
2682 unregister_pppox_proto(PX_PROTO_OL2TP); 2682 unregister_pppox_proto(PX_PROTO_OL2TP);
2683 unregister_pernet_gen_device(pppol2tp_net_id, &pppol2tp_net_ops);
2683 proto_unregister(&pppol2tp_sk_proto); 2684 proto_unregister(&pppol2tp_sk_proto);
2684} 2685}
2685 2686
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
index 5345e47b35ac..4525cbe8dd69 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/s6gmac.c
@@ -793,7 +793,7 @@ static inline int s6gmac_phy_start(struct net_device *dev)
793 struct s6gmac *pd = netdev_priv(dev); 793 struct s6gmac *pd = netdev_priv(dev);
794 int i = 0; 794 int i = 0;
795 struct phy_device *p = NULL; 795 struct phy_device *p = NULL;
796 while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR)) 796 while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
797 i++; 797 i++;
798 p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, 798 p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
799 PHY_INTERFACE_MODE_RGMII); 799 PHY_INTERFACE_MODE_RGMII);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3550c5dcd93c..0a551d8f5d95 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1488,6 +1488,8 @@ static int sky2_up(struct net_device *dev)
1488 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); 1488 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
1489#endif 1489#endif
1490 1490
1491 sky2->restarting = 0;
1492
1491 err = sky2_rx_start(sky2); 1493 err = sky2_rx_start(sky2);
1492 if (err) 1494 if (err)
1493 goto err_out; 1495 goto err_out;
@@ -1500,6 +1502,9 @@ static int sky2_up(struct net_device *dev)
1500 1502
1501 sky2_set_multicast(dev); 1503 sky2_set_multicast(dev);
1502 1504
1505 /* wake queue incase we are restarting */
1506 netif_wake_queue(dev);
1507
1503 if (netif_msg_ifup(sky2)) 1508 if (netif_msg_ifup(sky2))
1504 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 1509 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
1505 return 0; 1510 return 0;
@@ -1533,6 +1538,8 @@ static inline int tx_dist(unsigned tail, unsigned head)
1533/* Number of list elements available for next tx */ 1538/* Number of list elements available for next tx */
1534static inline int tx_avail(const struct sky2_port *sky2) 1539static inline int tx_avail(const struct sky2_port *sky2)
1535{ 1540{
1541 if (unlikely(sky2->restarting))
1542 return 0;
1536 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod); 1543 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
1537} 1544}
1538 1545
@@ -1818,6 +1825,10 @@ static int sky2_down(struct net_device *dev)
1818 if (netif_msg_ifdown(sky2)) 1825 if (netif_msg_ifdown(sky2))
1819 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); 1826 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1820 1827
1828 /* explicitly shut off tx incase we're restarting */
1829 sky2->restarting = 1;
1830 netif_tx_disable(dev);
1831
1821 /* Force flow control off */ 1832 /* Force flow control off */
1822 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1833 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1823 1834
@@ -2359,7 +2370,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2359{ 2370{
2360 struct sky2_port *sky2 = netdev_priv(dev); 2371 struct sky2_port *sky2 = netdev_priv(dev);
2361 2372
2362 if (netif_running(dev)) { 2373 if (likely(netif_running(dev) && !sky2->restarting)) {
2363 netif_tx_lock(dev); 2374 netif_tx_lock(dev);
2364 sky2_tx_complete(sky2, last); 2375 sky2_tx_complete(sky2, last);
2365 netif_tx_unlock(dev); 2376 netif_tx_unlock(dev);
@@ -4283,6 +4294,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4283 spin_lock_init(&sky2->phy_lock); 4294 spin_lock_init(&sky2->phy_lock);
4284 sky2->tx_pending = TX_DEF_PENDING; 4295 sky2->tx_pending = TX_DEF_PENDING;
4285 sky2->rx_pending = RX_DEF_PENDING; 4296 sky2->rx_pending = RX_DEF_PENDING;
4297 sky2->restarting = 0;
4286 4298
4287 hw->dev[port] = dev; 4299 hw->dev[port] = dev;
4288 4300
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index b5549c9e5107..4486b066b43f 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2051,6 +2051,7 @@ struct sky2_port {
2051 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ 2051 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
2052 u8 rx_csum; 2052 u8 rx_csum;
2053 u8 wol; 2053 u8 wol;
2054 u8 restarting;
2054 enum flow_control flow_mode; 2055 enum flow_control flow_mode;
2055 enum flow_control flow_status; 2056 enum flow_control flow_status;
2056 2057
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index eb72d2e9ab3d..acfdccd44567 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -5059,7 +5059,7 @@ mii_get_phy(struct net_device *dev)
5059 if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ 5059 if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
5060 for (j=0; j<limit; j++) { /* Search PHY table */ 5060 for (j=0; j<limit; j++) { /* Search PHY table */
5061 if (id != phy_info[j].id) continue; /* ID match? */ 5061 if (id != phy_info[j].id) continue; /* ID match? */
5062 for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); 5062 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
5063 if (k < DE4X5_MAX_PHY) { 5063 if (k < DE4X5_MAX_PHY) {
5064 memcpy((char *)&lp->phy[k], 5064 memcpy((char *)&lp->phy[k],
5065 (char *)&phy_info[j], sizeof(struct phy_table)); 5065 (char *)&phy_info[j], sizeof(struct phy_table));
@@ -5072,7 +5072,7 @@ mii_get_phy(struct net_device *dev)
5072 break; 5072 break;
5073 } 5073 }
5074 if ((j == limit) && (i < DE4X5_MAX_MII)) { 5074 if ((j == limit) && (i < DE4X5_MAX_MII)) {
5075 for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); 5075 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
5076 lp->phy[k].addr = i; 5076 lp->phy[k].addr = i;
5077 lp->phy[k].id = id; 5077 lp->phy[k].id = id;
5078 lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ 5078 lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
@@ -5091,7 +5091,7 @@ mii_get_phy(struct net_device *dev)
5091 purgatory: 5091 purgatory:
5092 lp->active = 0; 5092 lp->active = 0;
5093 if (lp->phy[0].id) { /* Reset the PHY devices */ 5093 if (lp->phy[0].id) { /* Reset the PHY devices */
5094 for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/ 5094 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/
5095 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); 5095 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
5096 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); 5096 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
5097 5097
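Several hunks in this series (de4x5 above, and airo, libertas and pdc_stable further down) apply the same one-line fix: the index bound is moved in front of the array access so that && short-circuiting stops the loop before it reads one element past the end of a fully populated table. A tiny standalone demonstration with a made-up table:

#include <stdio.h>

#define MAX_PHY 4

int main(void)
{
        int id[MAX_PHY] = { 7, 8, 9, 10 };      /* fully populated table */
        int k;

        /*
         * Wrong order: id[k] is evaluated even when k == MAX_PHY, i.e.
         * one slot past the end of the array:
         *
         *         for (k = 0; id[k] && (k < MAX_PHY); k++)
         *                 ;
         *
         * Correct order: the bound is tested first, so the out-of-range
         * read never happens.
         */
        for (k = 0; k < MAX_PHY && id[k]; k++)
                ;
        printf("stopped at k=%d\n", k);
        return 0;
}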
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index c70604f0329e..8ce5e4cee168 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5918,20 +5918,19 @@ static int airo_set_essid(struct net_device *dev,
5918 readSsidRid(local, &SSID_rid); 5918 readSsidRid(local, &SSID_rid);
5919 5919
5920 /* Check if we asked for `any' */ 5920 /* Check if we asked for `any' */
5921 if(dwrq->flags == 0) { 5921 if (dwrq->flags == 0) {
5922 /* Just send an empty SSID list */ 5922 /* Just send an empty SSID list */
5923 memset(&SSID_rid, 0, sizeof(SSID_rid)); 5923 memset(&SSID_rid, 0, sizeof(SSID_rid));
5924 } else { 5924 } else {
5925 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 5925 unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
5926 5926
5927 /* Check the size of the string */ 5927 /* Check the size of the string */
5928 if(dwrq->length > IW_ESSID_MAX_SIZE) { 5928 if (dwrq->length > IW_ESSID_MAX_SIZE)
5929 return -E2BIG ; 5929 return -E2BIG ;
5930 } 5930
5931 /* Check if index is valid */ 5931 /* Check if index is valid */
5932 if((index < 0) || (index >= 4)) { 5932 if (index >= ARRAY_SIZE(SSID_rid.ssids))
5933 return -EINVAL; 5933 return -EINVAL;
5934 }
5935 5934
5936 /* Set the SSID */ 5935 /* Set the SSID */
5937 memset(SSID_rid.ssids[index].ssid, 0, 5936 memset(SSID_rid.ssids[index].ssid, 0,
@@ -6819,7 +6818,7 @@ static int airo_set_txpow(struct net_device *dev,
6819 return -EINVAL; 6818 return -EINVAL;
6820 } 6819 }
6821 clear_bit (FLAG_RADIO_OFF, &local->flags); 6820 clear_bit (FLAG_RADIO_OFF, &local->flags);
6822 for (i = 0; cap_rid.txPowerLevels[i] && (i < 8); i++) 6821 for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++)
6823 if (v == cap_rid.txPowerLevels[i]) { 6822 if (v == cap_rid.txPowerLevels[i]) {
6824 readConfigRid(local, 1); 6823 readConfigRid(local, 1);
6825 local->config.txPower = v; 6824 local->config.txPower = v;
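The airo change above switches the key index to unsigned, so the single test against the array size also catches the case where the index field was zero and the subtraction wrapped around; the separate (index < 0) check becomes unnecessary. A standalone sketch of that idiom (the 0x00ff mask and the three-slot table are made up for the example):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        char ssids[3][32] = { "alpha", "beta", "gamma" };
        unsigned flags = 0x0000;                /* no index bits set */
        unsigned idx = (flags & 0x00ff) - 1;    /* wraps to a huge value */

        /* One unsigned comparison rejects both "negative" and too-large
         * indices, so no separate (idx < 0) test is needed. */
        if (idx >= ARRAY_SIZE(ssids))
                printf("rejected: index out of range\n");
        else
                printf("using slot %u (%s)\n", idx, ssids[idx]);
        return 0;
}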
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index a2fda702b620..ce0e86c36a82 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -460,7 +460,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
460 integer = swab32(eep->modalHeader.antCtrlCommon); 460 integer = swab32(eep->modalHeader.antCtrlCommon);
461 eep->modalHeader.antCtrlCommon = integer; 461 eep->modalHeader.antCtrlCommon = integer;
462 462
463 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 463 for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
464 integer = swab32(eep->modalHeader.antCtrlChain[i]); 464 integer = swab32(eep->modalHeader.antCtrlChain[i]);
465 eep->modalHeader.antCtrlChain[i] = integer; 465 eep->modalHeader.antCtrlChain[i] = integer;
466 } 466 }
@@ -914,7 +914,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
914 ctlMode, numCtlModes, isHt40CtlMode, 914 ctlMode, numCtlModes, isHt40CtlMode,
915 (pCtlMode[ctlMode] & EXT_ADDITIVE)); 915 (pCtlMode[ctlMode] & EXT_ADDITIVE));
916 916
917 for (i = 0; (i < AR5416_NUM_CTLS) && 917 for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
918 pEepData->ctlIndex[i]; i++) { 918 pEepData->ctlIndex[i]; i++) {
919 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 919 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
920 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x " 920 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index fbb3a573463e..2de6471d4be9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -112,7 +112,7 @@ enum iwl3945_antenna {
112#define IWL_TX_FIFO_NONE 7 112#define IWL_TX_FIFO_NONE 7
113 113
114/* Minimum number of queues. MAX_NUM is defined in hw specific files */ 114/* Minimum number of queues. MAX_NUM is defined in hw specific files */
115#define IWL_MIN_NUM_QUEUES 4 115#define IWL39_MIN_NUM_QUEUES 4
116 116
117#define IEEE80211_DATA_LEN 2304 117#define IEEE80211_DATA_LEN 2304
118#define IEEE80211_4ADDR_LEN 30 118#define IEEE80211_4ADDR_LEN 30
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 6ab07165ea28..18b135f510e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1332,6 +1332,9 @@ int iwl_setup_mac(struct iwl_priv *priv)
1332 1332
1333 hw->wiphy->custom_regulatory = true; 1333 hw->wiphy->custom_regulatory = true;
1334 1334
1335 /* Firmware does not support this */
1336 hw->wiphy->disable_beacon_hints = true;
1337
1335 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 1338 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
1336 /* we create the 802.11 header and a zero-length SSID element */ 1339 /* we create the 802.11 header and a zero-length SSID element */
1337 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; 1340 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 11e08c068917..ca00cc8ad4c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -308,18 +308,18 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
308 return -ENODATA; 308 return -ENODATA;
309 } 309 }
310 310
311 ptr = priv->eeprom;
312 if (!ptr) {
313 IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
314 return -ENOMEM;
315 }
316
311 /* 4 characters for byte 0xYY */ 317 /* 4 characters for byte 0xYY */
312 buf = kzalloc(buf_size, GFP_KERNEL); 318 buf = kzalloc(buf_size, GFP_KERNEL);
313 if (!buf) { 319 if (!buf) {
314 IWL_ERR(priv, "Can not allocate Buffer\n"); 320 IWL_ERR(priv, "Can not allocate Buffer\n");
315 return -ENOMEM; 321 return -ENOMEM;
316 } 322 }
317
318 ptr = priv->eeprom;
319 if (!ptr) {
320 IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
321 return -ENOMEM;
322 }
323 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n", 323 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n",
324 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) 324 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
325 ? "OTP" : "EEPROM"); 325 ? "OTP" : "EEPROM");
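The debugfs hunk above moves the EEPROM pointer check ahead of the buffer allocation so the early error return can no longer leak the freshly allocated buffer. A standalone sketch of the same ordering, with a plain calloc() standing in for the kernel allocator:

#include <stdio.h>
#include <stdlib.h>

/* Format 'src' into a freshly allocated buffer.  Validating the input
 * before allocating means the error path never has anything to free. */
static char *dump_nvm(const char *src, size_t buf_size)
{
        char *buf;

        if (!src) {                     /* check inputs first */
                fprintf(stderr, "invalid EEPROM/OTP memory\n");
                return NULL;
        }

        buf = calloc(1, buf_size);
        if (!buf)
                return NULL;

        snprintf(buf, buf_size, "NVM dump: %s", src);
        return buf;
}

int main(void)
{
        char *out = dump_nvm("0x1234 0xabcd", 64);

        if (out) {
                puts(out);
                free(out);
        }
        return 0;
}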
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index e2d620f0b6e8..650e20af20fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -258,8 +258,10 @@ struct iwl_channel_info {
258#define IWL_TX_FIFO_HCCA_2 6 258#define IWL_TX_FIFO_HCCA_2 6
259#define IWL_TX_FIFO_NONE 7 259#define IWL_TX_FIFO_NONE 7
260 260
261/* Minimum number of queues. MAX_NUM is defined in hw specific files */ 261/* Minimum number of queues. MAX_NUM is defined in hw specific files.
262#define IWL_MIN_NUM_QUEUES 4 262 * Set the minimum to accommodate the 4 standard TX queues, 1 command
263 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
264#define IWL_MIN_NUM_QUEUES 10
263 265
264/* Power management (not Tx power) structures */ 266/* Power management (not Tx power) structures */
265 267
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 2addf735b193..ffd5c61a7553 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -566,6 +566,8 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
566 unsigned long flags; 566 unsigned long flags;
567 567
568 spin_lock_irqsave(&priv->sta_lock, flags); 568 spin_lock_irqsave(&priv->sta_lock, flags);
569 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
570 keyconf->keyidx);
569 571
570 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) 572 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
571 IWL_ERR(priv, "index %d not used in uCode key table.\n", 573 IWL_ERR(priv, "index %d not used in uCode key table.\n",
@@ -573,6 +575,11 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
573 575
574 priv->default_wep_key--; 576 priv->default_wep_key--;
575 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); 577 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
578 if (iwl_is_rfkill(priv)) {
579 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
580 spin_unlock_irqrestore(&priv->sta_lock, flags);
581 return 0;
582 }
576 ret = iwl_send_static_wepkey_cmd(priv, 1); 583 ret = iwl_send_static_wepkey_cmd(priv, 1);
577 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", 584 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
578 keyconf->keyidx, ret); 585 keyconf->keyidx, ret);
@@ -853,6 +860,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
853 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 860 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
854 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 861 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
855 862
863 if (iwl_is_rfkill(priv)) {
864 IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n");
865 spin_unlock_irqrestore(&priv->sta_lock, flags);
866 return 0;
867 }
856 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 868 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
857 spin_unlock_irqrestore(&priv->sta_lock, flags); 869 spin_unlock_irqrestore(&priv->sta_lock, flags);
858 return ret; 870 return ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 9bbeec9427f0..2e89040e63be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -720,8 +720,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
720 goto drop_unlock; 720 goto drop_unlock;
721 } 721 }
722 722
723 spin_unlock_irqrestore(&priv->lock, flags);
724
725 hdr_len = ieee80211_hdrlen(fc); 723 hdr_len = ieee80211_hdrlen(fc);
726 724
727 /* Find (or create) index into station table for destination station */ 725 /* Find (or create) index into station table for destination station */
@@ -729,7 +727,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
729 if (sta_id == IWL_INVALID_STATION) { 727 if (sta_id == IWL_INVALID_STATION) {
730 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 728 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
731 hdr->addr1); 729 hdr->addr1);
732 goto drop; 730 goto drop_unlock;
733 } 731 }
734 732
735 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); 733 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
@@ -750,14 +748,17 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
750 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 748 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
751 swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); 749 swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
752 } 750 }
753 priv->stations[sta_id].tid[tid].tfds_in_queue++;
754 } 751 }
755 752
756 txq = &priv->txq[txq_id]; 753 txq = &priv->txq[txq_id];
757 q = &txq->q; 754 q = &txq->q;
758 txq->swq_id = swq_id; 755 txq->swq_id = swq_id;
759 756
760 spin_lock_irqsave(&priv->lock, flags); 757 if (unlikely(iwl_queue_space(q) < q->high_mark))
758 goto drop_unlock;
759
760 if (ieee80211_is_data_qos(fc))
761 priv->stations[sta_id].tid[tid].tfds_in_queue++;
761 762
762 /* Set up driver data for this TFD */ 763 /* Set up driver data for this TFD */
763 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 764 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -902,7 +903,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
902 903
903drop_unlock: 904drop_unlock:
904 spin_unlock_irqrestore(&priv->lock, flags); 905 spin_unlock_irqrestore(&priv->lock, flags);
905drop:
906 return -1; 906 return -1;
907} 907}
908EXPORT_SYMBOL(iwl_tx_skb); 908EXPORT_SYMBOL(iwl_tx_skb);
@@ -1171,6 +1171,8 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1171 IWL_ERR(priv, "Start AGG on invalid station\n"); 1171 IWL_ERR(priv, "Start AGG on invalid station\n");
1172 return -ENXIO; 1172 return -ENXIO;
1173 } 1173 }
1174 if (unlikely(tid >= MAX_TID_COUNT))
1175 return -EINVAL;
1174 1176
1175 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 1177 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
1176 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); 1178 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 956798f2c80c..523843369ca2 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -3968,6 +3968,9 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3968 3968
3969 hw->wiphy->custom_regulatory = true; 3969 hw->wiphy->custom_regulatory = true;
3970 3970
3971 /* Firmware does not support this */
3972 hw->wiphy->disable_beacon_hints = true;
3973
3971 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3974 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3972 /* we create the 802.11 header and a zero-length SSID element */ 3975 /* we create the 802.11 header and a zero-length SSID element */
3973 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; 3976 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
@@ -4018,10 +4021,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4018 SET_IEEE80211_DEV(hw, &pdev->dev); 4021 SET_IEEE80211_DEV(hw, &pdev->dev);
4019 4022
4020 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) || 4023 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) ||
4021 (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) { 4024 (iwl3945_mod_params.num_of_queues < IWL39_MIN_NUM_QUEUES)) {
4022 IWL_ERR(priv, 4025 IWL_ERR(priv,
4023 "invalid queues_num, should be between %d and %d\n", 4026 "invalid queues_num, should be between %d and %d\n",
4024 IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); 4027 IWL39_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
4025 err = -EINVAL; 4028 err = -EINVAL;
4026 goto out_ieee80211_free_hw; 4029 goto out_ieee80211_free_hw;
4027 } 4030 }
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 834a7f544e5d..e2334d123599 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -220,6 +220,7 @@ int iwm_store_rxiq_calib_result(struct iwm_priv *iwm)
220 eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); 220 eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ);
221 if (IS_ERR(eeprom_rxiq)) { 221 if (IS_ERR(eeprom_rxiq)) {
222 IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n"); 222 IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n");
223 kfree(rxiq);
223 return PTR_ERR(eeprom_rxiq); 224 return PTR_ERR(eeprom_rxiq);
224 } 225 }
225 226
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index aea5ccf24ccf..bf294e41753b 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -106,10 +106,8 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
106 int ret = 0; 106 int ret = 0;
107 107
108 wdev = iwm_wdev_alloc(sizeof_bus, dev); 108 wdev = iwm_wdev_alloc(sizeof_bus, dev);
109 if (!wdev) { 109 if (IS_ERR(wdev))
110 dev_err(dev, "no memory for wireless device instance\n"); 110 return wdev;
111 return ERR_PTR(-ENOMEM);
112 }
113 111
114 iwm = wdev_to_iwm(wdev); 112 iwm = wdev_to_iwm(wdev);
115 iwm->bus_ops = if_ops; 113 iwm->bus_ops = if_ops;
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
index 9a5408e7d94a..5c6968101f0d 100644
--- a/drivers/net/wireless/libertas/11d.c
+++ b/drivers/net/wireless/libertas/11d.c
@@ -47,7 +47,7 @@ static u8 lbs_region_2_code(u8 *region)
47{ 47{
48 u8 i; 48 u8 i;
49 49
50 for (i = 0; region[i] && i < COUNTRY_CODE_LEN; i++) 50 for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++)
51 region[i] = toupper(region[i]); 51 region[i] = toupper(region[i]);
52 52
53 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) { 53 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index b9b374119033..d6997371c27e 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -1,6 +1,7 @@
1/* Copyright (C) 2006, Red Hat, Inc. */ 1/* Copyright (C) 2006, Red Hat, Inc. */
2 2
3#include <linux/types.h> 3#include <linux/types.h>
4#include <linux/kernel.h>
4#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
5#include <linux/ieee80211.h> 6#include <linux/ieee80211.h>
6#include <linux/if_arp.h> 7#include <linux/if_arp.h>
@@ -43,21 +44,21 @@ static int get_common_rates(struct lbs_private *priv,
43 u16 *rates_size) 44 u16 *rates_size)
44{ 45{
45 u8 *card_rates = lbs_bg_rates; 46 u8 *card_rates = lbs_bg_rates;
46 size_t num_card_rates = sizeof(lbs_bg_rates);
47 int ret = 0, i, j; 47 int ret = 0, i, j;
48 u8 tmp[30]; 48 u8 tmp[(ARRAY_SIZE(lbs_bg_rates) - 1) * (*rates_size - 1)];
49 size_t tmp_size = 0; 49 size_t tmp_size = 0;
50 50
51 /* For each rate in card_rates that exists in rate1, copy to tmp */ 51 /* For each rate in card_rates that exists in rate1, copy to tmp */
52 for (i = 0; card_rates[i] && (i < num_card_rates); i++) { 52 for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && card_rates[i]; i++) {
53 for (j = 0; rates[j] && (j < *rates_size); j++) { 53 for (j = 0; j < *rates_size && rates[j]; j++) {
54 if (rates[j] == card_rates[i]) 54 if (rates[j] == card_rates[i])
55 tmp[tmp_size++] = card_rates[i]; 55 tmp[tmp_size++] = card_rates[i];
56 } 56 }
57 } 57 }
58 58
59 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); 59 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
60 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates); 60 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates,
61 ARRAY_SIZE(lbs_bg_rates));
61 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); 62 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
62 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); 63 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
63 64
@@ -69,10 +70,7 @@ static int get_common_rates(struct lbs_private *priv,
69 lbs_pr_alert("Previously set fixed data rate %#x isn't " 70 lbs_pr_alert("Previously set fixed data rate %#x isn't "
70 "compatible with the network.\n", priv->cur_rate); 71 "compatible with the network.\n", priv->cur_rate);
71 ret = -1; 72 ret = -1;
72 goto done;
73 } 73 }
74 ret = 0;
75
76done: 74done:
77 memset(rates, 0, *rates_size); 75 memset(rates, 0, *rates_size);
78 *rates_size = min_t(int, tmp_size, *rates_size); 76 *rates_size = min_t(int, tmp_size, *rates_size);
@@ -322,7 +320,7 @@ static int lbs_associate(struct lbs_private *priv,
322 rates = (struct mrvl_ie_rates_param_set *) pos; 320 rates = (struct mrvl_ie_rates_param_set *) pos;
323 rates->header.type = cpu_to_le16(TLV_TYPE_RATES); 321 rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
324 memcpy(&rates->rates, &bss->rates, MAX_RATES); 322 memcpy(&rates->rates, &bss->rates, MAX_RATES);
325 tmplen = MAX_RATES; 323 tmplen = min_t(u16, ARRAY_SIZE(rates->rates), MAX_RATES);
326 if (get_common_rates(priv, rates->rates, &tmplen)) { 324 if (get_common_rates(priv, rates->rates, &tmplen)) {
327 ret = -1; 325 ret = -1;
328 goto done; 326 goto done;
@@ -598,7 +596,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
598 596
599 /* Copy Data rates from the rates recorded in scan response */ 597 /* Copy Data rates from the rates recorded in scan response */
600 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates)); 598 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
601 ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES); 599 ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), MAX_RATES);
602 memcpy(cmd.bss.rates, bss->rates, ratesize); 600 memcpy(cmd.bss.rates, bss->rates, ratesize);
603 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) { 601 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
604 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n"); 602 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
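The libertas hunks above replace byte counts from sizeof() and a hard-coded 30-byte buffer with element counts derived from ARRAY_SIZE(). For the u8 rate tables the two happen to agree, which is why the old code worked; the macro keeps the bound correct if the element type ever changes. A one-line reminder of the difference, on a made-up u16 table:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        uint16_t rates[12];

        /* sizeof() counts bytes, ARRAY_SIZE() counts elements. */
        printf("bytes=%zu elements=%zu\n", sizeof(rates), ARRAY_SIZE(rates));
        return 0;
}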
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 601b54249677..6c95af3023cc 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -5,6 +5,7 @@
5 * for sending scan commands to the firmware. 5 * for sending scan commands to the firmware.
6 */ 6 */
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/kernel.h>
8#include <linux/etherdevice.h> 9#include <linux/etherdevice.h>
9#include <linux/if_arp.h> 10#include <linux/if_arp.h>
10#include <asm/unaligned.h> 11#include <asm/unaligned.h>
@@ -876,7 +877,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
876 iwe.u.bitrate.disabled = 0; 877 iwe.u.bitrate.disabled = 0;
877 iwe.u.bitrate.value = 0; 878 iwe.u.bitrate.value = 0;
878 879
879 for (j = 0; bss->rates[j] && (j < sizeof(bss->rates)); j++) { 880 for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) {
880 /* Bit rate given in 500 kb/s units */ 881 /* Bit rate given in 500 kb/s units */
881 iwe.u.bitrate.value = bss->rates[j] * 500000; 882 iwe.u.bitrate.value = bss->rates[j] * 500000;
882 current_val = iwe_stream_add_value(info, start, current_val, 883 current_val = iwe_stream_add_value(info, start, current_val,
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 40b07b988224..3bd3c779fff3 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -698,7 +698,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
698 && !mac->pass_ctrl) 698 && !mac->pass_ctrl)
699 return 0; 699 return 0;
700 700
701 fc = *(__le16 *)buffer; 701 fc = get_unaligned((__le16*)buffer);
702 need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); 702 need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc);
703 703
704 skb = dev_alloc_skb(length + (need_padding ? 2 : 0)); 704 skb = dev_alloc_skb(length + (need_padding ? 2 : 0));
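The zd_mac change above replaces a direct dereference of the frame-control word with get_unaligned(), because the field can land at an odd offset inside the received buffer. A portable user-space equivalent composes the 16-bit little-endian value byte by byte; a small standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Alignment- and endian-safe load of a 16-bit little-endian value. */
static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        uint8_t frame[4] = { 0xff, 0x88, 0x02, 0x00 };

        /* frame + 1 is deliberately misaligned for a 16-bit access. */
        printf("fc = 0x%04x\n", get_le16(frame + 1));
        return 0;
}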
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 0f0e0b919ef4..a45b0c0d574e 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -70,7 +70,6 @@
70#undef CCIO_COLLECT_STATS 70#undef CCIO_COLLECT_STATS
71#endif 71#endif
72 72
73#include <linux/proc_fs.h>
74#include <asm/runway.h> /* for proc_runway_root */ 73#include <asm/runway.h> /* for proc_runway_root */
75 74
76#ifdef DEBUG_CCIO_INIT 75#ifdef DEBUG_CCIO_INIT
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index c590974e9815..d69bde6a2343 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -614,7 +614,7 @@ dino_fixup_bus(struct pci_bus *bus)
614 dev_name(&bus->self->dev), i, 614 dev_name(&bus->self->dev), i,
615 bus->self->resource[i].start, 615 bus->self->resource[i].start,
616 bus->self->resource[i].end); 616 bus->self->resource[i].end);
617 pci_assign_resource(bus->self, i); 617 WARN_ON(pci_assign_resource(bus->self, i));
618 DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n", 618 DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n",
619 dev_name(&bus->self->dev), i, 619 dev_name(&bus->self->dev), i,
620 bus->self->resource[i].start, 620 bus->self->resource[i].start,
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 685d94e69d44..8c0b26e9b98a 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -55,7 +55,7 @@ static ssize_t eisa_eeprom_read(struct file * file,
55 ssize_t ret; 55 ssize_t ret;
56 int i; 56 int i;
57 57
58 if (*ppos >= HPEE_MAX_LENGTH) 58 if (*ppos < 0 || *ppos >= HPEE_MAX_LENGTH)
59 return 0; 59 return 0;
60 60
61 count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos; 61 count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos;
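The eisa_eeprom fix above also rejects a negative file offset before the length is clamped, since a signed *ppos could otherwise index in front of the buffer. A standalone sketch of the same bounds logic; the 256-byte limit stands in for HPEE_MAX_LENGTH:

#include <stdio.h>
#include <string.h>

#define MAX_LENGTH 256                  /* stand-in for HPEE_MAX_LENGTH */

/* Copy at most 'count' bytes starting at *ppos; returns bytes copied. */
static long bounded_read(const char *eeprom, char *out,
                         long count, long *ppos)
{
        if (*ppos < 0 || *ppos >= MAX_LENGTH)   /* reject both ends */
                return 0;

        if (*ppos + count > MAX_LENGTH)
                count = MAX_LENGTH - *ppos;

        memcpy(out, eeprom + *ppos, (size_t)count);
        *ppos += count;
        return count;
}

int main(void)
{
        char eeprom[MAX_LENGTH] = "sample eeprom contents";
        char out[MAX_LENGTH];
        long pos = -8;                  /* bogus offset is refused */

        printf("read %ld bytes\n", bounded_read(eeprom, out, 16, &pos));
        return 0;
}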
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index 13856415b432..815db175d427 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -62,7 +62,8 @@ static int hppb_probe(struct parisc_device *dev)
62 } 62 }
63 card = card->next; 63 card = card->next;
64 } 64 }
65 printk(KERN_INFO "Found GeckoBoa at 0x%x\n", dev->hpa.start); 65 printk(KERN_INFO "Found GeckoBoa at 0x%llx\n",
66 (unsigned long long) dev->hpa.start);
66 67
67 card->hpa = dev->hpa.start; 68 card->hpa = dev->hpa.start;
68 card->mmio_region.name = "HP-PB Bus"; 69 card->mmio_region.name = "HP-PB Bus";
@@ -73,8 +74,10 @@ static int hppb_probe(struct parisc_device *dev)
73 74
74 status = ccio_request_resource(dev, &card->mmio_region); 75 status = ccio_request_resource(dev, &card->mmio_region);
75 if(status < 0) { 76 if(status < 0) {
76 printk(KERN_ERR "%s: failed to claim HP-PB bus space (%08x, %08x)\n", 77 printk(KERN_ERR "%s: failed to claim HP-PB "
77 __FILE__, card->mmio_region.start, card->mmio_region.end); 78 "bus space (0x%08llx, 0x%08llx)\n",
79 __FILE__, (unsigned long long) card->mmio_region.start,
80 (unsigned long long) card->mmio_region.end);
78 } 81 }
79 82
80 return 0; 83 return 0;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index ede614616f8e..3aeb3279c92a 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -992,7 +992,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
992 return; 992 return;
993 993
994 io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL); 994 io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
995 if (!pa_pdc_cell) { 995 if (!io_pdc_cell) {
996 kfree(pa_pdc_cell); 996 kfree(pa_pdc_cell);
997 return; 997 return;
998 } 998 }
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index f9f9a5f1bbd0..13a64bc081b6 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -370,7 +370,7 @@ pdcspath_layer_read(struct pdcspath_entry *entry, char *buf)
370 if (!i) /* entry is not ready */ 370 if (!i) /* entry is not ready */
371 return -ENODATA; 371 return -ENODATA;
372 372
373 for (i = 0; devpath->layers[i] && (likely(i < 6)); i++) 373 for (i = 0; i < 6 && devpath->layers[i]; i++)
374 out += sprintf(out, "%u ", devpath->layers[i]); 374 out += sprintf(out, "%u ", devpath->layers[i]);
375 375
376 out += sprintf(out, "\n"); 376 out += sprintf(out, "\n");
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index b711fb7181e2..1898c7b47907 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -100,16 +100,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
100{ 100{
101 struct resource *res = &dev->resource[resource]; 101 struct resource *res = &dev->resource[resource];
102 struct resource *root; 102 struct resource *root;
103 char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
104 int err; 103 int err;
105 104
106 root = pci_find_parent_resource(dev, res); 105 root = pci_find_parent_resource(dev, res);
107 106
108 err = -EINVAL; 107 err = -EINVAL;
109 if (root != NULL) 108 if (root != NULL)
110 err = insert_resource(root, res); 109 err = request_resource(root, res);
111 110
112 if (err) { 111 if (err) {
112 const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
113 dev_err(&dev->dev, "BAR %d: %s of %s %pR\n", 113 dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
114 resource, 114 resource,
115 root ? "address space collision on" : 115 root ? "address space collision on" :
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 46dad12f952f..77c6097ced80 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -277,31 +277,6 @@ config THINKPAD_ACPI_UNSAFE_LEDS
277 Say N here, unless you are building a kernel for your own 277 Say N here, unless you are building a kernel for your own
278 use, and need to control the important firmware LEDs. 278 use, and need to control the important firmware LEDs.
279 279
280config THINKPAD_ACPI_DOCK
281 bool "Legacy Docking Station Support"
282 depends on THINKPAD_ACPI
283 depends on ACPI_DOCK=n
284 default n
285 ---help---
286 Allows the thinkpad_acpi driver to handle docking station events.
287 This support was made obsolete by the generic ACPI docking station
288 support (CONFIG_ACPI_DOCK). It will allow locking and removing the
289 laptop from the docking station, but will not properly connect PCI
290 devices.
291
292 If you are not sure, say N here.
293
294config THINKPAD_ACPI_BAY
295 bool "Legacy Removable Bay Support"
296 depends on THINKPAD_ACPI
297 default y
298 ---help---
299 Allows the thinkpad_acpi driver to handle removable bays. It will
300 electrically disable the device in the bay, and also generate
301 notifications when the bay lever is ejected or inserted.
302
303 If you are not sure, say Y here.
304
305config THINKPAD_ACPI_VIDEO 280config THINKPAD_ACPI_VIDEO
306 bool "Video output control support" 281 bool "Video output control support"
307 depends on THINKPAD_ACPI 282 depends on THINKPAD_ACPI
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index ec560f16d720..222ffb892f22 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -143,6 +143,7 @@ struct eeepc_hotk {
143 struct rfkill *bluetooth_rfkill; 143 struct rfkill *bluetooth_rfkill;
144 struct rfkill *wwan3g_rfkill; 144 struct rfkill *wwan3g_rfkill;
145 struct hotplug_slot *hotplug_slot; 145 struct hotplug_slot *hotplug_slot;
146 struct work_struct hotplug_work;
146}; 147};
147 148
148/* The actual device the driver binds to */ 149/* The actual device the driver binds to */
@@ -660,7 +661,7 @@ static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
660 return 0; 661 return 0;
661} 662}
662 663
663static void eeepc_rfkill_hotplug(void) 664static void eeepc_hotplug_work(struct work_struct *work)
664{ 665{
665 struct pci_dev *dev; 666 struct pci_dev *dev;
666 struct pci_bus *bus = pci_find_bus(0, 1); 667 struct pci_bus *bus = pci_find_bus(0, 1);
@@ -701,7 +702,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
701 if (event != ACPI_NOTIFY_BUS_CHECK) 702 if (event != ACPI_NOTIFY_BUS_CHECK)
702 return; 703 return;
703 704
704 eeepc_rfkill_hotplug(); 705 schedule_work(&ehotk->hotplug_work);
705} 706}
706 707
707static void eeepc_hotk_notify(struct acpi_device *device, u32 event) 708static void eeepc_hotk_notify(struct acpi_device *device, u32 event)
@@ -892,7 +893,7 @@ static int eeepc_hotk_resume(struct acpi_device *device)
892 893
893 rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1); 894 rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1);
894 895
895 eeepc_rfkill_hotplug(); 896 schedule_work(&ehotk->hotplug_work);
896 } 897 }
897 898
898 if (ehotk->bluetooth_rfkill) 899 if (ehotk->bluetooth_rfkill)
@@ -1093,6 +1094,8 @@ static int eeepc_rfkill_init(struct device *dev)
1093{ 1094{
1094 int result = 0; 1095 int result = 0;
1095 1096
1097 INIT_WORK(&ehotk->hotplug_work, eeepc_hotplug_work);
1098
1096 eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6"); 1099 eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
1097 eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7"); 1100 eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
1098 1101
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index ca508564a181..a2ad53e15874 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -520,11 +520,13 @@ static int hp_wmi_resume_handler(struct platform_device *device)
520 * the input layer will only actually pass it on if the state 520 * the input layer will only actually pass it on if the state
521 * changed. 521 * changed.
522 */ 522 */
523 523 if (hp_wmi_input_dev) {
524 input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); 524 input_report_switch(hp_wmi_input_dev, SW_DOCK,
525 input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, 525 hp_wmi_dock_state());
526 hp_wmi_tablet_state()); 526 input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
527 input_sync(hp_wmi_input_dev); 527 hp_wmi_tablet_state());
528 input_sync(hp_wmi_input_dev);
529 }
528 530
529 return 0; 531 return 0;
530} 532}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index a463fd72c495..e85600852502 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -239,12 +239,6 @@ struct ibm_init_struct {
239}; 239};
240 240
241static struct { 241static struct {
242#ifdef CONFIG_THINKPAD_ACPI_BAY
243 u32 bay_status:1;
244 u32 bay_eject:1;
245 u32 bay_status2:1;
246 u32 bay_eject2:1;
247#endif
248 u32 bluetooth:1; 242 u32 bluetooth:1;
249 u32 hotkey:1; 243 u32 hotkey:1;
250 u32 hotkey_mask:1; 244 u32 hotkey_mask:1;
@@ -589,18 +583,6 @@ static int acpi_ec_write(int i, u8 v)
589 return 1; 583 return 1;
590} 584}
591 585
592#if defined(CONFIG_THINKPAD_ACPI_DOCK) || defined(CONFIG_THINKPAD_ACPI_BAY)
593static int _sta(acpi_handle handle)
594{
595 int status;
596
597 if (!handle || !acpi_evalf(handle, &status, "_STA", "d"))
598 status = 0;
599
600 return status;
601}
602#endif
603
604static int issue_thinkpad_cmos_command(int cmos_cmd) 586static int issue_thinkpad_cmos_command(int cmos_cmd)
605{ 587{
606 if (!cmos_handle) 588 if (!cmos_handle)
@@ -784,6 +766,8 @@ static int dispatch_procfs_write(struct file *file,
784 766
785 if (!ibm || !ibm->write) 767 if (!ibm || !ibm->write)
786 return -EINVAL; 768 return -EINVAL;
769 if (count > PAGE_SIZE - 2)
770 return -EINVAL;
787 771
788 kernbuf = kmalloc(count + 2, GFP_KERNEL); 772 kernbuf = kmalloc(count + 2, GFP_KERNEL);
789 if (!kernbuf) 773 if (!kernbuf)
@@ -4442,293 +4426,6 @@ static struct ibm_struct light_driver_data = {
4442}; 4426};
4443 4427
4444/************************************************************************* 4428/*************************************************************************
4445 * Dock subdriver
4446 */
4447
4448#ifdef CONFIG_THINKPAD_ACPI_DOCK
4449
4450static void dock_notify(struct ibm_struct *ibm, u32 event);
4451static int dock_read(char *p);
4452static int dock_write(char *buf);
4453
4454TPACPI_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */
4455 "\\_SB.PCI0.DOCK", /* 600e/x,770e,770x,A2xm/p,T20-22,X20-21 */
4456 "\\_SB.PCI0.PCI1.DOCK", /* all others */
4457 "\\_SB.PCI.ISA.SLCE", /* 570 */
4458 ); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */
4459
4460/* don't list other alternatives as we install a notify handler on the 570 */
4461TPACPI_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */
4462
4463static const struct acpi_device_id ibm_pci_device_ids[] = {
4464 {PCI_ROOT_HID_STRING, 0},
4465 {"", 0},
4466};
4467
4468static struct tp_acpi_drv_struct ibm_dock_acpidriver[2] = {
4469 {
4470 .notify = dock_notify,
4471 .handle = &dock_handle,
4472 .type = ACPI_SYSTEM_NOTIFY,
4473 },
4474 {
4475 /* THIS ONE MUST NEVER BE USED FOR DRIVER AUTOLOADING.
4476 * We just use it to get notifications of dock hotplug
4477 * in very old thinkpads */
4478 .hid = ibm_pci_device_ids,
4479 .notify = dock_notify,
4480 .handle = &pci_handle,
4481 .type = ACPI_SYSTEM_NOTIFY,
4482 },
4483};
4484
4485static struct ibm_struct dock_driver_data[2] = {
4486 {
4487 .name = "dock",
4488 .read = dock_read,
4489 .write = dock_write,
4490 .acpi = &ibm_dock_acpidriver[0],
4491 },
4492 {
4493 .name = "dock",
4494 .acpi = &ibm_dock_acpidriver[1],
4495 },
4496};
4497
4498#define dock_docked() (_sta(dock_handle) & 1)
4499
4500static int __init dock_init(struct ibm_init_struct *iibm)
4501{
4502 vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver\n");
4503
4504 TPACPI_ACPIHANDLE_INIT(dock);
4505
4506 vdbg_printk(TPACPI_DBG_INIT, "dock is %s\n",
4507 str_supported(dock_handle != NULL));
4508
4509 return (dock_handle)? 0 : 1;
4510}
4511
4512static int __init dock_init2(struct ibm_init_struct *iibm)
4513{
4514 int dock2_needed;
4515
4516 vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver part 2\n");
4517
4518 if (dock_driver_data[0].flags.acpi_driver_registered &&
4519 dock_driver_data[0].flags.acpi_notify_installed) {
4520 TPACPI_ACPIHANDLE_INIT(pci);
4521 dock2_needed = (pci_handle != NULL);
4522 vdbg_printk(TPACPI_DBG_INIT,
4523 "dock PCI handler for the TP 570 is %s\n",
4524 str_supported(dock2_needed));
4525 } else {
4526 vdbg_printk(TPACPI_DBG_INIT,
4527 "dock subdriver part 2 not required\n");
4528 dock2_needed = 0;
4529 }
4530
4531 return (dock2_needed)? 0 : 1;
4532}
4533
4534static void dock_notify(struct ibm_struct *ibm, u32 event)
4535{
4536 int docked = dock_docked();
4537 int pci = ibm->acpi->hid && ibm->acpi->device &&
4538 acpi_match_device_ids(ibm->acpi->device, ibm_pci_device_ids);
4539 int data;
4540
4541 if (event == 1 && !pci) /* 570 */
4542 data = 1; /* button */
4543 else if (event == 1 && pci) /* 570 */
4544 data = 3; /* dock */
4545 else if (event == 3 && docked)
4546 data = 1; /* button */
4547 else if (event == 3 && !docked)
4548 data = 2; /* undock */
4549 else if (event == 0 && docked)
4550 data = 3; /* dock */
4551 else {
4552 printk(TPACPI_ERR "unknown dock event %d, status %d\n",
4553 event, _sta(dock_handle));
4554 data = 0; /* unknown */
4555 }
4556 acpi_bus_generate_proc_event(ibm->acpi->device, event, data);
4557 acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
4558 dev_name(&ibm->acpi->device->dev),
4559 event, data);
4560}
4561
4562static int dock_read(char *p)
4563{
4564 int len = 0;
4565 int docked = dock_docked();
4566
4567 if (!dock_handle)
4568 len += sprintf(p + len, "status:\t\tnot supported\n");
4569 else if (!docked)
4570 len += sprintf(p + len, "status:\t\tundocked\n");
4571 else {
4572 len += sprintf(p + len, "status:\t\tdocked\n");
4573 len += sprintf(p + len, "commands:\tdock, undock\n");
4574 }
4575
4576 return len;
4577}
4578
4579static int dock_write(char *buf)
4580{
4581 char *cmd;
4582
4583 if (!dock_docked())
4584 return -ENODEV;
4585
4586 while ((cmd = next_cmd(&buf))) {
4587 if (strlencmp(cmd, "undock") == 0) {
4588 if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 0) ||
4589 !acpi_evalf(dock_handle, NULL, "_EJ0", "vd", 1))
4590 return -EIO;
4591 } else if (strlencmp(cmd, "dock") == 0) {
4592 if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 1))
4593 return -EIO;
4594 } else
4595 return -EINVAL;
4596 }
4597
4598 return 0;
4599}
4600
4601#endif /* CONFIG_THINKPAD_ACPI_DOCK */
4602
4603/*************************************************************************
4604 * Bay subdriver
4605 */
4606
4607#ifdef CONFIG_THINKPAD_ACPI_BAY
4608
4609TPACPI_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */
4610 "\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */
4611 "\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */
4612 "\\_SB.PCI0.IDE0.SCND.MSTR", /* all others */
4613 ); /* A21e, R30, R31 */
4614TPACPI_HANDLE(bay_ej, bay, "_EJ3", /* 600e/x, A2xm/p, A3x */
4615 "_EJ0", /* all others */
4616 ); /* 570,A21e,G4x,R30,R31,R32,R40e,R50e */
4617TPACPI_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */
4618 "\\_SB.PCI0.IDE0.IDEP.IDPS", /* 600e/x, 770e, 770x */
4619 ); /* all others */
4620TPACPI_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */
4621 "_EJ0", /* 770x */
4622 ); /* all others */
4623
4624static int __init bay_init(struct ibm_init_struct *iibm)
4625{
4626 vdbg_printk(TPACPI_DBG_INIT, "initializing bay subdriver\n");
4627
4628 TPACPI_ACPIHANDLE_INIT(bay);
4629 if (bay_handle)
4630 TPACPI_ACPIHANDLE_INIT(bay_ej);
4631 TPACPI_ACPIHANDLE_INIT(bay2);
4632 if (bay2_handle)
4633 TPACPI_ACPIHANDLE_INIT(bay2_ej);
4634
4635 tp_features.bay_status = bay_handle &&
4636 acpi_evalf(bay_handle, NULL, "_STA", "qv");
4637 tp_features.bay_status2 = bay2_handle &&
4638 acpi_evalf(bay2_handle, NULL, "_STA", "qv");
4639
4640 tp_features.bay_eject = bay_handle && bay_ej_handle &&
4641 (strlencmp(bay_ej_path, "_EJ0") == 0 || experimental);
4642 tp_features.bay_eject2 = bay2_handle && bay2_ej_handle &&
4643 (strlencmp(bay2_ej_path, "_EJ0") == 0 || experimental);
4644
4645 vdbg_printk(TPACPI_DBG_INIT,
4646 "bay 1: status %s, eject %s; bay 2: status %s, eject %s\n",
4647 str_supported(tp_features.bay_status),
4648 str_supported(tp_features.bay_eject),
4649 str_supported(tp_features.bay_status2),
4650 str_supported(tp_features.bay_eject2));
4651
4652 return (tp_features.bay_status || tp_features.bay_eject ||
4653 tp_features.bay_status2 || tp_features.bay_eject2)? 0 : 1;
4654}
4655
4656static void bay_notify(struct ibm_struct *ibm, u32 event)
4657{
4658 acpi_bus_generate_proc_event(ibm->acpi->device, event, 0);
4659 acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
4660 dev_name(&ibm->acpi->device->dev),
4661 event, 0);
4662}
4663
4664#define bay_occupied(b) (_sta(b##_handle) & 1)
4665
4666static int bay_read(char *p)
4667{
4668 int len = 0;
4669 int occupied = bay_occupied(bay);
4670 int occupied2 = bay_occupied(bay2);
4671 int eject, eject2;
4672
4673 len += sprintf(p + len, "status:\t\t%s\n",
4674 tp_features.bay_status ?
4675 (occupied ? "occupied" : "unoccupied") :
4676 "not supported");
4677 if (tp_features.bay_status2)
4678 len += sprintf(p + len, "status2:\t%s\n", occupied2 ?
4679 "occupied" : "unoccupied");
4680
4681 eject = tp_features.bay_eject && occupied;
4682 eject2 = tp_features.bay_eject2 && occupied2;
4683
4684 if (eject && eject2)
4685 len += sprintf(p + len, "commands:\teject, eject2\n");
4686 else if (eject)
4687 len += sprintf(p + len, "commands:\teject\n");
4688 else if (eject2)
4689 len += sprintf(p + len, "commands:\teject2\n");
4690
4691 return len;
4692}
4693
4694static int bay_write(char *buf)
4695{
4696 char *cmd;
4697
4698 if (!tp_features.bay_eject && !tp_features.bay_eject2)
4699 return -ENODEV;
4700
4701 while ((cmd = next_cmd(&buf))) {
4702 if (tp_features.bay_eject && strlencmp(cmd, "eject") == 0) {
4703 if (!acpi_evalf(bay_ej_handle, NULL, NULL, "vd", 1))
4704 return -EIO;
4705 } else if (tp_features.bay_eject2 &&
4706 strlencmp(cmd, "eject2") == 0) {
4707 if (!acpi_evalf(bay2_ej_handle, NULL, NULL, "vd", 1))
4708 return -EIO;
4709 } else
4710 return -EINVAL;
4711 }
4712
4713 return 0;
4714}
4715
4716static struct tp_acpi_drv_struct ibm_bay_acpidriver = {
4717 .notify = bay_notify,
4718 .handle = &bay_handle,
4719 .type = ACPI_SYSTEM_NOTIFY,
4720};
4721
4722static struct ibm_struct bay_driver_data = {
4723 .name = "bay",
4724 .read = bay_read,
4725 .write = bay_write,
4726 .acpi = &ibm_bay_acpidriver,
4727};
4728
4729#endif /* CONFIG_THINKPAD_ACPI_BAY */
4730
4731/*************************************************************************
4732 * CMOS subdriver 4429 * CMOS subdriver
4733 */ 4430 */
4734 4431
@@ -5945,14 +5642,48 @@ static struct backlight_ops ibm_backlight_data = {
5945 5642
5946/* --------------------------------------------------------------------- */ 5643/* --------------------------------------------------------------------- */
5947 5644
5645/*
5646 * These are only useful for models that can have only one type of
5647 * GPU. If the BIOS model handles both ATI and Intel GPUs, don't use
5648 * these quirks.
5649 */
5650#define TPACPI_BRGHT_Q_NOEC 0x0001 /* Must NOT use EC HBRV */
5651#define TPACPI_BRGHT_Q_EC 0x0002 /* Should or must use EC HBRV */
5652#define TPACPI_BRGHT_Q_ASK 0x8000 /* Ask for user report */
5653
5654static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
5655 /* Models with ATI GPUs known to require ECNVRAM mode */
5656 TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */
5657
5658 /* Models with ATI GPUs (waiting confirmation) */
5659 TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
5660 TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
5661 TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
5662 TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
5663
5664 /* Models with Intel Extreme Graphics 2 (waiting confirmation) */
5665 TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
5666 TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
5667 TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
5668
5669 /* Models with Intel GMA900 */
5670 TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */
5671 TPACPI_Q_IBM('7', '4', TPACPI_BRGHT_Q_NOEC), /* X41 */
5672 TPACPI_Q_IBM('7', '5', TPACPI_BRGHT_Q_NOEC), /* X41 Tablet */
5673};
5674
5948static int __init brightness_init(struct ibm_init_struct *iibm) 5675static int __init brightness_init(struct ibm_init_struct *iibm)
5949{ 5676{
5950 int b; 5677 int b;
5678 unsigned long quirks;
5951 5679
5952 vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n"); 5680 vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n");
5953 5681
5954 mutex_init(&brightness_mutex); 5682 mutex_init(&brightness_mutex);
5955 5683
5684 quirks = tpacpi_check_quirks(brightness_quirk_table,
5685 ARRAY_SIZE(brightness_quirk_table));
5686
5956 /* 5687 /*
5957 * We always attempt to detect acpi support, so as to switch 5688 * We always attempt to detect acpi support, so as to switch
5958 * Lenovo Vista BIOS to ACPI brightness mode even if we are not 5689 * Lenovo Vista BIOS to ACPI brightness mode even if we are not
@@ -6009,23 +5740,13 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6009 /* TPACPI_BRGHT_MODE_AUTO not implemented yet, just use default */ 5740 /* TPACPI_BRGHT_MODE_AUTO not implemented yet, just use default */
6010 if (brightness_mode == TPACPI_BRGHT_MODE_AUTO || 5741 if (brightness_mode == TPACPI_BRGHT_MODE_AUTO ||
6011 brightness_mode == TPACPI_BRGHT_MODE_MAX) { 5742 brightness_mode == TPACPI_BRGHT_MODE_MAX) {
6012 if (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) { 5743 if (quirks & TPACPI_BRGHT_Q_EC)
6013 /* 5744 brightness_mode = TPACPI_BRGHT_MODE_ECNVRAM;
6014 * IBM models that define HBRV probably have 5745 else
6015 * EC-based backlight level control
6016 */
6017 if (acpi_evalf(ec_handle, NULL, "HBRV", "qd"))
6018 /* T40-T43, R50-R52, R50e, R51e, X31-X41 */
6019 brightness_mode = TPACPI_BRGHT_MODE_ECNVRAM;
6020 else
6021 /* all other IBM ThinkPads */
6022 brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP;
6023 } else
6024 /* All Lenovo ThinkPads */
6025 brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP; 5746 brightness_mode = TPACPI_BRGHT_MODE_UCMS_STEP;
6026 5747
6027 dbg_printk(TPACPI_DBG_BRGHT, 5748 dbg_printk(TPACPI_DBG_BRGHT,
6028 "selected brightness_mode=%d\n", 5749 "driver auto-selected brightness_mode=%d\n",
6029 brightness_mode); 5750 brightness_mode);
6030 } 5751 }
6031 5752
@@ -6052,6 +5773,15 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6052 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, 5773 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
6053 "brightness is supported\n"); 5774 "brightness is supported\n");
6054 5775
5776 if (quirks & TPACPI_BRGHT_Q_ASK) {
5777 printk(TPACPI_NOTICE
5778 "brightness: will use unverified default: "
5779 "brightness_mode=%d\n", brightness_mode);
5780 printk(TPACPI_NOTICE
5781 "brightness: please report to %s whether it works well "
5782 "or not on your ThinkPad\n", TPACPI_MAIL);
5783 }
5784
6055 ibm_backlight_device->props.max_brightness = 5785 ibm_backlight_device->props.max_brightness =
6056 (tp_features.bright_16levels)? 15 : 7; 5786 (tp_features.bright_16levels)? 15 : 7;
6057 ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK; 5787 ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
@@ -7854,22 +7584,6 @@ static struct ibm_init_struct ibms_init[] __initdata = {
7854 .init = light_init, 7584 .init = light_init,
7855 .data = &light_driver_data, 7585 .data = &light_driver_data,
7856 }, 7586 },
7857#ifdef CONFIG_THINKPAD_ACPI_DOCK
7858 {
7859 .init = dock_init,
7860 .data = &dock_driver_data[0],
7861 },
7862 {
7863 .init = dock_init2,
7864 .data = &dock_driver_data[1],
7865 },
7866#endif
7867#ifdef CONFIG_THINKPAD_ACPI_BAY
7868 {
7869 .init = bay_init,
7870 .data = &bay_driver_data,
7871 },
7872#endif
7873 { 7587 {
7874 .init = cmos_init, 7588 .init = cmos_init,
7875 .data = &cmos_driver_data, 7589 .data = &cmos_driver_data,
@@ -7968,12 +7682,6 @@ TPACPI_PARAM(hotkey);
7968TPACPI_PARAM(bluetooth); 7682TPACPI_PARAM(bluetooth);
7969TPACPI_PARAM(video); 7683TPACPI_PARAM(video);
7970TPACPI_PARAM(light); 7684TPACPI_PARAM(light);
7971#ifdef CONFIG_THINKPAD_ACPI_DOCK
7972TPACPI_PARAM(dock);
7973#endif
7974#ifdef CONFIG_THINKPAD_ACPI_BAY
7975TPACPI_PARAM(bay);
7976#endif /* CONFIG_THINKPAD_ACPI_BAY */
7977TPACPI_PARAM(cmos); 7685TPACPI_PARAM(cmos);
7978TPACPI_PARAM(led); 7686TPACPI_PARAM(led);
7979TPACPI_PARAM(beep); 7687TPACPI_PARAM(beep);
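
Editor's note: the brightness hunks in the thinkpad-acpi.c diff above replace the old per-vendor HBRV probe with a quirk table keyed on two firmware ID characters, whose flag bits are OR'ed together by a lookup helper. A standalone sketch of that table-lookup pattern (illustrative names and flag values, not the driver's own TPACPI_* helpers):

#include <stdio.h>

#define Q_NOEC 0x0001   /* must not use EC brightness control */
#define Q_EC   0x0002   /* should or must use EC brightness control */
#define Q_ASK  0x8000   /* ask the user to report back */

struct quirk {
	char id1, id2;          /* two firmware ID characters */
	unsigned long flags;    /* quirk bits for this model */
};

static const struct quirk table[] = {
	{ '1', 'Y', Q_EC },             /* e.g. T43/p with ATI GPU */
	{ '7', '0', Q_NOEC },           /* e.g. T43, R52 with Intel GPU */
	{ '1', 'R', Q_ASK | Q_EC },     /* waiting for confirmation */
};

/* OR together the flags of every entry matching the firmware ID. */
static unsigned long check_quirks(char id1, char id2)
{
	unsigned long flags = 0;
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].id1 == id1 && table[i].id2 == id2)
			flags |= table[i].flags;
	return flags;
}

int main(void)
{
	unsigned long q = check_quirks('1', 'R');

	printf("quirks=0x%04lx ask=%d ec=%d\n",
	       q, !!(q & Q_ASK), !!(q & Q_EC));
	return 0;
}

The same lookup result then drives the mode selection seen in brightness_init(): TPACPI_BRGHT_Q_EC picks ECNVRAM mode, anything else falls back to UCMS_STEP, and TPACPI_BRGHT_Q_ASK only adds the "please report" notice.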
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 8030e25152fb..c75d6f35cb5f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -553,40 +553,35 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
553 _zfcp_erp_unit_reopen(unit, clear, id, ref); 553 _zfcp_erp_unit_reopen(unit, clear, id, ref);
554} 554}
555 555
556static void zfcp_erp_strategy_followup_actions(struct zfcp_erp_action *act) 556static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
557{ 557{
558 struct zfcp_adapter *adapter = act->adapter;
559 struct zfcp_port *port = act->port;
560 struct zfcp_unit *unit = act->unit;
561 u32 status = act->status;
562
563 /* initiate follow-up actions depending on success of finished action */
564 switch (act->action) { 558 switch (act->action) {
565
566 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 559 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
567 if (status == ZFCP_ERP_SUCCEEDED) 560 _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL);
568 _zfcp_erp_port_reopen_all(adapter, 0, "ersfa_1", NULL);
569 else
570 _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_2", NULL);
571 break; 561 break;
572
573 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 562 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
574 if (status == ZFCP_ERP_SUCCEEDED) 563 _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL);
575 _zfcp_erp_port_reopen(port, 0, "ersfa_3", NULL);
576 else
577 _zfcp_erp_adapter_reopen(adapter, 0, "ersfa_4", NULL);
578 break; 564 break;
579
580 case ZFCP_ERP_ACTION_REOPEN_PORT: 565 case ZFCP_ERP_ACTION_REOPEN_PORT:
581 if (status == ZFCP_ERP_SUCCEEDED) 566 _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL);
582 _zfcp_erp_unit_reopen_all(port, 0, "ersfa_5", NULL);
583 else
584 _zfcp_erp_port_forced_reopen(port, 0, "ersfa_6", NULL);
585 break; 567 break;
586
587 case ZFCP_ERP_ACTION_REOPEN_UNIT: 568 case ZFCP_ERP_ACTION_REOPEN_UNIT:
588 if (status != ZFCP_ERP_SUCCEEDED) 569 _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL);
589 _zfcp_erp_port_reopen(unit->port, 0, "ersfa_7", NULL); 570 break;
571 }
572}
573
574static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
575{
576 switch (act->action) {
577 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
578 _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL);
579 break;
580 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
581 _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL);
582 break;
583 case ZFCP_ERP_ACTION_REOPEN_PORT:
584 _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL);
590 break; 585 break;
591 } 586 }
592} 587}
@@ -801,7 +796,7 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
801 return ZFCP_ERP_FAILED; 796 return ZFCP_ERP_FAILED;
802 797
803 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: 798 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
804 if (status & ZFCP_STATUS_PORT_PHYS_OPEN) 799 if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
805 return ZFCP_ERP_SUCCEEDED; 800 return ZFCP_ERP_SUCCEEDED;
806 } 801 }
807 return ZFCP_ERP_FAILED; 802 return ZFCP_ERP_FAILED;
@@ -853,11 +848,17 @@ void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
853 gid_pn_work); 848 gid_pn_work);
854 849
855 retval = zfcp_fc_ns_gid_pn(&port->erp_action); 850 retval = zfcp_fc_ns_gid_pn(&port->erp_action);
856 if (retval == -ENOMEM) 851 if (!retval) {
857 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM); 852 port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
858 port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; 853 goto out;
859 if (retval) 854 }
860 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED); 855 if (retval == -ENOMEM) {
856 zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM);
857 goto out;
858 }
859	/* all other error conditions */
860 zfcp_erp_notify(&port->erp_action, 0);
861out:
861 zfcp_port_put(port); 862 zfcp_port_put(port);
862} 863}
863 864
@@ -1289,7 +1290,10 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1289 retval = zfcp_erp_strategy_statechange(erp_action, retval); 1290 retval = zfcp_erp_strategy_statechange(erp_action, retval);
1290 if (retval == ZFCP_ERP_EXIT) 1291 if (retval == ZFCP_ERP_EXIT)
1291 goto unlock; 1292 goto unlock;
1292 zfcp_erp_strategy_followup_actions(erp_action); 1293 if (retval == ZFCP_ERP_SUCCEEDED)
1294 zfcp_erp_strategy_followup_success(erp_action);
1295 if (retval == ZFCP_ERP_FAILED)
1296 zfcp_erp_strategy_followup_failed(erp_action);
1293 1297
1294 unlock: 1298 unlock:
1295 write_unlock(&adapter->erp_lock); 1299 write_unlock(&adapter->erp_lock);
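
Editor's note: the zfcp_erp.c changes above split the old status-dependent follow-up routine into separate success and failure handlers, chosen by the caller from the strategy result. A compact standalone sketch of that dispatch shape (simplified enums and printouts instead of the zfcp structures):

#include <stdio.h>

enum action { REOPEN_ADAPTER, REOPEN_PORT_FORCED, REOPEN_PORT, REOPEN_UNIT };
enum result { ERP_SUCCEEDED, ERP_FAILED, ERP_EXIT };

/* On success, follow up on the child objects of the finished action. */
static void followup_success(enum action a)
{
	switch (a) {
	case REOPEN_ADAPTER:     puts("reopen all ports");  break;
	case REOPEN_PORT_FORCED: puts("reopen port");       break;
	case REOPEN_PORT:        puts("reopen all units");  break;
	default:                                            break;
	}
}

/* On failure, retry the failed object itself. */
static void followup_failed(enum action a)
{
	switch (a) {
	case REOPEN_ADAPTER:     puts("reopen adapter");    break;
	case REOPEN_PORT_FORCED: puts("force-reopen port"); break;
	case REOPEN_PORT:        puts("reopen port");       break;
	case REOPEN_UNIT:        puts("reopen unit");       break;
	}
}

static void strategy_finish(enum action a, enum result r)
{
	if (r == ERP_EXIT)
		return;               /* no follow-up at all */
	if (r == ERP_SUCCEEDED)
		followup_success(a);
	else
		followup_failed(a);
}

int main(void)
{
	strategy_finish(REOPEN_PORT, ERP_SUCCEEDED);
	strategy_finish(REOPEN_UNIT, ERP_FAILED);
	return 0;
}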
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 2f0705d76b72..47daebfa7e59 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -79,11 +79,9 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
79 79
80 mutex_unlock(&wka_port->mutex); 80 mutex_unlock(&wka_port->mutex);
81 81
82 wait_event_timeout( 82 wait_event(wka_port->completion_wq,
83 wka_port->completion_wq, 83 wka_port->status == ZFCP_WKA_PORT_ONLINE ||
84 wka_port->status == ZFCP_WKA_PORT_ONLINE || 84 wka_port->status == ZFCP_WKA_PORT_OFFLINE);
85 wka_port->status == ZFCP_WKA_PORT_OFFLINE,
86 HZ >> 1);
87 85
88 if (wka_port->status == ZFCP_WKA_PORT_ONLINE) { 86 if (wka_port->status == ZFCP_WKA_PORT_ONLINE) {
89 atomic_inc(&wka_port->refcount); 87 atomic_inc(&wka_port->refcount);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c57658f3d34f..47795fbf081f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -670,8 +670,11 @@ static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
670 zfcp_fsf_sbal_check(adapter), 5 * HZ); 670 zfcp_fsf_sbal_check(adapter), 5 * HZ);
671 if (ret > 0) 671 if (ret > 0)
672 return 0; 672 return 0;
673 if (!ret) 673 if (!ret) {
674 atomic_inc(&adapter->qdio_outb_full); 674 atomic_inc(&adapter->qdio_outb_full);
675 /* assume hanging outbound queue, try queue recovery */
676 zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
677 }
675 678
676 spin_lock_bh(&adapter->req_q_lock); 679 spin_lock_bh(&adapter->req_q_lock);
677 return -EIO; 680 return -EIO;
@@ -722,7 +725,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
722 req = zfcp_fsf_alloc_qtcb(pool); 725 req = zfcp_fsf_alloc_qtcb(pool);
723 726
724 if (unlikely(!req)) 727 if (unlikely(!req))
725 return ERR_PTR(-EIO); 728 return ERR_PTR(-ENOMEM);
726 729
727 if (adapter->req_no == 0) 730 if (adapter->req_no == 0)
728 adapter->req_no++; 731 adapter->req_no++;
@@ -1010,6 +1013,23 @@ skip_fsfstatus:
1010 send_ct->handler(send_ct->handler_data); 1013 send_ct->handler(send_ct->handler_data);
1011} 1014}
1012 1015
1016static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
1017 struct scatterlist *sg_req,
1018 struct scatterlist *sg_resp)
1019{
1020 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1021 sbale[2].addr = sg_virt(sg_req);
1022 sbale[2].length = sg_req->length;
1023 sbale[3].addr = sg_virt(sg_resp);
1024 sbale[3].length = sg_resp->length;
1025 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1026}
1027
1028static int zfcp_fsf_one_sbal(struct scatterlist *sg)
1029{
1030 return sg_is_last(sg) && sg->length <= PAGE_SIZE;
1031}
1032
1013static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, 1033static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1014 struct scatterlist *sg_req, 1034 struct scatterlist *sg_req,
1015 struct scatterlist *sg_resp, 1035 struct scatterlist *sg_resp,
@@ -1020,30 +1040,30 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1020 int bytes; 1040 int bytes;
1021 1041
1022 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { 1042 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
1023 if (sg_req->length > PAGE_SIZE || sg_resp->length > PAGE_SIZE || 1043 if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
1024 !sg_is_last(sg_req) || !sg_is_last(sg_resp))
1025 return -EOPNOTSUPP; 1044 return -EOPNOTSUPP;
1026 1045
1027 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ; 1046 zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1028 sbale[2].addr = sg_virt(sg_req); 1047 return 0;
1029 sbale[2].length = sg_req->length; 1048 }
1030 sbale[3].addr = sg_virt(sg_resp); 1049
1031 sbale[3].length = sg_resp->length; 1050 /* use single, unchained SBAL if it can hold the request */
1032 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; 1051 if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
1052 zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1033 return 0; 1053 return 0;
1034 } 1054 }
1035 1055
1036 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, 1056 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1037 sg_req, max_sbals); 1057 sg_req, max_sbals);
1038 if (bytes <= 0) 1058 if (bytes <= 0)
1039 return -ENOMEM; 1059 return -EIO;
1040 req->qtcb->bottom.support.req_buf_length = bytes; 1060 req->qtcb->bottom.support.req_buf_length = bytes;
1041 req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; 1061 req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1042 1062
1043 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, 1063 bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
1044 sg_resp, max_sbals); 1064 sg_resp, max_sbals);
1045 if (bytes <= 0) 1065 if (bytes <= 0)
1046 return -ENOMEM; 1066 return -EIO;
1047 req->qtcb->bottom.support.resp_buf_length = bytes; 1067 req->qtcb->bottom.support.resp_buf_length = bytes;
1048 1068
1049 return 0; 1069 return 0;
@@ -1607,10 +1627,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1607 case FSF_ACCESS_DENIED: 1627 case FSF_ACCESS_DENIED:
1608 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1628 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1609 break; 1629 break;
1610 case FSF_PORT_ALREADY_OPEN:
1611 break;
1612 case FSF_GOOD: 1630 case FSF_GOOD:
1613 wka_port->handle = header->port_handle; 1631 wka_port->handle = header->port_handle;
1632 /* fall through */
1633 case FSF_PORT_ALREADY_OPEN:
1614 wka_port->status = ZFCP_WKA_PORT_ONLINE; 1634 wka_port->status = ZFCP_WKA_PORT_ONLINE;
1615 } 1635 }
1616out: 1636out:
@@ -1731,15 +1751,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1731 zfcp_fsf_access_denied_port(req, port); 1751 zfcp_fsf_access_denied_port(req, port);
1732 break; 1752 break;
1733 case FSF_PORT_BOXED: 1753 case FSF_PORT_BOXED:
1734 zfcp_erp_port_boxed(port, "fscpph2", req);
1735 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1736 ZFCP_STATUS_FSFREQ_RETRY;
1737 /* can't use generic zfcp_erp_modify_port_status because 1754 /* can't use generic zfcp_erp_modify_port_status because
1738 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1755 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1739 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1756 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1740 list_for_each_entry(unit, &port->unit_list_head, list) 1757 list_for_each_entry(unit, &port->unit_list_head, list)
1741 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1758 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1742 &unit->status); 1759 &unit->status);
1760 zfcp_erp_port_boxed(port, "fscpph2", req);
1761 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1762 ZFCP_STATUS_FSFREQ_RETRY;
1763
1743 break; 1764 break;
1744 case FSF_ADAPTER_STATUS_AVAILABLE: 1765 case FSF_ADAPTER_STATUS_AVAILABLE:
1745 switch (header->fsf_status_qual.word[0]) { 1766 switch (header->fsf_status_qual.word[0]) {
@@ -2541,7 +2562,6 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2541 bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg, 2562 bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
2542 FSF_MAX_SBALS_PER_REQ); 2563 FSF_MAX_SBALS_PER_REQ);
2543 if (bytes != ZFCP_CFDC_MAX_SIZE) { 2564 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2544 retval = -ENOMEM;
2545 zfcp_fsf_req_free(req); 2565 zfcp_fsf_req_free(req);
2546 goto out; 2566 goto out;
2547 } 2567 }
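
Editor's note: the new zfcp_fsf_one_sbal() helper above centralizes the "fits in a single, unchained buffer element" test: exactly one scatterlist entry no longer than a page. A standalone sketch of the same decision (stand-in types; BUF_SIZE plays the role of PAGE_SIZE):

#include <stdbool.h>
#include <stdio.h>

#define BUF_SIZE 4096  /* stand-in for PAGE_SIZE */

struct sg_entry {
	unsigned int length;
	bool last;          /* stand-in for sg_is_last() */
};

/* True if the whole transfer fits in one unchained buffer element. */
static bool fits_one_buffer(const struct sg_entry *sg)
{
	return sg->last && sg->length <= BUF_SIZE;
}

int main(void)
{
	struct sg_entry req  = { .length = 512,  .last = true };
	struct sg_entry resp = { .length = 8192, .last = true };

	if (fits_one_buffer(&req) && fits_one_buffer(&resp))
		puts("use single unchained buffer element");
	else
		puts("fall back to chained buffer list");
	return 0;
}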
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 967ede73f4c5..6925a1784682 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -167,20 +167,21 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
167 struct zfcp_unit *unit = scpnt->device->hostdata; 167 struct zfcp_unit *unit = scpnt->device->hostdata;
168 struct zfcp_fsf_req *old_req, *abrt_req; 168 struct zfcp_fsf_req *old_req, *abrt_req;
169 unsigned long flags; 169 unsigned long flags;
170 unsigned long old_req_id = (unsigned long) scpnt->host_scribble; 170 unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
171 int retval = SUCCESS; 171 int retval = SUCCESS;
172 int retry = 3; 172 int retry = 3;
173 char *dbf_tag;
173 174
174 /* avoid race condition between late normal completion and abort */ 175 /* avoid race condition between late normal completion and abort */
175 write_lock_irqsave(&adapter->abort_lock, flags); 176 write_lock_irqsave(&adapter->abort_lock, flags);
176 177
177 spin_lock(&adapter->req_list_lock); 178 spin_lock(&adapter->req_list_lock);
178 old_req = zfcp_reqlist_find(adapter, old_req_id); 179 old_req = zfcp_reqlist_find(adapter, old_reqid);
179 spin_unlock(&adapter->req_list_lock); 180 spin_unlock(&adapter->req_list_lock);
180 if (!old_req) { 181 if (!old_req) {
181 write_unlock_irqrestore(&adapter->abort_lock, flags); 182 write_unlock_irqrestore(&adapter->abort_lock, flags);
182 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 183 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL,
183 old_req_id); 184 old_reqid);
184 return FAILED; /* completion could be in progress */ 185 return FAILED; /* completion could be in progress */
185 } 186 }
186 old_req->data = NULL; 187 old_req->data = NULL;
@@ -189,7 +190,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
189 write_unlock_irqrestore(&adapter->abort_lock, flags); 190 write_unlock_irqrestore(&adapter->abort_lock, flags);
190 191
191 while (retry--) { 192 while (retry--) {
192 abrt_req = zfcp_fsf_abort_fcp_command(old_req_id, unit); 193 abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit);
193 if (abrt_req) 194 if (abrt_req)
194 break; 195 break;
195 196
@@ -197,7 +198,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
197 if (!(atomic_read(&adapter->status) & 198 if (!(atomic_read(&adapter->status) &
198 ZFCP_STATUS_COMMON_RUNNING)) { 199 ZFCP_STATUS_COMMON_RUNNING)) {
199 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, 200 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
200 old_req_id); 201 old_reqid);
201 return SUCCESS; 202 return SUCCESS;
202 } 203 }
203 } 204 }
@@ -208,13 +209,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
208 abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 209 abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
209 210
210 if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) 211 if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
211 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, abrt_req, 0); 212 dbf_tag = "okay";
212 else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) 213 else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
213 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, abrt_req, 0); 214 dbf_tag = "lte2";
214 else { 215 else {
215 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, abrt_req, 0); 216 dbf_tag = "fail";
216 retval = FAILED; 217 retval = FAILED;
217 } 218 }
219 zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid);
218 zfcp_fsf_req_free(abrt_req); 220 zfcp_fsf_req_free(abrt_req);
219 return retval; 221 return retval;
220} 222}
@@ -534,6 +536,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
534 struct fc_rport_identifiers ids; 536 struct fc_rport_identifiers ids;
535 struct fc_rport *rport; 537 struct fc_rport *rport;
536 538
539 if (port->rport)
540 return;
541
537 ids.node_name = port->wwnn; 542 ids.node_name = port->wwnn;
538 ids.port_name = port->wwpn; 543 ids.port_name = port->wwpn;
539 ids.port_id = port->d_id; 544 ids.port_id = port->d_id;
@@ -557,8 +562,10 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
557{ 562{
558 struct fc_rport *rport = port->rport; 563 struct fc_rport *rport = port->rport;
559 564
560 if (rport) 565 if (rport) {
561 fc_remote_port_delete(rport); 566 fc_remote_port_delete(rport);
567 port->rport = NULL;
568 }
562} 569}
563 570
564void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) 571void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3e51e64d1108..0fe5cce818cb 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -494,9 +494,14 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
494 struct Scsi_Host *scsi_host = class_to_shost(dev); 494 struct Scsi_Host *scsi_host = class_to_shost(dev);
495 struct zfcp_adapter *adapter = 495 struct zfcp_adapter *adapter =
496 (struct zfcp_adapter *) scsi_host->hostdata[0]; 496 (struct zfcp_adapter *) scsi_host->hostdata[0];
497 u64 util;
498
499 spin_lock_bh(&adapter->qdio_stat_lock);
500 util = adapter->req_q_util;
501 spin_unlock_bh(&adapter->qdio_stat_lock);
497 502
498 return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full), 503 return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full),
499 (unsigned long long)adapter->req_q_util); 504 (unsigned long long)util);
500} 505}
501static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); 506static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
502 507
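
Editor's note: the sysfs fix above snapshots req_q_util under qdio_stat_lock before formatting it, so the 64-bit counter cannot change (or be read torn) while it is being printed. A user-space sketch of the same snapshot-then-format pattern (pthread mutex standing in for the kernel spinlock, illustrative names):

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static pthread_mutex_t stat_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t req_q_util;     /* updated elsewhere under stat_lock */

/* Copy the counter under its lock, then format outside the lock. */
static int show_queue_util(char *buf, size_t len)
{
	uint64_t util;

	pthread_mutex_lock(&stat_lock);
	util = req_q_util;
	pthread_mutex_unlock(&stat_lock);

	return snprintf(buf, len, "%llu\n", (unsigned long long)util);
}

int main(void)
{
	char buf[32];

	req_q_util = 42;
	show_queue_util(buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}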
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 2bc22be5f849..145ab9ba55ea 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -415,9 +415,9 @@ static void fc_exch_timeout(struct work_struct *work)
415 e_stat = ep->esb_stat; 415 e_stat = ep->esb_stat;
416 if (e_stat & ESB_ST_COMPLETE) { 416 if (e_stat & ESB_ST_COMPLETE) {
417 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL; 417 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
418 spin_unlock_bh(&ep->ex_lock);
418 if (e_stat & ESB_ST_REC_QUAL) 419 if (e_stat & ESB_ST_REC_QUAL)
419 fc_exch_rrq(ep); 420 fc_exch_rrq(ep);
420 spin_unlock_bh(&ep->ex_lock);
421 goto done; 421 goto done;
422 } else { 422 } else {
423 resp = ep->resp; 423 resp = ep->resp;
@@ -1624,14 +1624,14 @@ static void fc_exch_rrq(struct fc_exch *ep)
1624 struct fc_lport *lp; 1624 struct fc_lport *lp;
1625 struct fc_els_rrq *rrq; 1625 struct fc_els_rrq *rrq;
1626 struct fc_frame *fp; 1626 struct fc_frame *fp;
1627 struct fc_seq *rrq_sp;
1628 u32 did; 1627 u32 did;
1629 1628
1630 lp = ep->lp; 1629 lp = ep->lp;
1631 1630
1632 fp = fc_frame_alloc(lp, sizeof(*rrq)); 1631 fp = fc_frame_alloc(lp, sizeof(*rrq));
1633 if (!fp) 1632 if (!fp)
1634 return; 1633 goto retry;
1634
1635 rrq = fc_frame_payload_get(fp, sizeof(*rrq)); 1635 rrq = fc_frame_payload_get(fp, sizeof(*rrq));
1636 memset(rrq, 0, sizeof(*rrq)); 1636 memset(rrq, 0, sizeof(*rrq));
1637 rrq->rrq_cmd = ELS_RRQ; 1637 rrq->rrq_cmd = ELS_RRQ;
@@ -1647,13 +1647,20 @@ static void fc_exch_rrq(struct fc_exch *ep)
1647 fc_host_port_id(lp->host), FC_TYPE_ELS, 1647 fc_host_port_id(lp->host), FC_TYPE_ELS,
1648 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1648 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1649 1649
1650 rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, 1650 if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov))
1651 lp->e_d_tov); 1651 return;
1652 if (!rrq_sp) { 1652
1653 ep->esb_stat |= ESB_ST_REC_QUAL; 1653retry:
1654 fc_exch_timer_set_locked(ep, ep->r_a_tov); 1654 spin_lock_bh(&ep->ex_lock);
1655 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
1656 spin_unlock_bh(&ep->ex_lock);
1657 /* drop hold for rec qual */
1658 fc_exch_release(ep);
1655 return; 1659 return;
1656 } 1660 }
1661 ep->esb_stat |= ESB_ST_REC_QUAL;
1662 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1663 spin_unlock_bh(&ep->ex_lock);
1657} 1664}
1658 1665
1659 1666
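
Editor's note: in the fc_exch.c hunks above, the unlock is moved ahead of fc_exch_rrq(), and the retry path now re-takes ep->ex_lock itself, so the exchange lock is never held across a call that acquires it again. A minimal sketch of that ordering (illustrative names, pthread mutex instead of the kernel spinlock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;

/* Takes ex_lock itself (e.g. to re-arm a timer on failure),
 * so callers must not hold it. */
static void send_rrq(void)
{
	pthread_mutex_lock(&ex_lock);
	puts("RRQ sent (or timer re-armed on failure)");
	pthread_mutex_unlock(&ex_lock);
}

static void exchange_timeout(bool needs_rrq)
{
	pthread_mutex_lock(&ex_lock);
	/* ... inspect and update exchange state ... */
	pthread_mutex_unlock(&ex_lock);   /* drop before the RRQ path */

	if (needs_rrq)
		send_rrq();               /* safe: lock no longer held */
}

int main(void)
{
	exchange_timeout(true);
	return 0;
}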
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 716cc344c5df..a751f6230c22 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1974,10 +1974,10 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1974 * good and have never sent us a successful tmf response 1974 * good and have never sent us a successful tmf response
1975 * then sent more data for the cmd. 1975 * then sent more data for the cmd.
1976 */ 1976 */
1977 spin_lock(&session->lock); 1977 spin_lock_bh(&session->lock);
1978 fail_scsi_task(task, DID_ABORT); 1978 fail_scsi_task(task, DID_ABORT);
1979 conn->tmf_state = TMF_INITIAL; 1979 conn->tmf_state = TMF_INITIAL;
1980 spin_unlock(&session->lock); 1980 spin_unlock_bh(&session->lock);
1981 iscsi_start_tx(conn); 1981 iscsi_start_tx(conn);
1982 goto success_unlocked; 1982 goto success_unlocked;
1983 case TMF_TIMEDOUT: 1983 case TMF_TIMEDOUT:
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 54fa1e42dc4d..b3381959acce 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -766,6 +766,7 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
766 if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr, 766 if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr,
767 SAS_ADDR_SIZE) && ephy->port) { 767 SAS_ADDR_SIZE) && ephy->port) {
768 sas_port_add_phy(ephy->port, phy->phy); 768 sas_port_add_phy(ephy->port, phy->phy);
769 phy->port = ephy->port;
769 phy->phy_state = PHY_DEVICE_DISCOVERED; 770 phy->phy_state = PHY_DEVICE_DISCOVERED;
770 return 0; 771 return 0;
771 } 772 }
@@ -945,11 +946,21 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
945 if (ex->ex_phy[i].phy_state == PHY_VACANT || 946 if (ex->ex_phy[i].phy_state == PHY_VACANT ||
946 ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) 947 ex->ex_phy[i].phy_state == PHY_NOT_PRESENT)
947 continue; 948 continue;
948 949 /*
950 * Due to races, the phy might not get added to the
951 * wide port, so we add the phy to the wide port here.
952 */
949 if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == 953 if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
950 SAS_ADDR(child->sas_addr)) 954 SAS_ADDR(child->sas_addr)) {
951 ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; 955 ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
956 res = sas_ex_join_wide_port(dev, i);
957 if (!res)
958 SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
959 i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
960
961 }
952 } 962 }
963 res = 0;
953 } 964 }
954 965
955 return res; 966 return res;
@@ -1598,7 +1609,7 @@ static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
1598} 1609}
1599 1610
1600static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, 1611static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1601 int from_phy) 1612 int from_phy, bool update)
1602{ 1613{
1603 struct expander_device *ex = &dev->ex_dev; 1614 struct expander_device *ex = &dev->ex_dev;
1604 int res = 0; 1615 int res = 0;
@@ -1611,7 +1622,9 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1611 if (res) 1622 if (res)
1612 goto out; 1623 goto out;
1613 else if (phy_change_count != ex->ex_phy[i].phy_change_count) { 1624 else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
1614 ex->ex_phy[i].phy_change_count = phy_change_count; 1625 if (update)
1626 ex->ex_phy[i].phy_change_count =
1627 phy_change_count;
1615 *phy_id = i; 1628 *phy_id = i;
1616 return 0; 1629 return 0;
1617 } 1630 }
@@ -1653,31 +1666,52 @@ out:
1653 kfree(rg_req); 1666 kfree(rg_req);
1654 return res; 1667 return res;
1655} 1668}
1669/**
1670 * sas_find_bcast_dev - find the device that issued BROADCAST(CHANGE).
1671 * @dev: domain device to be checked.
1672 * @src_dev: the device which originated BROADCAST(CHANGE).
1673 *
1674 * Adds support for self-configuring expanders. Suppose two expanders are
1675 * cascaded: while the first-level expander is self-configuring, disks are
1676 * hotplugged into the second-level expander. The BROADCAST(CHANGE) is then
1677 * originated not only by the second-level expander but also by the
1678 * first-level expander (see SAS protocol SAS 2r-14, 7.11 for details). That
1679 * is, the expander change counts of both expanders increment at least once,
1680 * but the expander whose phy change count has changed is the source device
1681 * we are looking for.
1682 */
1656 1683
1657static int sas_find_bcast_dev(struct domain_device *dev, 1684static int sas_find_bcast_dev(struct domain_device *dev,
1658 struct domain_device **src_dev) 1685 struct domain_device **src_dev)
1659{ 1686{
1660 struct expander_device *ex = &dev->ex_dev; 1687 struct expander_device *ex = &dev->ex_dev;
1661 int ex_change_count = -1; 1688 int ex_change_count = -1;
1689 int phy_id = -1;
1662 int res; 1690 int res;
1691 struct domain_device *ch;
1663 1692
1664 res = sas_get_ex_change_count(dev, &ex_change_count); 1693 res = sas_get_ex_change_count(dev, &ex_change_count);
1665 if (res) 1694 if (res)
1666 goto out; 1695 goto out;
1667 if (ex_change_count != -1 && 1696 if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) {
1668 ex_change_count != ex->ex_change_count) { 1697 /* Just check whether this expander's phy change counts changed,
1669 *src_dev = dev; 1698 * to determine whether this expander originated the BROADCAST,
1670 ex->ex_change_count = ex_change_count; 1699 * without updating the phy change count field in our structure.
1671 } else { 1700 */
1672 struct domain_device *ch; 1701 res = sas_find_bcast_phy(dev, &phy_id, 0, false);
1673 1702 if (phy_id != -1) {
1674 list_for_each_entry(ch, &ex->children, siblings) { 1703 *src_dev = dev;
1675 if (ch->dev_type == EDGE_DEV || 1704 ex->ex_change_count = ex_change_count;
1676 ch->dev_type == FANOUT_DEV) { 1705 SAS_DPRINTK("Expander phy change count has changed\n");
1677 res = sas_find_bcast_dev(ch, src_dev); 1706 return res;
1678 if (src_dev) 1707 } else
1679 return res; 1708 SAS_DPRINTK("Expander phys DID NOT change\n");
1680 } 1709 }
1710 list_for_each_entry(ch, &ex->children, siblings) {
1711 if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
1712 res = sas_find_bcast_dev(ch, src_dev);
1713 if (src_dev)
1714 return res;
1681 } 1715 }
1682 } 1716 }
1683out: 1717out:
@@ -1700,24 +1734,26 @@ static void sas_unregister_ex_tree(struct domain_device *dev)
1700} 1734}
1701 1735
1702static void sas_unregister_devs_sas_addr(struct domain_device *parent, 1736static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1703 int phy_id) 1737 int phy_id, bool last)
1704{ 1738{
1705 struct expander_device *ex_dev = &parent->ex_dev; 1739 struct expander_device *ex_dev = &parent->ex_dev;
1706 struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; 1740 struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
1707 struct domain_device *child, *n; 1741 struct domain_device *child, *n;
1708 1742 if (last) {
1709 list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { 1743 list_for_each_entry_safe(child, n,
1710 if (SAS_ADDR(child->sas_addr) == 1744 &ex_dev->children, siblings) {
1711 SAS_ADDR(phy->attached_sas_addr)) { 1745 if (SAS_ADDR(child->sas_addr) ==
1712 if (child->dev_type == EDGE_DEV || 1746 SAS_ADDR(phy->attached_sas_addr)) {
1713 child->dev_type == FANOUT_DEV) 1747 if (child->dev_type == EDGE_DEV ||
1714 sas_unregister_ex_tree(child); 1748 child->dev_type == FANOUT_DEV)
1715 else 1749 sas_unregister_ex_tree(child);
1716 sas_unregister_dev(child); 1750 else
1717 break; 1751 sas_unregister_dev(child);
1752 break;
1753 }
1718 } 1754 }
1755 sas_disable_routing(parent, phy->attached_sas_addr);
1719 } 1756 }
1720 sas_disable_routing(parent, phy->attached_sas_addr);
1721 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 1757 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
1722 sas_port_delete_phy(phy->port, phy->phy); 1758 sas_port_delete_phy(phy->port, phy->phy);
1723 if (phy->port->num_phys == 0) 1759 if (phy->port->num_phys == 0)
@@ -1770,15 +1806,31 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
1770{ 1806{
1771 struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; 1807 struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
1772 struct domain_device *child; 1808 struct domain_device *child;
1773 int res; 1809 bool found = false;
1810 int res, i;
1774 1811
1775 SAS_DPRINTK("ex %016llx phy%d new device attached\n", 1812 SAS_DPRINTK("ex %016llx phy%d new device attached\n",
1776 SAS_ADDR(dev->sas_addr), phy_id); 1813 SAS_ADDR(dev->sas_addr), phy_id);
1777 res = sas_ex_phy_discover(dev, phy_id); 1814 res = sas_ex_phy_discover(dev, phy_id);
1778 if (res) 1815 if (res)
1779 goto out; 1816 goto out;
1817 /* to support the wide port inserted */
1818 for (i = 0; i < dev->ex_dev.num_phys; i++) {
1819 struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
1820 if (i == phy_id)
1821 continue;
1822 if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
1823 SAS_ADDR(ex_phy->attached_sas_addr)) {
1824 found = true;
1825 break;
1826 }
1827 }
1828 if (found) {
1829 sas_ex_join_wide_port(dev, phy_id);
1830 return 0;
1831 }
1780 res = sas_ex_discover_devices(dev, phy_id); 1832 res = sas_ex_discover_devices(dev, phy_id);
1781 if (res) 1833 if (!res)
1782 goto out; 1834 goto out;
1783 list_for_each_entry(child, &dev->ex_dev.children, siblings) { 1835 list_for_each_entry(child, &dev->ex_dev.children, siblings) {
1784 if (SAS_ADDR(child->sas_addr) == 1836 if (SAS_ADDR(child->sas_addr) ==
@@ -1793,7 +1845,7 @@ out:
1793 return res; 1845 return res;
1794} 1846}
1795 1847
1796static int sas_rediscover_dev(struct domain_device *dev, int phy_id) 1848static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
1797{ 1849{
1798 struct expander_device *ex = &dev->ex_dev; 1850 struct expander_device *ex = &dev->ex_dev;
1799 struct ex_phy *phy = &ex->ex_phy[phy_id]; 1851 struct ex_phy *phy = &ex->ex_phy[phy_id];
@@ -1804,11 +1856,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id)
1804 switch (res) { 1856 switch (res) {
1805 case SMP_RESP_NO_PHY: 1857 case SMP_RESP_NO_PHY:
1806 phy->phy_state = PHY_NOT_PRESENT; 1858 phy->phy_state = PHY_NOT_PRESENT;
1807 sas_unregister_devs_sas_addr(dev, phy_id); 1859 sas_unregister_devs_sas_addr(dev, phy_id, last);
1808 goto out; break; 1860 goto out; break;
1809 case SMP_RESP_PHY_VACANT: 1861 case SMP_RESP_PHY_VACANT:
1810 phy->phy_state = PHY_VACANT; 1862 phy->phy_state = PHY_VACANT;
1811 sas_unregister_devs_sas_addr(dev, phy_id); 1863 sas_unregister_devs_sas_addr(dev, phy_id, last);
1812 goto out; break; 1864 goto out; break;
1813 case SMP_RESP_FUNC_ACC: 1865 case SMP_RESP_FUNC_ACC:
1814 break; 1866 break;
@@ -1816,7 +1868,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id)
1816 1868
1817 if (SAS_ADDR(attached_sas_addr) == 0) { 1869 if (SAS_ADDR(attached_sas_addr) == 0) {
1818 phy->phy_state = PHY_EMPTY; 1870 phy->phy_state = PHY_EMPTY;
1819 sas_unregister_devs_sas_addr(dev, phy_id); 1871 sas_unregister_devs_sas_addr(dev, phy_id, last);
1820 } else if (SAS_ADDR(attached_sas_addr) == 1872 } else if (SAS_ADDR(attached_sas_addr) ==
1821 SAS_ADDR(phy->attached_sas_addr)) { 1873 SAS_ADDR(phy->attached_sas_addr)) {
1822 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n", 1874 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n",
@@ -1828,12 +1880,27 @@ out:
1828 return res; 1880 return res;
1829} 1881}
1830 1882
1883/**
1884 * sas_rediscover - revalidate the domain.
1885 * @dev: domain device to be checked.
1886 * @phy_id: the phy id to be checked.
1887 *
1888 * NOTE: this process _must_ quit (return) as soon as any connection
1889 * errors are encountered. Connection recovery is done elsewhere.
1890 * The discover process only interrogates devices in order to discover the
1891 * domain. For unplugging, we unregister the device only when it is the
1892 * last phy in the port; for the other phys in the port, we just delete them
1893 * from the port. For plugging in, we do discovery only for the first phy;
1894 * the other phys in the port are simply added to the port to form the
1895 * wide port.
1896 */
1831static int sas_rediscover(struct domain_device *dev, const int phy_id) 1897static int sas_rediscover(struct domain_device *dev, const int phy_id)
1832{ 1898{
1833 struct expander_device *ex = &dev->ex_dev; 1899 struct expander_device *ex = &dev->ex_dev;
1834 struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; 1900 struct ex_phy *changed_phy = &ex->ex_phy[phy_id];
1835 int res = 0; 1901 int res = 0;
1836 int i; 1902 int i;
1903 bool last = true; /* is this the last phy of the port */
1837 1904
1838 SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", 1905 SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
1839 SAS_ADDR(dev->sas_addr), phy_id); 1906 SAS_ADDR(dev->sas_addr), phy_id);
@@ -1848,13 +1915,13 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id)
1848 SAS_ADDR(changed_phy->attached_sas_addr)) { 1915 SAS_ADDR(changed_phy->attached_sas_addr)) {
1849 SAS_DPRINTK("phy%d part of wide port with " 1916 SAS_DPRINTK("phy%d part of wide port with "
1850 "phy%d\n", phy_id, i); 1917 "phy%d\n", phy_id, i);
1851 goto out; 1918 last = false;
1919 break;
1852 } 1920 }
1853 } 1921 }
1854 res = sas_rediscover_dev(dev, phy_id); 1922 res = sas_rediscover_dev(dev, phy_id, last);
1855 } else 1923 } else
1856 res = sas_discover_new(dev, phy_id); 1924 res = sas_discover_new(dev, phy_id);
1857out:
1858 return res; 1925 return res;
1859} 1926}
1860 1927
@@ -1881,7 +1948,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
1881 1948
1882 do { 1949 do {
1883 phy_id = -1; 1950 phy_id = -1;
1884 res = sas_find_bcast_phy(dev, &phy_id, i); 1951 res = sas_find_bcast_phy(dev, &phy_id, i, true);
1885 if (phy_id == -1) 1952 if (phy_id == -1)
1886 break; 1953 break;
1887 res = sas_rediscover(dev, phy_id); 1954 res = sas_rediscover(dev, phy_id);
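
Editor's note: the sas_expander.c changes above thread a "last" flag through the rediscovery path so the attached device is only unregistered when the final phy of a wide port disappears; earlier phys are simply dropped from the port. A small standalone sketch of that rule (toy structures, not the libsas ones):

#include <stdbool.h>
#include <stdio.h>

struct wide_port {
	int num_phys;        /* phys currently part of this wide port */
	bool device_present; /* device registered behind the port */
};

/* Remove one phy; tear the device down only when it was the last phy. */
static void phy_gone(struct wide_port *port, bool last)
{
	if (last && port->device_present) {
		port->device_present = false;
		puts("last phy gone: unregister attached device");
	} else {
		puts("phy removed, wide port still has members");
	}
	port->num_phys--;
}

int main(void)
{
	struct wide_port port = { .num_phys = 2, .device_present = true };

	phy_gone(&port, /* last = */ port.num_phys == 1);
	phy_gone(&port, /* last = */ port.num_phys == 1);
	return 0;
}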
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index e6ac59c023f1..fe8b74c706d2 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -56,7 +56,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
56 } 56 }
57 } 57 }
58 58
59 /* find a port */ 59 /* see if the phy should be part of a wide port */
60 spin_lock_irqsave(&sas_ha->phy_port_lock, flags); 60 spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
61 for (i = 0; i < sas_ha->num_phys; i++) { 61 for (i = 0; i < sas_ha->num_phys; i++) {
62 port = sas_ha->sas_port[i]; 62 port = sas_ha->sas_port[i];
@@ -69,12 +69,23 @@ static void sas_form_port(struct asd_sas_phy *phy)
69 SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, 69 SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
70 port->id); 70 port->id);
71 break; 71 break;
72 } else if (*(u64 *) port->sas_addr == 0 && port->num_phys==0) {
73 memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
74 break;
75 } 72 }
76 spin_unlock(&port->phy_list_lock); 73 spin_unlock(&port->phy_list_lock);
77 } 74 }
 75 /* The phy does not match any existing port; create a new one */
76 if (i == sas_ha->num_phys) {
77 for (i = 0; i < sas_ha->num_phys; i++) {
78 port = sas_ha->sas_port[i];
79 spin_lock(&port->phy_list_lock);
80 if (*(u64 *)port->sas_addr == 0
81 && port->num_phys == 0) {
82 memcpy(port->sas_addr, phy->sas_addr,
83 SAS_ADDR_SIZE);
84 break;
85 }
86 spin_unlock(&port->phy_list_lock);
87 }
88 }
78 89
79 if (i >= sas_ha->num_phys) { 90 if (i >= sas_ha->num_phys) {
80 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", 91 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
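
Editor's note: sas_form_port() now scans twice: first for an existing port with the same attached SAS address (the wide-port join), and only if nothing matched, for a completely free slot. A standalone sketch of that two-pass selection (simplified port array, illustrative addresses):

#include <stdio.h>
#include <stdint.h>

#define NUM_PORTS 4

struct port {
	uint64_t sas_addr;   /* 0 means the slot is free */
	int num_phys;
};

/* Pass 1: join an existing port with the same address.
 * Pass 2: only if nothing matched, claim a completely free slot. */
static int pick_port(struct port ports[], uint64_t phy_addr)
{
	int i;

	for (i = 0; i < NUM_PORTS; i++)
		if (ports[i].sas_addr == phy_addr && ports[i].num_phys)
			return i;                      /* wide-port match */

	for (i = 0; i < NUM_PORTS; i++)
		if (ports[i].sas_addr == 0 && ports[i].num_phys == 0) {
			ports[i].sas_addr = phy_addr;  /* new port */
			return i;
		}

	return -1;                             /* no free port: bug? */
}

int main(void)
{
	struct port ports[NUM_PORTS] = { { 0x5000c50012345678ULL, 1 } };

	printf("phy with same addr -> port %d\n",
	       pick_port(ports, 0x5000c50012345678ULL));
	printf("phy with new addr  -> port %d\n",
	       pick_port(ports, 0x5000c500deadbeefULL));
	return 0;
}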
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index fcc184cd066d..cbceb0ebabf7 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -15,19 +15,18 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
15 uint32_t cnt; 15 uint32_t cnt;
16 uint8_t *c = b; 16 uint8_t *c = b;
17 17
18 printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh " 18 printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh "
19 "Fh\n"); 19 "Fh\n");
20 printk("------------------------------------------------------------" 20 printk("------------------------------------------------------------"
21 "--\n"); 21 "--\n");
22 for (cnt = 0; cnt < size; cnt++, c++) { 22 for (cnt = 0; cnt < size; c++) {
23 printk(KERN_DEBUG "%02x", *c); 23 printk(KERN_INFO "%02x", *c);
24 if (!(cnt % 16)) 24 if (!(++cnt % 16))
25 printk(KERN_DEBUG "\n"); 25 printk(KERN_INFO "\n");
26 26
27 else 27 else
28 printk(KERN_DEBUG " "); 28 printk(KERN_INFO " ");
29 } 29 }
30 if (cnt % 16) 30 printk(KERN_INFO "\n");
31 printk(KERN_DEBUG "\n");
32} 31}
33 32
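
Editor's note: the ql4_dbg.c fix increments the byte counter before the modulo test so the line break lands after every 16th byte, and it always terminates the dump with a newline. A user-space version of the corrected loop (printf instead of printk):

#include <stdio.h>
#include <stdint.h>

/* Dump a buffer as hex, 16 bytes per line, always ending with a newline. */
static void dump_buffer(const void *b, uint32_t size)
{
	const uint8_t *c = b;
	uint32_t cnt;

	for (cnt = 0; cnt < size; c++) {
		printf("%02x", *c);
		if (!(++cnt % 16))      /* break after every 16th byte */
			printf("\n");
		else
			printf(" ");
	}
	printf("\n");
}

int main(void)
{
	uint8_t buf[20];
	unsigned int i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (uint8_t)i;
	dump_buffer(buf, sizeof(buf));
	return 0;
}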
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index b586f27c3bd4..81b5f29254e2 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -100,7 +100,6 @@
100#define MAX_SRBS MAX_CMDS_TO_RISC 100#define MAX_SRBS MAX_CMDS_TO_RISC
101#define MBOX_AEN_REG_COUNT 5 101#define MBOX_AEN_REG_COUNT 5
102#define MAX_INIT_RETRIES 5 102#define MAX_INIT_RETRIES 5
103#define IOCB_HIWAT_CUSHION 16
104 103
105/* 104/*
106 * Buffer sizes 105 * Buffer sizes
@@ -184,6 +183,11 @@ struct srb {
184 uint16_t cc_stat; 183 uint16_t cc_stat;
185 u_long r_start; /* Time we receive a cmd from OS */ 184 u_long r_start; /* Time we receive a cmd from OS */
186 u_long u_start; /* Time when we handed the cmd to F/W */ 185 u_long u_start; /* Time when we handed the cmd to F/W */
186
187 /* Used for extended sense / status continuation */
188 uint8_t *req_sense_ptr;
189 uint16_t req_sense_len;
190 uint16_t reserved2;
187}; 191};
188 192
189/* 193/*
@@ -302,7 +306,6 @@ struct scsi_qla_host {
302 uint32_t tot_ddbs; 306 uint32_t tot_ddbs;
303 307
304 uint16_t iocb_cnt; 308 uint16_t iocb_cnt;
305 uint16_t iocb_hiwat;
306 309
307 /* SRB cache. */ 310 /* SRB cache. */
308#define SRB_MIN_REQ 128 311#define SRB_MIN_REQ 128
@@ -436,6 +439,8 @@ struct scsi_qla_host {
436 /* Map ddb_list entry by FW ddb index */ 439 /* Map ddb_list entry by FW ddb index */
437 struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES]; 440 struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
438 441
442 /* Saved srb for status continuation entry processing */
443 struct srb *status_srb;
439}; 444};
440 445
441static inline int is_qla4010(struct scsi_qla_host *ha) 446static inline int is_qla4010(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 1b667a70cffa..9cd7a608df38 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -572,6 +572,7 @@ struct conn_event_log_entry {
572 *************************************************************************/ 572 *************************************************************************/
573#define IOCB_MAX_CDB_LEN 16 /* Bytes in a CBD */ 573#define IOCB_MAX_CDB_LEN 16 /* Bytes in a CBD */
574#define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */ 574#define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */
575#define IOCB_MAX_EXT_SENSEDATA_LEN 60 /* Bytes of extended sense data */
575 576
576/* IOCB header structure */ 577/* IOCB header structure */
577struct qla4_header { 578struct qla4_header {
@@ -733,6 +734,12 @@ struct status_entry {
733 734
734}; 735};
735 736
737/* Status Continuation entry */
738struct status_cont_entry {
739 struct qla4_header hdr; /* 00-03 */
740 uint8_t ext_sense_data[IOCB_MAX_EXT_SENSEDATA_LEN]; /* 04-63 */
741};
742
736struct passthru0 { 743struct passthru0 {
737 struct qla4_header hdr; /* 00-03 */ 744 struct qla4_header hdr; /* 00-03 */
738 uint32_t handle; /* 04-07 */ 745 uint32_t handle; /* 04-07 */
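
Editor's note: the new status_cont_entry carries up to 60 further sense bytes when a command returns more sense data than the 32 bytes that fit in the status IOCB. A standalone sketch of assembling the full sense buffer from the first chunk plus continuation-sized chunks (dummy 0x5a payloads; the chunk sizes mirror the defines above):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FIRST_CHUNK 32   /* sense bytes carried in the status entry      */
#define CONT_CHUNK  60   /* sense bytes per status continuation entry    */

/* Assemble total_len sense bytes from a status entry followed by zero or
 * more continuation entries (dummy 0x5a payloads stand in for the IOCBs). */
static void gather_sense(uint8_t *dst, uint16_t total_len)
{
	uint16_t copied = total_len < FIRST_CHUNK ? total_len : FIRST_CHUNK;

	memset(dst, 0x5a, copied);              /* chunk from status entry */

	while (copied < total_len) {
		uint16_t n = total_len - copied;

		if (n > CONT_CHUNK)
			n = CONT_CHUNK;
		memset(dst + copied, 0x5a, n);  /* chunk from continuation */
		copied += n;
	}
}

int main(void)
{
	uint8_t sense[96];

	gather_sense(sense, sizeof(sense));
	printf("assembled %zu sense bytes in 1 status + %d continuation(s)\n",
	       sizeof(sense),
	       (int)((sizeof(sense) - FIRST_CHUNK + CONT_CHUNK - 1) / CONT_CHUNK));
	return 0;
}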
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 912a67494adf..e0c32159749c 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -10,9 +10,42 @@
10#include "ql4_dbg.h" 10#include "ql4_dbg.h"
11#include "ql4_inline.h" 11#include "ql4_inline.h"
12 12
13
14#include <scsi/scsi_tcq.h> 13#include <scsi/scsi_tcq.h>
15 14
15static int
16qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
17{
18 uint16_t cnt;
19
20 /* Calculate number of free request entries. */
21 if ((req_cnt + 2) >= ha->req_q_count) {
22 cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
23 if (ha->request_in < cnt)
24 ha->req_q_count = cnt - ha->request_in;
25 else
26 ha->req_q_count = REQUEST_QUEUE_DEPTH -
27 (ha->request_in - cnt);
28 }
29
30 /* Check if room for request in request ring. */
31 if ((req_cnt + 2) < ha->req_q_count)
32 return 1;
33 else
34 return 0;
35}
36
37static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
38{
39 /* Advance request queue pointer */
40 if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
41 ha->request_in = 0;
42 ha->request_ptr = ha->request_ring;
43 } else {
44 ha->request_in++;
45 ha->request_ptr++;
46 }
47}
48
16/** 49/**
17 * qla4xxx_get_req_pkt - returns a valid entry in request queue. 50 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
18 * @ha: Pointer to host adapter structure. 51 * @ha: Pointer to host adapter structure.
@@ -26,35 +59,18 @@
26static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, 59static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
27 struct queue_entry **queue_entry) 60 struct queue_entry **queue_entry)
28{ 61{
29 uint16_t request_in; 62 uint16_t req_cnt = 1;
30 uint8_t status = QLA_SUCCESS;
31
32 *queue_entry = ha->request_ptr;
33 63
34 /* get the latest request_in and request_out index */ 64 if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
35 request_in = ha->request_in; 65 *queue_entry = ha->request_ptr;
36 ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
37
38 /* Advance request queue pointer and check for queue full */
39 if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
40 request_in = 0;
41 ha->request_ptr = ha->request_ring;
42 } else {
43 request_in++;
44 ha->request_ptr++;
45 }
46
47 /* request queue is full, try again later */
48 if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
49 /* restore request pointer */
50 ha->request_ptr = *queue_entry;
51 status = QLA_ERROR;
52 } else {
53 ha->request_in = request_in;
54 memset(*queue_entry, 0, sizeof(**queue_entry)); 66 memset(*queue_entry, 0, sizeof(**queue_entry));
67
68 qla4xxx_advance_req_ring_ptr(ha);
69 ha->req_q_count -= req_cnt;
70 return QLA_SUCCESS;
55 } 71 }
56 72
57 return status; 73 return QLA_ERROR;
58} 74}
59 75
60/** 76/**
@@ -100,21 +116,14 @@ exit_send_marker:
100 return status; 116 return status;
101} 117}
102 118
103static struct continuation_t1_entry* qla4xxx_alloc_cont_entry( 119static struct continuation_t1_entry *
104 struct scsi_qla_host *ha) 120qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
105{ 121{
106 struct continuation_t1_entry *cont_entry; 122 struct continuation_t1_entry *cont_entry;
107 123
108 cont_entry = (struct continuation_t1_entry *)ha->request_ptr; 124 cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
109 125
110 /* Advance request queue pointer */ 126 qla4xxx_advance_req_ring_ptr(ha);
111 if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
112 ha->request_in = 0;
113 ha->request_ptr = ha->request_ring;
114 } else {
115 ha->request_in++;
116 ha->request_ptr++;
117 }
118 127
119 /* Load packet defaults */ 128 /* Load packet defaults */
120 cont_entry->hdr.entryType = ET_CONTINUE; 129 cont_entry->hdr.entryType = ET_CONTINUE;
@@ -197,13 +206,10 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
197 struct scsi_cmnd *cmd = srb->cmd; 206 struct scsi_cmnd *cmd = srb->cmd;
198 struct ddb_entry *ddb_entry; 207 struct ddb_entry *ddb_entry;
199 struct command_t3_entry *cmd_entry; 208 struct command_t3_entry *cmd_entry;
200
201 int nseg; 209 int nseg;
202 uint16_t tot_dsds; 210 uint16_t tot_dsds;
203 uint16_t req_cnt; 211 uint16_t req_cnt;
204
205 unsigned long flags; 212 unsigned long flags;
206 uint16_t cnt;
207 uint32_t index; 213 uint32_t index;
208 char tag[2]; 214 char tag[2];
209 215
@@ -217,6 +223,19 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
217 223
218 index = (uint32_t)cmd->request->tag; 224 index = (uint32_t)cmd->request->tag;
219 225
226 /*
227 * Check to see if adapter is online before placing request on
228 * request queue. If a reset occurs and a request is in the queue,
229 * the firmware will still attempt to process the request, retrieving
230 * garbage for pointers.
231 */
232 if (!test_bit(AF_ONLINE, &ha->flags)) {
233 DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
234 "Do not issue command.\n",
235 ha->host_no, __func__));
236 goto queuing_error;
237 }
238
220 /* Calculate the number of request entries needed. */ 239 /* Calculate the number of request entries needed. */
221 nseg = scsi_dma_map(cmd); 240 nseg = scsi_dma_map(cmd);
222 if (nseg < 0) 241 if (nseg < 0)
@@ -224,17 +243,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
224 tot_dsds = nseg; 243 tot_dsds = nseg;
225 244
226 req_cnt = qla4xxx_calc_request_entries(tot_dsds); 245 req_cnt = qla4xxx_calc_request_entries(tot_dsds);
227 246 if (!qla4xxx_space_in_req_ring(ha, req_cnt))
228 if (ha->req_q_count < (req_cnt + 2)) {
229 cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
230 if (ha->request_in < cnt)
231 ha->req_q_count = cnt - ha->request_in;
232 else
233 ha->req_q_count = REQUEST_QUEUE_DEPTH -
234 (ha->request_in - cnt);
235 }
236
237 if (ha->req_q_count < (req_cnt + 2))
238 goto queuing_error; 247 goto queuing_error;
239 248
240 /* total iocbs active */ 249 /* total iocbs active */
@@ -286,32 +295,10 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
286 break; 295 break;
287 } 296 }
288 297
289 298 qla4xxx_advance_req_ring_ptr(ha);
290 /* Advance request queue pointer */
291 ha->request_in++;
292 if (ha->request_in == REQUEST_QUEUE_DEPTH) {
293 ha->request_in = 0;
294 ha->request_ptr = ha->request_ring;
295 } else
296 ha->request_ptr++;
297
298
299 qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds); 299 qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
300 wmb(); 300 wmb();
301 301
302 /*
303 * Check to see if adapter is online before placing request on
304 * request queue. If a reset occurs and a request is in the queue,
305 * the firmware will still attempt to process the request, retrieving
306 * garbage for pointers.
307 */
308 if (!test_bit(AF_ONLINE, &ha->flags)) {
309 DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
310 "Do not issue command.\n",
311 ha->host_no, __func__));
312 goto queuing_error;
313 }
314
315 srb->cmd->host_scribble = (unsigned char *)srb; 302 srb->cmd->host_scribble = (unsigned char *)srb;
316 303
317 /* update counters */ 304 /* update counters */
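
The ql4_iocb.c change above folds the duplicated wrap-around code into qla4xxx_advance_req_ring_ptr() and moves the free-slot calculation into qla4xxx_space_in_req_ring(), which refreshes the cached count from the firmware's shadow out-pointer only when it looks too small. A minimal standalone sketch of that request-ring pattern, with illustrative names and depth rather than the driver's:

    #include <stdint.h>
    #include <stdbool.h>

    #define RING_DEPTH 64              /* illustrative; the driver uses REQUEST_QUEUE_DEPTH */

    struct ring {
        uint16_t in;                   /* producer index (driver-owned) */
        uint16_t count;                /* cached number of free slots */
        volatile uint32_t *shadow_out; /* consumer index written back by firmware */
    };

    /* Advance the producer index, wrapping at the end of the ring. */
    static void ring_advance_in(struct ring *r)
    {
        r->in = (r->in == RING_DEPTH - 1) ? 0 : r->in + 1;
    }

    /*
     * Check whether req_cnt entries fit.  The cached count is refreshed
     * from the shadow out-pointer only when it looks insufficient, which
     * is what qla4xxx_space_in_req_ring() does with req_q_out.
     */
    static bool ring_has_space(struct ring *r, uint16_t req_cnt)
    {
        if (r->count < req_cnt + 2) {
            uint16_t out = (uint16_t)*r->shadow_out;

            if (r->in < out)
                r->count = out - r->in;
            else
                r->count = RING_DEPTH - (r->in - out);
        }
        return r->count >= req_cnt + 2;
    }

The "+ 2" cushion mirrors the inline check that was removed, which never let the ring fill completely.
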
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 799120fcb9be..8025ee16588e 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -11,6 +11,98 @@
11#include "ql4_inline.h" 11#include "ql4_inline.h"
12 12
13/** 13/**
14 * qla4xxx_copy_sense - copy sense data into cmd sense buffer
15 * @ha: Pointer to host adapter structure.
16 * @sts_entry: Pointer to status entry structure.
17 * @srb: Pointer to srb structure.
18 **/
19static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
20 struct status_entry *sts_entry,
21 struct srb *srb)
22{
23 struct scsi_cmnd *cmd = srb->cmd;
24 uint16_t sense_len;
25
26 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
27 sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
28 if (sense_len == 0)
29 return;
30
31 /* Save total available sense length,
32 * not to exceed cmd's sense buffer size */
33 sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
34 srb->req_sense_ptr = cmd->sense_buffer;
35 srb->req_sense_len = sense_len;
36
37 /* Copy sense from sts_entry pkt */
38 sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
39 memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);
40
41 DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, "
42 "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
43 cmd->device->channel, cmd->device->id,
44 cmd->device->lun, __func__,
45 sts_entry->senseData[2] & 0x0f,
46 sts_entry->senseData[7],
47 sts_entry->senseData[12],
48 sts_entry->senseData[13]));
49
50 DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
51 srb->flags |= SRB_GOT_SENSE;
52
53 /* Update srb, in case a sts_cont pkt follows */
54 srb->req_sense_ptr += sense_len;
55 srb->req_sense_len -= sense_len;
56 if (srb->req_sense_len != 0)
57 ha->status_srb = srb;
58 else
59 ha->status_srb = NULL;
60}
61
62/**
63 * qla4xxx_status_cont_entry - Process a Status Continuations entry.
64 * @ha: SCSI driver HA context
65 * @sts_cont: Entry pointer
66 *
67 * Extended sense data.
68 */
69static void
70qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
71 struct status_cont_entry *sts_cont)
72{
73 struct srb *srb = ha->status_srb;
74 struct scsi_cmnd *cmd;
75 uint8_t sense_len;
76
77 if (srb == NULL)
78 return;
79
80 cmd = srb->cmd;
81 if (cmd == NULL) {
82 DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
83 "back to OS srb=%p srb->state:%d\n", ha->host_no,
84 __func__, srb, srb->state));
85 ha->status_srb = NULL;
86 return;
87 }
88
89 /* Copy sense data. */
90 sense_len = min_t(uint16_t, srb->req_sense_len,
91 IOCB_MAX_EXT_SENSEDATA_LEN);
92 memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
93 DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));
94
95 srb->req_sense_ptr += sense_len;
96 srb->req_sense_len -= sense_len;
97
98 /* Place command on done queue. */
99 if (srb->req_sense_len == 0) {
100 qla4xxx_srb_compl(ha, srb);
101 ha->status_srb = NULL;
102 }
103}
104
105/**
14 * qla4xxx_status_entry - processes status IOCBs 106 * qla4xxx_status_entry - processes status IOCBs
15 * @ha: Pointer to host adapter structure. 107 * @ha: Pointer to host adapter structure.
16 * @sts_entry: Pointer to status entry structure. 108 * @sts_entry: Pointer to status entry structure.
@@ -23,7 +115,6 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
23 struct srb *srb; 115 struct srb *srb;
24 struct ddb_entry *ddb_entry; 116 struct ddb_entry *ddb_entry;
25 uint32_t residual; 117 uint32_t residual;
26 uint16_t sensebytecnt;
27 118
28 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); 119 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
29 if (!srb) { 120 if (!srb) {
@@ -92,24 +183,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
92 break; 183 break;
93 184
94 /* Copy Sense Data into sense buffer. */ 185 /* Copy Sense Data into sense buffer. */
95 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 186 qla4xxx_copy_sense(ha, sts_entry, srb);
96
97 sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
98 if (sensebytecnt == 0)
99 break;
100
101 memcpy(cmd->sense_buffer, sts_entry->senseData,
102 min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));
103
104 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
105 "ASC/ASCQ = %02x/%02x\n", ha->host_no,
106 cmd->device->channel, cmd->device->id,
107 cmd->device->lun, __func__,
108 sts_entry->senseData[2] & 0x0f,
109 sts_entry->senseData[12],
110 sts_entry->senseData[13]));
111
112 srb->flags |= SRB_GOT_SENSE;
113 break; 187 break;
114 188
115 case SCS_INCOMPLETE: 189 case SCS_INCOMPLETE:
@@ -176,23 +250,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
176 break; 250 break;
177 251
178 /* Copy Sense Data into sense buffer. */ 252 /* Copy Sense Data into sense buffer. */
179 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 253 qla4xxx_copy_sense(ha, sts_entry, srb);
180
181 sensebytecnt =
182 le16_to_cpu(sts_entry->senseDataByteCnt);
183 if (sensebytecnt == 0)
184 break;
185
186 memcpy(cmd->sense_buffer, sts_entry->senseData,
187 min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));
188
189 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
190 "ASC/ASCQ = %02x/%02x\n", ha->host_no,
191 cmd->device->channel, cmd->device->id,
192 cmd->device->lun, __func__,
193 sts_entry->senseData[2] & 0x0f,
194 sts_entry->senseData[12],
195 sts_entry->senseData[13]));
196 } else { 254 } else {
197 /* 255 /*
198 * If RISC reports underrun and target does not 256 * If RISC reports underrun and target does not
@@ -268,9 +326,10 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
268 326
269status_entry_exit: 327status_entry_exit:
270 328
271 /* complete the request */ 329 /* complete the request, if not waiting for status_continuation pkt */
272 srb->cc_stat = sts_entry->completionStatus; 330 srb->cc_stat = sts_entry->completionStatus;
273 qla4xxx_srb_compl(ha, srb); 331 if (ha->status_srb == NULL)
332 qla4xxx_srb_compl(ha, srb);
274} 333}
275 334
276/** 335/**
@@ -305,10 +364,7 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
305 /* process entry */ 364 /* process entry */
306 switch (sts_entry->hdr.entryType) { 365 switch (sts_entry->hdr.entryType) {
307 case ET_STATUS: 366 case ET_STATUS:
308 /* 367 /* Common status */
309 * Common status - Single completion posted in single
310 * IOSB.
311 */
312 qla4xxx_status_entry(ha, sts_entry); 368 qla4xxx_status_entry(ha, sts_entry);
313 break; 369 break;
314 370
@@ -316,9 +372,8 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
316 break; 372 break;
317 373
318 case ET_STATUS_CONTINUATION: 374 case ET_STATUS_CONTINUATION:
319 /* Just throw away the status continuation entries */ 375 qla4xxx_status_cont_entry(ha,
320 DEBUG2(printk("scsi%ld: %s: Status Continuation entry " 376 (struct status_cont_entry *) sts_entry);
321 "- ignoring\n", ha->host_no, __func__));
322 break; 377 break;
323 378
324 case ET_COMMAND: 379 case ET_COMMAND:
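
Taken together, qla4xxx_copy_sense() and qla4xxx_status_cont_entry() reassemble sense data that the firmware splits across the status IOCB and any ET_STATUS_CONTINUATION IOCBs, keeping a running pointer and remaining-byte count in the srb and completing the command only when the count reaches zero. A hedged standalone sketch of that chunked copy (buffer and chunk sizes are stand-ins, not the driver's constants):

    #include <string.h>
    #include <stdint.h>

    #define SENSE_BUF_SIZE   96   /* stand-in for SCSI_SENSE_BUFFERSIZE */
    #define FIRST_CHUNK_MAX  32   /* stand-in for IOCB_MAX_SENSEDATA_LEN */
    #define CONT_CHUNK_MAX   60   /* stand-in for IOCB_MAX_EXT_SENSEDATA_LEN */

    struct sense_cursor {
        uint8_t  buf[SENSE_BUF_SIZE];
        uint8_t *ptr;       /* where the next chunk lands */
        uint16_t remaining; /* bytes still expected */
    };

    /* Seed the cursor from the first (status) IOCB. */
    static void sense_start(struct sense_cursor *c, const uint8_t *data,
                            uint16_t total_len)
    {
        uint16_t n;

        memset(c->buf, 0, sizeof(c->buf));
        if (total_len > SENSE_BUF_SIZE)
            total_len = SENSE_BUF_SIZE;
        c->ptr = c->buf;
        c->remaining = total_len;

        n = total_len < FIRST_CHUNK_MAX ? total_len : FIRST_CHUNK_MAX;
        memcpy(c->ptr, data, n);
        c->ptr += n;
        c->remaining -= n;
    }

    /* Append one continuation IOCB; returns 1 once the sense data is complete. */
    static int sense_continue(struct sense_cursor *c, const uint8_t *data)
    {
        uint16_t n = c->remaining < CONT_CHUNK_MAX ? c->remaining : CONT_CHUNK_MAX;

        memcpy(c->ptr, data, n);
        c->ptr += n;
        c->remaining -= n;
        return c->remaining == 0;
    }

In the driver the "complete" condition is what finally calls qla4xxx_srb_compl(); until then ha->status_srb keeps the in-flight srb pinned.
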
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 051b0f5e8c8e..09d6d4b76f39 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -385,16 +385,6 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
385 mbox_sts[0])); 385 mbox_sts[0]));
386 return QLA_ERROR; 386 return QLA_ERROR;
387 } 387 }
388
389 /* High-water mark of IOCBs */
390 ha->iocb_hiwat = mbox_sts[2];
391 if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
392 ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
393 else
394 dev_info(&ha->pdev->dev, "WARNING!!! You have less than %d "
395 "firmware IOCBs available (%d).\n",
396 IOCB_HIWAT_CUSHION, ha->iocb_hiwat);
397
398 return QLA_SUCCESS; 388 return QLA_SUCCESS;
399} 389}
400 390
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ec9da6ce8489..40e3cafb3a9c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -66,6 +66,7 @@ static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
66static int qla4xxx_host_get_param(struct Scsi_Host *shost, 66static int qla4xxx_host_get_param(struct Scsi_Host *shost,
67 enum iscsi_host_param param, char *buf); 67 enum iscsi_host_param param, char *buf);
68static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); 68static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
69static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
69 70
70/* 71/*
71 * SCSI host template entry points 72 * SCSI host template entry points
@@ -89,6 +90,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
89 .eh_device_reset_handler = qla4xxx_eh_device_reset, 90 .eh_device_reset_handler = qla4xxx_eh_device_reset,
90 .eh_target_reset_handler = qla4xxx_eh_target_reset, 91 .eh_target_reset_handler = qla4xxx_eh_target_reset,
91 .eh_host_reset_handler = qla4xxx_eh_host_reset, 92 .eh_host_reset_handler = qla4xxx_eh_host_reset,
93 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
92 94
93 .slave_configure = qla4xxx_slave_configure, 95 .slave_configure = qla4xxx_slave_configure,
94 .slave_alloc = qla4xxx_slave_alloc, 96 .slave_alloc = qla4xxx_slave_alloc,
@@ -124,6 +126,21 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
124 126
125static struct scsi_transport_template *qla4xxx_scsi_transport; 127static struct scsi_transport_template *qla4xxx_scsi_transport;
126 128
129static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
130{
131 struct iscsi_cls_session *session;
132 struct ddb_entry *ddb_entry;
133
134 session = starget_to_session(scsi_target(sc->device));
135 ddb_entry = session->dd_data;
136
137 /* if we are not logged in then the LLD is going to clean up the cmd */
138 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
139 return BLK_EH_RESET_TIMER;
140 else
141 return BLK_EH_NOT_HANDLED;
142}
143
127static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) 144static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
128{ 145{
129 struct ddb_entry *ddb_entry = session->dd_data; 146 struct ddb_entry *ddb_entry = session->dd_data;
@@ -904,18 +921,17 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
904 /* Flush any pending ddb changed AENs */ 921 /* Flush any pending ddb changed AENs */
905 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 922 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
906 923
924 qla4xxx_flush_active_srbs(ha);
925
907 /* Reset the firmware. If successful, function 926 /* Reset the firmware. If successful, function
908 * returns with ISP interrupts enabled. 927 * returns with ISP interrupts enabled.
909 */ 928 */
910 if (status == QLA_SUCCESS) { 929 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n",
911 DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n", 930 ha->host_no, __func__));
912 ha->host_no, __func__)); 931 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
913 qla4xxx_flush_active_srbs(ha); 932 status = qla4xxx_soft_reset(ha);
914 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) 933 else
915 status = qla4xxx_soft_reset(ha); 934 status = QLA_ERROR;
916 else
917 status = QLA_ERROR;
918 }
919 935
920 /* Flush any pending ddb changed AENs */ 936 /* Flush any pending ddb changed AENs */
921 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 937 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
@@ -1527,11 +1543,9 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1527{ 1543{
1528 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 1544 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
1529 struct ddb_entry *ddb_entry = cmd->device->hostdata; 1545 struct ddb_entry *ddb_entry = cmd->device->hostdata;
1530 struct srb *sp;
1531 int ret = FAILED, stat; 1546 int ret = FAILED, stat;
1532 1547
1533 sp = (struct srb *) cmd->SCp.ptr; 1548 if (!ddb_entry)
1534 if (!sp || !ddb_entry)
1535 return ret; 1549 return ret;
1536 1550
1537 dev_info(&ha->pdev->dev, 1551 dev_info(&ha->pdev->dev,
@@ -1644,7 +1658,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
1644 ha = (struct scsi_qla_host *) cmd->device->host->hostdata; 1658 ha = (struct scsi_qla_host *) cmd->device->host->hostdata;
1645 1659
1646 dev_info(&ha->pdev->dev, 1660 dev_info(&ha->pdev->dev,
1647 "scsi(%ld:%d:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, 1661 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
1648 cmd->device->channel, cmd->device->id, cmd->device->lun); 1662 cmd->device->channel, cmd->device->id, cmd->device->lun);
1649 1663
1650 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) { 1664 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
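
The new .eh_timed_out hook defers block-layer timeouts while the iSCSI session is still logging in, rather than letting them escalate straight to the SCSI error handler. A kernel-style sketch of that decision, with a made-up demo_session in host private data standing in for the ddb_entry the driver actually reaches through the iSCSI class session:

    #include <linux/types.h>
    #include <linux/blkdev.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    struct demo_session {           /* hypothetical per-host state */
        bool logged_in;
    };

    static enum blk_eh_timer_return demo_eh_timed_out(struct scsi_cmnd *sc)
    {
        struct demo_session *sess = shost_priv(sc->device->host);

        /* Session still recovering: re-arm the block layer timer rather
         * than escalating to the SCSI error handler. */
        if (!sess->logged_in)
            return BLK_EH_RESET_TIMER;

        return BLK_EH_NOT_HANDLED;  /* let the normal abort/reset path run */
    }

The callback is wired in through .eh_timed_out in the scsi_host_template, exactly as the hunk above does for qla4xxx_eh_cmd_timed_out.
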
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index ab984cb89cea..6980cb279c81 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,5 +5,5 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.01.00-k8" 8#define QLA4XXX_DRIVER_VERSION "5.01.00-k9"
9 9
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 783e33c65eb7..b47240ca4b19 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -990,7 +990,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
990 struct iscsi_uevent *ev; 990 struct iscsi_uevent *ev;
991 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 991 int len = NLMSG_SPACE(sizeof(*ev) + data_size);
992 992
993 skb = alloc_skb(len, GFP_NOIO); 993 skb = alloc_skb(len, GFP_ATOMIC);
994 if (!skb) { 994 if (!skb) {
995 printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); 995 printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
996 return -ENOMEM; 996 return -ENOMEM;
@@ -1012,7 +1012,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
1012 1012
1013 memcpy((char *)ev + sizeof(*ev), data, data_size); 1013 memcpy((char *)ev + sizeof(*ev), data, data_size);
1014 1014
1015 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO); 1015 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC);
1016} 1016}
1017EXPORT_SYMBOL_GPL(iscsi_offload_mesg); 1017EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
1018 1018
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5616cd780ff3..b7b9fec67a98 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1840,6 +1840,18 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
1840 kfree(buffer); 1840 kfree(buffer);
1841} 1841}
1842 1842
1843static int sd_try_extended_inquiry(struct scsi_device *sdp)
1844{
1845 /*
1846 * Although VPD inquiries can go to SCSI-2 type devices,
1847 * some USB ones crash on receiving them, and the pages
1848 * we currently ask for are for SPC-3 and beyond
1849 */
1850 if (sdp->scsi_level > SCSI_SPC_2)
1851 return 1;
1852 return 0;
1853}
1854
1843/** 1855/**
1844 * sd_revalidate_disk - called the first time a new disk is seen, 1856 * sd_revalidate_disk - called the first time a new disk is seen,
1845 * performs disk spin up, read_capacity, etc. 1857 * performs disk spin up, read_capacity, etc.
@@ -1877,8 +1889,12 @@ static int sd_revalidate_disk(struct gendisk *disk)
1877 */ 1889 */
1878 if (sdkp->media_present) { 1890 if (sdkp->media_present) {
1879 sd_read_capacity(sdkp, buffer); 1891 sd_read_capacity(sdkp, buffer);
1880 sd_read_block_limits(sdkp); 1892
1881 sd_read_block_characteristics(sdkp); 1893 if (sd_try_extended_inquiry(sdp)) {
1894 sd_read_block_limits(sdkp);
1895 sd_read_block_characteristics(sdkp);
1896 }
1897
1882 sd_read_write_protect_flag(sdkp, buffer); 1898 sd_read_write_protect_flag(sdkp, buffer);
1883 sd_read_cache_type(sdkp, buffer); 1899 sd_read_cache_type(sdkp, buffer);
1884 sd_read_app_tag_own(sdkp, buffer); 1900 sd_read_app_tag_own(sdkp, buffer);
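
sd_try_extended_inquiry() gates the two newer VPD reads (Block Limits and Block Device Characteristics, VPD pages 0xB0/0xB1) on the device reporting better than SPC-2, since some USB bridges that claim SCSI-2 crash when sent an EVPD INQUIRY. A standalone illustration of the gate, using the usual numeric encoding of the SCSI level (SCSI_2 == 3, SCSI_SPC_2 == 5; verify against include/scsi/scsi.h):

    #include <stdio.h>

    enum { SCSI_2 = 3, SCSI_3 = 4, SCSI_SPC_2 = 5, SCSI_SPC_3 = 6 };

    static int try_extended_inquiry(int scsi_level)
    {
        /* Only SPC-3 and later devices get the VPD pages sd asks for;
         * older or USB-bridged SCSI-2 devices may choke on EVPD INQUIRY. */
        return scsi_level > SCSI_SPC_2;
    }

    int main(void)
    {
        printf("SCSI-2 device: %d\n", try_extended_inquiry(SCSI_2));     /* 0 */
        printf("SPC-3 device:  %d\n", try_extended_inquiry(SCSI_SPC_3)); /* 1 */
        return 0;
    }
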
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index ef7870f5ea08..857b3668b3ba 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -957,9 +957,14 @@ static int __devinit sticore_pci_init(struct pci_dev *pd,
957#ifdef CONFIG_PCI 957#ifdef CONFIG_PCI
958 unsigned long fb_base, rom_base; 958 unsigned long fb_base, rom_base;
959 unsigned int fb_len, rom_len; 959 unsigned int fb_len, rom_len;
960 int err;
960 struct sti_struct *sti; 961 struct sti_struct *sti;
961 962
962 pci_enable_device(pd); 963 err = pci_enable_device(pd);
964 if (err < 0) {
965 dev_err(&pd->dev, "Cannot enable PCI device\n");
966 return err;
967 }
963 968
964 fb_base = pci_resource_start(pd, 0); 969 fb_base = pci_resource_start(pd, 0);
965 fb_len = pci_resource_len(pd, 0); 970 fb_len = pci_resource_len(pd, 0);
@@ -1048,7 +1053,7 @@ static void __devinit sti_init_roms(void)
1048 1053
1049 /* Register drivers for native & PCI cards */ 1054 /* Register drivers for native & PCI cards */
1050 register_parisc_driver(&pa_sti_driver); 1055 register_parisc_driver(&pa_sti_driver);
1051 pci_register_driver(&pci_sti_driver); 1056 WARN_ON(pci_register_driver(&pci_sti_driver));
1052 1057
1053 /* if we didn't find the given default sti, take the first one */ 1058 /* if we didn't find the given default sti, take the first one */
1054 if (!default_sti) 1059 if (!default_sti)
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index fecb307d28e9..aec7cefdef21 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -18,6 +18,7 @@
18#include <linux/bitops.h> 18#include <linux/bitops.h>
19#include <linux/uaccess.h> 19#include <linux/uaccess.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/delay.h>
21 22
22#define DRV_NAME "WDOG COH 901 327" 23#define DRV_NAME "WDOG COH 901 327"
23 24
@@ -92,6 +93,8 @@ static struct clk *clk;
92static void coh901327_enable(u16 timeout) 93static void coh901327_enable(u16 timeout)
93{ 94{
94 u16 val; 95 u16 val;
96 unsigned long freq;
97 unsigned long delay_ns;
95 98
96 clk_enable(clk); 99 clk_enable(clk);
97 /* Restart timer if it is disabled */ 100 /* Restart timer if it is disabled */
@@ -102,6 +105,14 @@ static void coh901327_enable(u16 timeout)
102 /* Acknowledge any pending interrupt so it doesn't just fire off */ 105 /* Acknowledge any pending interrupt so it doesn't just fire off */
103 writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE, 106 writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
104 virtbase + U300_WDOG_IER); 107 virtbase + U300_WDOG_IER);
108 /*
109 * The interrupt is cleared in the 32 kHz clock domain.
110 * Wait 3 32 kHz cycles for it to take effect
111 */
112 freq = clk_get_rate(clk);
113 delay_ns = (1000000000 + freq - 1) / freq; /* Freq to ns and round up */
114 delay_ns = 3 * delay_ns; /* Wait 3 cycles */
115 ndelay(delay_ns);
105 /* Enable the watchdog interrupt */ 116 /* Enable the watchdog interrupt */
106 writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR); 117 writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR);
107 /* Activate the watchdog timer */ 118 /* Activate the watchdog timer */
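
The comment added here explains the ndelay(): the interrupt-acknowledge write is resynchronized into the watchdog's 32 kHz clock domain, so the driver waits three slow-clock cycles before re-enabling the interrupt. With a typical 32.768 kHz rate the rounded-up arithmetic works out as below (standalone check):

    #include <stdio.h>

    int main(void)
    {
        unsigned long freq = 32768;                                 /* typical 32 kHz watchdog clock */
        unsigned long cycle_ns = (1000000000UL + freq - 1) / freq;  /* round up: 30518 ns */
        unsigned long delay_ns = 3 * cycle_ns;                      /* three cycles: 91554 ns (~92 us) */

        printf("one cycle = %lu ns, wait = %lu ns\n", cycle_ns, delay_ns);
        return 0;
    }
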
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 92888aa90749..e85b1e4389e0 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,10 @@
1Version 1.60
2-------------
3Fix memory leak in reconnect. Fix oops in DFS mount error path.
4Set s_maxbytes to smaller (the max that vfs can handle) so that
5sendfile will now work over cifs mounts again. Add noforcegid
6and noforceuid mount parameters.
7
1Version 1.59 8Version 1.59
2------------ 9------------
3Client uses server inode numbers (which are persistent) rather than 10Client uses server inode numbers (which are persistent) rather than
diff --git a/fs/cifs/README b/fs/cifs/README
index ad92921dbde4..79c1a93400be 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -262,11 +262,11 @@ A partial list of the supported mount options follows:
262 mount. 262 mount.
263 domain Set the SMB/CIFS workgroup name prepended to the 263 domain Set the SMB/CIFS workgroup name prepended to the
264 username during CIFS session establishment 264 username during CIFS session establishment
265 forceuid Set the default uid for inodes based on the uid 265 forceuid Set the default uid for inodes to the uid
266 passed in. For mounts to servers 266 passed in on mount. For mounts to servers
267 which do support the CIFS Unix extensions, such as a 267 which do support the CIFS Unix extensions, such as a
268 properly configured Samba server, the server provides 268 properly configured Samba server, the server provides
269 the uid, gid and mode so this parameter should not be 269 the uid, gid and mode so this parameter should not be
270 specified unless the server and clients uid and gid 270 specified unless the server and clients uid and gid
271 numbering differ. If the server and client are in the 271 numbering differ. If the server and client are in the
272 same domain (e.g. running winbind or nss_ldap) and 272 same domain (e.g. running winbind or nss_ldap) and
@@ -278,11 +278,7 @@ A partial list of the supported mount options follows:
278 of existing files will be the uid (gid) of the person 278 of existing files will be the uid (gid) of the person
279 who executed the mount (root, except when mount.cifs 279 who executed the mount (root, except when mount.cifs
280 is configured setuid for user mounts) unless the "uid=" 280 is configured setuid for user mounts) unless the "uid="
281 (gid) mount option is specified. For the uid (gid) of newly 281 (gid) mount option is specified. Also note that permission
282 created files and directories, ie files created since
283 the last mount of the server share, the expected uid
284 (gid) is cached as long as the inode remains in
285 memory on the client. Also note that permission
286 checks (authorization checks) on accesses to a file occur 282 checks (authorization checks) on accesses to a file occur
287 at the server, but there are cases in which an administrator 283 at the server, but there are cases in which an administrator
288 may want to restrict at the client as well. For those 284 may want to restrict at the client as well. For those
@@ -290,12 +286,15 @@ A partial list of the supported mount options follows:
290 (such as Windows), permissions can also be checked at the 286 (such as Windows), permissions can also be checked at the
291 client, and a crude form of client side permission checking 287 client, and a crude form of client side permission checking
292 can be enabled by specifying file_mode and dir_mode on 288 can be enabled by specifying file_mode and dir_mode on
293 the client. Note that the mount.cifs helper must be 289 the client. (default)
294 at version 1.10 or higher to support specifying the uid 290 forcegid (similar to above but for the groupid instead of uid) (default)
295 (or gid) in non-numeric form. 291 noforceuid Fill in file owner information (uid) by requesting it from
296 forcegid (similar to above but for the groupid instead of uid) 292 the server if possible. With this option, the value given in
293 the uid= option (on mount) will only be used if the server
294 can not support returning uids on inodes.
295 noforcegid (similar to above but for the group owner, gid, instead of uid)
297 uid Set the default uid for inodes, and indicate to the 296 uid Set the default uid for inodes, and indicate to the
298 cifs kernel driver which local user mounted . If the server 297 cifs kernel driver which local user mounted. If the server
299 supports the unix extensions the default uid is 298 supports the unix extensions the default uid is
300 not used to fill in the owner fields of inodes (files) 299 not used to fill in the owner fields of inodes (files)
301 unless the "forceuid" parameter is specified. 300 unless the "forceuid" parameter is specified.
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 3bb11be8b6a8..606912d8f2a8 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -55,7 +55,7 @@ void cifs_dfs_release_automount_timer(void)
55 * i.e. strips from UNC trailing path that is not part of share 55 * i.e. strips from UNC trailing path that is not part of share
 56 * name and fixup missing '\' in the beginning of DFS node referral 56 * name and fixup missing '\' in the beginning of DFS node referral
 57 * if necessary. 57 * if necessary.
58 * Returns pointer to share name on success or NULL on error. 58 * Returns pointer to share name on success or ERR_PTR on error.
59 * Caller is responsible for freeing returned string. 59 * Caller is responsible for freeing returned string.
60 */ 60 */
61static char *cifs_get_share_name(const char *node_name) 61static char *cifs_get_share_name(const char *node_name)
@@ -68,7 +68,7 @@ static char *cifs_get_share_name(const char *node_name)
68 UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */, 68 UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */,
69 GFP_KERNEL); 69 GFP_KERNEL);
70 if (!UNC) 70 if (!UNC)
71 return NULL; 71 return ERR_PTR(-ENOMEM);
72 72
73 /* get share name and server name */ 73 /* get share name and server name */
74 if (node_name[1] != '\\') { 74 if (node_name[1] != '\\') {
@@ -87,7 +87,7 @@ static char *cifs_get_share_name(const char *node_name)
87 cERROR(1, ("%s: no server name end in node name: %s", 87 cERROR(1, ("%s: no server name end in node name: %s",
88 __func__, node_name)); 88 __func__, node_name));
89 kfree(UNC); 89 kfree(UNC);
90 return NULL; 90 return ERR_PTR(-EINVAL);
91 } 91 }
92 92
93 /* find sharename end */ 93 /* find sharename end */
@@ -133,6 +133,12 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
133 return ERR_PTR(-EINVAL); 133 return ERR_PTR(-EINVAL);
134 134
135 *devname = cifs_get_share_name(ref->node_name); 135 *devname = cifs_get_share_name(ref->node_name);
136 if (IS_ERR(*devname)) {
137 rc = PTR_ERR(*devname);
138 *devname = NULL;
139 goto compose_mount_options_err;
140 }
141
136 rc = dns_resolve_server_name_to_ip(*devname, &srvIP); 142 rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
137 if (rc != 0) { 143 if (rc != 0) {
138 cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d", 144 cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d",
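
cifs_get_share_name() now distinguishes its failure modes by returning ERR_PTR(-ENOMEM) or ERR_PTR(-EINVAL) instead of a bare NULL, and the caller propagates the code with PTR_ERR(). A kernel-style sketch of that convention (demo_get_name() and its caller are made up; ERR_PTR/IS_ERR/PTR_ERR come from <linux/err.h>):

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static char *demo_get_name(const char *src)
    {
        char *buf;

        if (src[0] != '\\')
            return ERR_PTR(-EINVAL);        /* malformed input */

        buf = kstrdup(src, GFP_KERNEL);
        if (!buf)
            return ERR_PTR(-ENOMEM);        /* allocation failure */

        return buf;
    }

    static int demo_caller(const char *src)
    {
        char *name = demo_get_name(src);

        if (IS_ERR(name))
            return PTR_ERR(name);           /* propagate the real error code */

        /* ... use name ... */
        kfree(name);
        return 0;
    }
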
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 60e3c4253de0..714a542cbafc 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -44,7 +44,7 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes,
44 int maxwords = maxbytes / 2; 44 int maxwords = maxbytes / 2;
45 char tmp[NLS_MAX_CHARSET_SIZE]; 45 char tmp[NLS_MAX_CHARSET_SIZE];
46 46
47 for (i = 0; from[i] && i < maxwords; i++) { 47 for (i = 0; i < maxwords && from[i]; i++) {
48 charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp, 48 charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp,
49 NLS_MAX_CHARSET_SIZE); 49 NLS_MAX_CHARSET_SIZE);
50 if (charlen > 0) 50 if (charlen > 0)
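
The one-line cifs_ucs2_bytes() fix swaps the order of the loop conditions so the bound is checked before from[i] is dereferenced; otherwise an unterminated maxwords-long buffer is read one element past its end. A standalone illustration:

    #include <stdio.h>

    static int count_words(const unsigned short *from, int maxwords)
    {
        int i;

        /* wrong:  for (i = 0; from[i] && i < maxwords; i++)
         * reads one element past the end when no NUL terminator is present */
        for (i = 0; i < maxwords && from[i]; i++)
            ;
        return i;
    }

    int main(void)
    {
        unsigned short buf[3] = { 'a', 'b', 'c' };      /* no terminating 0 */

        printf("%d\n", count_words(buf, 3));            /* prints 3, no overread */
        return 0;
    }
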
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 44f30504b82d..84b75253b05a 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -376,10 +376,14 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
376 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); 376 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
377 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) 377 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
378 seq_printf(s, ",forceuid"); 378 seq_printf(s, ",forceuid");
379 else
380 seq_printf(s, ",noforceuid");
379 381
380 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid); 382 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
381 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) 383 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
382 seq_printf(s, ",forcegid"); 384 seq_printf(s, ",forcegid");
385 else
386 seq_printf(s, ",noforcegid");
383 387
384 cifs_show_address(s, tcon->ses->server); 388 cifs_show_address(s, tcon->ses->server);
385 389
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index fc44d316d0bb..1f3345d7fa79 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -803,6 +803,10 @@ cifs_parse_mount_options(char *options, const char *devname,
803 char *data; 803 char *data;
804 unsigned int temp_len, i, j; 804 unsigned int temp_len, i, j;
805 char separator[2]; 805 char separator[2];
806 short int override_uid = -1;
807 short int override_gid = -1;
808 bool uid_specified = false;
809 bool gid_specified = false;
806 810
807 separator[0] = ','; 811 separator[0] = ',';
808 separator[1] = 0; 812 separator[1] = 0;
@@ -1093,18 +1097,20 @@ cifs_parse_mount_options(char *options, const char *devname,
1093 "too long.\n"); 1097 "too long.\n");
1094 return 1; 1098 return 1;
1095 } 1099 }
1096 } else if (strnicmp(data, "uid", 3) == 0) { 1100 } else if (!strnicmp(data, "uid", 3) && value && *value) {
1097 if (value && *value) 1101 vol->linux_uid = simple_strtoul(value, &value, 0);
1098 vol->linux_uid = 1102 uid_specified = true;
1099 simple_strtoul(value, &value, 0); 1103 } else if (!strnicmp(data, "forceuid", 8)) {
1100 } else if (strnicmp(data, "forceuid", 8) == 0) { 1104 override_uid = 1;
1101 vol->override_uid = 1; 1105 } else if (!strnicmp(data, "noforceuid", 10)) {
1102 } else if (strnicmp(data, "gid", 3) == 0) { 1106 override_uid = 0;
1103 if (value && *value) 1107 } else if (!strnicmp(data, "gid", 3) && value && *value) {
1104 vol->linux_gid = 1108 vol->linux_gid = simple_strtoul(value, &value, 0);
1105 simple_strtoul(value, &value, 0); 1109 gid_specified = true;
1106 } else if (strnicmp(data, "forcegid", 8) == 0) { 1110 } else if (!strnicmp(data, "forcegid", 8)) {
1107 vol->override_gid = 1; 1111 override_gid = 1;
1112 } else if (!strnicmp(data, "noforcegid", 10)) {
1113 override_gid = 0;
1108 } else if (strnicmp(data, "file_mode", 4) == 0) { 1114 } else if (strnicmp(data, "file_mode", 4) == 0) {
1109 if (value && *value) { 1115 if (value && *value) {
1110 vol->file_mode = 1116 vol->file_mode =
@@ -1355,6 +1361,18 @@ cifs_parse_mount_options(char *options, const char *devname,
1355 if (vol->UNCip == NULL) 1361 if (vol->UNCip == NULL)
1356 vol->UNCip = &vol->UNC[2]; 1362 vol->UNCip = &vol->UNC[2];
1357 1363
1364 if (uid_specified)
1365 vol->override_uid = override_uid;
1366 else if (override_uid == 1)
1367 printk(KERN_NOTICE "CIFS: ignoring forceuid mount option "
1368 "specified with no uid= option.\n");
1369
1370 if (gid_specified)
1371 vol->override_gid = override_gid;
1372 else if (override_gid == 1)
1373 printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
1374 "specified with no gid= option.\n");
1375
1358 return 0; 1376 return 0;
1359} 1377}
1360 1378
@@ -2544,11 +2562,20 @@ remote_path_check:
2544 2562
2545 if (mount_data != mount_data_global) 2563 if (mount_data != mount_data_global)
2546 kfree(mount_data); 2564 kfree(mount_data);
2565
2547 mount_data = cifs_compose_mount_options( 2566 mount_data = cifs_compose_mount_options(
2548 cifs_sb->mountdata, full_path + 1, 2567 cifs_sb->mountdata, full_path + 1,
2549 referrals, &fake_devname); 2568 referrals, &fake_devname);
2550 kfree(fake_devname); 2569
2551 free_dfs_info_array(referrals, num_referrals); 2570 free_dfs_info_array(referrals, num_referrals);
2571 kfree(fake_devname);
2572 kfree(full_path);
2573
2574 if (IS_ERR(mount_data)) {
2575 rc = PTR_ERR(mount_data);
2576 mount_data = NULL;
2577 goto mount_fail_check;
2578 }
2552 2579
2553 if (tcon) 2580 if (tcon)
2554 cifs_put_tcon(tcon); 2581 cifs_put_tcon(tcon);
@@ -2556,8 +2583,6 @@ remote_path_check:
2556 cifs_put_smb_ses(pSesInfo); 2583 cifs_put_smb_ses(pSesInfo);
2557 2584
2558 cleanup_volume_info(&volume_info); 2585 cleanup_volume_info(&volume_info);
2559 FreeXid(xid);
2560 kfree(full_path);
2561 referral_walks_count++; 2586 referral_walks_count++;
2562 goto try_mount_again; 2587 goto try_mount_again;
2563 } 2588 }
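
The new noforceuid/noforcegid handling records the modifier while parsing and only applies it after all options are seen, once it is known whether a uid=/gid= value was actually supplied; a forceuid without uid= just logs a notice. A standalone sketch of that deferred validation (option syntax simplified, names made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdbool.h>

    struct vol {
        unsigned int linux_uid;
        bool override_uid;
    };

    static void parse_opts(struct vol *vol, char **opts, int n)
    {
        int override_uid = -1;          /* -1 = modifier not mentioned */
        bool uid_specified = false;
        int i;

        for (i = 0; i < n; i++) {
            if (!strncmp(opts[i], "uid=", 4)) {
                vol->linux_uid = (unsigned int)strtoul(opts[i] + 4, NULL, 0);
                uid_specified = true;
            } else if (!strcmp(opts[i], "forceuid")) {
                override_uid = 1;
            } else if (!strcmp(opts[i], "noforceuid")) {
                override_uid = 0;
            }
        }

        if (uid_specified) {
            if (override_uid != -1)
                vol->override_uid = override_uid;
        } else if (override_uid == 1) {
            printf("ignoring forceuid specified with no uid= option\n");
        }
    }
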
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 3d3ddb3f5177..2dfd47714ae5 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -412,8 +412,10 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
412 return 0; /* Do not request flush for shadow page cache */ 412 return 0; /* Do not request flush for shadow page cache */
413 if (!sb) { 413 if (!sb) {
414 writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs); 414 writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs);
415 if (!writer) 415 if (!writer) {
416 nilfs_put_writer(NILFS_MDT(inode)->mi_nilfs);
416 return -EROFS; 417 return -EROFS;
418 }
417 sb = writer->s_super; 419 sb = writer->s_super;
418 } 420 }
419 421
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 8b5e4778cf28..51ff3d0a4ee2 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1859,12 +1859,26 @@ static void nilfs_end_page_io(struct page *page, int err)
1859 if (!page) 1859 if (!page)
1860 return; 1860 return;
1861 1861
1862 if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) 1862 if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1863 /* 1863 /*
1864 * For b-tree node pages, this function may be called twice 1864 * For b-tree node pages, this function may be called twice
1865 * or more because they might be split in a segment. 1865 * or more because they might be split in a segment.
1866 */ 1866 */
1867 if (PageDirty(page)) {
1868 /*
1869 * For pages holding split b-tree node buffers, dirty
1870 * flag on the buffers may be cleared discretely.
1871 * In that case, the page is once redirtied for
1872 * remaining buffers, and it must be cancelled if
1873 * all the buffers get cleaned later.
1874 */
1875 lock_page(page);
1876 if (nilfs_page_buffers_clean(page))
1877 __nilfs_clear_page_dirty(page);
1878 unlock_page(page);
1879 }
1867 return; 1880 return;
1881 }
1868 1882
1869 __nilfs_end_page_io(page, err); 1883 __nilfs_end_page_io(page, err);
1870} 1884}
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 3e798593b17b..ab0b85cf21f3 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -242,6 +242,10 @@ acpi_os_derive_pci_id(acpi_handle rhandle,
242acpi_status acpi_os_validate_interface(char *interface); 242acpi_status acpi_os_validate_interface(char *interface);
243acpi_status acpi_osi_invalidate(char* interface); 243acpi_status acpi_osi_invalidate(char* interface);
244 244
245acpi_status
246acpi_os_validate_address(u8 space_id, acpi_physical_address address,
247 acpi_size length, char *name);
248
245u64 acpi_os_get_timer(void); 249u64 acpi_os_get_timer(void);
246 250
247acpi_status acpi_os_signal(u32 function, void *info); 251acpi_status acpi_os_signal(u32 function, void *info);
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 7174818c2c13..9d4c00491547 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -257,9 +257,12 @@
257 {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \ 257 {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
258 {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 258 {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
259 {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 259 {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
260 {0x1002, 0x94A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
260 {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ 261 {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
261 {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ 262 {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
263 {0x1002, 0x94B4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
262 {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \ 264 {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
265 {0x1002, 0x94B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
263 {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ 266 {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
264 {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ 267 {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
265 {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ 268 {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
@@ -288,6 +291,7 @@
288 {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ 291 {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
289 {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ 292 {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
290 {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 293 {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
294 {0x1002, 0x9495, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
291 {0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ 295 {0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
292 {0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ 296 {0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
293 {0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ 297 {0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
@@ -325,6 +329,7 @@
325 {0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 329 {0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
326 {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 330 {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
327 {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 331 {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
332 {0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
328 {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ 333 {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
329 {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 334 {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
330 {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 335 {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index acef2a770b6b..ad27c7da8798 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -82,7 +82,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
82 82
83#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) 83#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
84#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) 84#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
85#define IN_DEV_RPFILTER(in_dev) IN_DEV_ANDCONF((in_dev), RP_FILTER) 85#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
86#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ 86#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
87 ACCEPT_SOURCE_ROUTE) 87 ACCEPT_SOURCE_ROUTE)
88#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) 88#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index bd15d7a5f5ce..e604e6ef72dd 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -181,8 +181,9 @@ struct perf_counter_attr {
181 freq : 1, /* use freq, not period */ 181 freq : 1, /* use freq, not period */
182 inherit_stat : 1, /* per task counts */ 182 inherit_stat : 1, /* per task counts */
183 enable_on_exec : 1, /* next exec enables */ 183 enable_on_exec : 1, /* next exec enables */
184 task : 1, /* trace fork/exit */
184 185
185 __reserved_1 : 51; 186 __reserved_1 : 50;
186 187
187 __u32 wakeup_events; /* wakeup every n events */ 188 __u32 wakeup_events; /* wakeup every n events */
188 __u32 __reserved_2; 189 __u32 __reserved_2;
@@ -311,6 +312,15 @@ enum perf_event_type {
311 /* 312 /*
312 * struct { 313 * struct {
313 * struct perf_event_header header; 314 * struct perf_event_header header;
315 * u32 pid, ppid;
316 * u32 tid, ptid;
317 * };
318 */
319 PERF_EVENT_EXIT = 4,
320
321 /*
322 * struct {
323 * struct perf_event_header header;
314 * u64 time; 324 * u64 time;
315 * u64 id; 325 * u64 id;
316 * u64 stream_id; 326 * u64 stream_id;
@@ -323,6 +333,7 @@ enum perf_event_type {
323 * struct { 333 * struct {
324 * struct perf_event_header header; 334 * struct perf_event_header header;
325 * u32 pid, ppid; 335 * u32 pid, ppid;
336 * u32 tid, ptid;
326 * }; 337 * };
327 */ 338 */
328 PERF_EVENT_FORK = 7, 339 PERF_EVENT_FORK = 7,
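
The comment blocks above document the on-mmap-buffer layout of the new PERF_EVENT_EXIT record and the extended PERF_EVENT_FORK record: a perf_event_header followed by pid/ppid and the newly added tid/ptid. Written out as structs a userspace reader might use (sketch only; field widths follow the header definition):

    #include <stdint.h>

    struct perf_event_header_sketch {
        uint32_t type;     /* PERF_EVENT_FORK or PERF_EVENT_EXIT */
        uint16_t misc;
        uint16_t size;
    };

    struct perf_task_record_sketch {
        struct perf_event_header_sketch header;
        uint32_t pid, ppid;   /* process and parent process id */
        uint32_t tid, ptid;   /* thread and parent thread id */
    };
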
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 80072611d26a..c274993234e3 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -355,7 +355,17 @@ struct rfcomm_dev_list_req {
355}; 355};
356 356
357int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); 357int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
358
359#ifdef CONFIG_BT_RFCOMM_TTY
358int rfcomm_init_ttys(void); 360int rfcomm_init_ttys(void);
359void rfcomm_cleanup_ttys(void); 361void rfcomm_cleanup_ttys(void);
360 362#else
363static inline int rfcomm_init_ttys(void)
364{
365 return 0;
366}
367static inline void rfcomm_cleanup_ttys(void)
368{
369}
370#endif
361#endif /* __RFCOMM_H */ 371#endif /* __RFCOMM_H */
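
The rfcomm.h hunk uses the standard compiled-out-feature idiom: real prototypes under the config option, empty static inlines otherwise, so callers never need their own #ifdefs and both variants stay type-checked. In generic form (CONFIG_DEMO_FEATURE is a made-up option):

    #ifdef CONFIG_DEMO_FEATURE
    int demo_feature_init(void);
    void demo_feature_cleanup(void);
    #else
    static inline int demo_feature_init(void)
    {
        return 0;   /* pretend success so callers need no #ifdefs */
    }
    static inline void demo_feature_cleanup(void)
    {
    }
    #endif
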
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1a21895b732b..d1892d66701a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -979,6 +979,10 @@ struct cfg80211_ops {
979 * channels at a later time. This can be used for devices which do not 979 * channels at a later time. This can be used for devices which do not
 980 * have calibration information guaranteed for frequencies or settings 980 * have calibration information guaranteed for frequencies or settings
981 * outside of its regulatory domain. 981 * outside of its regulatory domain.
982 * @disable_beacon_hints: enable this if your driver needs to ensure that
983 * passive scan flags and beaconing flags may not be lifted by cfg80211
984 * due to regulatory beacon hints. For more information on beacon
 985 * hints read the documentation for regulatory_hint_found_beacon()
982 * @reg_notifier: the driver's regulatory notification callback 986 * @reg_notifier: the driver's regulatory notification callback
983 * @regd: the driver's regulatory domain, if one was requested via 987 * @regd: the driver's regulatory domain, if one was requested via
984 * the regulatory_hint() API. This can be used by the driver 988 * the regulatory_hint() API. This can be used by the driver
@@ -1004,6 +1008,7 @@ struct wiphy {
1004 1008
1005 bool custom_regulatory; 1009 bool custom_regulatory;
1006 bool strict_regulatory; 1010 bool strict_regulatory;
1011 bool disable_beacon_hints;
1007 1012
1008 enum cfg80211_signal_type signal_type; 1013 enum cfg80211_signal_type signal_type;
1009 1014
diff --git a/init/Kconfig b/init/Kconfig
index cb2c09270226..3f7e60995c80 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -940,6 +940,7 @@ menu "Performance Counters"
940 940
941config PERF_COUNTERS 941config PERF_COUNTERS
942 bool "Kernel Performance Counters" 942 bool "Kernel Performance Counters"
943 default y if PROFILING
943 depends on HAVE_PERF_COUNTERS 944 depends on HAVE_PERF_COUNTERS
944 select ANON_INODES 945 select ANON_INODES
945 help 946 help
@@ -961,9 +962,17 @@ config PERF_COUNTERS
961 Say Y if unsure. 962 Say Y if unsure.
962 963
963config EVENT_PROFILE 964config EVENT_PROFILE
964 bool "Tracepoint profile sources" 965 bool "Tracepoint profiling sources"
965 depends on PERF_COUNTERS && EVENT_TRACING 966 depends on PERF_COUNTERS && EVENT_TRACING
966 default y 967 default y
968 help
969 Allow the use of tracepoints as software performance counters.
970
971 When this is enabled, you can create perf counters based on
972 tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
973 found in debugfs://tracing/events/*/*/id. (The -e/--events
974 option to the perf tool can parse and interpret symbolic
975 tracepoints, in the subsystem:tracepoint_name format.)
967 976
968endmenu 977endmenu
969 978
diff --git a/kernel/fork.c b/kernel/fork.c
index 29b532e718f7..466531eb92cc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1269,6 +1269,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1269 write_unlock_irq(&tasklist_lock); 1269 write_unlock_irq(&tasklist_lock);
1270 proc_fork_connector(p); 1270 proc_fork_connector(p);
1271 cgroup_post_fork(p); 1271 cgroup_post_fork(p);
1272 perf_counter_fork(p);
1272 return p; 1273 return p;
1273 1274
1274bad_fork_free_pid: 1275bad_fork_free_pid:
@@ -1410,9 +1411,6 @@ long do_fork(unsigned long clone_flags,
1410 init_completion(&vfork); 1411 init_completion(&vfork);
1411 } 1412 }
1412 1413
1413 if (!(clone_flags & CLONE_THREAD))
1414 perf_counter_fork(p);
1415
1416 audit_finish_fork(p); 1414 audit_finish_fork(p);
1417 tracehook_report_clone(regs, clone_flags, nr, p); 1415 tracehook_report_clone(regs, clone_flags, nr, p);
1418 1416
diff --git a/kernel/panic.c b/kernel/panic.c
index 984b3ecbd72c..512ab73b0ca3 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -301,6 +301,7 @@ int oops_may_print(void)
301 */ 301 */
302void oops_enter(void) 302void oops_enter(void)
303{ 303{
304 tracing_off();
304 /* can't trust the integrity of the kernel anymore: */ 305 /* can't trust the integrity of the kernel anymore: */
305 debug_locks_off(); 306 debug_locks_off();
306 do_oops_enter_exit(); 307 do_oops_enter_exit();
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 950931041954..199ed4771315 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1;
42static atomic_t nr_counters __read_mostly; 42static atomic_t nr_counters __read_mostly;
43static atomic_t nr_mmap_counters __read_mostly; 43static atomic_t nr_mmap_counters __read_mostly;
44static atomic_t nr_comm_counters __read_mostly; 44static atomic_t nr_comm_counters __read_mostly;
45static atomic_t nr_task_counters __read_mostly;
45 46
46/* 47/*
47 * perf counter paranoia level: 48 * perf counter paranoia level:
@@ -1654,6 +1655,8 @@ static void free_counter(struct perf_counter *counter)
1654 atomic_dec(&nr_mmap_counters); 1655 atomic_dec(&nr_mmap_counters);
1655 if (counter->attr.comm) 1656 if (counter->attr.comm)
1656 atomic_dec(&nr_comm_counters); 1657 atomic_dec(&nr_comm_counters);
1658 if (counter->attr.task)
1659 atomic_dec(&nr_task_counters);
1657 } 1660 }
1658 1661
1659 if (counter->destroy) 1662 if (counter->destroy)
@@ -1688,6 +1691,18 @@ static int perf_release(struct inode *inode, struct file *file)
1688 return 0; 1691 return 0;
1689} 1692}
1690 1693
1694static u64 perf_counter_read_tree(struct perf_counter *counter)
1695{
1696 struct perf_counter *child;
1697 u64 total = 0;
1698
1699 total += perf_counter_read(counter);
1700 list_for_each_entry(child, &counter->child_list, child_list)
1701 total += perf_counter_read(child);
1702
1703 return total;
1704}
1705
1691/* 1706/*
1692 * Read the performance counter - simple non blocking version for now 1707 * Read the performance counter - simple non blocking version for now
1693 */ 1708 */
@@ -1707,7 +1722,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1707 1722
1708 WARN_ON_ONCE(counter->ctx->parent_ctx); 1723 WARN_ON_ONCE(counter->ctx->parent_ctx);
1709 mutex_lock(&counter->child_mutex); 1724 mutex_lock(&counter->child_mutex);
1710 values[0] = perf_counter_read(counter); 1725 values[0] = perf_counter_read_tree(counter);
1711 n = 1; 1726 n = 1;
1712 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1727 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1713 values[n++] = counter->total_time_enabled + 1728 values[n++] = counter->total_time_enabled +
@@ -2819,10 +2834,12 @@ perf_counter_read_event(struct perf_counter *counter,
2819} 2834}
2820 2835
2821/* 2836/*
2822 * fork tracking 2837 * task tracking -- fork/exit
2838 *
2839 * enabled by: attr.comm | attr.mmap | attr.task
2823 */ 2840 */
2824 2841
2825struct perf_fork_event { 2842struct perf_task_event {
2826 struct task_struct *task; 2843 struct task_struct *task;
2827 2844
2828 struct { 2845 struct {
@@ -2830,37 +2847,42 @@ struct perf_fork_event {
2830 2847
2831 u32 pid; 2848 u32 pid;
2832 u32 ppid; 2849 u32 ppid;
2850 u32 tid;
2851 u32 ptid;
2833 } event; 2852 } event;
2834}; 2853};
2835 2854
2836static void perf_counter_fork_output(struct perf_counter *counter, 2855static void perf_counter_task_output(struct perf_counter *counter,
2837 struct perf_fork_event *fork_event) 2856 struct perf_task_event *task_event)
2838{ 2857{
2839 struct perf_output_handle handle; 2858 struct perf_output_handle handle;
2840 int size = fork_event->event.header.size; 2859 int size = task_event->event.header.size;
2841 struct task_struct *task = fork_event->task; 2860 struct task_struct *task = task_event->task;
2842 int ret = perf_output_begin(&handle, counter, size, 0, 0); 2861 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2843 2862
2844 if (ret) 2863 if (ret)
2845 return; 2864 return;
2846 2865
2847 fork_event->event.pid = perf_counter_pid(counter, task); 2866 task_event->event.pid = perf_counter_pid(counter, task);
2848 fork_event->event.ppid = perf_counter_pid(counter, task->real_parent); 2867 task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2849 2868
2850 perf_output_put(&handle, fork_event->event); 2869 task_event->event.tid = perf_counter_tid(counter, task);
2870 task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
2871
2872 perf_output_put(&handle, task_event->event);
2851 perf_output_end(&handle); 2873 perf_output_end(&handle);
2852} 2874}
2853 2875
2854static int perf_counter_fork_match(struct perf_counter *counter) 2876static int perf_counter_task_match(struct perf_counter *counter)
2855{ 2877{
2856 if (counter->attr.comm || counter->attr.mmap) 2878 if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
2857 return 1; 2879 return 1;
2858 2880
2859 return 0; 2881 return 0;
2860} 2882}
2861 2883
2862static void perf_counter_fork_ctx(struct perf_counter_context *ctx, 2884static void perf_counter_task_ctx(struct perf_counter_context *ctx,
2863 struct perf_fork_event *fork_event) 2885 struct perf_task_event *task_event)
2864{ 2886{
2865 struct perf_counter *counter; 2887 struct perf_counter *counter;
2866 2888
@@ -2869,19 +2891,19 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2869 2891
2870 rcu_read_lock(); 2892 rcu_read_lock();
2871 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 2893 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2872 if (perf_counter_fork_match(counter)) 2894 if (perf_counter_task_match(counter))
2873 perf_counter_fork_output(counter, fork_event); 2895 perf_counter_task_output(counter, task_event);
2874 } 2896 }
2875 rcu_read_unlock(); 2897 rcu_read_unlock();
2876} 2898}
2877 2899
2878static void perf_counter_fork_event(struct perf_fork_event *fork_event) 2900static void perf_counter_task_event(struct perf_task_event *task_event)
2879{ 2901{
2880 struct perf_cpu_context *cpuctx; 2902 struct perf_cpu_context *cpuctx;
2881 struct perf_counter_context *ctx; 2903 struct perf_counter_context *ctx;
2882 2904
2883 cpuctx = &get_cpu_var(perf_cpu_context); 2905 cpuctx = &get_cpu_var(perf_cpu_context);
2884 perf_counter_fork_ctx(&cpuctx->ctx, fork_event); 2906 perf_counter_task_ctx(&cpuctx->ctx, task_event);
2885 put_cpu_var(perf_cpu_context); 2907 put_cpu_var(perf_cpu_context);
2886 2908
2887 rcu_read_lock(); 2909 rcu_read_lock();
@@ -2891,32 +2913,40 @@ static void perf_counter_fork_event(struct perf_fork_event *fork_event)
2891 */ 2913 */
2892 ctx = rcu_dereference(current->perf_counter_ctxp); 2914 ctx = rcu_dereference(current->perf_counter_ctxp);
2893 if (ctx) 2915 if (ctx)
2894 perf_counter_fork_ctx(ctx, fork_event); 2916 perf_counter_task_ctx(ctx, task_event);
2895 rcu_read_unlock(); 2917 rcu_read_unlock();
2896} 2918}
2897 2919
2898void perf_counter_fork(struct task_struct *task) 2920static void perf_counter_task(struct task_struct *task, int new)
2899{ 2921{
2900 struct perf_fork_event fork_event; 2922 struct perf_task_event task_event;
2901 2923
2902 if (!atomic_read(&nr_comm_counters) && 2924 if (!atomic_read(&nr_comm_counters) &&
2903 !atomic_read(&nr_mmap_counters)) 2925 !atomic_read(&nr_mmap_counters) &&
2926 !atomic_read(&nr_task_counters))
2904 return; 2927 return;
2905 2928
2906 fork_event = (struct perf_fork_event){ 2929 task_event = (struct perf_task_event){
2907 .task = task, 2930 .task = task,
2908 .event = { 2931 .event = {
2909 .header = { 2932 .header = {
2910 .type = PERF_EVENT_FORK, 2933 .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
2911 .misc = 0, 2934 .misc = 0,
2912 .size = sizeof(fork_event.event), 2935 .size = sizeof(task_event.event),
2913 }, 2936 },
2914 /* .pid */ 2937 /* .pid */
2915 /* .ppid */ 2938 /* .ppid */
2939 /* .tid */
2940 /* .ptid */
2916 }, 2941 },
2917 }; 2942 };
2918 2943
2919 perf_counter_fork_event(&fork_event); 2944 perf_counter_task_event(&task_event);
2945}
2946
2947void perf_counter_fork(struct task_struct *task)
2948{
2949 perf_counter_task(task, 1);
2920} 2950}
2921 2951
2922/* 2952/*
@@ -3875,6 +3905,8 @@ done:
3875 atomic_inc(&nr_mmap_counters); 3905 atomic_inc(&nr_mmap_counters);
3876 if (counter->attr.comm) 3906 if (counter->attr.comm)
3877 atomic_inc(&nr_comm_counters); 3907 atomic_inc(&nr_comm_counters);
3908 if (counter->attr.task)
3909 atomic_inc(&nr_task_counters);
3878 } 3910 }
3879 3911
3880 return counter; 3912 return counter;
@@ -4236,8 +4268,10 @@ void perf_counter_exit_task(struct task_struct *child)
4236 struct perf_counter_context *child_ctx; 4268 struct perf_counter_context *child_ctx;
4237 unsigned long flags; 4269 unsigned long flags;
4238 4270
4239 if (likely(!child->perf_counter_ctxp)) 4271 if (likely(!child->perf_counter_ctxp)) {
4272 perf_counter_task(child, 0);
4240 return; 4273 return;
4274 }
4241 4275
4242 local_irq_save(flags); 4276 local_irq_save(flags);
4243 /* 4277 /*
@@ -4255,15 +4289,22 @@ void perf_counter_exit_task(struct task_struct *child)
          * incremented the context's refcount before we do put_ctx below.
          */
         spin_lock(&child_ctx->lock);
-        child->perf_counter_ctxp = NULL;
         /*
          * If this context is a clone; unclone it so it can't get
          * swapped to another process while we're removing all
          * the counters from it.
          */
         unclone_ctx(child_ctx);
-        spin_unlock(&child_ctx->lock);
-        local_irq_restore(flags);
+        spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+        /*
+         * Report the task dead after unscheduling the counters so that we
+         * won't get any samples after PERF_EVENT_EXIT. We can however still
+         * get a few PERF_EVENT_READ events.
+         */
+        perf_counter_task(child, 0);
+
+        child->perf_counter_ctxp = NULL;
 
         /*
          * We can recurse on the same lock type through:
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 052ec4d195c7..d089d052c4a9 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -202,6 +202,12 @@ static int no_timer_create(struct k_itimer *new_timer)
         return -EOPNOTSUPP;
 }
 
+static int no_nsleep(const clockid_t which_clock, int flags,
+                     struct timespec *tsave, struct timespec __user *rmtp)
+{
+        return -EOPNOTSUPP;
+}
+
 /*
  * Return nonzero if we know a priori this clockid_t value is bogus.
  */
@@ -254,6 +260,7 @@ static __init int init_posix_timers(void)
                 .clock_get = posix_get_monotonic_raw,
                 .clock_set = do_posix_clock_nosettime,
                 .timer_create = no_timer_create,
+                .nsleep = no_nsleep,
         };
 
         register_posix_clock(CLOCK_REALTIME, &clock_realtime);
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index e6c251790dde..d014efbf947a 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
                 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
                         continue;
 
-                if (lowest_mask)
+                if (lowest_mask) {
                         cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+
+                        /*
+                         * We have to ensure that we have at least one bit
+                         * still set in the array, since the map could have
+                         * been concurrently emptied between the first and
+                         * second reads of vec->mask. If we hit this
+                         * condition, simply act as though we never hit this
+                         * priority level and continue on.
+                         */
+                        if (cpumask_any(lowest_mask) >= nr_cpu_ids)
+                                continue;
+                }
 
                 return 1;
         }
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9ffb2b2ceba4..652e8bdef9aa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -611,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHEDSTATS
+        struct task_struct *tsk = NULL;
+
+        if (entity_is_task(se))
+                tsk = task_of(se);
+
         if (se->sleep_start) {
                 u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
-                struct task_struct *tsk = task_of(se);
 
                 if ((s64)delta < 0)
                         delta = 0;
@@ -624,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 se->sleep_start = 0;
                 se->sum_sleep_runtime += delta;
 
-                account_scheduler_latency(tsk, delta >> 10, 1);
+                if (tsk)
+                        account_scheduler_latency(tsk, delta >> 10, 1);
         }
         if (se->block_start) {
                 u64 delta = rq_of(cfs_rq)->clock - se->block_start;
-                struct task_struct *tsk = task_of(se);
 
                 if ((s64)delta < 0)
                         delta = 0;
@@ -639,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 se->block_start = 0;
                 se->sum_sleep_runtime += delta;
 
-                /*
-                 * Blocking time is in units of nanosecs, so shift by 20 to
-                 * get a milliseconds-range estimation of the amount of
-                 * time that the task spent sleeping:
-                 */
-                if (unlikely(prof_on == SLEEP_PROFILING)) {
-
-                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
-                                     delta >> 20);
+                if (tsk) {
+                        /*
+                         * Blocking time is in units of nanosecs, so shift by
+                         * 20 to get a milliseconds-range estimation of the
+                         * amount of time that the task spent sleeping:
+                         */
+                        if (unlikely(prof_on == SLEEP_PROFILING)) {
+                                profile_hits(SLEEP_PROFILING,
+                                                (void *)get_wchan(tsk),
+                                                delta >> 20);
+                        }
+                        account_scheduler_latency(tsk, delta >> 10, 0);
                 }
-                account_scheduler_latency(tsk, delta >> 10, 0);
         }
 #endif
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index ccf1ceedaebe..64c5deeaca5d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2454,11 +2454,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
         stack_t oss;
         int error;
 
-        if (uoss) {
-                oss.ss_sp = (void __user *) current->sas_ss_sp;
-                oss.ss_size = current->sas_ss_size;
-                oss.ss_flags = sas_ss_flags(sp);
-        }
+        oss.ss_sp = (void __user *) current->sas_ss_sp;
+        oss.ss_size = current->sas_ss_size;
+        oss.ss_flags = sas_ss_flags(sp);
 
         if (uss) {
                 void __user *ss_sp;
@@ -2466,10 +2464,12 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
                 int ss_flags;
 
                 error = -EFAULT;
-                if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
-                    || __get_user(ss_sp, &uss->ss_sp)
-                    || __get_user(ss_flags, &uss->ss_flags)
-                    || __get_user(ss_size, &uss->ss_size))
+                if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
+                        goto out;
+                error = __get_user(ss_sp, &uss->ss_sp) |
+                        __get_user(ss_flags, &uss->ss_flags) |
+                        __get_user(ss_size, &uss->ss_size);
+                if (error)
                         goto out;
 
                 error = -EPERM;
@@ -2501,13 +2501,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
                 current->sas_ss_size = ss_size;
         }
 
+        error = 0;
         if (uoss) {
                 error = -EFAULT;
-                if (copy_to_user(uoss, &oss, sizeof(oss)))
+                if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
                         goto out;
+                error = __put_user(oss.ss_sp, &uoss->ss_sp) |
+                        __put_user(oss.ss_size, &uoss->ss_size) |
+                        __put_user(oss.ss_flags, &uoss->ss_flags);
         }
 
-        error = 0;
 out:
         return error;
 }
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1f3ec2afa511..1e1d23c26308 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1662,7 +1662,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
 
         mutex_lock(&ftrace_regex_lock);
         if ((file->f_mode & FMODE_WRITE) &&
-            !(file->f_flags & O_APPEND))
+            (file->f_flags & O_TRUNC))
                 ftrace_filter_reset(enable);
 
         if (file->f_mode & FMODE_READ) {
@@ -2577,7 +2577,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 
         mutex_lock(&graph_lock);
         if ((file->f_mode & FMODE_WRITE) &&
-            !(file->f_flags & O_APPEND)) {
+            (file->f_flags & O_TRUNC)) {
                 ftrace_graph_count = 0;
                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
         }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8bc8d8afea6a..8930e39b9d8c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2031,7 +2031,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 
         /* If this file was open for write, then erase contents */
         if ((file->f_mode & FMODE_WRITE) &&
-            !(file->f_flags & O_APPEND)) {
+            (file->f_flags & O_TRUNC)) {
                 long cpu = (long) inode->i_private;
 
                 if (cpu == TRACE_PIPE_ALL_CPU)
@@ -3085,7 +3085,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
                         break;
                 }
 
-                trace_consume(iter);
+                if (ret != TRACE_TYPE_NO_CONSUME)
+                        trace_consume(iter);
                 rem -= count;
                 if (!find_next_entry_inc(iter)) {
                         rem = 0;
@@ -4233,8 +4234,11 @@ static void __ftrace_dump(bool disable_tracing)
         iter.pos = -1;
 
         if (find_next_entry_inc(&iter) != NULL) {
-                print_trace_line(&iter);
-                trace_consume(&iter);
+                int ret;
+
+                ret = print_trace_line(&iter);
+                if (ret != TRACE_TYPE_NO_CONSUME)
+                        trace_consume(&iter);
         }
 
         trace_printk_seq(&iter.seq);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 53c8fd376a88..23d2972b22d6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -376,7 +376,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file)
         const struct seq_operations *seq_ops;
 
         if ((file->f_mode & FMODE_WRITE) &&
-            !(file->f_flags & O_APPEND))
+            (file->f_flags & O_TRUNC))
                 ftrace_clear_events();
 
         seq_ops = inode->i_private;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d2249abafb53..420ec3487579 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -843,9 +843,16 @@ print_graph_function(struct trace_iterator *iter)
 
         switch (entry->type) {
         case TRACE_GRAPH_ENT: {
-                struct ftrace_graph_ent_entry *field;
+                /*
+                 * print_graph_entry() may consume the current event,
+                 * thus @field may become invalid, so we need to save it.
+                 * sizeof(struct ftrace_graph_ent_entry) is very small,
+                 * it can be safely saved at the stack.
+                 */
+                struct ftrace_graph_ent_entry *field, saved;
                 trace_assign_type(field, entry);
-                return print_graph_entry(field, s, iter);
+                saved = *field;
+                return print_graph_entry(&saved, s, iter);
         }
         case TRACE_GRAPH_RET: {
                 struct ftrace_graph_ret_entry *field;
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 7b6278110827..687699d365ae 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -176,7 +176,7 @@ static int t_show(struct seq_file *m, void *v)
         const char *str = *fmt;
         int i;
 
-        seq_printf(m, "0x%lx : \"", (unsigned long)fmt);
+        seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
 
         /*
          * Tabs and new lines need to be converted.
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 0e7894ce8882..08f1636d296a 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -254,7 +254,6 @@ void *flex_array_get(struct flex_array *fa, int element_nr)
 {
         int part_nr = fa_element_to_part_nr(fa, element_nr);
         struct flex_array_part *part;
-        int index;
 
         if (element_nr >= fa->total_nr_elements)
                 return NULL;
@@ -264,6 +263,5 @@ void *flex_array_get(struct flex_array *fa, int element_nr)
                 part = (struct flex_array_part *)&fa->parts[0];
         else
                 part = fa->parts[part_nr];
-        index = index_inside_part(fa, element_nr);
         return &part->elements[index_inside_part(fa, element_nr)];
 }
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index e50566ebf9f9..94b3388c188b 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -2080,28 +2080,41 @@ static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL);
 /* ---- Initialization ---- */
 static int __init rfcomm_init(void)
 {
+        int ret;
+
         l2cap_load();
 
         hci_register_cb(&rfcomm_cb);
 
         rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
         if (IS_ERR(rfcomm_thread)) {
-                hci_unregister_cb(&rfcomm_cb);
-                return PTR_ERR(rfcomm_thread);
+                ret = PTR_ERR(rfcomm_thread);
+                goto out_thread;
         }
 
         if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0)
                 BT_ERR("Failed to create RFCOMM info file");
 
-        rfcomm_init_sockets();
+        ret = rfcomm_init_ttys();
+        if (ret)
+                goto out_tty;
 
-#ifdef CONFIG_BT_RFCOMM_TTY
-        rfcomm_init_ttys();
-#endif
+        ret = rfcomm_init_sockets();
+        if (ret)
+                goto out_sock;
 
         BT_INFO("RFCOMM ver %s", VERSION);
 
         return 0;
+
+out_sock:
+        rfcomm_cleanup_ttys();
+out_tty:
+        kthread_stop(rfcomm_thread);
+out_thread:
+        hci_unregister_cb(&rfcomm_cb);
+
+        return ret;
 }
 
 static void __exit rfcomm_exit(void)
@@ -2112,9 +2125,7 @@ static void __exit rfcomm_exit(void)
 
         kthread_stop(rfcomm_thread);
 
-#ifdef CONFIG_BT_RFCOMM_TTY
         rfcomm_cleanup_ttys();
-#endif
 
         rfcomm_cleanup_sockets();
 }
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 7f482784e9f7..0b85e8116859 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1132,7 +1132,7 @@ error:
         return err;
 }
 
-void __exit rfcomm_cleanup_sockets(void)
+void rfcomm_cleanup_sockets(void)
 {
         class_remove_file(bt_class, &class_attr_rfcomm);
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 70c27e0c7c32..43e61ba7bd95 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3865,10 +3865,12 @@ int dev_unicast_delete(struct net_device *dev, void *addr)
 
         ASSERT_RTNL();
 
+        netif_addr_lock_bh(dev);
         err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
                             NETDEV_HW_ADDR_T_UNICAST);
         if (!err)
                 __dev_set_rx_mode(dev);
+        netif_addr_unlock_bh(dev);
         return err;
 }
 EXPORT_SYMBOL(dev_unicast_delete);
@@ -3889,10 +3891,12 @@ int dev_unicast_add(struct net_device *dev, void *addr)
 
         ASSERT_RTNL();
 
+        netif_addr_lock_bh(dev);
         err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
                             NETDEV_HW_ADDR_T_UNICAST);
         if (!err)
                 __dev_set_rx_mode(dev);
+        netif_addr_unlock_bh(dev);
         return err;
 }
 EXPORT_SYMBOL(dev_unicast_add);
@@ -3949,7 +3953,8 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
  * @from: source device
  *
  * Add newly added addresses to the destination device and release
- * addresses that have no users left.
+ * addresses that have no users left. The source device must be
+ * locked by netif_tx_lock_bh.
  *
  * This function is intended to be called from the dev->set_rx_mode
  * function of layered software devices.
@@ -3958,14 +3963,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 {
         int err = 0;
 
-        ASSERT_RTNL();
-
         if (to->addr_len != from->addr_len)
                 return -EINVAL;
 
+        netif_addr_lock_bh(to);
         err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
         if (!err)
                 __dev_set_rx_mode(to);
+        netif_addr_unlock_bh(to);
         return err;
 }
 EXPORT_SYMBOL(dev_unicast_sync);
@@ -3981,28 +3986,30 @@ EXPORT_SYMBOL(dev_unicast_sync);
  */
 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 {
-        ASSERT_RTNL();
-
         if (to->addr_len != from->addr_len)
                 return;
 
+        netif_addr_lock_bh(from);
+        netif_addr_lock(to);
         __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
         __dev_set_rx_mode(to);
+        netif_addr_unlock(to);
+        netif_addr_unlock_bh(from);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
 
 static void dev_unicast_flush(struct net_device *dev)
 {
-        /* rtnl_mutex must be held here */
-
+        netif_addr_lock_bh(dev);
         __hw_addr_flush(&dev->uc);
+        netif_addr_unlock_bh(dev);
 }
 
 static void dev_unicast_init(struct net_device *dev)
 {
-        /* rtnl_mutex must be held here */
-
+        netif_addr_lock_bh(dev);
         __hw_addr_init(&dev->uc);
+        netif_addr_unlock_bh(dev);
 }
 
 
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b7292a2719dc..197283072cc8 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -488,7 +488,7 @@ int net_assign_generic(struct net *net, int id, void *data)
          */
 
         ng->len = id;
-        memcpy(&ng->ptr, &old_ng->ptr, old_ng->len);
+        memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
 
         rcu_assign_pointer(net->gen, ng);
         call_rcu(&old_ng->rcu, net_generic_release);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c29d75d8f1b1..090e9991ac2a 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1304,7 +1304,9 @@ static void arp_format_neigh_entry(struct seq_file *seq,
                 hbuffer[k++] = hex_asc_lo(n->ha[j]);
                 hbuffer[k++] = ':';
         }
-        hbuffer[--k] = 0;
+        if (k != 0)
+                --k;
+        hbuffer[k] = 0;
 #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
         }
 #endif
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index aca22b00b6a3..07e7e41816be 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -721,7 +721,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
 {
         struct ieee80211_local *local = (void *) data;
 
-        if (local->quiescing)
+        if (local->quiescing || local->suspended)
                 return;
 
         queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 7a549f9deb96..5e3d476972f9 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -55,15 +55,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
 
         rcu_read_unlock();
 
-        /* flush again, in case driver queued work */
-        flush_workqueue(local->hw.workqueue);
-
-        /* stop hardware - this must stop RX */
-        if (local->open_count) {
-                ieee80211_led_radio(local, false);
-                drv_stop(local);
-        }
-
         /* remove STAs */
         spin_lock_irqsave(&local->sta_lock, flags);
         list_for_each_entry(sta, &local->sta_list, list) {
@@ -111,7 +102,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
                 drv_remove_interface(local, &conf);
         }
 
+        /* stop hardware - this must stop RX */
+        if (local->open_count) {
+                ieee80211_led_radio(local, false);
+                drv_stop(local);
+        }
+
+        /*
+         * flush again, in case driver queued work -- it
+         * shouldn't be doing (or cancel everything in the
+         * stop callback) that but better safe than sorry.
+         */
+        flush_workqueue(local->hw.workqueue);
+
         local->suspended = true;
+        /* need suspended to be visible before quiescing is false */
+        barrier();
         local->quiescing = false;
 
         return 0;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index de5bba7f910a..0936fc24942d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2453,6 +2453,18 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
                 return;
         }
 
+        /*
+         * If we're suspending, it is possible although not too likely
+         * that we'd be receiving frames after having already partially
+         * quiesced the stack. We can't process such frames then since
+         * that might, for example, cause stations to be added or other
+         * driver callbacks be invoked.
+         */
+        if (unlikely(local->quiescing || local->suspended)) {
+                kfree_skb(skb);
+                return;
+        }
+
         if (status->flag & RX_FLAG_HT) {
                 /* rate_idx is MCS index */
                 if (WARN_ON(status->rate_idx < 0 ||
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index b0e582f2d37a..16e6c4378ff1 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -151,7 +151,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                 addr6 = addr;
                 mask6 = mask;
                 map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
-                if (map4 == NULL)
+                if (map6 == NULL)
                         goto cfg_unlbl_map_add_failure;
                 map6->type = NETLBL_NLTYPE_UNLABELED;
                 ipv6_addr_copy(&map6->list.addr, addr6);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 5e14371cda70..75a406d33619 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1089,17 +1089,18 @@ static void handle_reg_beacon(struct wiphy *wiphy,
 
         chan->beacon_found = true;
 
+        if (wiphy->disable_beacon_hints)
+                return;
+
         chan_before.center_freq = chan->center_freq;
         chan_before.flags = chan->flags;
 
-        if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
-            !(chan->orig_flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+        if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
                 chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
                 channel_changed = true;
         }
 
-        if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
-            !(chan->orig_flags & IEEE80211_CHAN_NO_IBSS)) {
+        if (chan->flags & IEEE80211_CHAN_NO_IBSS) {
                 chan->flags &= ~IEEE80211_CHAN_NO_IBSS;
                 channel_changed = true;
         }
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index e37829a49dc4..4e167a8e11be 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -30,7 +30,8 @@ int set_regdom(const struct ieee80211_regdomain *rd);
  * non-radar 5 GHz channels.
  *
  * Drivers do not need to call this, cfg80211 will do it for after a scan
- * on a newly found BSS.
+ * on a newly found BSS. If you cannot make use of this feature you can
+ * set the wiphy->disable_beacon_hints to true.
  */
 int regulatory_hint_found_beacon(struct wiphy *wiphy,
                                  struct ieee80211_channel *beacon_chan,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 9271118e1fc4..7e595ce24eeb 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -118,7 +118,7 @@ static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
 
         if (!ie1 && !ie2)
                 return 0;
-        if (!ie1)
+        if (!ie1 || !ie2)
                 return -1;
 
         r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1]));
@@ -171,6 +171,8 @@ static bool is_mesh(struct cfg80211_bss *a,
         ie = find_ie(WLAN_EID_MESH_CONFIG,
                      a->information_elements,
                      a->len_information_elements);
+        if (!ie)
+                return false;
         if (ie[1] != IEEE80211_MESH_CONFIG_LEN)
                 return false;
 
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 7109e2b5bc0a..d29baa2e063a 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -403,7 +403,6 @@ while (<IN>) {
         # section found, now is this a start of a function?
         } elsif ($read_function && /$function_regex/) {
                 $text_found = 1;
-                $offset = hex $1;
                 $text = $2;
 
                 # if this is either a local function or a weak function
@@ -412,10 +411,12 @@ while (<IN>) {
                 if (!defined($locals{$text}) && !defined($weak{$text})) {
                         $ref_func = $text;
                         $read_function = 0;
+                        $offset = hex $1;
                 } else {
                         # if we already have a function, and this is weak, skip it
-                        if (!defined($ref_func) || !defined($weak{$text})) {
+                        if (!defined($ref_func) && !defined($weak{$text})) {
                                 $ref_func = $text;
+                                $offset = hex $1;
                         }
                 }
         } elsif ($read_headers && /$mcount_section/) {
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index a5e9b876ca09..4b20fa47c3ab 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -345,7 +345,7 @@ BUILTIN_OBJS += builtin-stat.o
 BUILTIN_OBJS += builtin-top.o
 
 PERFLIBS = $(LIB_FILE)
-EXTLIBS = -lbfd
+EXTLIBS = -lbfd -liberty
 
 #
 # Platform specific tweaks
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index b20a4b6e31b7..ce4f28645e64 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -99,6 +99,7 @@ struct comm_event {
 struct fork_event {
         struct perf_event_header header;
         u32 pid, ppid;
+        u32 tid, ptid;
 };
 
 struct lost_event {
@@ -252,7 +253,7 @@ static int strcommon(const char *pathname)
 {
         int n = 0;
 
-        while (pathname[n] == cwd[n] && n < cwdlen)
+        while (n < cwdlen && pathname[n] == cwd[n])
                 ++n;
 
         return n;
@@ -1608,15 +1609,27 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 }
 
 static int
-process_fork_event(event_t *event, unsigned long offset, unsigned long head)
+process_task_event(event_t *event, unsigned long offset, unsigned long head)
 {
         struct thread *thread = threads__findnew(event->fork.pid);
         struct thread *parent = threads__findnew(event->fork.ppid);
 
-        dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
+        dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
                 (void *)(offset + head),
                 (void *)(long)(event->header.size),
-                event->fork.pid, event->fork.ppid);
+                event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT",
+                event->fork.pid, event->fork.tid,
+                event->fork.ppid, event->fork.ptid);
+
+        /*
+         * A thread clone will have the same PID for both
+         * parent and child.
+         */
+        if (thread == parent)
+                return 0;
+
+        if (event->header.type == PERF_EVENT_EXIT)
+                return 0;
 
         if (!thread || !parent || thread__fork(thread, parent)) {
                 dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
@@ -1706,7 +1719,8 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
                 return process_comm_event(event, offset, head);
 
         case PERF_EVENT_FORK:
-                return process_fork_event(event, offset, head);
+        case PERF_EVENT_EXIT:
+                return process_task_event(event, offset, head);
 
         case PERF_EVENT_LOST:
                 return process_lost_event(event, offset, head);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index c0a423004e15..f139f1ab9333 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -285,6 +285,7 @@ static const char *skip_symbols[] = {
285 "enter_idle", 285 "enter_idle",
286 "exit_idle", 286 "exit_idle",
287 "mwait_idle", 287 "mwait_idle",
288 "mwait_idle_with_hints",
288 "ppc64_runlatch_off", 289 "ppc64_runlatch_off",
289 "pseries_dedicated_idle_sleep", 290 "pseries_dedicated_idle_sleep",
290 NULL 291 NULL
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c
index c6e5dc0dc82f..2726fe40eb5d 100644
--- a/tools/perf/util/quote.c
+++ b/tools/perf/util/quote.c
@@ -318,7 +318,7 @@ char *quote_path_relative(const char *in, int len,
         strbuf_addch(out, '"');
         if (prefix) {
                 int off = 0;
-                while (prefix[off] && off < len && prefix[off] == in[off])
+                while (off < len && prefix[off] && prefix[off] == in[off])
                         if (prefix[off] == '/') {
                                 prefix += off + 1;
                                 in += off + 1;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 28106059bf12..b4fe0579bd6b 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -565,7 +565,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
                 goto out_elf_end;
 
         secstrs = elf_getdata(sec_strndx, NULL);
-        if (symstrs == NULL)
+        if (secstrs == NULL)
                 goto out_elf_end;
 
         nr_syms = shdr.sh_size / shdr.sh_entsize;