author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-12-15 17:13:26 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>    2008-12-15 17:13:26 -0500
commit    7e1548a597ef7e26d5d62f8be3be6da9e101b26c (patch)
tree      fe6cbf4d9a3c1afdba04fb276fef0f932403727c /drivers
parent    1f7f569c0ae6e619504095eabf796edd712d943d (diff)
parent    2619bc327417f549f1c89d5ef9b4a4aa768f41a2 (diff)
Merge branch 'omap3-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap-2.6 into devel
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/battery.c | 9
-rw-r--r--  drivers/ata/Kconfig | 44
-rw-r--r--  drivers/ata/ata_piix.c | 9
-rw-r--r--  drivers/ata/pata_hpt366.c | 4
-rw-r--r--  drivers/ata/pata_ninja32.c | 9
-rw-r--r--  drivers/ata/pata_sis.c | 1
-rw-r--r--  drivers/atm/horizon.c | 2
-rw-r--r--  drivers/block/pktcdvd.c | 6
-rw-r--r--  drivers/char/serial167.c | 2
-rw-r--r--  drivers/firewire/fw-ohci.c | 11
-rw-r--r--  drivers/firewire/fw-transaction.c | 3
-rw-r--r--  drivers/firewire/fw-transaction.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 639
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c | 8
-rw-r--r--  drivers/ide/Kconfig | 14
-rw-r--r--  drivers/ide/alim15x3.c | 2
-rw-r--r--  drivers/ide/amd74xx.c | 11
-rw-r--r--  drivers/ide/ide-io.c | 24
-rw-r--r--  drivers/ide/ide-iops.c | 9
-rw-r--r--  drivers/ide/ide-probe.c | 3
-rw-r--r--  drivers/ide/pmac.c | 30
-rw-r--r--  drivers/ide/sgiioc4.c | 6
-rw-r--r--  drivers/ieee1394/nodemgr.c | 2
-rw-r--r--  drivers/input/keyboard/omap-keypad.c | 8
-rw-r--r--  drivers/isdn/hardware/avm/b1isa.c | 6
-rw-r--r--  drivers/isdn/hysdn/hysdn_net.c | 4
-rw-r--r--  drivers/md/dm-table.c | 2
-rw-r--r--  drivers/message/i2o/i2o_block.c | 2
-rw-r--r--  drivers/message/i2o/iop.c | 1
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 4
-rw-r--r--  drivers/misc/sgi-gru/grufile.c | 2
-rw-r--r--  drivers/mtd/devices/m25p80.c | 28
-rw-r--r--  drivers/mtd/maps/physmap.c | 26
-rw-r--r--  drivers/mtd/nand/fsl_upm.c | 8
-rw-r--r--  drivers/mtd/nand/pasemi_nand.c | 1
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 1
-rw-r--r--  drivers/mtd/onenand/omap2.c | 27
-rw-r--r--  drivers/net/Makefile | 2
-rw-r--r--  drivers/net/bnx2.c | 35
-rw-r--r--  drivers/net/bnx2.h | 6
-rw-r--r--  drivers/net/chelsio/sge.c | 4
-rw-r--r--  drivers/net/e1000e/netdev.c | 1
-rw-r--r--  drivers/net/enc28j60.c | 16
-rw-r--r--  drivers/net/hp-plus.c | 2
-rw-r--r--  drivers/net/igb/igb_main.c | 4
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 1
-rw-r--r--  drivers/net/macvlan.c | 3
-rw-r--r--  drivers/net/netx-eth.c | 2
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r--  drivers/net/phy/phy_device.c | 13
-rw-r--r--  drivers/net/phy/vitesse.c | 64
-rw-r--r--  drivers/net/pppol2tp.c | 1
-rw-r--r--  drivers/net/sis900.c | 1
-rw-r--r--  drivers/net/spider_net.c | 4
-rw-r--r--  drivers/net/sungem.c | 2
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 35
-rw-r--r--  drivers/net/wireless/ath5k/base.h | 1
-rw-r--r--  drivers/net/wireless/ath5k/debug.c | 10
-rw-r--r--  drivers/net/wireless/ath9k/beacon.c | 10
-rw-r--r--  drivers/net/wireless/ath9k/recv.c | 19
-rw-r--r--  drivers/net/wireless/ipw2200.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 24
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 2
-rw-r--r--  drivers/pci/pcie/aspm.c | 29
-rw-r--r--  drivers/pci/quirks.c | 17
-rw-r--r--  drivers/pci/slot.c | 1
-rw-r--r--  drivers/rtc/rtc-ds1672.c | 6
-rw-r--r--  drivers/rtc/rtc-max6900.c | 6
-rw-r--r--  drivers/rtc/rtc-starfire.c | 66
-rw-r--r--  drivers/rtc/rtc-twl4030.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 7
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 7
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 20
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 2
-rw-r--r--  drivers/scsi/aacraid/linit.c | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 3
-rw-r--r--  drivers/scsi/scsi_error.c | 3
-rw-r--r--  drivers/scsi/scsi_lib.c | 23
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/scsi/stex.c | 2
-rw-r--r--  drivers/serial/ioc3_serial.c | 6
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 4
-rw-r--r--  drivers/serial/s3c2440.c | 2
-rw-r--r--  drivers/usb/host/ohci-omap.c | 8
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/video/aty/radeon_accel.c | 3
-rw-r--r--  drivers/video/aty/radeon_base.c | 6
-rw-r--r--  drivers/video/aty/radeonfb.h | 2
-rw-r--r--  drivers/video/console/fbcon.c | 6
-rw-r--r--  drivers/video/macfb.c | 74
-rw-r--r--  drivers/video/mb862xx/mb862xxfb.c | 4
-rw-r--r--  drivers/video/omap/omapfb_main.c | 2
-rw-r--r--  drivers/watchdog/hpwdt.c | 5
-rw-r--r--  drivers/watchdog/iTCO_vendor_support.c | 31
-rw-r--r--  drivers/watchdog/iTCO_wdt.c | 164
-rw-r--r--  drivers/watchdog/mtx-1_wdt.c | 4
107 files changed, 1099 insertions, 712 deletions
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a0a178dd189c..1423b0c0cd2e 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -174,15 +174,6 @@ static int acpi_battery_get_property(struct power_supply *psy,
174 break; 174 break;
175 case POWER_SUPPLY_PROP_CURRENT_NOW: 175 case POWER_SUPPLY_PROP_CURRENT_NOW:
176 val->intval = battery->current_now * 1000; 176 val->intval = battery->current_now * 1000;
177 /* if power units are mW, convert to mA by
178 dividing by current voltage (mV/1000) */
179 if (!battery->power_unit) {
180 if (battery->voltage_now) {
181 val->intval /= battery->voltage_now;
182 val->intval *= 1000;
183 } else
184 val->intval = -1;
185 }
186 break; 177 break;
187 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: 178 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
188 case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: 179 case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
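The hunk above drops the driver's inline mW-to-mA conversion; the arithmetic described by the removed comment is simply I[mA] = P[mW] * 1000 / U[mV]. A minimal standalone C sketch of that conversion (hypothetical helper, not driver code):

#include <stdio.h>

/* Convert a power reading in mW into a current in mA using the present
 * voltage in mV; returns -1 when the voltage is unknown, mirroring the
 * fallback in the removed hunk. */
static int power_mw_to_current_ma(int power_mw, int voltage_mv)
{
	if (!voltage_mv)
		return -1;
	return power_mw * 1000 / voltage_mv;
}

int main(void)
{
	/* 14800 mW drawn at 14800 mV corresponds to 1000 mA. */
	printf("%d mA\n", power_mw_to_current_ma(14800, 14800));
	return 0;
}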
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 78fbec8ceda0..421b7c71e72d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -153,7 +153,7 @@ config SATA_PROMISE
153 If unsure, say N. 153 If unsure, say N.
154 154
155config SATA_SX4 155config SATA_SX4
156 tristate "Promise SATA SX4 support" 156 tristate "Promise SATA SX4 support (Experimental)"
157 depends on PCI && EXPERIMENTAL 157 depends on PCI && EXPERIMENTAL
158 help 158 help
159 This option enables support for Promise Serial ATA SX4. 159 This option enables support for Promise Serial ATA SX4.
@@ -219,8 +219,8 @@ config PATA_ACPI
219 otherwise unsupported hardware. 219 otherwise unsupported hardware.
220 220
221config PATA_ALI 221config PATA_ALI
222 tristate "ALi PATA support (Experimental)" 222 tristate "ALi PATA support"
223 depends on PCI && EXPERIMENTAL 223 depends on PCI
224 help 224 help
225 This option enables support for the ALi ATA interfaces 225 This option enables support for the ALi ATA interfaces
226 found on the many ALi chipsets. 226 found on the many ALi chipsets.
@@ -263,7 +263,7 @@ config PATA_ATIIXP
263 If unsure, say N. 263 If unsure, say N.
264 264
265config PATA_CMD640_PCI 265config PATA_CMD640_PCI
266 tristate "CMD640 PCI PATA support (Very Experimental)" 266 tristate "CMD640 PCI PATA support (Experimental)"
267 depends on PCI && EXPERIMENTAL 267 depends on PCI && EXPERIMENTAL
268 help 268 help
269 This option enables support for the CMD640 PCI IDE 269 This option enables support for the CMD640 PCI IDE
@@ -291,8 +291,8 @@ config PATA_CS5520
291 If unsure, say N. 291 If unsure, say N.
292 292
293config PATA_CS5530 293config PATA_CS5530
294 tristate "CS5530 PATA support (Experimental)" 294 tristate "CS5530 PATA support"
295 depends on PCI && EXPERIMENTAL 295 depends on PCI
296 help 296 help
297 This option enables support for the Cyrix/NatSemi/AMD CS5530 297 This option enables support for the Cyrix/NatSemi/AMD CS5530
298 companion chip used with the MediaGX/Geode processor family. 298 companion chip used with the MediaGX/Geode processor family.
@@ -309,8 +309,8 @@ config PATA_CS5535
309 If unsure, say N. 309 If unsure, say N.
310 310
311config PATA_CS5536 311config PATA_CS5536
312 tristate "CS5536 PATA support (Experimental)" 312 tristate "CS5536 PATA support"
313 depends on PCI && X86 && !X86_64 && EXPERIMENTAL 313 depends on PCI && X86 && !X86_64
314 help 314 help
315 This option enables support for the AMD CS5536 315 This option enables support for the AMD CS5536
316 companion chip used with the Geode LX processor family. 316 companion chip used with the Geode LX processor family.
@@ -363,7 +363,7 @@ config PATA_HPT37X
363 If unsure, say N. 363 If unsure, say N.
364 364
365config PATA_HPT3X2N 365config PATA_HPT3X2N
366 tristate "HPT 372N/302N PATA support (Very Experimental)" 366 tristate "HPT 372N/302N PATA support (Experimental)"
367 depends on PCI && EXPERIMENTAL 367 depends on PCI && EXPERIMENTAL
368 help 368 help
369 This option enables support for the N variant HPT PATA 369 This option enables support for the N variant HPT PATA
@@ -389,8 +389,8 @@ config PATA_HPT3X3_DMA
389 problems with DMA on this chipset. 389 problems with DMA on this chipset.
390 390
391config PATA_ISAPNP 391config PATA_ISAPNP
392 tristate "ISA Plug and Play PATA support (Experimental)" 392 tristate "ISA Plug and Play PATA support"
393 depends on EXPERIMENTAL && ISAPNP 393 depends on ISAPNP
394 help 394 help
395 This option enables support for ISA plug & play ATA 395 This option enables support for ISA plug & play ATA
396 controllers such as those found on old soundcards. 396 controllers such as those found on old soundcards.
@@ -498,8 +498,8 @@ config PATA_NINJA32
498 If unsure, say N. 498 If unsure, say N.
499 499
500config PATA_NS87410 500config PATA_NS87410
501 tristate "Nat Semi NS87410 PATA support (Experimental)" 501 tristate "Nat Semi NS87410 PATA support"
502 depends on PCI && EXPERIMENTAL 502 depends on PCI
503 help 503 help
504 This option enables support for the National Semiconductor 504 This option enables support for the National Semiconductor
505 NS87410 PCI-IDE controller. 505 NS87410 PCI-IDE controller.
@@ -507,8 +507,8 @@ config PATA_NS87410
507 If unsure, say N. 507 If unsure, say N.
508 508
509config PATA_NS87415 509config PATA_NS87415
510 tristate "Nat Semi NS87415 PATA support (Experimental)" 510 tristate "Nat Semi NS87415 PATA support"
511 depends on PCI && EXPERIMENTAL 511 depends on PCI
512 help 512 help
513 This option enables support for the National Semiconductor 513 This option enables support for the National Semiconductor
514 NS87415 PCI-IDE controller. 514 NS87415 PCI-IDE controller.
@@ -544,8 +544,8 @@ config PATA_PCMCIA
544 If unsure, say N. 544 If unsure, say N.
545 545
546config PATA_PDC_OLD 546config PATA_PDC_OLD
547 tristate "Older Promise PATA controller support (Experimental)" 547 tristate "Older Promise PATA controller support"
548 depends on PCI && EXPERIMENTAL 548 depends on PCI
549 help 549 help
550 This option enables support for the Promise 20246, 20262, 20263, 550 This option enables support for the Promise 20246, 20262, 20263,
551 20265 and 20267 adapters. 551 20265 and 20267 adapters.
@@ -559,7 +559,7 @@ config PATA_QDI
559 Support for QDI 6500 and 6580 PATA controllers on VESA local bus. 559 Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
560 560
561config PATA_RADISYS 561config PATA_RADISYS
562 tristate "RADISYS 82600 PATA support (Very Experimental)" 562 tristate "RADISYS 82600 PATA support (Experimental)"
563 depends on PCI && EXPERIMENTAL 563 depends on PCI && EXPERIMENTAL
564 help 564 help
565 This option enables support for the RADISYS 82600 565 This option enables support for the RADISYS 82600
@@ -586,8 +586,8 @@ config PATA_RZ1000
586 If unsure, say N. 586 If unsure, say N.
587 587
588config PATA_SC1200 588config PATA_SC1200
589 tristate "SC1200 PATA support (Very Experimental)" 589 tristate "SC1200 PATA support"
590 depends on PCI && EXPERIMENTAL 590 depends on PCI
591 help 591 help
592 This option enables support for the NatSemi/AMD SC1200 SoC 592 This option enables support for the NatSemi/AMD SC1200 SoC
593 companion chip used with the Geode processor family. 593 companion chip used with the Geode processor family.
@@ -620,8 +620,8 @@ config PATA_SIL680
620 If unsure, say N. 620 If unsure, say N.
621 621
622config PATA_SIS 622config PATA_SIS
623 tristate "SiS PATA support (Experimental)" 623 tristate "SiS PATA support"
624 depends on PCI && EXPERIMENTAL 624 depends on PCI
625 help 625 help
626 This option enables support for SiS PATA controllers 626 This option enables support for SiS PATA controllers
627 627
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d6d97d8f3fa4..c11936e13dd3 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1072,7 +1072,14 @@ static int piix_broken_suspend(void)
1072 * matching is necessary because dmi_system_id.matches is 1072 * matching is necessary because dmi_system_id.matches is
1073 * limited to four entries. 1073 * limited to four entries.
1074 */ 1074 */
1075 if (!strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") && 1075 if (dmi_get_system_info(DMI_SYS_VENDOR) &&
1076 dmi_get_system_info(DMI_PRODUCT_NAME) &&
1077 dmi_get_system_info(DMI_PRODUCT_VERSION) &&
1078 dmi_get_system_info(DMI_PRODUCT_SERIAL) &&
1079 dmi_get_system_info(DMI_BOARD_VENDOR) &&
1080 dmi_get_system_info(DMI_BOARD_NAME) &&
1081 dmi_get_system_info(DMI_BOARD_VERSION) &&
1082 !strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
1076 !strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") && 1083 !strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") &&
1077 !strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") && 1084 !strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") &&
1078 !strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") && 1085 !strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") &&
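The ata_piix hunk adds a NULL test in front of every strcmp() because dmi_get_system_info() returns NULL for DMI fields the firmware did not populate, and passing NULL to strcmp() would oops. A hedged sketch of the same guard wrapped in a hypothetical helper (not the driver's actual code):

#include <linux/dmi.h>
#include <linux/string.h>
#include <linux/types.h>

/* True only if the DMI field is present and equals the expected string;
 * a missing field must never reach strcmp(). */
static bool dmi_field_is(int field, const char *expect)
{
	const char *val = dmi_get_system_info(field);

	return val && !strcmp(val, expect);
}

/* Usage, reusing the Toshiba match from the hunk above: */
static bool board_matches(void)
{
	return dmi_field_is(DMI_SYS_VENDOR, "TOSHIBA") &&
	       dmi_field_is(DMI_PRODUCT_NAME, "000000");
}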
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index f2b83eabc7c7..a098ba8eaab6 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -382,10 +382,10 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
382 /* PCI clocking determines the ATA timing values to use */ 382 /* PCI clocking determines the ATA timing values to use */
383 /* info_hpt366 is safe against re-entry so we can scribble on it */ 383 /* info_hpt366 is safe against re-entry so we can scribble on it */
384 switch((reg1 & 0x700) >> 8) { 384 switch((reg1 & 0x700) >> 8) {
385 case 5: 385 case 9:
386 hpriv = &hpt366_40; 386 hpriv = &hpt366_40;
387 break; 387 break;
388 case 9: 388 case 5:
389 hpriv = &hpt366_25; 389 hpriv = &hpt366_25;
390 break; 390 break;
391 default: 391 default:
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 4e466eae8b46..4dd9a3b031e4 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -44,7 +44,7 @@
44#include <linux/libata.h> 44#include <linux/libata.h>
45 45
46#define DRV_NAME "pata_ninja32" 46#define DRV_NAME "pata_ninja32"
47#define DRV_VERSION "0.1.1" 47#define DRV_VERSION "0.1.3"
48 48
49 49
50/** 50/**
@@ -130,7 +130,8 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
130 return rc; 130 return rc;
131 pci_set_master(dev); 131 pci_set_master(dev);
132 132
133 /* Set up the register mappings */ 133 /* Set up the register mappings. We use the I/O mapping as only the
134 older chips also have MMIO on BAR 1 */
134 base = host->iomap[0]; 135 base = host->iomap[0];
135 if (!base) 136 if (!base)
136 return -ENOMEM; 137 return -ENOMEM;
@@ -167,8 +168,12 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
167#endif 168#endif
168 169
169static const struct pci_device_id ninja32[] = { 170static const struct pci_device_id ninja32[] = {
171 { 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
172 { 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
173 { 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
170 { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 174 { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
171 { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 175 { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
176 { 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
172 { }, 177 { },
173}; 178};
174 179
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index d34236611752..e4be55e047f6 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -56,7 +56,6 @@ static const struct sis_laptop sis_laptop[] = {
56 { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */ 56 { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
57 { 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */ 57 { 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
58 { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */ 58 { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
59 { 0x5513, 0x1039, 0x5513 }, /* Targa Visionary 1000 */
60 /* end marker */ 59 /* end marker */
61 { 0, } 60 { 0, }
62}; 61};
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 615412364e99..6b969f8c684f 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2705,7 +2705,7 @@ static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_
2705 2705
2706 /* XXX DEV_LABEL is a guess */ 2706 /* XXX DEV_LABEL is a guess */
2707 if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) { 2707 if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) {
2708 return -EINVAL; 2708 err = -EINVAL;
2709 goto out_disable; 2709 goto out_disable;
2710 } 2710 }
2711 2711
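The horizon.c change matters because a bare return from the middle of hrz_probe() skips the unwinding done at out_disable, leaving the PCI device that was enabled earlier still enabled. A generic sketch of the goto-unwind pattern the fix restores; names and resources are illustrative, not taken from the driver:

#include <linux/pci.h>
#include <linux/ioport.h>

static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!request_region(0x300, 8, "example")) {
		err = -EINVAL;		/* record the error ...         */
		goto out_disable;	/* ... and unwind, don't return */
	}

	return 0;

out_disable:
	pci_disable_device(pdev);
	return err;
}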
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f20bf359b84f..dc7a8c352da2 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -302,7 +302,7 @@ static struct kobj_type kobj_pkt_type_wqueue = {
302static void pkt_sysfs_dev_new(struct pktcdvd_device *pd) 302static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
303{ 303{
304 if (class_pktcdvd) { 304 if (class_pktcdvd) {
305 pd->dev = device_create(class_pktcdvd, NULL, pd->pkt_dev, NULL, 305 pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
306 "%s", pd->name); 306 "%s", pd->name);
307 if (IS_ERR(pd->dev)) 307 if (IS_ERR(pd->dev))
308 pd->dev = NULL; 308 pd->dev = NULL;
@@ -2790,7 +2790,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2790 return 0; 2790 return 0;
2791 2791
2792out_mem: 2792out_mem:
2793 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 2793 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
2794 /* This is safe: open() is still holding a reference. */ 2794 /* This is safe: open() is still holding a reference. */
2795 module_put(THIS_MODULE); 2795 module_put(THIS_MODULE);
2796 return ret; 2796 return ret;
@@ -2975,7 +2975,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
2975 pkt_debugfs_dev_remove(pd); 2975 pkt_debugfs_dev_remove(pd);
2976 pkt_sysfs_dev_remove(pd); 2976 pkt_sysfs_dev_remove(pd);
2977 2977
2978 blkdev_put(pd->bdev, FMODE_READ|FMODE_WRITE); 2978 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
2979 2979
2980 remove_proc_entry(pd->name, pkt_proc); 2980 remove_proc_entry(pd->name, pkt_proc);
2981 DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name); 2981 DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 3b23270eaa65..a8f15e6be594 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -418,7 +418,7 @@ static irqreturn_t cd2401_rxerr_interrupt(int irq, void *dev_id)
418 TTY_OVERRUN); 418 TTY_OVERRUN);
419 /* 419 /*
420 If the flip buffer itself is 420 If the flip buffer itself is
421 overflowing, we still loose 421 overflowing, we still lose
422 the next incoming character. 422 the next incoming character.
423 */ 423 */
424 if (tty_buffer_request_room(tty, 1) != 424 if (tty_buffer_request_room(tty, 1) !=
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 46610b090415..ab9c01e462ef 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -974,6 +974,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
974 packet->ack = RCODE_SEND_ERROR; 974 packet->ack = RCODE_SEND_ERROR;
975 return -1; 975 return -1;
976 } 976 }
977 packet->payload_bus = payload_bus;
977 978
978 d[2].req_count = cpu_to_le16(packet->payload_length); 979 d[2].req_count = cpu_to_le16(packet->payload_length);
979 d[2].data_address = cpu_to_le32(payload_bus); 980 d[2].data_address = cpu_to_le32(payload_bus);
@@ -1025,7 +1026,6 @@ static int handle_at_packet(struct context *context,
1025 struct driver_data *driver_data; 1026 struct driver_data *driver_data;
1026 struct fw_packet *packet; 1027 struct fw_packet *packet;
1027 struct fw_ohci *ohci = context->ohci; 1028 struct fw_ohci *ohci = context->ohci;
1028 dma_addr_t payload_bus;
1029 int evt; 1029 int evt;
1030 1030
1031 if (last->transfer_status == 0) 1031 if (last->transfer_status == 0)
@@ -1038,9 +1038,8 @@ static int handle_at_packet(struct context *context,
1038 /* This packet was cancelled, just continue. */ 1038 /* This packet was cancelled, just continue. */
1039 return 1; 1039 return 1;
1040 1040
1041 payload_bus = le32_to_cpu(last->data_address); 1041 if (packet->payload_bus)
1042 if (payload_bus != 0) 1042 dma_unmap_single(ohci->card.device, packet->payload_bus,
1043 dma_unmap_single(ohci->card.device, payload_bus,
1044 packet->payload_length, DMA_TO_DEVICE); 1043 packet->payload_length, DMA_TO_DEVICE);
1045 1044
1046 evt = le16_to_cpu(last->transfer_status) & 0x1f; 1045 evt = le16_to_cpu(last->transfer_status) & 0x1f;
@@ -1697,6 +1696,10 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1697 if (packet->ack != 0) 1696 if (packet->ack != 0)
1698 goto out; 1697 goto out;
1699 1698
1699 if (packet->payload_bus)
1700 dma_unmap_single(ohci->card.device, packet->payload_bus,
1701 packet->payload_length, DMA_TO_DEVICE);
1702
1700 log_ar_at_event('T', packet->speed, packet->header, 0x20); 1703 log_ar_at_event('T', packet->speed, packet->header, 0x20);
1701 driver_data->packet = NULL; 1704 driver_data->packet = NULL;
1702 packet->ack = RCODE_CANCELLED; 1705 packet->ack = RCODE_CANCELLED;
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 022ac4fabb67..2884f876397b 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -207,6 +207,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
207 packet->speed = speed; 207 packet->speed = speed;
208 packet->generation = generation; 208 packet->generation = generation;
209 packet->ack = 0; 209 packet->ack = 0;
210 packet->payload_bus = 0;
210} 211}
211 212
212/** 213/**
@@ -581,6 +582,8 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
581 BUG(); 582 BUG();
582 return; 583 return;
583 } 584 }
585
586 response->payload_bus = 0;
584} 587}
585EXPORT_SYMBOL(fw_fill_response); 588EXPORT_SYMBOL(fw_fill_response);
586 589
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index aed7dbb17cda..839466f0a795 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -27,6 +27,7 @@
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/spinlock_types.h> 28#include <linux/spinlock_types.h>
29#include <linux/timer.h> 29#include <linux/timer.h>
30#include <linux/types.h>
30#include <linux/workqueue.h> 31#include <linux/workqueue.h>
31 32
32#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) 33#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
@@ -153,6 +154,7 @@ struct fw_packet {
153 size_t header_length; 154 size_t header_length;
154 void *payload; 155 void *payload;
155 size_t payload_length; 156 size_t payload_length;
157 dma_addr_t payload_bus;
156 u32 timestamp; 158 u32 timestamp;
157 159
158 /* 160 /*
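The firewire hunks store the streaming DMA address in struct fw_packet itself (the new payload_bus member) so that both the AT-completion path and the new cancellation path can unmap the payload instead of re-reading the address from a descriptor. A hedged sketch of the same bookkeeping with a made-up structure, not the fw-ohci code:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Keep the DMA handle next to the buffer it maps so that every exit
 * path (completion, cancellation, error) can unmap it; 0 means
 * "nothing mapped", as in the hunk above. */
struct example_packet {
	void		*payload;
	size_t		payload_length;
	dma_addr_t	payload_bus;
};

static int example_map(struct device *dev, struct example_packet *p)
{
	p->payload_bus = dma_map_single(dev, p->payload,
					p->payload_length, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, p->payload_bus)) {
		p->payload_bus = 0;
		return -ENOMEM;
	}
	return 0;
}

static void example_unmap(struct device *dev, struct example_packet *p)
{
	if (p->payload_bus) {
		dma_unmap_single(dev, p->payload_bus,
				 p->payload_length, DMA_TO_DEVICE);
		p->payload_bus = 0;
	}
}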
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ba89b42f790a..553dd4bc3075 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -847,9 +847,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
847 * and the registers being closely associated. 847 * and the registers being closely associated.
848 * 848 *
849 * According to chipset errata, on the 965GM, MSI interrupts may 849 * According to chipset errata, on the 965GM, MSI interrupts may
850 * be lost or delayed 850 * be lost or delayed, but we use them anyways to avoid
851 * stuck interrupts on some machines.
851 */ 852 */
852 if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev)) 853 if (!IS_I945G(dev) && !IS_I945GM(dev))
853 pci_enable_msi(dev->pdev); 854 pci_enable_msi(dev->pdev);
854 855
855 intel_opregion_init(dev); 856 intel_opregion_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4f39b9a0ec..adc972cc6bfc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -244,6 +244,10 @@ typedef struct drm_i915_private {
244 * List of objects currently involved in rendering from the 244 * List of objects currently involved in rendering from the
245 * ringbuffer. 245 * ringbuffer.
246 * 246 *
247 * Includes buffers having the contents of their GPU caches
248 * flushed, not necessarily primitives. last_rendering_seqno
249 * represents when the rendering involved will be completed.
250 *
247 * A reference is held on the buffer while on this list. 251 * A reference is held on the buffer while on this list.
248 */ 252 */
249 struct list_head active_list; 253 struct list_head active_list;
@@ -253,6 +257,8 @@ typedef struct drm_i915_private {
253 * still have a write_domain which needs to be flushed before 257 * still have a write_domain which needs to be flushed before
254 * unbinding. 258 * unbinding.
255 * 259 *
260 * last_rendering_seqno is 0 while an object is in this list.
261 *
256 * A reference is held on the buffer while on this list. 262 * A reference is held on the buffer while on this list.
257 */ 263 */
258 struct list_head flushing_list; 264 struct list_head flushing_list;
@@ -261,6 +267,8 @@ typedef struct drm_i915_private {
261 * LRU list of objects which are not in the ringbuffer and 267 * LRU list of objects which are not in the ringbuffer and
262 * are ready to unbind, but are still in the GTT. 268 * are ready to unbind, but are still in the GTT.
263 * 269 *
270 * last_rendering_seqno is 0 while an object is in this list.
271 *
264 * A reference is not held on the buffer while on this list, 272 * A reference is not held on the buffer while on this list,
265 * as merely being GTT-bound shouldn't prevent its being 273 * as merely being GTT-bound shouldn't prevent its being
266 * freed, and we'll pull it off the list in the free path. 274 * freed, and we'll pull it off the list in the free path.
@@ -371,8 +379,8 @@ struct drm_i915_gem_object {
371 uint32_t agp_type; 379 uint32_t agp_type;
372 380
373 /** 381 /**
374 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when 382 * If present, while GEM_DOMAIN_CPU is in the read domain this array
375 * GEM_DOMAIN_CPU is not in the object's read domain. 383 * flags which individual pages are valid.
376 */ 384 */
377 uint8_t *page_cpu_valid; 385 uint8_t *page_cpu_valid;
378}; 386};
@@ -394,9 +402,6 @@ struct drm_i915_gem_request {
394 /** Time at which this request was emitted, in jiffies. */ 402 /** Time at which this request was emitted, in jiffies. */
395 unsigned long emitted_jiffies; 403 unsigned long emitted_jiffies;
396 404
397 /** Cache domains that were flushed at the start of the request. */
398 uint32_t flush_domains;
399
400 struct list_head list; 405 struct list_head list;
401}; 406};
402 407
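The comments added to i915_drv.h describe an invariant: a GEM object sits on exactly one of the active, flushing, or inactive lists, and last_rendering_seqno is nonzero only while it is on the active list. A schematic C sketch of that state, simplified and not the driver's real structures:

#include <stdint.h>

/* Simplified model of the object states described above. */
enum example_gem_state {
	EXAMPLE_ACTIVE,		/* on active_list: GPU work outstanding,
				 * last_rendering_seqno = request seqno    */
	EXAMPLE_FLUSHING,	/* on flushing_list: request retired, GPU
				 * write cache still to flush, seqno == 0  */
	EXAMPLE_INACTIVE,	/* on inactive_list: idle and GTT-bound,
				 * evictable, seqno == 0                   */
};

struct example_gem_object {
	enum example_gem_state	state;
	uint32_t		last_rendering_seqno;	/* 0 unless ACTIVE */
};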
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d58ddef468f8..ad672d854828 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -33,21 +33,21 @@
33 33
34#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 34#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
35 35
36static int 36static void
37i915_gem_object_set_domain(struct drm_gem_object *obj, 37i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
38 uint32_t read_domains, 38 uint32_t read_domains,
39 uint32_t write_domain); 39 uint32_t write_domain);
40static int 40static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
41i915_gem_object_set_domain_range(struct drm_gem_object *obj, 41static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42 uint64_t offset, 42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
43 uint64_t size, 43static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
44 uint32_t read_domains, 44 int write);
45 uint32_t write_domain); 45static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
46static int 46 int write);
47i915_gem_set_domain(struct drm_gem_object *obj, 47static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
48 struct drm_file *file_priv, 48 uint64_t offset,
49 uint32_t read_domains, 49 uint64_t size);
50 uint32_t write_domain); 50static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
51static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 51static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
52static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 52static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
53static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 53static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -162,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
162 162
163 mutex_lock(&dev->struct_mutex); 163 mutex_lock(&dev->struct_mutex);
164 164
165 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, 165 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
166 I915_GEM_DOMAIN_CPU, 0); 166 args->size);
167 if (ret != 0) { 167 if (ret != 0) {
168 drm_gem_object_unreference(obj); 168 drm_gem_object_unreference(obj);
169 mutex_unlock(&dev->struct_mutex); 169 mutex_unlock(&dev->struct_mutex);
@@ -260,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
260 mutex_unlock(&dev->struct_mutex); 260 mutex_unlock(&dev->struct_mutex);
261 return ret; 261 return ret;
262 } 262 }
263 ret = i915_gem_set_domain(obj, file_priv, 263 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
264 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
265 if (ret) 264 if (ret)
266 goto fail; 265 goto fail;
267 266
@@ -320,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
320 319
321 mutex_lock(&dev->struct_mutex); 320 mutex_lock(&dev->struct_mutex);
322 321
323 ret = i915_gem_set_domain(obj, file_priv, 322 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
324 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
325 if (ret) { 323 if (ret) {
326 mutex_unlock(&dev->struct_mutex); 324 mutex_unlock(&dev->struct_mutex);
327 return ret; 325 return ret;
@@ -397,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
397} 395}
398 396
399/** 397/**
400 * Called when user space prepares to use an object 398 * Called when user space prepares to use an object with the CPU, either
399 * through the mmap ioctl's mapping or a GTT mapping.
401 */ 400 */
402int 401int
403i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 402i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -405,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
405{ 404{
406 struct drm_i915_gem_set_domain *args = data; 405 struct drm_i915_gem_set_domain *args = data;
407 struct drm_gem_object *obj; 406 struct drm_gem_object *obj;
407 uint32_t read_domains = args->read_domains;
408 uint32_t write_domain = args->write_domain;
408 int ret; 409 int ret;
409 410
410 if (!(dev->driver->driver_features & DRIVER_GEM)) 411 if (!(dev->driver->driver_features & DRIVER_GEM))
411 return -ENODEV; 412 return -ENODEV;
412 413
414 /* Only handle setting domains to types used by the CPU. */
415 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
416 return -EINVAL;
417
418 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
419 return -EINVAL;
420
421 /* Having something in the write domain implies it's in the read
422 * domain, and only that read domain. Enforce that in the request.
423 */
424 if (write_domain != 0 && read_domains != write_domain)
425 return -EINVAL;
426
413 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 427 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
414 if (obj == NULL) 428 if (obj == NULL)
415 return -EBADF; 429 return -EBADF;
@@ -417,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
417 mutex_lock(&dev->struct_mutex); 431 mutex_lock(&dev->struct_mutex);
418#if WATCH_BUF 432#if WATCH_BUF
419 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", 433 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
420 obj, obj->size, args->read_domains, args->write_domain); 434 obj, obj->size, read_domains, write_domain);
421#endif 435#endif
422 ret = i915_gem_set_domain(obj, file_priv, 436 if (read_domains & I915_GEM_DOMAIN_GTT) {
423 args->read_domains, args->write_domain); 437 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
438
439 /* Silently promote "you're not bound, there was nothing to do"
440 * to success, since the client was just asking us to
441 * make sure everything was done.
442 */
443 if (ret == -EINVAL)
444 ret = 0;
445 } else {
446 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
447 }
448
424 drm_gem_object_unreference(obj); 449 drm_gem_object_unreference(obj);
425 mutex_unlock(&dev->struct_mutex); 450 mutex_unlock(&dev->struct_mutex);
426 return ret; 451 return ret;
@@ -455,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
455 obj_priv = obj->driver_private; 480 obj_priv = obj->driver_private;
456 481
457 /* Pinned buffers may be scanout, so flush the cache */ 482 /* Pinned buffers may be scanout, so flush the cache */
458 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { 483 if (obj_priv->pin_count)
459 i915_gem_clflush_object(obj); 484 i915_gem_object_flush_cpu_write_domain(obj);
460 drm_agp_chipset_flush(dev); 485
461 }
462 drm_gem_object_unreference(obj); 486 drm_gem_object_unreference(obj);
463 mutex_unlock(&dev->struct_mutex); 487 mutex_unlock(&dev->struct_mutex);
464 return ret; 488 return ret;
@@ -532,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
532} 556}
533 557
534static void 558static void
535i915_gem_object_move_to_active(struct drm_gem_object *obj) 559i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
536{ 560{
537 struct drm_device *dev = obj->dev; 561 struct drm_device *dev = obj->dev;
538 drm_i915_private_t *dev_priv = dev->dev_private; 562 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -546,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
546 /* Move from whatever list we were on to the tail of execution. */ 570 /* Move from whatever list we were on to the tail of execution. */
547 list_move_tail(&obj_priv->list, 571 list_move_tail(&obj_priv->list,
548 &dev_priv->mm.active_list); 572 &dev_priv->mm.active_list);
573 obj_priv->last_rendering_seqno = seqno;
549} 574}
550 575
576static void
577i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
578{
579 struct drm_device *dev = obj->dev;
580 drm_i915_private_t *dev_priv = dev->dev_private;
581 struct drm_i915_gem_object *obj_priv = obj->driver_private;
582
583 BUG_ON(!obj_priv->active);
584 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
585 obj_priv->last_rendering_seqno = 0;
586}
551 587
552static void 588static void
553i915_gem_object_move_to_inactive(struct drm_gem_object *obj) 589i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -562,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
562 else 598 else
563 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 599 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
564 600
601 obj_priv->last_rendering_seqno = 0;
565 if (obj_priv->active) { 602 if (obj_priv->active) {
566 obj_priv->active = 0; 603 obj_priv->active = 0;
567 drm_gem_object_unreference(obj); 604 drm_gem_object_unreference(obj);
@@ -610,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
610 647
611 request->seqno = seqno; 648 request->seqno = seqno;
612 request->emitted_jiffies = jiffies; 649 request->emitted_jiffies = jiffies;
613 request->flush_domains = flush_domains;
614 was_empty = list_empty(&dev_priv->mm.request_list); 650 was_empty = list_empty(&dev_priv->mm.request_list);
615 list_add_tail(&request->list, &dev_priv->mm.request_list); 651 list_add_tail(&request->list, &dev_priv->mm.request_list);
616 652
653 /* Associate any objects on the flushing list matching the write
654 * domain we're flushing with our flush.
655 */
656 if (flush_domains != 0) {
657 struct drm_i915_gem_object *obj_priv, *next;
658
659 list_for_each_entry_safe(obj_priv, next,
660 &dev_priv->mm.flushing_list, list) {
661 struct drm_gem_object *obj = obj_priv->obj;
662
663 if ((obj->write_domain & flush_domains) ==
664 obj->write_domain) {
665 obj->write_domain = 0;
666 i915_gem_object_move_to_active(obj, seqno);
667 }
668 }
669
670 }
671
617 if (was_empty && !dev_priv->mm.suspended) 672 if (was_empty && !dev_priv->mm.suspended)
618 schedule_delayed_work(&dev_priv->mm.retire_work, HZ); 673 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
619 return seqno; 674 return seqno;
@@ -676,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
676 __func__, request->seqno, obj); 731 __func__, request->seqno, obj);
677#endif 732#endif
678 733
679 if (obj->write_domain != 0) { 734 if (obj->write_domain != 0)
680 list_move_tail(&obj_priv->list, 735 i915_gem_object_move_to_flushing(obj);
681 &dev_priv->mm.flushing_list); 736 else
682 } else {
683 i915_gem_object_move_to_inactive(obj); 737 i915_gem_object_move_to_inactive(obj);
684 }
685 }
686
687 if (request->flush_domains != 0) {
688 struct drm_i915_gem_object *obj_priv, *next;
689
690 /* Clear the write domain and activity from any buffers
691 * that are just waiting for a flush matching the one retired.
692 */
693 list_for_each_entry_safe(obj_priv, next,
694 &dev_priv->mm.flushing_list, list) {
695 struct drm_gem_object *obj = obj_priv->obj;
696
697 if (obj->write_domain & request->flush_domains) {
698 obj->write_domain = 0;
699 i915_gem_object_move_to_inactive(obj);
700 }
701 }
702
703 } 738 }
704} 739}
705 740
@@ -892,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
892 struct drm_i915_gem_object *obj_priv = obj->driver_private; 927 struct drm_i915_gem_object *obj_priv = obj->driver_private;
893 int ret; 928 int ret;
894 929
895 /* If there are writes queued to the buffer, flush and 930 /* This function only exists to support waiting for existing rendering,
896 * create a new seqno to wait for. 931 * not for emitting required flushes.
897 */ 932 */
898 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { 933 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
899 uint32_t write_domain = obj->write_domain;
900#if WATCH_BUF
901 DRM_INFO("%s: flushing object %p from write domain %08x\n",
902 __func__, obj, write_domain);
903#endif
904 i915_gem_flush(dev, 0, write_domain);
905
906 i915_gem_object_move_to_active(obj);
907 obj_priv->last_rendering_seqno = i915_add_request(dev,
908 write_domain);
909 BUG_ON(obj_priv->last_rendering_seqno == 0);
910#if WATCH_LRU
911 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
912#endif
913 }
914 934
915 /* If there is rendering queued on the buffer being evicted, wait for 935 /* If there is rendering queued on the buffer being evicted, wait for
916 * it. 936 * it.
@@ -950,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
950 return -EINVAL; 970 return -EINVAL;
951 } 971 }
952 972
953 /* Wait for any rendering to complete
954 */
955 ret = i915_gem_object_wait_rendering(obj);
956 if (ret) {
957 DRM_ERROR("wait_rendering failed: %d\n", ret);
958 return ret;
959 }
960
961 /* Move the object to the CPU domain to ensure that 973 /* Move the object to the CPU domain to ensure that
962 * any possible CPU writes while it's not in the GTT 974 * any possible CPU writes while it's not in the GTT
963 * are flushed when we go to remap it. This will 975 * are flushed when we go to remap it. This will
964 * also ensure that all pending GPU writes are finished 976 * also ensure that all pending GPU writes are finished
965 * before we unbind. 977 * before we unbind.
966 */ 978 */
967 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, 979 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
968 I915_GEM_DOMAIN_CPU);
969 if (ret) { 980 if (ret) {
970 DRM_ERROR("set_domain failed: %d\n", ret); 981 if (ret != -ERESTARTSYS)
982 DRM_ERROR("set_domain failed: %d\n", ret);
971 return ret; 983 return ret;
972 } 984 }
973 985
@@ -1083,6 +1095,21 @@ i915_gem_evict_something(struct drm_device *dev)
1083} 1095}
1084 1096
1085static int 1097static int
1098i915_gem_evict_everything(struct drm_device *dev)
1099{
1100 int ret;
1101
1102 for (;;) {
1103 ret = i915_gem_evict_something(dev);
1104 if (ret != 0)
1105 break;
1106 }
1107 if (ret == -ENOMEM)
1108 return 0;
1109 return ret;
1110}
1111
1112static int
1086i915_gem_object_get_page_list(struct drm_gem_object *obj) 1113i915_gem_object_get_page_list(struct drm_gem_object *obj)
1087{ 1114{
1088 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1115 struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1168,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1168 1195
1169 ret = i915_gem_evict_something(dev); 1196 ret = i915_gem_evict_something(dev);
1170 if (ret != 0) { 1197 if (ret != 0) {
1171 DRM_ERROR("Failed to evict a buffer %d\n", ret); 1198 if (ret != -ERESTARTSYS)
1199 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1172 return ret; 1200 return ret;
1173 } 1201 }
1174 goto search_free; 1202 goto search_free;
@@ -1228,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1228 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 1256 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1229} 1257}
1230 1258
1259/** Flushes any GPU write domain for the object if it's dirty. */
1260static void
1261i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
1262{
1263 struct drm_device *dev = obj->dev;
1264 uint32_t seqno;
1265
1266 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
1267 return;
1268
1269 /* Queue the GPU write cache flushing we need. */
1270 i915_gem_flush(dev, 0, obj->write_domain);
1271 seqno = i915_add_request(dev, obj->write_domain);
1272 obj->write_domain = 0;
1273 i915_gem_object_move_to_active(obj, seqno);
1274}
1275
1276/** Flushes the GTT write domain for the object if it's dirty. */
1277static void
1278i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
1279{
1280 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
1281 return;
1282
1283 /* No actual flushing is required for the GTT write domain. Writes
1284 * to it immediately go to main memory as far as we know, so there's
1285 * no chipset flush. It also doesn't land in render cache.
1286 */
1287 obj->write_domain = 0;
1288}
1289
1290/** Flushes the CPU write domain for the object if it's dirty. */
1291static void
1292i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
1293{
1294 struct drm_device *dev = obj->dev;
1295
1296 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
1297 return;
1298
1299 i915_gem_clflush_object(obj);
1300 drm_agp_chipset_flush(dev);
1301 obj->write_domain = 0;
1302}
1303
1304/**
1305 * Moves a single object to the GTT read, and possibly write domain.
1306 *
1307 * This function returns when the move is complete, including waiting on
1308 * flushes to occur.
1309 */
1310static int
1311i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1312{
1313 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1314 int ret;
1315
1316 /* Not valid to be called on unbound objects. */
1317 if (obj_priv->gtt_space == NULL)
1318 return -EINVAL;
1319
1320 i915_gem_object_flush_gpu_write_domain(obj);
1321 /* Wait on any GPU rendering and flushing to occur. */
1322 ret = i915_gem_object_wait_rendering(obj);
1323 if (ret != 0)
1324 return ret;
1325
1326 /* If we're writing through the GTT domain, then CPU and GPU caches
1327 * will need to be invalidated at next use.
1328 */
1329 if (write)
1330 obj->read_domains &= I915_GEM_DOMAIN_GTT;
1331
1332 i915_gem_object_flush_cpu_write_domain(obj);
1333
1334 /* It should now be out of any other write domains, and we can update
1335 * the domain values for our changes.
1336 */
1337 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
1338 obj->read_domains |= I915_GEM_DOMAIN_GTT;
1339 if (write) {
1340 obj->write_domain = I915_GEM_DOMAIN_GTT;
1341 obj_priv->dirty = 1;
1342 }
1343
1344 return 0;
1345}
1346
1347/**
1348 * Moves a single object to the CPU read, and possibly write domain.
1349 *
1350 * This function returns when the move is complete, including waiting on
1351 * flushes to occur.
1352 */
1353static int
1354i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1355{
1356 struct drm_device *dev = obj->dev;
1357 int ret;
1358
1359 i915_gem_object_flush_gpu_write_domain(obj);
1360 /* Wait on any GPU rendering and flushing to occur. */
1361 ret = i915_gem_object_wait_rendering(obj);
1362 if (ret != 0)
1363 return ret;
1364
1365 i915_gem_object_flush_gtt_write_domain(obj);
1366
1367 /* If we have a partially-valid cache of the object in the CPU,
1368 * finish invalidating it and free the per-page flags.
1369 */
1370 i915_gem_object_set_to_full_cpu_read_domain(obj);
1371
1372 /* Flush the CPU cache if it's still invalid. */
1373 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1374 i915_gem_clflush_object(obj);
1375 drm_agp_chipset_flush(dev);
1376
1377 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1378 }
1379
1380 /* It should now be out of any other write domains, and we can update
1381 * the domain values for our changes.
1382 */
1383 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1384
1385 /* If we're writing through the CPU, then the GPU read domains will
1386 * need to be invalidated at next use.
1387 */
1388 if (write) {
1389 obj->read_domains &= I915_GEM_DOMAIN_CPU;
1390 obj->write_domain = I915_GEM_DOMAIN_CPU;
1391 }
1392
1393 return 0;
1394}
1395
1231/* 1396/*
1232 * Set the next domain for the specified object. This 1397 * Set the next domain for the specified object. This
1233 * may not actually perform the necessary flushing/invaliding though, 1398 * may not actually perform the necessary flushing/invaliding though,
@@ -1339,16 +1504,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1339 * MI_FLUSH 1504 * MI_FLUSH
1340 * drm_agp_chipset_flush 1505 * drm_agp_chipset_flush
1341 */ 1506 */
1342static int 1507static void
1343i915_gem_object_set_domain(struct drm_gem_object *obj, 1508i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
1344 uint32_t read_domains, 1509 uint32_t read_domains,
1345 uint32_t write_domain) 1510 uint32_t write_domain)
1346{ 1511{
1347 struct drm_device *dev = obj->dev; 1512 struct drm_device *dev = obj->dev;
1348 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1513 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1349 uint32_t invalidate_domains = 0; 1514 uint32_t invalidate_domains = 0;
1350 uint32_t flush_domains = 0; 1515 uint32_t flush_domains = 0;
1351 int ret; 1516
1517 BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
1518 BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
1352 1519
1353#if WATCH_BUF 1520#if WATCH_BUF
1354 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", 1521 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1385,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
1385 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", 1552 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1386 __func__, flush_domains, invalidate_domains); 1553 __func__, flush_domains, invalidate_domains);
1387#endif 1554#endif
1388 /*
1389 * If we're invaliding the CPU cache and flushing a GPU cache,
1390 * then pause for rendering so that the GPU caches will be
1391 * flushed before the cpu cache is invalidated
1392 */
1393 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1394 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1395 I915_GEM_DOMAIN_GTT))) {
1396 ret = i915_gem_object_wait_rendering(obj);
1397 if (ret)
1398 return ret;
1399 }
1400 i915_gem_clflush_object(obj); 1555 i915_gem_clflush_object(obj);
1401 } 1556 }
1402 1557
1403 if ((write_domain | flush_domains) != 0) 1558 if ((write_domain | flush_domains) != 0)
1404 obj->write_domain = write_domain; 1559 obj->write_domain = write_domain;
1405
1406 /* If we're invalidating the CPU domain, clear the per-page CPU
1407 * domain list as well.
1408 */
1409 if (obj_priv->page_cpu_valid != NULL &&
1410 (write_domain != 0 ||
1411 read_domains & I915_GEM_DOMAIN_CPU)) {
1412 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1413 DRM_MEM_DRIVER);
1414 obj_priv->page_cpu_valid = NULL;
1415 }
1416 obj->read_domains = read_domains; 1560 obj->read_domains = read_domains;
1417 1561
1418 dev->invalidate_domains |= invalidate_domains; 1562 dev->invalidate_domains |= invalidate_domains;
@@ -1423,47 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
1423 obj->read_domains, obj->write_domain, 1567 obj->read_domains, obj->write_domain,
1424 dev->invalidate_domains, dev->flush_domains); 1568 dev->invalidate_domains, dev->flush_domains);
1425#endif 1569#endif
1426 return 0;
1427} 1570}
1428 1571
1429/** 1572/**
1430 * Set the read/write domain on a range of the object. 1573 * Moves the object from a partially CPU read to a full one.
1431 * 1574 *
1432 * Currently only implemented for CPU reads, otherwise drops to normal 1575 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
1433 * i915_gem_object_set_domain(). 1576 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
1434 */ 1577 */
1435static int 1578static void
1436i915_gem_object_set_domain_range(struct drm_gem_object *obj, 1579i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
1437 uint64_t offset,
1438 uint64_t size,
1439 uint32_t read_domains,
1440 uint32_t write_domain)
1441{ 1580{
1581 struct drm_device *dev = obj->dev;
1442 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1582 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1443 int ret, i;
1444 1583
1445 if (obj->read_domains & I915_GEM_DOMAIN_CPU) 1584 if (!obj_priv->page_cpu_valid)
1446 return 0; 1585 return;
1447 1586
1448 if (read_domains != I915_GEM_DOMAIN_CPU || 1587 /* If we're partially in the CPU read domain, finish moving it in.
1449 write_domain != 0) 1588 */
1450 return i915_gem_object_set_domain(obj, 1589 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
1451 read_domains, write_domain); 1590 int i;
1452 1591
1453 /* Wait on any GPU rendering to the object to be flushed. */ 1592 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
1593 if (obj_priv->page_cpu_valid[i])
1594 continue;
1595 drm_clflush_pages(obj_priv->page_list + i, 1);
1596 }
1597 drm_agp_chipset_flush(dev);
1598 }
1599
1600 /* Free the page_cpu_valid mappings which are now stale, whether
1601 * or not we've got I915_GEM_DOMAIN_CPU.
1602 */
1603 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1604 DRM_MEM_DRIVER);
1605 obj_priv->page_cpu_valid = NULL;
1606}
1607
1608/**
1609 * Set the CPU read domain on a range of the object.
1610 *
1611 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
1612 * not entirely valid. The page_cpu_valid member of the object flags which
1613 * pages have been flushed, and will be respected by
1614 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
1615 * of the whole object.
1616 *
1617 * This function returns when the move is complete, including waiting on
1618 * flushes to occur.
1619 */
1620static int
1621i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
1622 uint64_t offset, uint64_t size)
1623{
1624 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1625 int i, ret;
1626
1627 if (offset == 0 && size == obj->size)
1628 return i915_gem_object_set_to_cpu_domain(obj, 0);
1629
1630 i915_gem_object_flush_gpu_write_domain(obj);
1631 /* Wait on any GPU rendering and flushing to occur. */
1454 ret = i915_gem_object_wait_rendering(obj); 1632 ret = i915_gem_object_wait_rendering(obj);
1455 if (ret) 1633 if (ret != 0)
1456 return ret; 1634 return ret;
1635 i915_gem_object_flush_gtt_write_domain(obj);
1457 1636
1637 /* If we're already fully in the CPU read domain, we're done. */
1638 if (obj_priv->page_cpu_valid == NULL &&
1639 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
1640 return 0;
1641
1642 /* Otherwise, create/clear the per-page CPU read domain flag if we're
1643 * newly adding I915_GEM_DOMAIN_CPU
1644 */
1458 if (obj_priv->page_cpu_valid == NULL) { 1645 if (obj_priv->page_cpu_valid == NULL) {
1459 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, 1646 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1460 DRM_MEM_DRIVER); 1647 DRM_MEM_DRIVER);
1461 } 1648 if (obj_priv->page_cpu_valid == NULL)
1649 return -ENOMEM;
1650 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
1651 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
1462 1652
1463 /* Flush the cache on any pages that are still invalid from the CPU's 1653 /* Flush the cache on any pages that are still invalid from the CPU's
1464 * perspective. 1654 * perspective.
1465 */ 1655 */
1466 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { 1656 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
1657 i++) {
1467 if (obj_priv->page_cpu_valid[i]) 1658 if (obj_priv->page_cpu_valid[i])
1468 continue; 1659 continue;
1469 1660
@@ -1472,39 +1663,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1472 obj_priv->page_cpu_valid[i] = 1; 1663 obj_priv->page_cpu_valid[i] = 1;
1473 } 1664 }
1474 1665
1475 return 0; 1666 /* It should now be out of any other write domains, and we can update
1476} 1667 * the domain values for our changes.
1477
1478/**
1479 * Once all of the objects have been set in the proper domain,
1480 * perform the necessary flush and invalidate operations.
1481 *
1482 * Returns the write domains flushed, for use in flush tracking.
1483 */
1484static uint32_t
1485i915_gem_dev_set_domain(struct drm_device *dev)
1486{
1487 uint32_t flush_domains = dev->flush_domains;
1488
1489 /*
1490 * Now that all the buffers are synced to the proper domains,
1491 * flush and invalidate the collected domains
1492 */ 1668 */
1493 if (dev->invalidate_domains | dev->flush_domains) { 1669 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1494#if WATCH_EXEC
1495 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1496 __func__,
1497 dev->invalidate_domains,
1498 dev->flush_domains);
1499#endif
1500 i915_gem_flush(dev,
1501 dev->invalidate_domains,
1502 dev->flush_domains);
1503 dev->invalidate_domains = 0;
1504 dev->flush_domains = 0;
1505 }
1506 1670
1507 return flush_domains; 1671 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1672
1673 return 0;
1508} 1674}
1509 1675
1510/** 1676/**
@@ -1585,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1585 return -EINVAL; 1751 return -EINVAL;
1586 } 1752 }
1587 1753
1754 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
1755 reloc.read_domains & I915_GEM_DOMAIN_CPU) {
1756 DRM_ERROR("reloc with read/write CPU domains: "
1757 "obj %p target %d offset %d "
1758 "read %08x write %08x",
1759 obj, reloc.target_handle,
1760 (int) reloc.offset,
1761 reloc.read_domains,
1762 reloc.write_domain);
1763 return -EINVAL;
1764 }
1765
1588 if (reloc.write_domain && target_obj->pending_write_domain && 1766 if (reloc.write_domain && target_obj->pending_write_domain &&
1589 reloc.write_domain != target_obj->pending_write_domain) { 1767 reloc.write_domain != target_obj->pending_write_domain) {
1590 DRM_ERROR("Write domain conflict: " 1768 DRM_ERROR("Write domain conflict: "
@@ -1625,19 +1803,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1625 continue; 1803 continue;
1626 } 1804 }
1627 1805
1628 /* Now that we're going to actually write some data in, 1806 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1629 * make sure that any rendering using this buffer's contents 1807 if (ret != 0) {
1630 * is completed. 1808 drm_gem_object_unreference(target_obj);
1631 */ 1809 i915_gem_object_unpin(obj);
1632 i915_gem_object_wait_rendering(obj); 1810 return -EINVAL;
1633
1634 /* As we're writing through the gtt, flush
1635 * any CPU writes before we write the relocations
1636 */
1637 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1638 i915_gem_clflush_object(obj);
1639 drm_agp_chipset_flush(dev);
1640 obj->write_domain = 0;
1641 } 1811 }
1642 1812
1643 /* Map the page containing the relocation we're going to 1813 /* Map the page containing the relocation we're going to
@@ -1779,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1779 int ret, i, pinned = 0; 1949 int ret, i, pinned = 0;
1780 uint64_t exec_offset; 1950 uint64_t exec_offset;
1781 uint32_t seqno, flush_domains; 1951 uint32_t seqno, flush_domains;
1952 int pin_tries;
1782 1953
1783#if WATCH_EXEC 1954#if WATCH_EXEC
1784 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 1955 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1827,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1827 return -EBUSY; 1998 return -EBUSY;
1828 } 1999 }
1829 2000
1830 /* Zero the gloabl flush/invalidate flags. These 2001 /* Look up object handles */
1831 * will be modified as each object is bound to the
1832 * gtt
1833 */
1834 dev->invalidate_domains = 0;
1835 dev->flush_domains = 0;
1836
1837 /* Look up object handles and perform the relocations */
1838 for (i = 0; i < args->buffer_count; i++) { 2002 for (i = 0; i < args->buffer_count; i++) {
1839 object_list[i] = drm_gem_object_lookup(dev, file_priv, 2003 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1840 exec_list[i].handle); 2004 exec_list[i].handle);
@@ -1844,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1844 ret = -EBADF; 2008 ret = -EBADF;
1845 goto err; 2009 goto err;
1846 } 2010 }
2011 }
1847 2012
1848 object_list[i]->pending_read_domains = 0; 2013 /* Pin and relocate */
1849 object_list[i]->pending_write_domain = 0; 2014 for (pin_tries = 0; ; pin_tries++) {
1850 ret = i915_gem_object_pin_and_relocate(object_list[i], 2015 ret = 0;
1851 file_priv, 2016 for (i = 0; i < args->buffer_count; i++) {
1852 &exec_list[i]); 2017 object_list[i]->pending_read_domains = 0;
1853 if (ret) { 2018 object_list[i]->pending_write_domain = 0;
1854 DRM_ERROR("object bind and relocate failed %d\n", ret); 2019 ret = i915_gem_object_pin_and_relocate(object_list[i],
2020 file_priv,
2021 &exec_list[i]);
2022 if (ret)
2023 break;
2024 pinned = i + 1;
2025 }
2026 /* success */
2027 if (ret == 0)
2028 break;
2029
2030 /* error other than GTT full, or we've already tried again */
2031 if (ret != -ENOMEM || pin_tries >= 1) {
2032 DRM_ERROR("Failed to pin buffers %d\n", ret);
1855 goto err; 2033 goto err;
1856 } 2034 }
1857 pinned = i + 1; 2035
2036 /* unpin all of our buffers */
2037 for (i = 0; i < pinned; i++)
2038 i915_gem_object_unpin(object_list[i]);
2039
2040 /* evict everyone we can from the aperture */
2041 ret = i915_gem_evict_everything(dev);
2042 if (ret)
2043 goto err;
1858 } 2044 }
1859 2045
1860 /* Set the pending read domains for the batch buffer to COMMAND */ 2046 /* Set the pending read domains for the batch buffer to COMMAND */
@@ -1864,21 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1864 2050
1865 i915_verify_inactive(dev, __FILE__, __LINE__); 2051 i915_verify_inactive(dev, __FILE__, __LINE__);
1866 2052
2053 /* Zero the global flush/invalidate flags. These
2054 * will be modified as new domains are computed
2055 * for each object
2056 */
2057 dev->invalidate_domains = 0;
2058 dev->flush_domains = 0;
2059
1867 for (i = 0; i < args->buffer_count; i++) { 2060 for (i = 0; i < args->buffer_count; i++) {
1868 struct drm_gem_object *obj = object_list[i]; 2061 struct drm_gem_object *obj = object_list[i];
1869 2062
1870 /* make sure all previous memory operations have passed */ 2063 /* Compute new gpu domains and update invalidate/flush */
1871 ret = i915_gem_object_set_domain(obj, 2064 i915_gem_object_set_to_gpu_domain(obj,
1872 obj->pending_read_domains, 2065 obj->pending_read_domains,
1873 obj->pending_write_domain); 2066 obj->pending_write_domain);
1874 if (ret)
1875 goto err;
1876 } 2067 }
1877 2068
1878 i915_verify_inactive(dev, __FILE__, __LINE__); 2069 i915_verify_inactive(dev, __FILE__, __LINE__);
1879 2070
1880 /* Flush/invalidate caches and chipset buffer */ 2071 if (dev->invalidate_domains | dev->flush_domains) {
1881 flush_domains = i915_gem_dev_set_domain(dev); 2072#if WATCH_EXEC
2073 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
2074 __func__,
2075 dev->invalidate_domains,
2076 dev->flush_domains);
2077#endif
2078 i915_gem_flush(dev,
2079 dev->invalidate_domains,
2080 dev->flush_domains);
2081 if (dev->flush_domains)
2082 (void)i915_add_request(dev, dev->flush_domains);
2083 }
1882 2084
1883 i915_verify_inactive(dev, __FILE__, __LINE__); 2085 i915_verify_inactive(dev, __FILE__, __LINE__);
1884 2086
@@ -1898,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1898 ~0); 2100 ~0);
1899#endif 2101#endif
1900 2102
1901 (void)i915_add_request(dev, flush_domains);
1902
1903 /* Exec the batchbuffer */ 2103 /* Exec the batchbuffer */
1904 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 2104 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1905 if (ret) { 2105 if (ret) {
@@ -1927,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1927 i915_file_priv->mm.last_gem_seqno = seqno; 2127 i915_file_priv->mm.last_gem_seqno = seqno;
1928 for (i = 0; i < args->buffer_count; i++) { 2128 for (i = 0; i < args->buffer_count; i++) {
1929 struct drm_gem_object *obj = object_list[i]; 2129 struct drm_gem_object *obj = object_list[i];
1930 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1931 2130
1932 i915_gem_object_move_to_active(obj); 2131 i915_gem_object_move_to_active(obj, seqno);
1933 obj_priv->last_rendering_seqno = seqno;
1934#if WATCH_LRU 2132#if WATCH_LRU
1935 DRM_INFO("%s: move to exec list %p\n", __func__, obj); 2133 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1936#endif 2134#endif
@@ -2061,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2061 /* XXX - flush the CPU caches for pinned objects 2259 /* XXX - flush the CPU caches for pinned objects
2062 * as the X server doesn't manage domains yet 2260 * as the X server doesn't manage domains yet
2063 */ 2261 */
2064 if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 2262 i915_gem_object_flush_cpu_write_domain(obj);
2065 i915_gem_clflush_object(obj);
2066 drm_agp_chipset_flush(dev);
2067 obj->write_domain = 0;
2068 }
2069 args->offset = obj_priv->gtt_offset; 2263 args->offset = obj_priv->gtt_offset;
2070 drm_gem_object_unreference(obj); 2264 drm_gem_object_unreference(obj);
2071 mutex_unlock(&dev->struct_mutex); 2265 mutex_unlock(&dev->struct_mutex);
@@ -2167,29 +2361,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
2167 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2361 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2168} 2362}
2169 2363
2170static int
2171i915_gem_set_domain(struct drm_gem_object *obj,
2172 struct drm_file *file_priv,
2173 uint32_t read_domains,
2174 uint32_t write_domain)
2175{
2176 struct drm_device *dev = obj->dev;
2177 int ret;
2178 uint32_t flush_domains;
2179
2180 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2181
2182 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2183 if (ret)
2184 return ret;
2185 flush_domains = i915_gem_dev_set_domain(obj->dev);
2186
2187 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2188 (void) i915_add_request(dev, flush_domains);
2189
2190 return 0;
2191}
2192
2193/** Unbinds all objects that are on the given buffer list. */ 2364/** Unbinds all objects that are on the given buffer list. */
2194static int 2365static int
2195i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) 2366i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
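The i915_gem.c hunks above replace the single-pass pin loop in i915_gem_execbuffer() with a retry: if pinning fails with -ENOMEM because the GTT is full, every buffer pinned so far is released, the aperture is evicted, and the whole list is pinned once more. Below is a minimal userspace sketch of that retry-then-evict pattern; try_pin() and evict_everything() are invented stand-ins for i915_gem_object_pin_and_relocate() and i915_gem_evict_everything(), not the kernel API.

        #include <errno.h>
        #include <stdio.h>

        static int gtt_evicted;

        /* Stand-in for pinning: the fake aperture only holds two buffers
         * until it has been evicted once. */
        static int try_pin(int idx)
        {
                if (!gtt_evicted && idx >= 2)
                        return -ENOMEM;
                return 0;
        }

        /* Stand-in for evicting everything from the aperture. */
        static int evict_everything(void)
        {
                gtt_evicted = 1;
                return 0;
        }

        int main(void)
        {
                int count = 4, pinned = 0, pin_tries, ret, i;

                for (pin_tries = 0; ; pin_tries++) {
                        ret = 0;
                        for (i = 0; i < count; i++) {
                                ret = try_pin(i);
                                if (ret)
                                        break;
                                pinned = i + 1;
                        }
                        if (ret == 0)
                                break;          /* every buffer pinned */
                        if (ret != -ENOMEM || pin_tries >= 1) {
                                fprintf(stderr, "pin failed: %d\n", ret);
                                return 1;
                        }
                        /* GTT full: drop what we pinned, evict, retry once. */
                        pinned = 0;
                        if (evict_everything())
                                return 1;
                }
                printf("pinned %d buffers after %d pass(es)\n",
                       pinned, pin_tries + 1);
                return 0;
        }

Capping the retry at one extra pass (pin_tries >= 1) mirrors the hunk above and keeps a persistently full aperture from looping forever.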
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 93de15b4c9a7..e8d5abe1250e 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
166 list_for_each_entry(gem_request, &dev_priv->mm.request_list, 166 list_for_each_entry(gem_request, &dev_priv->mm.request_list,
167 list) 167 list)
168 { 168 {
169 DRM_PROC_PRINT(" %d @ %d %08x\n", 169 DRM_PROC_PRINT(" %d @ %d\n",
170 gem_request->seqno, 170 gem_request->seqno,
171 (int) (jiffies - gem_request->emitted_jiffies), 171 (int) (jiffies - gem_request->emitted_jiffies));
172 gem_request->flush_domains);
173 } 172 }
174 if (len > request + offset) 173 if (len > request + offset)
175 return request; 174 return request;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e8b85ac4ca04..a8cb69469c64 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
119 dcc & DCC_CHANNEL_XOR_DISABLE) { 119 dcc & DCC_CHANNEL_XOR_DISABLE) {
120 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 120 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
121 swizzle_y = I915_BIT_6_SWIZZLE_9; 121 swizzle_y = I915_BIT_6_SWIZZLE_9;
122 } else if (IS_I965GM(dev) || IS_GM45(dev)) { 122 } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
123 /* GM965 only does bit 11-based channel 123 (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
124 * randomization 124 /* GM965/GM45 does either bit 11 or bit 17
125 * swizzling.
125 */ 126 */
126 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; 127 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
127 swizzle_y = I915_BIT_6_SWIZZLE_9_11; 128 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e476eba36e6..9d24aaeb8a45 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -522,6 +522,7 @@
522#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) 522#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
523#define DCC_ADDRESSING_MODE_MASK (3 << 0) 523#define DCC_ADDRESSING_MODE_MASK (3 << 0)
524#define DCC_CHANNEL_XOR_DISABLE (1 << 10) 524#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
525#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
525 526
526/** 965 MCH register controlling DRAM channel configuration */ 527/** 965 MCH register controlling DRAM channel configuration */
527#define C0DRB3 0x10206 528#define C0DRB3 0x10206
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 7a183789be97..3bbb871b25d5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -299,7 +299,6 @@ typedef struct drm_radeon_private {
299 atomic_t swi_emitted; 299 atomic_t swi_emitted;
300 int vblank_crtc; 300 int vblank_crtc;
301 uint32_t irq_enable_reg; 301 uint32_t irq_enable_reg;
302 int irq_enabled;
303 uint32_t r500_disp_irq_reg; 302 uint32_t r500_disp_irq_reg;
304 303
305 struct radeon_surface surfaces[RADEON_MAX_SURFACES]; 304 struct radeon_surface surfaces[RADEON_MAX_SURFACES];
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 97c0599fdb1e..99be11418ac2 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -44,7 +44,8 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
44 else 44 else
45 dev_priv->irq_enable_reg &= ~mask; 45 dev_priv->irq_enable_reg &= ~mask;
46 46
47 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); 47 if (!dev->irq_enabled)
48 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
48} 49}
49 50
50static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) 51static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
@@ -56,7 +57,8 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
56 else 57 else
57 dev_priv->r500_disp_irq_reg &= ~mask; 58 dev_priv->r500_disp_irq_reg &= ~mask;
58 59
59 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); 60 if (!dev->irq_enabled)
61 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
60} 62}
61 63
62int radeon_enable_vblank(struct drm_device *dev, int crtc) 64int radeon_enable_vblank(struct drm_device *dev, int crtc)
@@ -355,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
355 if (!dev_priv) 357 if (!dev_priv)
356 return; 358 return;
357 359
358 dev_priv->irq_enabled = 0;
359
360 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) 360 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
361 RADEON_WRITE(R500_DxMODE_INT_MASK, 0); 361 RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
362 /* Disable *all* interrupts */ 362 /* Disable *all* interrupts */
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 57630402ea67..1a00bb7db872 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -669,10 +669,12 @@ config BLK_DEV_CELLEB
669 669
670endif 670endif
671 671
672# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
672config BLK_DEV_IDE_PMAC 673config BLK_DEV_IDE_PMAC
673 tristate "PowerMac on-board IDE support" 674 tristate "PowerMac on-board IDE support"
674 depends on PPC_PMAC && IDE=y 675 depends on PPC_PMAC && IDE=y
675 select IDE_TIMINGS 676 select IDE_TIMINGS
677 select BLK_DEV_IDEDMA_PCI
676 help 678 help
677 This driver provides support for the on-board IDE controller on 679 This driver provides support for the on-board IDE controller on
678 most of the recent Apple Power Macintoshes and PowerBooks. 680 most of the recent Apple Power Macintoshes and PowerBooks.
@@ -689,16 +691,6 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
689 CD-ROM on hda. This option changes this to more natural hda for 691 CD-ROM on hda. This option changes this to more natural hda for
690 hard disk and hdc for CD-ROM. 692 hard disk and hdc for CD-ROM.
691 693
692config BLK_DEV_IDEDMA_PMAC
693 bool "PowerMac IDE DMA support"
694 depends on BLK_DEV_IDE_PMAC
695 select BLK_DEV_IDEDMA_PCI
696 help
697 This option allows the driver for the on-board IDE controller on
698 Power Macintoshes and PowerBooks to use DMA (direct memory access)
699 to transfer data to and from memory. Saying Y is safe and improves
700 performance.
701
702config BLK_DEV_IDE_AU1XXX 694config BLK_DEV_IDE_AU1XXX
703 bool "IDE for AMD Alchemy Au1200" 695 bool "IDE for AMD Alchemy Au1200"
704 depends on SOC_AU1200 696 depends on SOC_AU1200
@@ -912,7 +904,7 @@ config BLK_DEV_UMC8672
912endif 904endif
913 905
914config BLK_DEV_IDEDMA 906config BLK_DEV_IDEDMA
915 def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \ 907 def_bool BLK_DEV_IDEDMA_SFF || \
916 BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 908 BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
917 909
918endif # IDE 910endif # IDE
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index e56c7b72f9e2..45d2356bb725 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -591,7 +591,7 @@ static int __init ali15x3_ide_init(void)
591 591
592static void __exit ali15x3_ide_exit(void) 592static void __exit ali15x3_ide_exit(void)
593{ 593{
594 return pci_unregister_driver(&alim15x3_pci_driver); 594 pci_unregister_driver(&alim15x3_pci_driver);
595} 595}
596 596
597module_init(ali15x3_ide_init); 597module_init(ali15x3_ide_init);
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
index 81ec73134eda..c6bcd3014a29 100644
--- a/drivers/ide/amd74xx.c
+++ b/drivers/ide/amd74xx.c
@@ -3,7 +3,7 @@
3 * IDE driver for Linux. 3 * IDE driver for Linux.
4 * 4 *
5 * Copyright (c) 2000-2002 Vojtech Pavlik 5 * Copyright (c) 2000-2002 Vojtech Pavlik
6 * Copyright (c) 2007 Bartlomiej Zolnierkiewicz 6 * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz
7 * 7 *
8 * Based on the work of: 8 * Based on the work of:
9 * Andre Hedrick 9 * Andre Hedrick
@@ -263,6 +263,15 @@ static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_
263 d.udma_mask = ATA_UDMA5; 263 d.udma_mask = ATA_UDMA5;
264 } 264 }
265 265
266 /*
267 * It seems that on some nVidia controllers using AltStatus
268 * register can be unreliable so default to Status register
269 * if the device is in Compatibility Mode.
270 */
271 if (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
272 ide_pci_is_in_compatibility_mode(dev))
273 d.host_flags |= IDE_HFLAG_BROKEN_ALTSTATUS;
274
266 printk(KERN_INFO "%s %s: UDMA%s controller\n", 275 printk(KERN_INFO "%s %s: UDMA%s controller\n",
267 d.name, pci_name(dev), amd_dma[fls(d.udma_mask) - 1]); 276 d.name, pci_name(dev), amd_dma[fls(d.udma_mask) - 1]);
268 277
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 7162d67562af..cc35d6dbd410 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -132,10 +132,14 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
132} 132}
133EXPORT_SYMBOL(ide_end_request); 133EXPORT_SYMBOL(ide_end_request);
134 134
135static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error) 135static void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
136{ 136{
137 struct request_pm_state *pm = rq->data; 137 struct request_pm_state *pm = rq->data;
138 138
139#ifdef DEBUG_PM
140 printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
141 drive->name, pm->pm_step);
142#endif
139 if (drive->media != ide_disk) 143 if (drive->media != ide_disk)
140 return; 144 return;
141 145
@@ -172,7 +176,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
172 /* Not supported? Switch to next step now. */ 176 /* Not supported? Switch to next step now. */
173 if (ata_id_flush_enabled(drive->id) == 0 || 177 if (ata_id_flush_enabled(drive->id) == 0 ||
174 (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) { 178 (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
175 ide_complete_power_step(drive, rq, 0, 0); 179 ide_complete_power_step(drive, rq);
176 return ide_stopped; 180 return ide_stopped;
177 } 181 }
178 if (ata_id_flush_ext_enabled(drive->id)) 182 if (ata_id_flush_ext_enabled(drive->id))
@@ -191,7 +195,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
191 if (drive->media != ide_disk) 195 if (drive->media != ide_disk)
192 pm->pm_step = IDE_PM_RESTORE_DMA; 196 pm->pm_step = IDE_PM_RESTORE_DMA;
193 else 197 else
194 ide_complete_power_step(drive, rq, 0, 0); 198 ide_complete_power_step(drive, rq);
195 return ide_stopped; 199 return ide_stopped;
196 case IDE_PM_IDLE: /* Resume step 2 (idle) */ 200 case IDE_PM_IDLE: /* Resume step 2 (idle) */
197 args->tf.command = ATA_CMD_IDLEIMMEDIATE; 201 args->tf.command = ATA_CMD_IDLEIMMEDIATE;
@@ -322,11 +326,8 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
322 } 326 }
323 } else if (blk_pm_request(rq)) { 327 } else if (blk_pm_request(rq)) {
324 struct request_pm_state *pm = rq->data; 328 struct request_pm_state *pm = rq->data;
325#ifdef DEBUG_PM 329
326 printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n", 330 ide_complete_power_step(drive, rq);
327 drive->name, rq->pm->pm_step, stat, err);
328#endif
329 ide_complete_power_step(drive, rq, stat, err);
330 if (pm->pm_step == IDE_PM_COMPLETED) 331 if (pm->pm_step == IDE_PM_COMPLETED)
331 ide_complete_pm_request(drive, rq); 332 ide_complete_pm_request(drive, rq);
332 return; 333 return;
@@ -804,7 +805,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
804 struct request_pm_state *pm = rq->data; 805 struct request_pm_state *pm = rq->data;
805#ifdef DEBUG_PM 806#ifdef DEBUG_PM
806 printk("%s: start_power_step(step: %d)\n", 807 printk("%s: start_power_step(step: %d)\n",
807 drive->name, rq->pm->pm_step); 808 drive->name, pm->pm_step);
808#endif 809#endif
809 startstop = ide_start_power_step(drive, rq); 810 startstop = ide_start_power_step(drive, rq);
810 if (startstop == ide_stopped && 811 if (startstop == ide_stopped &&
@@ -967,14 +968,13 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
967 ide_startstop_t startstop; 968 ide_startstop_t startstop;
968 int loops = 0; 969 int loops = 0;
969 970
970 /* for atari only: POSSIBLY BROKEN HERE(?) */
971 ide_get_lock(ide_intr, hwgroup);
972
973 /* caller must own ide_lock */ 971 /* caller must own ide_lock */
974 BUG_ON(!irqs_disabled()); 972 BUG_ON(!irqs_disabled());
975 973
976 while (!hwgroup->busy) { 974 while (!hwgroup->busy) {
977 hwgroup->busy = 1; 975 hwgroup->busy = 1;
976 /* for atari only */
977 ide_get_lock(ide_intr, hwgroup);
978 drive = choose_drive(hwgroup); 978 drive = choose_drive(hwgroup);
979 if (drive == NULL) { 979 if (drive == NULL) {
980 int sleeping = 0; 980 int sleeping = 0;
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 5d6ba14e211d..c41c3b9b6f02 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -457,18 +457,14 @@ int drive_is_ready (ide_drive_t *drive)
457 if (drive->waiting_for_dma) 457 if (drive->waiting_for_dma)
458 return hwif->dma_ops->dma_test_irq(drive); 458 return hwif->dma_ops->dma_test_irq(drive);
459 459
460#if 0
461 /* need to guarantee 400ns since last command was issued */
462 udelay(1);
463#endif
464
465 /* 460 /*
466 * We do a passive status test under shared PCI interrupts on 461 * We do a passive status test under shared PCI interrupts on
467 * cards that truly share the ATA side interrupt, but may also share 462 * cards that truly share the ATA side interrupt, but may also share
468 * an interrupt with another pci card/device. We make no assumptions 463 * an interrupt with another pci card/device. We make no assumptions
469 * about possible isa-pnp and pci-pnp issues yet. 464 * about possible isa-pnp and pci-pnp issues yet.
470 */ 465 */
471 if (hwif->io_ports.ctl_addr) 466 if (hwif->io_ports.ctl_addr &&
467 (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
472 stat = hwif->tp_ops->read_altstatus(hwif); 468 stat = hwif->tp_ops->read_altstatus(hwif);
473 else 469 else
474 /* Note: this may clear a pending IRQ!! */ 470 /* Note: this may clear a pending IRQ!! */
@@ -610,6 +606,7 @@ static const struct drive_list_entry ivb_list[] = {
610 { "TSSTcorp CDDVDW SH-S202N" , "SB01" }, 606 { "TSSTcorp CDDVDW SH-S202N" , "SB01" },
611 { "TSSTcorp CDDVDW SH-S202H" , "SB00" }, 607 { "TSSTcorp CDDVDW SH-S202H" , "SB00" },
612 { "TSSTcorp CDDVDW SH-S202H" , "SB01" }, 608 { "TSSTcorp CDDVDW SH-S202H" , "SB01" },
609 { "SAMSUNG SP0822N" , "WA100-10" },
613 { NULL , NULL } 610 { NULL , NULL }
614}; 611};
615 612
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 1649ea54f76c..c55bdbd22314 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -266,7 +266,8 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
266 /* take a deep breath */ 266 /* take a deep breath */
267 msleep(50); 267 msleep(50);
268 268
269 if (io_ports->ctl_addr) { 269 if (io_ports->ctl_addr &&
270 (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) {
270 a = tp_ops->read_altstatus(hwif); 271 a = tp_ops->read_altstatus(hwif);
271 s = tp_ops->read_status(hwif); 272 s = tp_ops->read_status(hwif);
272 if ((a ^ s) & ~ATA_IDX) 273 if ((a ^ s) & ~ATA_IDX)
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 2e19d6298536..7c481bb56fab 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -66,7 +66,6 @@ typedef struct pmac_ide_hwif {
66 struct macio_dev *mdev; 66 struct macio_dev *mdev;
67 u32 timings[4]; 67 u32 timings[4];
68 volatile u32 __iomem * *kauai_fcr; 68 volatile u32 __iomem * *kauai_fcr;
69#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
70 /* Those fields are duplicating what is in hwif. We currently 69 /* Those fields are duplicating what is in hwif. We currently
71 * can't use the hwif ones because of some assumptions that are 70 * can't use the hwif ones because of some assumptions that are
72 * beeing done by the generic code about the kind of dma controller 71 * beeing done by the generic code about the kind of dma controller
@@ -74,8 +73,6 @@ typedef struct pmac_ide_hwif {
74 */ 73 */
75 volatile struct dbdma_regs __iomem * dma_regs; 74 volatile struct dbdma_regs __iomem * dma_regs;
76 struct dbdma_cmd* dma_table_cpu; 75 struct dbdma_cmd* dma_table_cpu;
77#endif
78
79} pmac_ide_hwif_t; 76} pmac_ide_hwif_t;
80 77
81enum { 78enum {
@@ -222,8 +219,6 @@ static const char* model_name[] = {
222#define KAUAI_FCR_UATA_RESET_N 0x00000002 219#define KAUAI_FCR_UATA_RESET_N 0x00000002
223#define KAUAI_FCR_UATA_ENABLE 0x00000001 220#define KAUAI_FCR_UATA_ENABLE 0x00000001
224 221
225#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
226
227/* Rounded Multiword DMA timings 222/* Rounded Multiword DMA timings
228 * 223 *
229 * I gave up finding a generic formula for all controller 224 * I gave up finding a generic formula for all controller
@@ -413,8 +408,6 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
413static void pmac_ide_selectproc(ide_drive_t *drive); 408static void pmac_ide_selectproc(ide_drive_t *drive);
414static void pmac_ide_kauai_selectproc(ide_drive_t *drive); 409static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
415 410
416#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
417
418#define PMAC_IDE_REG(x) \ 411#define PMAC_IDE_REG(x) \
419 ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x))) 412 ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
420 413
@@ -584,8 +577,6 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
584 pmac_ide_do_update_timings(drive); 577 pmac_ide_do_update_timings(drive);
585} 578}
586 579
587#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
588
589/* 580/*
590 * Calculate KeyLargo ATA/66 UDMA timings 581 * Calculate KeyLargo ATA/66 UDMA timings
591 */ 582 */
@@ -786,7 +777,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
786 drive->name, speed & 0xf, *timings); 777 drive->name, speed & 0xf, *timings);
787#endif 778#endif
788} 779}
789#endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
790 780
791static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed) 781static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
792{ 782{
@@ -804,7 +794,6 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
804 tl[0] = *timings; 794 tl[0] = *timings;
805 tl[1] = *timings2; 795 tl[1] = *timings2;
806 796
807#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
808 if (speed >= XFER_UDMA_0) { 797 if (speed >= XFER_UDMA_0) {
809 if (pmif->kind == controller_kl_ata4) 798 if (pmif->kind == controller_kl_ata4)
810 ret = set_timings_udma_ata4(&tl[0], speed); 799 ret = set_timings_udma_ata4(&tl[0], speed);
@@ -817,7 +806,7 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
817 ret = -1; 806 ret = -1;
818 } else 807 } else
819 set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed); 808 set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
820#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 809
821 if (ret) 810 if (ret)
822 return; 811 return;
823 812
@@ -1008,9 +997,7 @@ static const struct ide_port_info pmac_port_info = {
1008 .chipset = ide_pmac, 997 .chipset = ide_pmac,
1009 .tp_ops = &pmac_tp_ops, 998 .tp_ops = &pmac_tp_ops,
1010 .port_ops = &pmac_ide_port_ops, 999 .port_ops = &pmac_ide_port_ops,
1011#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1012 .dma_ops = &pmac_dma_ops, 1000 .dma_ops = &pmac_dma_ops,
1013#endif
1014 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | 1001 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
1015 IDE_HFLAG_POST_SET_MODE | 1002 IDE_HFLAG_POST_SET_MODE |
1016 IDE_HFLAG_MMIO | 1003 IDE_HFLAG_MMIO |
@@ -1182,7 +1169,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1182 pmif->regbase = regbase; 1169 pmif->regbase = regbase;
1183 pmif->irq = irq; 1170 pmif->irq = irq;
1184 pmif->kauai_fcr = NULL; 1171 pmif->kauai_fcr = NULL;
1185#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 1172
1186 if (macio_resource_count(mdev) >= 2) { 1173 if (macio_resource_count(mdev) >= 2) {
1187 if (macio_request_resource(mdev, 1, "ide-pmac (dma)")) 1174 if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
1188 printk(KERN_WARNING "ide-pmac: can't request DMA " 1175 printk(KERN_WARNING "ide-pmac: can't request DMA "
@@ -1192,7 +1179,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1192 pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000); 1179 pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
1193 } else 1180 } else
1194 pmif->dma_regs = NULL; 1181 pmif->dma_regs = NULL;
1195#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 1182
1196 dev_set_drvdata(&mdev->ofdev.dev, pmif); 1183 dev_set_drvdata(&mdev->ofdev.dev, pmif);
1197 1184
1198 memset(&hw, 0, sizeof(hw)); 1185 memset(&hw, 0, sizeof(hw));
@@ -1300,9 +1287,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1300 1287
1301 base = ioremap(rbase, rlen); 1288 base = ioremap(rbase, rlen);
1302 pmif->regbase = (unsigned long) base + 0x2000; 1289 pmif->regbase = (unsigned long) base + 0x2000;
1303#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1304 pmif->dma_regs = base + 0x1000; 1290 pmif->dma_regs = base + 0x1000;
1305#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1306 pmif->kauai_fcr = base; 1291 pmif->kauai_fcr = base;
1307 pmif->irq = pdev->irq; 1292 pmif->irq = pdev->irq;
1308 1293
@@ -1434,8 +1419,6 @@ out:
1434 return error; 1419 return error;
1435} 1420}
1436 1421
1437#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1438
1439/* 1422/*
1440 * pmac_ide_build_dmatable builds the DBDMA command list 1423 * pmac_ide_build_dmatable builds the DBDMA command list
1441 * for a transfer and sets the DBDMA channel to point to it. 1424 * for a transfer and sets the DBDMA channel to point to it.
@@ -1723,13 +1706,6 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1723 1706
1724 return 0; 1707 return 0;
1725} 1708}
1726#else
1727static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1728 const struct ide_port_info *d)
1729{
1730 return -EOPNOTSUPP;
1731}
1732#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1733 1709
1734module_init(pmac_ide_probe); 1710module_init(pmac_ide_probe);
1735 1711
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index 7defa0ae2014..a687a7dfea6f 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -550,7 +550,7 @@ static const struct ide_dma_ops sgiioc4_dma_ops = {
550 .dma_timeout = ide_dma_timeout, 550 .dma_timeout = ide_dma_timeout,
551}; 551};
552 552
553static const struct ide_port_info sgiioc4_port_info __devinitdata = { 553static const struct ide_port_info sgiioc4_port_info __devinitconst = {
554 .name = DRV_NAME, 554 .name = DRV_NAME,
555 .chipset = ide_pci, 555 .chipset = ide_pci,
556 .init_dma = ide_dma_sgiioc4, 556 .init_dma = ide_dma_sgiioc4,
@@ -633,7 +633,7 @@ out:
633 return ret; 633 return ret;
634} 634}
635 635
636int 636int __devinit
637ioc4_ide_attach_one(struct ioc4_driver_data *idd) 637ioc4_ide_attach_one(struct ioc4_driver_data *idd)
638{ 638{
639 /* PCI-RT does not bring out IDE connection. 639 /* PCI-RT does not bring out IDE connection.
@@ -645,7 +645,7 @@ ioc4_ide_attach_one(struct ioc4_driver_data *idd)
645 return pci_init_sgiioc4(idd->idd_pdev); 645 return pci_init_sgiioc4(idd->idd_pdev);
646} 646}
647 647
648static struct ioc4_submodule ioc4_ide_submodule = { 648static struct ioc4_submodule __devinitdata ioc4_ide_submodule = {
649 .is_name = "IOC4_ide", 649 .is_name = "IOC4_ide",
650 .is_owner = THIS_MODULE, 650 .is_owner = THIS_MODULE,
651 .is_probe = ioc4_ide_attach_one, 651 .is_probe = ioc4_ide_attach_one,
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 9e39f73282ee..d333ae22459c 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1685,6 +1685,7 @@ static int nodemgr_host_thread(void *data)
1685 g = get_hpsb_generation(host); 1685 g = get_hpsb_generation(host);
1686 for (i = 0; i < 4 ; i++) { 1686 for (i = 0; i < 4 ; i++) {
1687 msleep_interruptible(63); 1687 msleep_interruptible(63);
1688 try_to_freeze();
1688 if (kthread_should_stop()) 1689 if (kthread_should_stop())
1689 goto exit; 1690 goto exit;
1690 1691
@@ -1725,6 +1726,7 @@ static int nodemgr_host_thread(void *data)
1725 /* Sleep 3 seconds */ 1726 /* Sleep 3 seconds */
1726 for (i = 3000/200; i; i--) { 1727 for (i = 3000/200; i; i--) {
1727 msleep_interruptible(200); 1728 msleep_interruptible(200);
1729 try_to_freeze();
1728 if (kthread_should_stop()) 1730 if (kthread_should_stop())
1729 goto exit; 1731 goto exit;
1730 1732
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 69e674ecf19a..db22fd9b4cf2 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -101,7 +101,7 @@ static irqreturn_t omap_kp_interrupt(int irq, void *dev_id)
101 if (cpu_is_omap24xx()) { 101 if (cpu_is_omap24xx()) {
102 int i; 102 int i;
103 for (i = 0; i < omap_kp->rows; i++) 103 for (i = 0; i < omap_kp->rows; i++)
104 disable_irq(OMAP_GPIO_IRQ(row_gpios[i])); 104 disable_irq(gpio_to_irq(row_gpios[i]));
105 } else 105 } else
106 /* disable keyboard interrupt and schedule for handling */ 106 /* disable keyboard interrupt and schedule for handling */
107 omap_writew(1, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); 107 omap_writew(1, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
@@ -224,7 +224,7 @@ static void omap_kp_tasklet(unsigned long data)
224 if (cpu_is_omap24xx()) { 224 if (cpu_is_omap24xx()) {
225 int i; 225 int i;
226 for (i = 0; i < omap_kp_data->rows; i++) 226 for (i = 0; i < omap_kp_data->rows; i++)
227 enable_irq(OMAP_GPIO_IRQ(row_gpios[i])); 227 enable_irq(gpio_to_irq(row_gpios[i]));
228 } else { 228 } else {
229 omap_writew(0, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); 229 omap_writew(0, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
230 kp_cur_group = -1; 230 kp_cur_group = -1;
@@ -397,7 +397,7 @@ static int __init omap_kp_probe(struct platform_device *pdev)
397 omap_writew(0, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); 397 omap_writew(0, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
398 } else { 398 } else {
399 for (irq_idx = 0; irq_idx < omap_kp->rows; irq_idx++) { 399 for (irq_idx = 0; irq_idx < omap_kp->rows; irq_idx++) {
400 if (request_irq(OMAP_GPIO_IRQ(row_gpios[irq_idx]), 400 if (request_irq(gpio_to_irq(row_gpios[irq_idx]),
401 omap_kp_interrupt, 401 omap_kp_interrupt,
402 IRQF_TRIGGER_FALLING, 402 IRQF_TRIGGER_FALLING,
403 "omap-keypad", omap_kp) < 0) 403 "omap-keypad", omap_kp) < 0)
@@ -438,7 +438,7 @@ static int omap_kp_remove(struct platform_device *pdev)
438 gpio_free(col_gpios[i]); 438 gpio_free(col_gpios[i]);
439 for (i = 0; i < omap_kp->rows; i++) { 439 for (i = 0; i < omap_kp->rows; i++) {
440 gpio_free(row_gpios[i]); 440 gpio_free(row_gpios[i]);
441 free_irq(OMAP_GPIO_IRQ(row_gpios[i]), 0); 441 free_irq(gpio_to_irq(row_gpios[i]), 0);
442 } 442 }
443 } else { 443 } else {
444 omap_writew(1, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); 444 omap_writew(1, OMAP_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c
index 1e288eeb5e2a..6461a32bc838 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/isdn/hardware/avm/b1isa.c
@@ -233,10 +233,8 @@ static void __exit b1isa_exit(void)
233 int i; 233 int i;
234 234
235 for (i = 0; i < MAX_CARDS; i++) { 235 for (i = 0; i < MAX_CARDS; i++) {
236 if (!io[i]) 236 if (isa_dev[i].resource[0].start)
237 break; 237 b1isa_remove(&isa_dev[i]);
238
239 b1isa_remove(&isa_dev[i]);
240 } 238 }
241 unregister_capi_driver(&capi_driver_b1isa); 239 unregister_capi_driver(&capi_driver_b1isa);
242} 240}
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index cfa8fa5e44ab..3f2a0a20c19b 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -83,12 +83,12 @@ net_open(struct net_device *dev)
83 83
84 /* Fill in the MAC-level header (if not already set) */ 84 /* Fill in the MAC-level header (if not already set) */
85 if (!card->mac_addr[0]) { 85 if (!card->mac_addr[0]) {
86 for (i = 0; i < ETH_ALEN - sizeof(unsigned long); i++) 86 for (i = 0; i < ETH_ALEN; i++)
87 dev->dev_addr[i] = 0xfc; 87 dev->dev_addr[i] = 0xfc;
88 if ((in_dev = dev->ip_ptr) != NULL) { 88 if ((in_dev = dev->ip_ptr) != NULL) {
89 struct in_ifaddr *ifa = in_dev->ifa_list; 89 struct in_ifaddr *ifa = in_dev->ifa_list;
90 if (ifa != NULL) 90 if (ifa != NULL)
91 memcpy(dev->dev_addr + (ETH_ALEN - sizeof(unsigned long)), &ifa->ifa_local, sizeof(unsigned long)); 91 memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
92 } 92 }
93 } else 93 } else
94 memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN); 94 memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a63161aec487..04e5fd742c2c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -668,7 +668,7 @@ static void check_for_valid_limits(struct io_restrictions *rs)
668 if (!rs->max_segment_size) 668 if (!rs->max_segment_size)
669 rs->max_segment_size = MAX_SEGMENT_SIZE; 669 rs->max_segment_size = MAX_SEGMENT_SIZE;
670 if (!rs->seg_boundary_mask) 670 if (!rs->seg_boundary_mask)
671 rs->seg_boundary_mask = -1; 671 rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
672 if (!rs->bounce_pfn) 672 if (!rs->bounce_pfn)
673 rs->bounce_pfn = -1; 673 rs->bounce_pfn = -1;
674} 674}
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 84bdc2ee69e6..a443e136dc41 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -354,7 +354,7 @@ static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
354 * @req: the request to prepare 354 * @req: the request to prepare
355 * 355 *
356 * Allocate the necessary i2o_block_request struct and connect it to 356 * Allocate the necessary i2o_block_request struct and connect it to
357 * the request. This is needed that we not loose the SG list later on. 357 * the request. This is needed that we not lose the SG list later on.
358 * 358 *
359 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure. 359 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
360 */ 360 */
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index be2b5926d26c..6e53a30bfd38 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -49,7 +49,6 @@ static int i2o_hrt_get(struct i2o_controller *c);
49/** 49/**
50 * i2o_msg_get_wait - obtain an I2O message from the IOP 50 * i2o_msg_get_wait - obtain an I2O message from the IOP
51 * @c: I2O controller 51 * @c: I2O controller
52 * @msg: pointer to a I2O message pointer
53 * @wait: how long to wait until timeout 52 * @wait: how long to wait until timeout
54 * 53 *
55 * This function waits up to wait seconds for a message slot to be 54 * This function waits up to wait seconds for a message slot to be
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 8c389d606c30..3ee698ad8599 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -254,7 +254,11 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
254 return 1; 254 return 1;
255 255
256 *paddr = pte_pfn(pte) << PAGE_SHIFT; 256 *paddr = pte_pfn(pte) << PAGE_SHIFT;
257#ifdef CONFIG_HUGETLB_PAGE
257 *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT; 258 *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
259#else
260 *pageshift = PAGE_SHIFT;
261#endif
258 return 0; 262 return 0;
259 263
260err: 264err:
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 5c027b6b4e5a..650983806392 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -481,7 +481,7 @@ struct vm_operations_struct gru_vm_ops = {
481 .fault = gru_fault, 481 .fault = gru_fault,
482}; 482};
483 483
484module_init(gru_init); 484fs_initcall(gru_init);
485module_exit(gru_exit); 485module_exit(gru_exit);
486 486
487module_param(gru_options, ulong, 0644); 487module_param(gru_options, ulong, 0644);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 76a76751da36..6659b2275c0c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -37,9 +37,9 @@
37#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */ 37#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
38#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */ 38#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
39#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */ 39#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
40#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */ 40#define OPCODE_BE_4K 0x20 /* Erase 4KiB block */
41#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */ 41#define OPCODE_BE_32K 0x52 /* Erase 32KiB block */
42#define OPCODE_BE 0xc7 /* Erase whole flash block */ 42#define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */
43#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */ 43#define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */
44#define OPCODE_RDID 0x9f /* Read JEDEC ID */ 44#define OPCODE_RDID 0x9f /* Read JEDEC ID */
45 45
@@ -167,7 +167,7 @@ static int wait_till_ready(struct m25p *flash)
167 * 167 *
168 * Returns 0 if successful, non-zero otherwise. 168 * Returns 0 if successful, non-zero otherwise.
169 */ 169 */
170static int erase_block(struct m25p *flash) 170static int erase_chip(struct m25p *flash)
171{ 171{
172 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n", 172 DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
173 flash->spi->dev.bus_id, __func__, 173 flash->spi->dev.bus_id, __func__,
@@ -181,7 +181,7 @@ static int erase_block(struct m25p *flash)
181 write_enable(flash); 181 write_enable(flash);
182 182
183 /* Set up command buffer. */ 183 /* Set up command buffer. */
184 flash->command[0] = OPCODE_BE; 184 flash->command[0] = OPCODE_CHIP_ERASE;
185 185
186 spi_write(flash->spi, flash->command, 1); 186 spi_write(flash->spi, flash->command, 1);
187 187
@@ -250,15 +250,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
250 250
251 mutex_lock(&flash->lock); 251 mutex_lock(&flash->lock);
252 252
253 /* REVISIT in some cases we could speed up erasing large regions 253 /* whole-chip erase? */
254 * by using OPCODE_SE instead of OPCODE_BE_4K 254 if (len == flash->mtd.size && erase_chip(flash)) {
255 */
256
257 /* now erase those sectors */
258 if (len == flash->mtd.size && erase_block(flash)) {
259 instr->state = MTD_ERASE_FAILED; 255 instr->state = MTD_ERASE_FAILED;
260 mutex_unlock(&flash->lock); 256 mutex_unlock(&flash->lock);
261 return -EIO; 257 return -EIO;
258
259 /* REVISIT in some cases we could speed up erasing large regions
260 * by using OPCODE_SE instead of OPCODE_BE_4K. We may have set up
261 * to use "small sector erase", but that's not always optimal.
262 */
263
264 /* "sector"-at-a-time erase */
262 } else { 265 } else {
263 while (len) { 266 while (len) {
264 if (erase_sector(flash, addr)) { 267 if (erase_sector(flash, addr)) {
@@ -574,10 +577,11 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
574 for (tmp = 0, info = m25p_data; 577 for (tmp = 0, info = m25p_data;
575 tmp < ARRAY_SIZE(m25p_data); 578 tmp < ARRAY_SIZE(m25p_data);
576 tmp++, info++) { 579 tmp++, info++) {
577 if (info->jedec_id == jedec) 580 if (info->jedec_id == jedec) {
578 if (ext_jedec != 0 && info->ext_id != ext_jedec) 581 if (info->ext_id != 0 && info->ext_id != ext_jedec)
579 continue; 582 continue;
580 return info; 583 return info;
584 }
581 } 585 }
582 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec); 586 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
583 return NULL; 587 return NULL;
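The jedec_probe() change above adds braces around the inner ext_id test. Without them the unbraced return binds to the loop body rather than to the id match, so it executes on every iteration regardless of the misleading indentation. A small standalone illustration of that binding follows; the table, values and lookup names here are illustrative, not the driver's.

        #include <stdio.h>

        struct entry { int id; int ext; const char *name; };

        static const struct entry table[] = {
                { 0x202015, 0, "m25p16" },
                { 0x202016, 0, "m25p32" },
        };

        static const struct entry *lookup_buggy(int id, int ext)
        {
                int i;
                for (i = 0; i < 2; i++) {
                        if (table[i].id == id)
                                if (ext != 0 && table[i].ext != ext)
                                        continue;
                                return &table[i]; /* not governed by the outer if */
                }
                return NULL;
        }

        static const struct entry *lookup_fixed(int id, int ext)
        {
                int i;
                for (i = 0; i < 2; i++) {
                        if (table[i].id == id) {
                                if (table[i].ext != 0 && table[i].ext != ext)
                                        continue;
                                return &table[i];
                        }
                }
                return NULL;
        }

        int main(void)
        {
                printf("buggy: %s\n", lookup_buggy(0x202016, 0)->name); /* wrong entry */
                printf("fixed: %s\n", lookup_fixed(0x202016, 0)->name); /* m25p32 */
                return 0;
        }

The diff also flips the inner test to info->ext_id != 0, so only entries that carry an extended id insist on it matching the probed value.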
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 42d844f8f6bf..dfbf3f270cea 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -19,7 +19,7 @@
19#include <linux/mtd/partitions.h> 19#include <linux/mtd/partitions.h>
20#include <linux/mtd/physmap.h> 20#include <linux/mtd/physmap.h>
21#include <linux/mtd/concat.h> 21#include <linux/mtd/concat.h>
22#include <asm/io.h> 22#include <linux/io.h>
23 23
24#define MAX_RESOURCES 4 24#define MAX_RESOURCES 4
25 25
@@ -27,7 +27,6 @@ struct physmap_flash_info {
27 struct mtd_info *mtd[MAX_RESOURCES]; 27 struct mtd_info *mtd[MAX_RESOURCES];
28 struct mtd_info *cmtd; 28 struct mtd_info *cmtd;
29 struct map_info map[MAX_RESOURCES]; 29 struct map_info map[MAX_RESOURCES];
30 struct resource *res;
31#ifdef CONFIG_MTD_PARTITIONS 30#ifdef CONFIG_MTD_PARTITIONS
32 int nr_parts; 31 int nr_parts;
33 struct mtd_partition *parts; 32 struct mtd_partition *parts;
@@ -70,16 +69,7 @@ static int physmap_flash_remove(struct platform_device *dev)
70#endif 69#endif
71 map_destroy(info->mtd[i]); 70 map_destroy(info->mtd[i]);
72 } 71 }
73
74 if (info->map[i].virt != NULL)
75 iounmap(info->map[i].virt);
76 }
77
78 if (info->res != NULL) {
79 release_resource(info->res);
80 kfree(info->res);
81 } 72 }
82
83 return 0; 73 return 0;
84} 74}
85 75
@@ -101,7 +91,8 @@ static int physmap_flash_probe(struct platform_device *dev)
101 if (physmap_data == NULL) 91 if (physmap_data == NULL)
102 return -ENODEV; 92 return -ENODEV;
103 93
104 info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL); 94 info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info),
95 GFP_KERNEL);
105 if (info == NULL) { 96 if (info == NULL) {
106 err = -ENOMEM; 97 err = -ENOMEM;
107 goto err_out; 98 goto err_out;
@@ -114,10 +105,10 @@ static int physmap_flash_probe(struct platform_device *dev)
114 (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1), 105 (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
115 (unsigned long long)dev->resource[i].start); 106 (unsigned long long)dev->resource[i].start);
116 107
117 info->res = request_mem_region(dev->resource[i].start, 108 if (!devm_request_mem_region(&dev->dev,
118 dev->resource[i].end - dev->resource[i].start + 1, 109 dev->resource[i].start,
119 dev->dev.bus_id); 110 dev->resource[i].end - dev->resource[i].start + 1,
120 if (info->res == NULL) { 111 dev->dev.bus_id)) {
121 dev_err(&dev->dev, "Could not reserve memory region\n"); 112 dev_err(&dev->dev, "Could not reserve memory region\n");
122 err = -ENOMEM; 113 err = -ENOMEM;
123 goto err_out; 114 goto err_out;
@@ -129,7 +120,8 @@ static int physmap_flash_probe(struct platform_device *dev)
129 info->map[i].bankwidth = physmap_data->width; 120 info->map[i].bankwidth = physmap_data->width;
130 info->map[i].set_vpp = physmap_data->set_vpp; 121 info->map[i].set_vpp = physmap_data->set_vpp;
131 122
132 info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size); 123 info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
124 info->map[i].size);
133 if (info->map[i].virt == NULL) { 125 if (info->map[i].virt == NULL) {
134 dev_err(&dev->dev, "Failed to ioremap flash region\n"); 126 dev_err(&dev->dev, "Failed to ioremap flash region\n");
135 err = EIO; 127 err = EIO;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 024e3fffd4bb..a83192f80eba 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -163,9 +163,11 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
163 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0); 163 ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
164 164
165#ifdef CONFIG_MTD_OF_PARTS 165#ifdef CONFIG_MTD_OF_PARTS
166 if (ret == 0) 166 if (ret == 0) {
167 ret = of_mtd_parse_partitions(fun->dev, &fun->mtd, 167 ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
168 flash_np, &fun->parts); 168 if (ret < 0)
169 goto err;
170 }
169#endif 171#endif
170 if (ret > 0) 172 if (ret > 0)
171 ret = add_mtd_partitions(&fun->mtd, fun->parts, ret); 173 ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 75c899039023..9bd6c9ac8443 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -141,6 +141,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
141 } 141 }
142 142
143 lpcctl = pci_resource_start(pdev, 0); 143 lpcctl = pci_resource_start(pdev, 0);
144 pci_dev_put(pdev);
144 145
145 if (!request_region(lpcctl, 4, driver_name)) { 146 if (!request_region(lpcctl, 4, driver_name)) {
146 err = -EBUSY; 147 err = -EBUSY;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index ce5752ab579d..fc4144495610 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -269,6 +269,7 @@ static struct pxa3xx_nand_timing stm2GbX16_timing = {
269 269
270static struct pxa3xx_nand_flash stm2GbX16 = { 270static struct pxa3xx_nand_flash stm2GbX16 = {
271 .timing = &stm2GbX16_timing, 271 .timing = &stm2GbX16_timing,
272 .cmdset = &largepage_cmdset,
272 .page_per_block = 64, 273 .page_per_block = 64,
273 .page_size = 2048, 274 .page_size = 2048,
274 .flash_width = 16, 275 .flash_width = 16,
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index e39b21d3e168..d1e0b8e7224b 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -32,19 +32,18 @@
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/dma-mapping.h>
36#include <linux/io.h>
35 37
36#include <asm/io.h>
37#include <asm/mach/flash.h> 38#include <asm/mach/flash.h>
38#include <asm/arch/gpmc.h> 39#include <mach/gpmc.h>
39#include <asm/arch/onenand.h> 40#include <mach/onenand.h>
40#include <asm/arch/gpio.h> 41#include <mach/gpio.h>
41#include <asm/arch/pm.h> 42#include <mach/pm.h>
42 43
43#include <linux/dma-mapping.h> 44#include <mach/dma.h>
44#include <asm/dma-mapping.h>
45#include <asm/arch/dma.h>
46 45
47#include <asm/arch/board.h> 46#include <mach/board.h>
48 47
49#define DRIVER_NAME "omap2-onenand" 48#define DRIVER_NAME "omap2-onenand"
50 49
@@ -150,7 +149,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
150 149
151 INIT_COMPLETION(c->irq_done); 150 INIT_COMPLETION(c->irq_done);
152 if (c->gpio_irq) { 151 if (c->gpio_irq) {
153 result = omap_get_gpio_datain(c->gpio_irq); 152 result = gpio_get_value(c->gpio_irq);
154 if (result == -1) { 153 if (result == -1) {
155 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); 154 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
156 intr = read_reg(c, ONENAND_REG_INTERRUPT); 155 intr = read_reg(c, ONENAND_REG_INTERRUPT);
@@ -635,9 +634,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
635 "OneNAND\n", c->gpio_irq); 634 "OneNAND\n", c->gpio_irq);
636 goto err_iounmap; 635 goto err_iounmap;
637 } 636 }
638 omap_set_gpio_direction(c->gpio_irq, 1); 637 gpio_direction_input(c->gpio_irq);
639 638
640 if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq), 639 if ((r = request_irq(gpio_to_irq(c->gpio_irq),
641 omap2_onenand_interrupt, IRQF_TRIGGER_RISING, 640 omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
642 pdev->dev.driver->name, c)) < 0) 641 pdev->dev.driver->name, c)) < 0)
643 goto err_release_gpio; 642 goto err_release_gpio;
@@ -724,7 +723,7 @@ err_release_dma:
724 if (c->dma_channel != -1) 723 if (c->dma_channel != -1)
725 omap_free_dma(c->dma_channel); 724 omap_free_dma(c->dma_channel);
726 if (c->gpio_irq) 725 if (c->gpio_irq)
727 free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); 726 free_irq(gpio_to_irq(c->gpio_irq), c);
728err_release_gpio: 727err_release_gpio:
729 if (c->gpio_irq) 728 if (c->gpio_irq)
730 omap_free_gpio(c->gpio_irq); 729 omap_free_gpio(c->gpio_irq);
@@ -761,7 +760,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
761 omap2_onenand_shutdown(pdev); 760 omap2_onenand_shutdown(pdev);
762 platform_set_drvdata(pdev, NULL); 761 platform_set_drvdata(pdev, NULL);
763 if (c->gpio_irq) { 762 if (c->gpio_irq) {
764 free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); 763 free_irq(gpio_to_irq(c->gpio_irq), c);
765 omap_free_gpio(c->gpio_irq); 764 omap_free_gpio(c->gpio_irq);
766 } 765 }
767 iounmap(c->onenand.base); 766 iounmap(c->onenand.base);
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f19acf8b9220..017383ad5ec6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -114,7 +114,7 @@ obj-$(CONFIG_EL2) += 3c503.o 8390p.o
114obj-$(CONFIG_NE2000) += ne.o 8390p.o 114obj-$(CONFIG_NE2000) += ne.o 8390p.o
115obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o 115obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
116obj-$(CONFIG_HPLAN) += hp.o 8390p.o 116obj-$(CONFIG_HPLAN) += hp.o 8390p.o
117obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390.o 117obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
118obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o 118obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
119obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o 119obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
120obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o 120obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d07e3f148951..a1a3d0e5d2b4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3144,6 +3144,28 @@ bnx2_has_work(struct bnx2_napi *bnapi)
3144 return 0; 3144 return 0;
3145} 3145}
3146 3146
3147static void
3148bnx2_chk_missed_msi(struct bnx2 *bp)
3149{
3150 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3151 u32 msi_ctrl;
3152
3153 if (bnx2_has_work(bnapi)) {
3154 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3155 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3156 return;
3157
3158 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3159 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3160 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3161 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3162 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3163 }
3164 }
3165
3166 bp->idle_chk_status_idx = bnapi->last_status_idx;
3167}
3168
3147static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) 3169static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3148{ 3170{
3149 struct status_block *sblk = bnapi->status_blk.msi; 3171 struct status_block *sblk = bnapi->status_blk.msi;
@@ -3218,14 +3240,15 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
3218 3240
3219 work_done = bnx2_poll_work(bp, bnapi, work_done, budget); 3241 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3220 3242
3221 if (unlikely(work_done >= budget))
3222 break;
3223
3224 /* bnapi->last_status_idx is used below to tell the hw how 3243 /* bnapi->last_status_idx is used below to tell the hw how
3225 * much work has been processed, so we must read it before 3244 * much work has been processed, so we must read it before
3226 * checking for more work. 3245 * checking for more work.
3227 */ 3246 */
3228 bnapi->last_status_idx = sblk->status_idx; 3247 bnapi->last_status_idx = sblk->status_idx;
3248
3249 if (unlikely(work_done >= budget))
3250 break;
3251
3229 rmb(); 3252 rmb();
3230 if (likely(!bnx2_has_work(bnapi))) { 3253 if (likely(!bnx2_has_work(bnapi))) {
3231 netif_rx_complete(bp->dev, napi); 3254 netif_rx_complete(bp->dev, napi);
@@ -4570,6 +4593,8 @@ bnx2_init_chip(struct bnx2 *bp)
4570 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) 4593 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4571 bp->bnx2_napi[i].last_status_idx = 0; 4594 bp->bnx2_napi[i].last_status_idx = 0;
4572 4595
4596 bp->idle_chk_status_idx = 0xffff;
4597
4573 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; 4598 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4574 4599
4575 /* Set up how to generate a link change interrupt. */ 4600 /* Set up how to generate a link change interrupt. */
@@ -5718,6 +5743,10 @@ bnx2_timer(unsigned long data)
5718 if (atomic_read(&bp->intr_sem) != 0) 5743 if (atomic_read(&bp->intr_sem) != 0)
5719 goto bnx2_restart_timer; 5744 goto bnx2_restart_timer;
5720 5745
5746 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5747 BNX2_FLAG_USING_MSI)
5748 bnx2_chk_missed_msi(bp);
5749
5721 bnx2_send_heart_beat(bp); 5750 bnx2_send_heart_beat(bp);
5722 5751
5723 bp->stats_blk->stat_FwRxDrop = 5752 bp->stats_blk->stat_FwRxDrop =
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 617d95340160..0b032c3c7b61 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -378,6 +378,9 @@ struct l2_fhdr {
378 * pci_config_l definition 378 * pci_config_l definition
379 * offset: 0000 379 * offset: 0000
380 */ 380 */
381#define BNX2_PCICFG_MSI_CONTROL 0x00000058
382#define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16)
383
381#define BNX2_PCICFG_MISC_CONFIG 0x00000068 384#define BNX2_PCICFG_MISC_CONFIG 0x00000068
382#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2) 385#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2)
383#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3) 386#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3)
@@ -6863,6 +6866,9 @@ struct bnx2 {
6863 6866
6864 u8 num_tx_rings; 6867 u8 num_tx_rings;
6865 u8 num_rx_rings; 6868 u8 num_rx_rings;
6869
6870 u32 idle_chk_status_idx;
6871
6866}; 6872};
6867 6873
6868#define REG_RD(bp, offset) \ 6874#define REG_RD(bp, offset) \
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index d6c7d2aa761b..7092df50ff78 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1035,10 +1035,6 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1035 * @pdev: the PCI device that received the packet 1035 * @pdev: the PCI device that received the packet
1036 * @fl: the SGE free list holding the packet 1036 * @fl: the SGE free list holding the packet
1037 * @len: the actual packet length, excluding any SGE padding 1037 * @len: the actual packet length, excluding any SGE padding
1038 * @dma_pad: padding at beginning of buffer left by SGE DMA
1039 * @skb_pad: padding to be used if the packet is copied
1040 * @copy_thres: length threshold under which a packet should be copied
1041 * @drop_thres: # of remaining buffers before we start dropping packets
1042 * 1038 *
1043 * Get the next packet from a free list and complete setup of the 1039 * Get the next packet from a free list and complete setup of the
1044 * sk_buff. If the packet is small we make a copy and recycle the 1040 * sk_buff. If the packet is small we make a copy and recycle the
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 91795f78c3e4..122539a0e1fe 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -345,7 +345,6 @@ no_buffers:
345/** 345/**
346 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 346 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
347 * @adapter: address of board private structure 347 * @adapter: address of board private structure
348 * @rx_ring: pointer to receive ring structure
349 * @cleaned_count: number of buffers to allocate this pass 348 * @cleaned_count: number of buffers to allocate this pass
350 **/ 349 **/
351 350
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index e1b441effbbe..c414554ac321 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -568,6 +568,17 @@ static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
568 return erxrdpt; 568 return erxrdpt;
569} 569}
570 570
571/*
572 * Calculate wrap around when reading beyond the end of the RX buffer
573 */
574static u16 rx_packet_start(u16 ptr)
575{
576 if (ptr + RSV_SIZE > RXEND_INIT)
577 return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
578 else
579 return ptr + RSV_SIZE;
580}
581
571static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end) 582static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
572{ 583{
573 u16 erxrdpt; 584 u16 erxrdpt;
@@ -938,8 +949,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
938 skb->dev = ndev; 949 skb->dev = ndev;
939 skb_reserve(skb, NET_IP_ALIGN); 950 skb_reserve(skb, NET_IP_ALIGN);
940 /* copy the packet from the receive buffer */ 951 /* copy the packet from the receive buffer */
941 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv), 952 enc28j60_mem_read(priv,
942 len, skb_put(skb, len)); 953 rx_packet_start(priv->next_pk_ptr),
954 len, skb_put(skb, len));
943 if (netif_msg_pktdata(priv)) 955 if (netif_msg_pktdata(priv))
944 dump_packet(__func__, skb->len, skb->data); 956 dump_packet(__func__, skb->len, skb->data);
945 skb->protocol = eth_type_trans(skb, ndev); 957 skb->protocol = eth_type_trans(skb, ndev);
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index fbbd3e660c27..c01e290d09d2 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -230,7 +230,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
230 dev->open = &hpp_open; 230 dev->open = &hpp_open;
231 dev->stop = &hpp_close; 231 dev->stop = &hpp_close;
232#ifdef CONFIG_NET_POLL_CONTROLLER 232#ifdef CONFIG_NET_POLL_CONTROLLER
233 dev->poll_controller = ei_poll; 233 dev->poll_controller = eip_poll;
234#endif 234#endif
235 235
236 ei_status.name = name; 236 ei_status.name = name;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 1cbae85b1426..20d27e622ec1 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1980,7 +1980,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1980 1980
1981/** 1981/**
1982 * igb_free_tx_resources - Free Tx Resources per Queue 1982 * igb_free_tx_resources - Free Tx Resources per Queue
1983 * @adapter: board private structure
1984 * @tx_ring: Tx descriptor ring for a specific queue 1983 * @tx_ring: Tx descriptor ring for a specific queue
1985 * 1984 *
1986 * Free all transmit software resources 1985 * Free all transmit software resources
@@ -2033,7 +2032,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2033 2032
2034/** 2033/**
2035 * igb_clean_tx_ring - Free Tx Buffers 2034 * igb_clean_tx_ring - Free Tx Buffers
2036 * @adapter: board private structure
2037 * @tx_ring: ring to be cleaned 2035 * @tx_ring: ring to be cleaned
2038 **/ 2036 **/
2039static void igb_clean_tx_ring(struct igb_ring *tx_ring) 2037static void igb_clean_tx_ring(struct igb_ring *tx_ring)
@@ -2080,7 +2078,6 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2080 2078
2081/** 2079/**
2082 * igb_free_rx_resources - Free Rx Resources 2080 * igb_free_rx_resources - Free Rx Resources
2083 * @adapter: board private structure
2084 * @rx_ring: ring to clean the resources from 2081 * @rx_ring: ring to clean the resources from
2085 * 2082 *
2086 * Free all receive software resources 2083 * Free all receive software resources
@@ -2120,7 +2117,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2120 2117
2121/** 2118/**
2122 * igb_clean_rx_ring - Free Rx Buffers per Queue 2119 * igb_clean_rx_ring - Free Rx Buffers per Queue
2123 * @adapter: board private structure
2124 * @rx_ring: ring to free buffers from 2120 * @rx_ring: ring to free buffers from
2125 **/ 2121 **/
2126static void igb_clean_rx_ring(struct igb_ring *rx_ring) 2122static void igb_clean_rx_ring(struct igb_ring *rx_ring)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 8ed823ae639e..5236f633ee36 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1320,7 +1320,6 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1320 * ixgbe_intr - legacy mode Interrupt Handler 1320 * ixgbe_intr - legacy mode Interrupt Handler
1321 * @irq: interrupt number 1321 * @irq: interrupt number
1322 * @data: pointer to a network interface device structure 1322 * @data: pointer to a network interface device structure
1323 * @pt_regs: CPU registers structure
1324 **/ 1323 **/
1325static irqreturn_t ixgbe_intr(int irq, void *data) 1324static irqreturn_t ixgbe_intr(int irq, void *data)
1326{ 1325{
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 42394505bb50..590039cbb146 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -70,6 +70,9 @@ static void macvlan_broadcast(struct sk_buff *skb,
70 struct sk_buff *nskb; 70 struct sk_buff *nskb;
71 unsigned int i; 71 unsigned int i;
72 72
73 if (skb->protocol == htons(ETH_P_PAUSE))
74 return;
75
73 for (i = 0; i < MACVLAN_HASH_SIZE; i++) { 76 for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
74 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { 77 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
75 dev = vlan->dev; 78 dev = vlan->dev;
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index b9bed82e1d21..b289a0a2b945 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -401,6 +401,8 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
401 priv->xmac_base = priv->xc->xmac_base; 401 priv->xmac_base = priv->xc->xmac_base;
402 priv->sram_base = priv->xc->sram_base; 402 priv->sram_base = priv->xc->sram_base;
403 403
404 spin_lock_init(&priv->lock);
405
404 ret = pfifo_request(PFIFO_MASK(priv->id)); 406 ret = pfifo_request(PFIFO_MASK(priv->id));
405 if (ret) { 407 if (ret) {
406 printk("unable to request PFIFO\n"); 408 printk("unable to request PFIFO\n");
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index b37a498939ae..0418045166c3 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -779,6 +779,7 @@ static struct pcmcia_device_id axnet_ids[] = {
779 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2), 779 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
780 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8), 780 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
781 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609), 781 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609),
782 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
782 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04), 783 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04),
783 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116), 784 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
784 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058), 785 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
@@ -1174,7 +1175,6 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
1174 * ax_interrupt - handle the interrupts from an 8390 1175 * ax_interrupt - handle the interrupts from an 8390
1175 * @irq: interrupt number 1176 * @irq: interrupt number
1176 * @dev_id: a pointer to the net_device 1177 * @dev_id: a pointer to the net_device
1177 * @regs: unused
1178 * 1178 *
1179 * Handle the ether interface interrupts. We pull packets from 1179 * Handle the ether interface interrupts. We pull packets from
1180 * the 8390 via the card specific functions and fire them at the networking 1180 * the 8390 via the card specific functions and fire them at the networking
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index e40d6301aa7a..ce486f094492 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1693,7 +1693,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
1693 PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8), 1693 PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8),
1694 PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76), 1694 PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76),
1695 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e), 1695 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e),
1696 PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
1697 PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f), 1696 PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f),
1698 PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1), 1697 PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1),
1699 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b), 1698 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b),
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 55bc24b234e3..25acbbde4a60 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -227,8 +227,17 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
227 if (r) 227 if (r)
228 return ERR_PTR(r); 228 return ERR_PTR(r);
229 229
230 /* If the phy_id is all Fs or all 0s, there is no device there */ 230 /* If the phy_id is mostly Fs, there is no device there */
231 if ((0xffff == phy_id) || (0x00 == phy_id)) 231 if ((phy_id & 0x1fffffff) == 0x1fffffff)
232 return NULL;
233
234 /*
235 * Broken hardware is sometimes missing the pull down resistor on the
236 * MDIO line, which results in reads to non-existent devices returning
237 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
238 * device as well.
239 */
240 if (phy_id == 0)
232 return NULL; 241 return NULL;
233 242
234 dev = phy_device_create(bus, addr, phy_id); 243 dev = phy_device_create(bus, addr, phy_id);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 8874497b6bbf..dd3b2447e85a 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -34,6 +34,8 @@
34#define MII_VSC8244_IMASK_DUPLEX 0x1000 34#define MII_VSC8244_IMASK_DUPLEX 0x1000
35#define MII_VSC8244_IMASK_MASK 0xf000 35#define MII_VSC8244_IMASK_MASK 0xf000
36 36
37#define MII_VSC8221_IMASK_MASK 0xa000
38
37/* Vitesse Interrupt Status Register */ 39/* Vitesse Interrupt Status Register */
38#define MII_VSC8244_ISTAT 0x1a 40#define MII_VSC8244_ISTAT 0x1a
39#define MII_VSC8244_ISTAT_STATUS 0x8000 41#define MII_VSC8244_ISTAT_STATUS 0x8000
@@ -49,6 +51,12 @@
49#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010 51#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010
50#define MII_VSC8244_AUXCONSTAT_100 0x0008 52#define MII_VSC8244_AUXCONSTAT_100 0x0008
51 53
54#define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */
55#define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004
56
57#define PHY_ID_VSC8244 0x000fc6c0
58#define PHY_ID_VSC8221 0x000fc550
59
52MODULE_DESCRIPTION("Vitesse PHY driver"); 60MODULE_DESCRIPTION("Vitesse PHY driver");
53MODULE_AUTHOR("Kriston Carson"); 61MODULE_AUTHOR("Kriston Carson");
54MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
@@ -95,13 +103,15 @@ static int vsc824x_ack_interrupt(struct phy_device *phydev)
95 return (err < 0) ? err : 0; 103 return (err < 0) ? err : 0;
96} 104}
97 105
98static int vsc824x_config_intr(struct phy_device *phydev) 106static int vsc82xx_config_intr(struct phy_device *phydev)
99{ 107{
100 int err; 108 int err;
101 109
102 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 110 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
103 err = phy_write(phydev, MII_VSC8244_IMASK, 111 err = phy_write(phydev, MII_VSC8244_IMASK,
104 MII_VSC8244_IMASK_MASK); 112 phydev->drv->phy_id == PHY_ID_VSC8244 ?
113 MII_VSC8244_IMASK_MASK :
114 MII_VSC8221_IMASK_MASK);
105 else { 115 else {
106 /* 116 /*
107 * The Vitesse PHY cannot clear the interrupt 117 * The Vitesse PHY cannot clear the interrupt
@@ -120,7 +130,7 @@ static int vsc824x_config_intr(struct phy_device *phydev)
120 130
121/* Vitesse 824x */ 131/* Vitesse 824x */
122static struct phy_driver vsc8244_driver = { 132static struct phy_driver vsc8244_driver = {
123 .phy_id = 0x000fc6c0, 133 .phy_id = PHY_ID_VSC8244,
124 .name = "Vitesse VSC8244", 134 .name = "Vitesse VSC8244",
125 .phy_id_mask = 0x000fffc0, 135 .phy_id_mask = 0x000fffc0,
126 .features = PHY_GBIT_FEATURES, 136 .features = PHY_GBIT_FEATURES,
@@ -129,19 +139,55 @@ static struct phy_driver vsc8244_driver = {
129 .config_aneg = &genphy_config_aneg, 139 .config_aneg = &genphy_config_aneg,
130 .read_status = &genphy_read_status, 140 .read_status = &genphy_read_status,
131 .ack_interrupt = &vsc824x_ack_interrupt, 141 .ack_interrupt = &vsc824x_ack_interrupt,
132 .config_intr = &vsc824x_config_intr, 142 .config_intr = &vsc82xx_config_intr,
133 .driver = { .owner = THIS_MODULE,}, 143 .driver = { .owner = THIS_MODULE,},
134}; 144};
135 145
136static int __init vsc8244_init(void) 146static int vsc8221_config_init(struct phy_device *phydev)
137{ 147{
138 return phy_driver_register(&vsc8244_driver); 148 int err;
149
150 err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
151 MII_VSC8221_AUXCONSTAT_INIT);
152 return err;
153
154 /* Perhaps we should set EXT_CON1 based on the interface?
155 Options are 802.3Z SerDes or SGMII */
156}
157
158/* Vitesse 8221 */
159static struct phy_driver vsc8221_driver = {
160 .phy_id = PHY_ID_VSC8221,
161 .phy_id_mask = 0x000ffff0,
162 .name = "Vitesse VSC8221",
163 .features = PHY_GBIT_FEATURES,
164 .flags = PHY_HAS_INTERRUPT,
165 .config_init = &vsc8221_config_init,
166 .config_aneg = &genphy_config_aneg,
167 .read_status = &genphy_read_status,
168 .ack_interrupt = &vsc824x_ack_interrupt,
169 .config_intr = &vsc82xx_config_intr,
170 .driver = { .owner = THIS_MODULE,},
171};
172
173static int __init vsc82xx_init(void)
174{
175 int err;
176
177 err = phy_driver_register(&vsc8244_driver);
178 if (err < 0)
179 return err;
180 err = phy_driver_register(&vsc8221_driver);
181 if (err < 0)
182 phy_driver_unregister(&vsc8244_driver);
183 return err;
139} 184}
140 185
141static void __exit vsc8244_exit(void) 186static void __exit vsc82xx_exit(void)
142{ 187{
143 phy_driver_unregister(&vsc8244_driver); 188 phy_driver_unregister(&vsc8244_driver);
189 phy_driver_unregister(&vsc8221_driver);
144} 190}
145 191
146module_init(vsc8244_init); 192module_init(vsc82xx_init);
147module_exit(vsc8244_exit); 193module_exit(vsc82xx_exit);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 185b1dff10a8..e98d9773158d 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -1353,6 +1353,7 @@ static int pppol2tp_release(struct socket *sock)
1353 kfree_skb(skb); 1353 kfree_skb(skb);
1354 sock_put(sk); 1354 sock_put(sk);
1355 } 1355 }
1356 sock_put(sk);
1356 } 1357 }
1357 1358
1358 release_sock(sk); 1359 release_sock(sk);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index fa3a460f8e2f..8e8337e8b072 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1630,7 +1630,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1630 * sis900_interrupt - sis900 interrupt handler 1630 * sis900_interrupt - sis900 interrupt handler
1631 * @irq: the irq number 1631 * @irq: the irq number
1632 * @dev_instance: the client data object 1632 * @dev_instance: the client data object
1633 * @regs: snapshot of processor context
1634 * 1633 *
1635 * The interrupt handler does all of the Rx thread work, 1634 * The interrupt handler does all of the Rx thread work,
1636 * and cleans up after the Tx thread 1635 * and cleans up after the Tx thread
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index b6435d0d71f9..07599b492359 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -672,7 +672,6 @@ write_hash:
672/** 672/**
673 * spider_net_prepare_tx_descr - fill tx descriptor with skb data 673 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
674 * @card: card structure 674 * @card: card structure
675 * @descr: descriptor structure to fill out
676 * @skb: packet to use 675 * @skb: packet to use
677 * 676 *
678 * returns 0 on success, <0 on failure. 677 * returns 0 on success, <0 on failure.
@@ -867,7 +866,6 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
867/** 866/**
868 * spider_net_kick_tx_dma - enables TX DMA processing 867 * spider_net_kick_tx_dma - enables TX DMA processing
869 * @card: card structure 868 * @card: card structure
870 * @descr: descriptor address to enable TX processing at
871 * 869 *
872 * This routine will start the transmit DMA running if 870 * This routine will start the transmit DMA running if
873 * it is not already running. This routine need only be 871 * it is not already running. This routine need only be
@@ -1637,7 +1635,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1637 * spider_net_interrupt - interrupt handler for spider_net 1635 * spider_net_interrupt - interrupt handler for spider_net
1638 * @irq: interrupt number 1636 * @irq: interrupt number
1639 * @ptr: pointer to net_device 1637 * @ptr: pointer to net_device
1640 * @regs: PU registers
1641 * 1638 *
1642 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no 1639 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
1643 * interrupt found raised by card. 1640 * interrupt found raised by card.
@@ -2419,7 +2416,6 @@ spider_net_undo_pci_setup(struct spider_net_card *card)
2419 2416
2420/** 2417/**
2421 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations 2418 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2422 * @card: card structure
2423 * @pdev: PCI device 2419 * @pdev: PCI device
2424 * 2420 *
2425 * Returns the card structure or NULL if any errors occur 2421 * Returns the card structure or NULL if any errors occur
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 4291458955ef..1349e419673c 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1714,7 +1714,7 @@ static void gem_init_phy(struct gem *gp)
1714 /* Reset PCS unit. */ 1714 /* Reset PCS unit. */
1715 val = readl(gp->regs + PCS_MIICTRL); 1715 val = readl(gp->regs + PCS_MIICTRL);
1716 val |= PCS_MIICTRL_RST; 1716 val |= PCS_MIICTRL_RST;
1717 writeb(val, gp->regs + PCS_MIICTRL); 1717 writel(val, gp->regs + PCS_MIICTRL);
1718 1718
1719 limit = 32; 1719 limit = 32;
1720 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { 1720 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index cfd4d052d666..2d14255eb103 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -240,6 +240,10 @@ static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
240static void ath5k_reset_tsf(struct ieee80211_hw *hw); 240static void ath5k_reset_tsf(struct ieee80211_hw *hw);
241static int ath5k_beacon_update(struct ieee80211_hw *hw, 241static int ath5k_beacon_update(struct ieee80211_hw *hw,
242 struct sk_buff *skb); 242 struct sk_buff *skb);
243static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
244 struct ieee80211_vif *vif,
245 struct ieee80211_bss_conf *bss_conf,
246 u32 changes);
243 247
244static struct ieee80211_ops ath5k_hw_ops = { 248static struct ieee80211_ops ath5k_hw_ops = {
245 .tx = ath5k_tx, 249 .tx = ath5k_tx,
@@ -256,6 +260,7 @@ static struct ieee80211_ops ath5k_hw_ops = {
256 .get_tx_stats = ath5k_get_tx_stats, 260 .get_tx_stats = ath5k_get_tx_stats,
257 .get_tsf = ath5k_get_tsf, 261 .get_tsf = ath5k_get_tsf,
258 .reset_tsf = ath5k_reset_tsf, 262 .reset_tsf = ath5k_reset_tsf,
263 .bss_info_changed = ath5k_bss_info_changed,
259}; 264};
260 265
261/* 266/*
@@ -2942,7 +2947,7 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2942 sc->opmode != NL80211_IFTYPE_MESH_POINT && 2947 sc->opmode != NL80211_IFTYPE_MESH_POINT &&
2943 test_bit(ATH_STAT_PROMISC, sc->status)) 2948 test_bit(ATH_STAT_PROMISC, sc->status))
2944 rfilt |= AR5K_RX_FILTER_PROM; 2949 rfilt |= AR5K_RX_FILTER_PROM;
2945 if (sc->opmode == NL80211_IFTYPE_STATION || 2950 if ((sc->opmode == NL80211_IFTYPE_STATION && sc->assoc) ||
2946 sc->opmode == NL80211_IFTYPE_ADHOC) { 2951 sc->opmode == NL80211_IFTYPE_ADHOC) {
2947 rfilt |= AR5K_RX_FILTER_BEACON; 2952 rfilt |= AR5K_RX_FILTER_BEACON;
2948 } 2953 }
@@ -3083,4 +3088,32 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3083end: 3088end:
3084 return ret; 3089 return ret;
3085} 3090}
3091static void
3092set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3093{
3094 struct ath5k_softc *sc = hw->priv;
3095 struct ath5k_hw *ah = sc->ah;
3096 u32 rfilt;
3097 rfilt = ath5k_hw_get_rx_filter(ah);
3098 if (enable)
3099 rfilt |= AR5K_RX_FILTER_BEACON;
3100 else
3101 rfilt &= ~AR5K_RX_FILTER_BEACON;
3102 ath5k_hw_set_rx_filter(ah, rfilt);
3103 sc->filter_flags = rfilt;
3104}
3086 3105
3106static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3107 struct ieee80211_vif *vif,
3108 struct ieee80211_bss_conf *bss_conf,
3109 u32 changes)
3110{
3111 struct ath5k_softc *sc = hw->priv;
3112 if (changes & BSS_CHANGED_ASSOC) {
3113 mutex_lock(&sc->lock);
3114 sc->assoc = bss_conf->assoc;
3115 if (sc->opmode == NL80211_IFTYPE_STATION)
3116 set_beacon_filter(hw, sc->assoc);
3117 mutex_unlock(&sc->lock);
3118 }
3119}
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 06d1054ca94b..facc60ddada2 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -179,6 +179,7 @@ struct ath5k_softc {
179 179
180 struct timer_list calib_tim; /* calibration timer */ 180 struct timer_list calib_tim; /* calibration timer */
181 int power_level; /* Requested tx power in dbm */ 181 int power_level; /* Requested tx power in dbm */
182	bool		assoc;		/* association state */
182}; 183};
183 184
184#define ath5k_hw_hasbssidmask(_ah) \ 185#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index 19980cbd5d5f..ccaeb5c219d2 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -417,19 +417,19 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
417 sc->debug.debugfs_phydir = debugfs_create_dir(wiphy_name(sc->hw->wiphy), 417 sc->debug.debugfs_phydir = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
418 ath5k_global_debugfs); 418 ath5k_global_debugfs);
419 419
420 sc->debug.debugfs_debug = debugfs_create_file("debug", 0666, 420 sc->debug.debugfs_debug = debugfs_create_file("debug", S_IWUSR | S_IRUGO,
421 sc->debug.debugfs_phydir, sc, &fops_debug); 421 sc->debug.debugfs_phydir, sc, &fops_debug);
422 422
423 sc->debug.debugfs_registers = debugfs_create_file("registers", 0444, 423 sc->debug.debugfs_registers = debugfs_create_file("registers", S_IRUGO,
424 sc->debug.debugfs_phydir, sc, &fops_registers); 424 sc->debug.debugfs_phydir, sc, &fops_registers);
425 425
426 sc->debug.debugfs_tsf = debugfs_create_file("tsf", 0666, 426 sc->debug.debugfs_tsf = debugfs_create_file("tsf", S_IWUSR | S_IRUGO,
427 sc->debug.debugfs_phydir, sc, &fops_tsf); 427 sc->debug.debugfs_phydir, sc, &fops_tsf);
428 428
429 sc->debug.debugfs_beacon = debugfs_create_file("beacon", 0666, 429 sc->debug.debugfs_beacon = debugfs_create_file("beacon", S_IWUSR | S_IRUGO,
430 sc->debug.debugfs_phydir, sc, &fops_beacon); 430 sc->debug.debugfs_phydir, sc, &fops_beacon);
431 431
432 sc->debug.debugfs_reset = debugfs_create_file("reset", 0222, 432 sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR,
433 sc->debug.debugfs_phydir, sc, &fops_reset); 433 sc->debug.debugfs_phydir, sc, &fops_reset);
434} 434}
435 435
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 9e15c30bbc06..4dd1c1bda0fb 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -170,7 +170,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
170 skb = (struct sk_buff *)bf->bf_mpdu; 170 skb = (struct sk_buff *)bf->bf_mpdu;
171 if (skb) { 171 if (skb) {
172 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 172 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
173 skb_end_pointer(skb) - skb->head, 173 skb->len,
174 PCI_DMA_TODEVICE); 174 PCI_DMA_TODEVICE);
175 } 175 }
176 176
@@ -193,7 +193,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
193 193
194 bf->bf_buf_addr = bf->bf_dmacontext = 194 bf->bf_buf_addr = bf->bf_dmacontext =
195 pci_map_single(sc->pdev, skb->data, 195 pci_map_single(sc->pdev, skb->data,
196 skb_end_pointer(skb) - skb->head, 196 skb->len,
197 PCI_DMA_TODEVICE); 197 PCI_DMA_TODEVICE);
198 198
199 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data); 199 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
@@ -352,7 +352,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
352 if (bf->bf_mpdu != NULL) { 352 if (bf->bf_mpdu != NULL) {
353 skb = (struct sk_buff *)bf->bf_mpdu; 353 skb = (struct sk_buff *)bf->bf_mpdu;
354 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 354 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
355 skb_end_pointer(skb) - skb->head, 355 skb->len,
356 PCI_DMA_TODEVICE); 356 PCI_DMA_TODEVICE);
357 dev_kfree_skb_any(skb); 357 dev_kfree_skb_any(skb);
358 bf->bf_mpdu = NULL; 358 bf->bf_mpdu = NULL;
@@ -412,7 +412,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
412 412
413 bf->bf_buf_addr = bf->bf_dmacontext = 413 bf->bf_buf_addr = bf->bf_dmacontext =
414 pci_map_single(sc->pdev, skb->data, 414 pci_map_single(sc->pdev, skb->data,
415 skb_end_pointer(skb) - skb->head, 415 skb->len,
416 PCI_DMA_TODEVICE); 416 PCI_DMA_TODEVICE);
417 bf->bf_mpdu = skb; 417 bf->bf_mpdu = skb;
418 418
@@ -439,7 +439,7 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
439 if (bf->bf_mpdu != NULL) { 439 if (bf->bf_mpdu != NULL) {
440 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 440 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
441 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 441 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
442 skb_end_pointer(skb) - skb->head, 442 skb->len,
443 PCI_DMA_TODEVICE); 443 PCI_DMA_TODEVICE);
444 dev_kfree_skb_any(skb); 444 dev_kfree_skb_any(skb);
445 bf->bf_mpdu = NULL; 445 bf->bf_mpdu = NULL;
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 4983402af559..504a0444d89f 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -49,10 +49,12 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
49 ASSERT(skb != NULL); 49 ASSERT(skb != NULL);
50 ds->ds_vdata = skb->data; 50 ds->ds_vdata = skb->data;
51 51
52 /* setup rx descriptors */ 52 /* setup rx descriptors. The sc_rxbufsize here tells the hardware
53 * how much data it can DMA to us and that we are prepared
54 * to process */
53 ath9k_hw_setuprxdesc(ah, 55 ath9k_hw_setuprxdesc(ah,
54 ds, 56 ds,
55 skb_tailroom(skb), /* buffer size */ 57 sc->sc_rxbufsize,
56 0); 58 0);
57 59
58 if (sc->sc_rxlink == NULL) 60 if (sc->sc_rxlink == NULL)
@@ -398,6 +400,13 @@ static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
398 * in rx'd frames. 400 * in rx'd frames.
399 */ 401 */
400 402
403 /* Note: the kernel can allocate a value greater than
404 * what we ask it to give us. We really only need 4 KB as that
405 * is what this hardware supports, and in fact we need at least 3849
406 * as that is the MAX AMSDU size this hardware supports.
407 * Unfortunately this means we may get 8 KB here from the
408 * kernel... and that is actually what is observed on some
409 * systems :( */
401 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1); 410 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
402 if (skb != NULL) { 411 if (skb != NULL) {
403 off = ((unsigned long) skb->data) % sc->sc_cachelsz; 412 off = ((unsigned long) skb->data) % sc->sc_cachelsz;
@@ -456,7 +465,7 @@ static int ath_rx_indicate(struct ath_softc *sc,
456 if (nskb != NULL) { 465 if (nskb != NULL) {
457 bf->bf_mpdu = nskb; 466 bf->bf_mpdu = nskb;
458 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data, 467 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
459 skb_end_pointer(nskb) - nskb->head, 468 sc->sc_rxbufsize,
460 PCI_DMA_FROMDEVICE); 469 PCI_DMA_FROMDEVICE);
461 bf->bf_dmacontext = bf->bf_buf_addr; 470 bf->bf_dmacontext = bf->bf_buf_addr;
462 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf; 471 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
@@ -542,7 +551,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
542 551
543 bf->bf_mpdu = skb; 552 bf->bf_mpdu = skb;
544 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data, 553 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
545 skb_end_pointer(skb) - skb->head, 554 sc->sc_rxbufsize,
546 PCI_DMA_FROMDEVICE); 555 PCI_DMA_FROMDEVICE);
547 bf->bf_dmacontext = bf->bf_buf_addr; 556 bf->bf_dmacontext = bf->bf_buf_addr;
548 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf; 557 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
@@ -1007,7 +1016,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
1007 1016
1008 pci_dma_sync_single_for_cpu(sc->pdev, 1017 pci_dma_sync_single_for_cpu(sc->pdev,
1009 bf->bf_buf_addr, 1018 bf->bf_buf_addr,
1010 skb_tailroom(skb), 1019 sc->sc_rxbufsize,
1011 PCI_DMA_FROMDEVICE); 1020 PCI_DMA_FROMDEVICE);
1012 pci_unmap_single(sc->pdev, 1021 pci_unmap_single(sc->pdev,
1013 bf->bf_buf_addr, 1022 bf->bf_buf_addr,
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index dcce3542d5a7..7a9f901d4ff6 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -3897,6 +3897,7 @@ static int ipw_disassociate(void *data)
3897 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) 3897 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3898 return 0; 3898 return 0;
3899 ipw_send_disassociate(data, 0); 3899 ipw_send_disassociate(data, 0);
3900 netif_carrier_off(priv->net_dev);
3900 return 1; 3901 return 1;
3901} 3902}
3902 3903
@@ -10190,6 +10191,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10190 u16 remaining_bytes; 10191 u16 remaining_bytes;
10191 int fc; 10192 int fc;
10192 10193
10194 if (!(priv->status & STATUS_ASSOCIATED))
10195 goto drop;
10196
10193 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 10197 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10194 switch (priv->ieee->iw_mode) { 10198 switch (priv->ieee->iw_mode) {
10195 case IW_MODE_ADHOC: 10199 case IW_MODE_ADHOC:
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 444c5cc05f03..c4c0371c763b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1384,9 +1384,11 @@ void iwl_rx_handle(struct iwl_priv *priv)
1384 1384
1385 rxq->queue[i] = NULL; 1385 rxq->queue[i] = NULL;
1386 1386
1387 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->aligned_dma_addr, 1387 dma_sync_single_range_for_cpu(
1388 priv->hw_params.rx_buf_size, 1388 &priv->pci_dev->dev, rxb->real_dma_addr,
1389 PCI_DMA_FROMDEVICE); 1389 rxb->aligned_dma_addr - rxb->real_dma_addr,
1390 priv->hw_params.rx_buf_size,
1391 PCI_DMA_FROMDEVICE);
1390 pkt = (struct iwl_rx_packet *)rxb->skb->data; 1392 pkt = (struct iwl_rx_packet *)rxb->skb->data;
1391 1393
1392 /* Reclaim a command buffer only if this packet is a response 1394 /* Reclaim a command buffer only if this packet is a response
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4c312c55f90c..01a845851338 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -290,6 +290,9 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
290 priv->num_stations = 0; 290 priv->num_stations = 0;
291 memset(priv->stations, 0, sizeof(priv->stations)); 291 memset(priv->stations, 0, sizeof(priv->stations));
292 292
293 /* clean ucode key table bit map */
294 priv->ucode_key_table = 0;
295
293 spin_unlock_irqrestore(&priv->sta_lock, flags); 296 spin_unlock_irqrestore(&priv->sta_lock, flags);
294} 297}
295EXPORT_SYMBOL(iwl_clear_stations_table); 298EXPORT_SYMBOL(iwl_clear_stations_table);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 61797f3f8d5c..26f7084d3011 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -475,7 +475,7 @@ static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
475 if (!test_and_set_bit(i, &priv->ucode_key_table)) 475 if (!test_and_set_bit(i, &priv->ucode_key_table))
476 return i; 476 return i;
477 477
478 return -1; 478 return WEP_INVALID_OFFSET;
479} 479}
480 480
481int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) 481int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
@@ -620,6 +620,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
620 /* else, we are overriding an existing key => no need to allocate room 620 /* else, we are overriding an existing key => no need to allocate room
621 * in uCode. */ 621 * in uCode. */
622 622
623 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
624	     "no space for new key");
625
623 priv->stations[sta_id].sta.key.key_flags = key_flags; 626 priv->stations[sta_id].sta.key.key_flags = key_flags;
624 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 627 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
625 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 628 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -637,6 +640,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
637{ 640{
638 unsigned long flags; 641 unsigned long flags;
639 __le16 key_flags = 0; 642 __le16 key_flags = 0;
643 int ret;
640 644
641 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); 645 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
642 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 646 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -664,14 +668,18 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
664 /* else, we are overriding an existing key => no need to allocate room 668 /* else, we are overriding an existing key => no need to allocate room
665 * in uCode. */ 669 * in uCode. */
666 670
671 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
672	     "no space for new key");
673
667 priv->stations[sta_id].sta.key.key_flags = key_flags; 674 priv->stations[sta_id].sta.key.key_flags = key_flags;
668 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 675 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
669 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 676 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
670 677
678 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
679
671 spin_unlock_irqrestore(&priv->sta_lock, flags); 680 spin_unlock_irqrestore(&priv->sta_lock, flags);
672 681
673 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); 682 return ret;
674 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
675} 683}
676 684
677static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, 685static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -696,6 +704,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
696 /* else, we are overriding an existing key => no need to allocate room 704 /* else, we are overriding an existing key => no need to allocate room
697 * in uCode. */ 705 * in uCode. */
698 706
707 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
708	     "no space for new key");
709
699 /* This copy is actually not needed: we get the key with each TX */ 710 /* This copy is actually not needed: we get the key with each TX */
700 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); 711 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
701 712
@@ -734,6 +745,13 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
734 return 0; 745 return 0;
735 } 746 }
736 747
748 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
749 IWL_WARNING("Removing wrong key %d 0x%x\n",
750 keyconf->keyidx, key_flags);
751 spin_unlock_irqrestore(&priv->sta_lock, flags);
752 return 0;
753 }
754
737 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, 755 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
738 &priv->ucode_key_table)) 756 &priv->ucode_key_table))
739 IWL_ERROR("index %d not used in uCode key table.\n", 757 IWL_ERROR("index %d not used in uCode key table.\n",
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index fe1867b25ff7..cac732f4047f 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -615,7 +615,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
615 struct ieee80211_hdr *tx_hdr; 615 struct ieee80211_hdr *tx_hdr;
616 616
617 tx_hdr = (struct ieee80211_hdr *)skb->data; 617 tx_hdr = (struct ieee80211_hdr *)skb->data;
618 if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1))) 618 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
619 { 619 {
620 __skb_unlink(skb, q); 620 __skb_unlink(skb, q);
621 tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1); 621 tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 8f63f4c6b85f..9aad608bcf3f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -16,6 +16,7 @@
16#include <linux/pm.h> 16#include <linux/pm.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/jiffies.h>
19#include <linux/pci-aspm.h> 20#include <linux/pci-aspm.h>
20#include "../pci.h" 21#include "../pci.h"
21 22
@@ -161,11 +162,12 @@ static void pcie_check_clock_pm(struct pci_dev *pdev)
161 */ 162 */
162static void pcie_aspm_configure_common_clock(struct pci_dev *pdev) 163static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
163{ 164{
164 int pos, child_pos; 165 int pos, child_pos, i = 0;
165 u16 reg16 = 0; 166 u16 reg16 = 0;
166 struct pci_dev *child_dev; 167 struct pci_dev *child_dev;
167 int same_clock = 1; 168 int same_clock = 1;
168 169 unsigned long start_jiffies;
170 u16 child_regs[8], parent_reg;
169 /* 171 /*
170 * all functions of a slot should have the same Slot Clock 172 * all functions of a slot should have the same Slot Clock
171 * Configuration, so just check one function 173 * Configuration, so just check one function
@@ -191,16 +193,19 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
191 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); 193 child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
192 pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, 194 pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
193 &reg16); 195 &reg16);
196 child_regs[i] = reg16;
194 if (same_clock) 197 if (same_clock)
195 reg16 |= PCI_EXP_LNKCTL_CCC; 198 reg16 |= PCI_EXP_LNKCTL_CCC;
196 else 199 else
197 reg16 &= ~PCI_EXP_LNKCTL_CCC; 200 reg16 &= ~PCI_EXP_LNKCTL_CCC;
198 pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL, 201 pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
199 reg16); 202 reg16);
203 i++;
200 } 204 }
201 205
202 /* Configure upstream component */ 206 /* Configure upstream component */
203 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 207 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
208 parent_reg = reg16;
204 if (same_clock) 209 if (same_clock)
205 reg16 |= PCI_EXP_LNKCTL_CCC; 210 reg16 |= PCI_EXP_LNKCTL_CCC;
206 else 211 else
@@ -212,12 +217,30 @@ static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
212 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 217 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
213 218
214 /* Wait for link training end */ 219 /* Wait for link training end */
215 while (1) { 220 /* break out after waiting for 1 second */
221 start_jiffies = jiffies;
222 while ((jiffies - start_jiffies) < HZ) {
216 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16); 223 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
217 if (!(reg16 & PCI_EXP_LNKSTA_LT)) 224 if (!(reg16 & PCI_EXP_LNKSTA_LT))
218 break; 225 break;
219 cpu_relax(); 226 cpu_relax();
220 } 227 }
228 /* training failed -> recover */
229 if ((jiffies - start_jiffies) >= HZ) {
230 dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
231 " common clock\n");
232 i = 0;
233 list_for_each_entry(child_dev, &pdev->subordinate->devices,
234 bus_list) {
235 child_pos = pci_find_capability(child_dev,
236 PCI_CAP_ID_EXP);
237 pci_write_config_word(child_dev,
238 child_pos + PCI_EXP_LNKCTL,
239 child_regs[i]);
240 i++;
241 }
242 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
243 }
221} 244}
222 245
223/* 246/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5049a47030ac..5f4f85f56cb7 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/acpi.h> 23#include <linux/acpi.h>
24#include <linux/kallsyms.h> 24#include <linux/kallsyms.h>
25#include <linux/dmi.h>
25#include "pci.h" 26#include "pci.h"
26 27
27int isa_dma_bridge_buggy; 28int isa_dma_bridge_buggy;
@@ -1828,6 +1829,22 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
1828 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, 1829 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
1829 ht_enable_msi_mapping); 1830 ht_enable_msi_mapping);
1830 1831
1832/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
1833 * for the MCP55 NIC. It is not yet determined whether the msi problem
1834 * also affects other devices. For now, turn off msi for this device.
1835 */
1836static void __devinit nvenet_msi_disable(struct pci_dev *dev)
1837{
1838 if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
1839 dev_info(&dev->dev,
1840 "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
1841 dev->no_msi = 1;
1842 }
1843}
1844DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
1845 PCI_DEVICE_ID_NVIDIA_NVENET_15,
1846 nvenet_msi_disable);
1847
1831static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) 1848static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
1832{ 1849{
1833 struct pci_dev *host_bridge; 1850 struct pci_dev *host_bridge;
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 4dd1c3e157ae..5a8ccb4f604d 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -253,6 +253,7 @@ placeholder:
253 __func__, pci_domain_nr(parent), parent->number, slot_nr); 253 __func__, pci_domain_nr(parent), parent->number, slot_nr);
254 254
255out: 255out:
256 kfree(slot_name);
256 up_write(&pci_bus_sem); 257 up_write(&pci_bus_sem);
257 return slot; 258 return slot;
258err: 259err:
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 341d7a5b45a2..4e91419e8911 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -209,12 +209,18 @@ static int ds1672_probe(struct i2c_client *client,
209 return err; 209 return err;
210} 210}
211 211
212static struct i2c_device_id ds1672_id[] = {
213 { "ds1672", 0 },
214 { }
215};
216
212static struct i2c_driver ds1672_driver = { 217static struct i2c_driver ds1672_driver = {
213 .driver = { 218 .driver = {
214 .name = "rtc-ds1672", 219 .name = "rtc-ds1672",
215 }, 220 },
216 .probe = &ds1672_probe, 221 .probe = &ds1672_probe,
217 .remove = &ds1672_remove, 222 .remove = &ds1672_remove,
223 .id_table = ds1672_id,
218}; 224};
219 225
220static int __init ds1672_init(void) 226static int __init ds1672_init(void)
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index 80782798763f..a4f6665ab3c5 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -247,12 +247,18 @@ max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
247 return 0; 247 return 0;
248} 248}
249 249
250static struct i2c_device_id max6900_id[] = {
251 { "max6900", 0 },
252 { }
253};
254
250static struct i2c_driver max6900_driver = { 255static struct i2c_driver max6900_driver = {
251 .driver = { 256 .driver = {
252 .name = "rtc-max6900", 257 .name = "rtc-max6900",
253 }, 258 },
254 .probe = max6900_probe, 259 .probe = max6900_probe,
255 .remove = max6900_remove, 260 .remove = max6900_remove,
261 .id_table = max6900_id,
256}; 262};
257 263
258static int __init max6900_init(void) 264static int __init max6900_init(void)
diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c
index 7ccb0dd700af..5be98bfd7ed3 100644
--- a/drivers/rtc/rtc-starfire.c
+++ b/drivers/rtc/rtc-starfire.c
@@ -6,7 +6,6 @@
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/time.h>
10#include <linux/rtc.h> 9#include <linux/rtc.h>
11#include <linux/platform_device.h> 10#include <linux/platform_device.h>
12 11
@@ -16,11 +15,6 @@ MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
16MODULE_DESCRIPTION("Starfire RTC driver"); 15MODULE_DESCRIPTION("Starfire RTC driver");
17MODULE_LICENSE("GPL"); 16MODULE_LICENSE("GPL");
18 17
19struct starfire_rtc {
20 struct rtc_device *rtc;
21 spinlock_t lock;
22};
23
24static u32 starfire_get_time(void) 18static u32 starfire_get_time(void)
25{ 19{
26 static char obp_gettod[32]; 20 static char obp_gettod[32];
@@ -35,64 +29,31 @@ static u32 starfire_get_time(void)
35 29
36static int starfire_read_time(struct device *dev, struct rtc_time *tm) 30static int starfire_read_time(struct device *dev, struct rtc_time *tm)
37{ 31{
38 struct starfire_rtc *p = dev_get_drvdata(dev); 32 rtc_time_to_tm(starfire_get_time(), tm);
39 unsigned long flags, secs; 33 return rtc_valid_tm(tm);
40
41 spin_lock_irqsave(&p->lock, flags);
42 secs = starfire_get_time();
43 spin_unlock_irqrestore(&p->lock, flags);
44
45 rtc_time_to_tm(secs, tm);
46
47 return 0;
48}
49
50static int starfire_set_time(struct device *dev, struct rtc_time *tm)
51{
52 unsigned long secs;
53 int err;
54
55 err = rtc_tm_to_time(tm, &secs);
56 if (err)
57 return err;
58
59 /* Do nothing, time is set using the service processor
60 * console on this platform.
61 */
62 return 0;
63} 34}
64 35
65static const struct rtc_class_ops starfire_rtc_ops = { 36static const struct rtc_class_ops starfire_rtc_ops = {
66 .read_time = starfire_read_time, 37 .read_time = starfire_read_time,
67 .set_time = starfire_set_time,
68}; 38};
69 39
70static int __devinit starfire_rtc_probe(struct platform_device *pdev) 40static int __init starfire_rtc_probe(struct platform_device *pdev)
71{ 41{
72 struct starfire_rtc *p = kzalloc(sizeof(*p), GFP_KERNEL); 42 struct rtc_device *rtc = rtc_device_register("starfire", &pdev->dev,
73 43 &starfire_rtc_ops, THIS_MODULE);
74 if (!p) 44 if (IS_ERR(rtc))
75 return -ENOMEM; 45 return PTR_ERR(rtc);
76 46
77 spin_lock_init(&p->lock); 47 platform_set_drvdata(pdev, rtc);
78 48
79 p->rtc = rtc_device_register("starfire", &pdev->dev,
80 &starfire_rtc_ops, THIS_MODULE);
81 if (IS_ERR(p->rtc)) {
82 int err = PTR_ERR(p->rtc);
83 kfree(p);
84 return err;
85 }
86 platform_set_drvdata(pdev, p);
87 return 0; 49 return 0;
88} 50}
89 51
90static int __devexit starfire_rtc_remove(struct platform_device *pdev) 52static int __exit starfire_rtc_remove(struct platform_device *pdev)
91{ 53{
92 struct starfire_rtc *p = platform_get_drvdata(pdev); 54 struct rtc_device *rtc = platform_get_drvdata(pdev);
93 55
94 rtc_device_unregister(p->rtc); 56 rtc_device_unregister(rtc);
95 kfree(p);
96 57
97 return 0; 58 return 0;
98} 59}
@@ -102,13 +63,12 @@ static struct platform_driver starfire_rtc_driver = {
102 .name = "rtc-starfire", 63 .name = "rtc-starfire",
103 .owner = THIS_MODULE, 64 .owner = THIS_MODULE,
104 }, 65 },
105 .probe = starfire_rtc_probe, 66 .remove = __exit_p(starfire_rtc_remove),
106 .remove = __devexit_p(starfire_rtc_remove),
107}; 67};
108 68
109static int __init starfire_rtc_init(void) 69static int __init starfire_rtc_init(void)
110{ 70{
111 return platform_driver_register(&starfire_rtc_driver); 71 return platform_driver_probe(&starfire_rtc_driver, starfire_rtc_probe);
112} 72}
113 73
114static void __exit starfire_rtc_exit(void) 74static void __exit starfire_rtc_exit(void)
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c
index abe87a4d2665..01d8da9afdc8 100644
--- a/drivers/rtc/rtc-twl4030.c
+++ b/drivers/rtc/rtc-twl4030.c
@@ -337,7 +337,7 @@ static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
337} 337}
338 338
339#else 339#else
340#define omap_rtc_ioctl NULL 340#define twl4030_rtc_ioctl NULL
341#endif 341#endif
342 342
343static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc) 343static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 35364f64da7f..c557ba34e1aa 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -720,7 +720,6 @@ static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
720 goto failed_openfcp; 720 goto failed_openfcp;
721 721
722 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status); 722 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);
723 schedule_work(&act->adapter->scan_work);
724 723
725 return ZFCP_ERP_SUCCEEDED; 724 return ZFCP_ERP_SUCCEEDED;
726 725
@@ -1186,7 +1185,9 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
1186 container_of(work, struct zfcp_erp_add_work, work); 1185 container_of(work, struct zfcp_erp_add_work, work);
1187 struct zfcp_unit *unit = p->unit; 1186 struct zfcp_unit *unit = p->unit;
1188 struct fc_rport *rport = unit->port->rport; 1187 struct fc_rport *rport = unit->port->rport;
1189 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, 1188
1189 if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
1190 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1190 scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0); 1191 scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0);
1191 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); 1192 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1192 zfcp_unit_put(unit); 1193 zfcp_unit_put(unit);
@@ -1282,6 +1283,8 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1282 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1283 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1283 if (result != ZFCP_ERP_SUCCEEDED) 1284 if (result != ZFCP_ERP_SUCCEEDED)
1284 zfcp_erp_rports_del(adapter); 1285 zfcp_erp_rports_del(adapter);
1286 else
1287 schedule_work(&adapter->scan_work);
1285 zfcp_adapter_put(adapter); 1288 zfcp_adapter_put(adapter);
1286 break; 1289 break;
1287 } 1290 }
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 1a7c80a77ff5..8aab3091a7b1 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -50,7 +50,8 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
50 if (mutex_lock_interruptible(&wka_port->mutex)) 50 if (mutex_lock_interruptible(&wka_port->mutex))
51 return -ERESTARTSYS; 51 return -ERESTARTSYS;
52 52
53 if (wka_port->status != ZFCP_WKA_PORT_ONLINE) { 53 if (wka_port->status == ZFCP_WKA_PORT_OFFLINE ||
54 wka_port->status == ZFCP_WKA_PORT_CLOSING) {
54 wka_port->status = ZFCP_WKA_PORT_OPENING; 55 wka_port->status = ZFCP_WKA_PORT_OPENING;
55 if (zfcp_fsf_open_wka_port(wka_port)) 56 if (zfcp_fsf_open_wka_port(wka_port))
56 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 57 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
@@ -125,8 +126,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
125 126
126 read_lock_irqsave(&zfcp_data.config_lock, flags); 127 read_lock_irqsave(&zfcp_data.config_lock, flags);
127 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { 128 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
128 /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */ 129 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_PHYS_OPEN))
129 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_DID_DID))
130 /* Try to connect to unused ports anyway. */ 130 /* Try to connect to unused ports anyway. */
131 zfcp_erp_port_reopen(port, 131 zfcp_erp_port_reopen(port,
132 ZFCP_STATUS_COMMON_ERP_FAILED, 132 ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -610,7 +610,6 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
610 int ret, i; 610 int ret, i;
611 struct zfcp_gpn_ft *gpn_ft; 611 struct zfcp_gpn_ft *gpn_ft;
612 612
613 zfcp_erp_wait(adapter); /* wait until adapter is finished with ERP */
614 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT) 613 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
615 return 0; 614 return 0;
616 615
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index d024442ee128..dc0367690405 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -930,8 +930,10 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
930 goto out; 930 goto out;
931 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, 931 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
932 req_flags, adapter->pool.fsf_req_abort); 932 req_flags, adapter->pool.fsf_req_abort);
933 if (IS_ERR(req)) 933 if (IS_ERR(req)) {
934 req = NULL;
934 goto out; 935 goto out;
936 }
935 937
936 if (unlikely(!(atomic_read(&unit->status) & 938 if (unlikely(!(atomic_read(&unit->status) &
937 ZFCP_STATUS_COMMON_UNBLOCKED))) 939 ZFCP_STATUS_COMMON_UNBLOCKED)))
@@ -1584,6 +1586,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1584 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1586 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1585 break; 1587 break;
1586 case FSF_PORT_ALREADY_OPEN: 1588 case FSF_PORT_ALREADY_OPEN:
1589 break;
1587 case FSF_GOOD: 1590 case FSF_GOOD:
1588 wka_port->handle = header->port_handle; 1591 wka_port->handle = header->port_handle;
1589 wka_port->status = ZFCP_WKA_PORT_ONLINE; 1592 wka_port->status = ZFCP_WKA_PORT_ONLINE;
@@ -2113,18 +2116,21 @@ static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
2113 2116
2114static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) 2117static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2115{ 2118{
2116 struct scsi_cmnd *scpnt = req->data; 2119 struct scsi_cmnd *scpnt;
2117 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 2120 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
2118 &(req->qtcb->bottom.io.fcp_rsp); 2121 &(req->qtcb->bottom.io.fcp_rsp);
2119 u32 sns_len; 2122 u32 sns_len;
2120 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1]; 2123 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
2121 unsigned long flags; 2124 unsigned long flags;
2122 2125
2123 if (unlikely(!scpnt))
2124 return;
2125
2126 read_lock_irqsave(&req->adapter->abort_lock, flags); 2126 read_lock_irqsave(&req->adapter->abort_lock, flags);
2127 2127
2128 scpnt = req->data;
2129 if (unlikely(!scpnt)) {
2130 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2131 return;
2132 }
2133
2128 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) { 2134 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2129 set_host_byte(scpnt, DID_SOFT_ERROR); 2135 set_host_byte(scpnt, DID_SOFT_ERROR);
2130 set_driver_byte(scpnt, SUGGEST_RETRY); 2136 set_driver_byte(scpnt, SUGGEST_RETRY);
@@ -2442,8 +2448,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2442 goto out; 2448 goto out;
2443 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2449 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2444 adapter->pool.fsf_req_scsi); 2450 adapter->pool.fsf_req_scsi);
2445 if (IS_ERR(req)) 2451 if (IS_ERR(req)) {
2452 req = NULL;
2446 goto out; 2453 goto out;
2454 }
2447 2455
2448 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; 2456 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2449 req->data = unit; 2457 req->data = unit;
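The zfcp_fsf.c hunks above defer the req->data dereference until the adapter's abort_lock is held, so a racing abort cannot clear the pointer between the NULL check and its use. A minimal sketch of that check-under-lock idiom, using made-up names (my_req, my_complete) and an ordinary spinlock rather than zfcp's reader/writer lock:

#include <linux/spinlock.h>

struct my_req {
	spinlock_t *lock;	/* serializes completion against abort */
	void *data;		/* cleared by the abort path under ->lock */
};

static void my_complete(struct my_req *req)
{
	unsigned long flags;
	void *data;

	spin_lock_irqsave(req->lock, flags);
	data = req->data;			/* read only under the lock */
	if (!data) {
		/* already aborted: nothing left to complete */
		spin_unlock_irqrestore(req->lock, flags);
		return;
	}
	/* ... finish the command while the lock is still held ... */
	spin_unlock_irqrestore(req->lock, flags);
}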
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e46fd3e9f68f..468c880f8b6d 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -88,7 +88,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
88 ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0, 88 ret = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, 0,
89 ZFCP_REQ_AUTO_CLEANUP); 89 ZFCP_REQ_AUTO_CLEANUP);
90 if (unlikely(ret == -EBUSY)) 90 if (unlikely(ret == -EBUSY))
91 zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT); 91 return SCSI_MLQUEUE_DEVICE_BUSY;
92 else if (unlikely(ret < 0)) 92 else if (unlikely(ret < 0))
93 return SCSI_MLQUEUE_HOST_BUSY; 93 return SCSI_MLQUEUE_HOST_BUSY;
94 94
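The zfcp_scsi.c queuecommand change above turns a transient -EBUSY from the driver into SCSI_MLQUEUE_DEVICE_BUSY, so the midlayer requeues the command instead of failing it with DID_NO_CONNECT. A rough sketch of that translation for a 2.6.28-era queuecommand; my_send() is a placeholder for the driver's real submit routine:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int my_send(struct scsi_cmnd *scpnt,
		   void (*done)(struct scsi_cmnd *));	/* hypothetical submit path */

static int my_queuecommand(struct scsi_cmnd *scpnt,
			   void (*done)(struct scsi_cmnd *))
{
	int ret = my_send(scpnt, done);

	if (ret == -EBUSY)
		return SCSI_MLQUEUE_DEVICE_BUSY;  /* retry this device later */
	if (ret < 0)
		return SCSI_MLQUEUE_HOST_BUSY;	  /* retry the whole host later */
	return 0;
}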
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 9aa301c1ed07..162cd927d94b 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -427,8 +427,8 @@ static int aac_slave_configure(struct scsi_device *sdev)
427 * Firmware has an individual device recovery time typically 427 * Firmware has an individual device recovery time typically
428 * of 35 seconds, give us a margin. 428 * of 35 seconds, give us a margin.
429 */ 429 */
430 if (sdev->timeout < (45 * HZ)) 430 if (sdev->request_queue->rq_timeout < (45 * HZ))
431 sdev->timeout = 45 * HZ; 431 blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
432 for (cid = 0; cid < aac->maximum_num_containers; ++cid) 432 for (cid = 0; cid < aac->maximum_num_containers; ++cid)
433 if (aac->fsa_dev[cid].valid) 433 if (aac->fsa_dev[cid].valid)
434 ++num_lsu; 434 ++num_lsu;
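Several hunks in this series (aacraid above, ibmvscsi, megaraid_sas, stex and scsi_error.c below) replace the removed sdev->timeout field with the block layer's per-queue timeout. A minimal sketch of the new call in a slave_configure hook, reusing the 45*HZ floor from the aacraid change; my_slave_configure is a placeholder name:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

static int my_slave_configure(struct scsi_device *sdev)
{
	/* raise the command timeout to at least 45 seconds */
	if (sdev->request_queue->rq_timeout < 45 * HZ)
		blk_queue_rq_timeout(sdev->request_queue, 45 * HZ);
	return 0;
}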
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 87e09f35d3d4..6cad1758243a 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1442,7 +1442,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1442 spin_lock_irqsave(shost->host_lock, lock_flags); 1442 spin_lock_irqsave(shost->host_lock, lock_flags);
1443 if (sdev->type == TYPE_DISK) { 1443 if (sdev->type == TYPE_DISK) {
1444 sdev->allow_restart = 1; 1444 sdev->allow_restart = 1;
1445 sdev->timeout = 60 * HZ; 1445 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1446 } 1446 }
1447 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); 1447 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1448 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1448 spin_unlock_irqrestore(shost->host_lock, lock_flags);
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index a454f94623d7..17ce7abe17ee 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1016,7 +1016,8 @@ static int megasas_slave_configure(struct scsi_device *sdev)
1016 * The RAID firmware may require extended timeouts. 1016 * The RAID firmware may require extended timeouts.
1017 */ 1017 */
1018 if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS) 1018 if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS)
1019 sdev->timeout = MEGASAS_DEFAULT_CMD_TIMEOUT * HZ; 1019 blk_queue_rq_timeout(sdev->request_queue,
1020 MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
1020 return 0; 1021 return 0;
1021} 1022}
1022 1023
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 386361778ebb..edfaf241c5ba 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -932,8 +932,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
932 int i, rtn = NEEDS_RETRY; 932 int i, rtn = NEEDS_RETRY;
933 933
934 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++) 934 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
935 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, 935 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
936 scmd->device->timeout, 0);
937 936
938 if (rtn == SUCCESS) 937 if (rtn == SUCCESS)
939 return 0; 938 return 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f5d3b96890dc..fa45a1a66867 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -567,15 +567,18 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)
567 */ 567 */
568static void scsi_run_queue(struct request_queue *q) 568static void scsi_run_queue(struct request_queue *q)
569{ 569{
570 struct scsi_device *starved_head = NULL, *sdev = q->queuedata; 570 struct scsi_device *sdev = q->queuedata;
571 struct Scsi_Host *shost = sdev->host; 571 struct Scsi_Host *shost = sdev->host;
572 LIST_HEAD(starved_list);
572 unsigned long flags; 573 unsigned long flags;
573 574
574 if (scsi_target(sdev)->single_lun) 575 if (scsi_target(sdev)->single_lun)
575 scsi_single_lun_run(sdev); 576 scsi_single_lun_run(sdev);
576 577
577 spin_lock_irqsave(shost->host_lock, flags); 578 spin_lock_irqsave(shost->host_lock, flags);
578 while (!list_empty(&shost->starved_list) && !scsi_host_is_busy(shost)) { 579 list_splice_init(&shost->starved_list, &starved_list);
580
581 while (!list_empty(&starved_list)) {
579 int flagset; 582 int flagset;
580 583
581 /* 584 /*
@@ -588,24 +591,18 @@ static void scsi_run_queue(struct request_queue *q)
588 * scsi_request_fn must get the host_lock before checking 591 * scsi_request_fn must get the host_lock before checking
589 * or modifying starved_list or starved_entry. 592 * or modifying starved_list or starved_entry.
590 */ 593 */
591 sdev = list_entry(shost->starved_list.next, 594 if (scsi_host_is_busy(shost))
592 struct scsi_device, starved_entry);
593 /*
594 * The *queue_ready functions can add a device back onto the
595 * starved list's tail, so we must check for a infinite loop.
596 */
597 if (sdev == starved_head)
598 break; 595 break;
599 if (!starved_head)
600 starved_head = sdev;
601 596
597 sdev = list_entry(starved_list.next,
598 struct scsi_device, starved_entry);
599 list_del_init(&sdev->starved_entry);
602 if (scsi_target_is_busy(scsi_target(sdev))) { 600 if (scsi_target_is_busy(scsi_target(sdev))) {
603 list_move_tail(&sdev->starved_entry, 601 list_move_tail(&sdev->starved_entry,
604 &shost->starved_list); 602 &shost->starved_list);
605 continue; 603 continue;
606 } 604 }
607 605
608 list_del_init(&sdev->starved_entry);
609 spin_unlock(shost->host_lock); 606 spin_unlock(shost->host_lock);
610 607
611 spin_lock(sdev->request_queue->queue_lock); 608 spin_lock(sdev->request_queue->queue_lock);
@@ -621,6 +618,8 @@ static void scsi_run_queue(struct request_queue *q)
621 618
622 spin_lock(shost->host_lock); 619 spin_lock(shost->host_lock);
623 } 620 }
621 /* put any unprocessed entries back */
622 list_splice(&starved_list, &shost->starved_list);
624 spin_unlock_irqrestore(shost->host_lock, flags); 623 spin_unlock_irqrestore(shost->host_lock, flags);
625 624
626 blk_run_queue(q); 625 blk_run_queue(q);
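The scsi_run_queue() rework above drops the old head-marker loop check by splicing the shared starved_list onto a private list, draining that copy, and splicing any leftovers back under the lock. A small sketch of the same splice-and-drain pattern with illustrative names (my_item, pending_list, can_process, process_one); none of these are SCSI-layer symbols:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_item {
	struct list_head entry;
};

static LIST_HEAD(pending_list);			/* shared, protected by pending_lock */
static DEFINE_SPINLOCK(pending_lock);

static bool can_process(void)
{
	return true;				/* placeholder for "host not busy" */
}

static void process_one(struct my_item *item)
{
	/* placeholder: real code works on @item and may requeue it */
}

static void drain_pending(void)
{
	LIST_HEAD(local);
	struct my_item *item;
	unsigned long flags;

	spin_lock_irqsave(&pending_lock, flags);
	list_splice_init(&pending_list, &local);	/* take a private copy */

	while (!list_empty(&local)) {
		if (!can_process())
			break;
		item = list_entry(local.next, struct my_item, entry);
		list_del_init(&item->entry);

		spin_unlock_irqrestore(&pending_lock, flags);
		process_one(item);
		spin_lock_irqsave(&pending_lock, flags);
	}
	/* put anything we did not get to back on the shared list */
	list_splice(&local, &pending_list);
	spin_unlock_irqrestore(&pending_lock, flags);
}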
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c9e1242eaf25..5081b3981d3c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -757,7 +757,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
757 * access to the device is prohibited. 757 * access to the device is prohibited.
758 */ 758 */
759 error = scsi_nonblockable_ioctl(sdp, cmd, p, 759 error = scsi_nonblockable_ioctl(sdp, cmd, p,
760 (mode & FMODE_NDELAY_NOW) != 0); 760 (mode & FMODE_NDELAY) != 0);
761 if (!scsi_block_when_processing_errors(sdp) || !error) 761 if (!scsi_block_when_processing_errors(sdp) || !error)
762 return error; 762 return error;
763 763
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 62b6633e3a97..45b66b98a516 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -521,7 +521,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
521 * if it doesn't recognise the ioctl 521 * if it doesn't recognise the ioctl
522 */ 522 */
523 ret = scsi_nonblockable_ioctl(sdev, cmd, argp, 523 ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
524 (mode & FMODE_NDELAY_NOW) != 0); 524 (mode & FMODE_NDELAY) != 0);
525 if (ret != -ENODEV) 525 if (ret != -ENODEV)
526 return ret; 526 return ret;
527 return scsi_ioctl(sdev, cmd, argp); 527 return scsi_ioctl(sdev, cmd, argp);
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 3790906a77d1..2fa830c0be27 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -477,7 +477,7 @@ stex_slave_config(struct scsi_device *sdev)
477{ 477{
478 sdev->use_10_for_rw = 1; 478 sdev->use_10_for_rw = 1;
479 sdev->use_10_for_ms = 1; 479 sdev->use_10_for_ms = 1;
480 sdev->timeout = 60 * HZ; 480 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
481 sdev->tagged_supported = 1; 481 sdev->tagged_supported = 1;
482 482
483 return 0; 483 return 0;
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 6dd98f9fb89c..ae3699d77dd0 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2149,7 +2149,7 @@ out4:
2149 return ret; 2149 return ret;
2150} 2150}
2151 2151
2152static struct ioc3_submodule ioc3uart_submodule = { 2152static struct ioc3_submodule ioc3uart_ops = {
2153 .name = "IOC3uart", 2153 .name = "IOC3uart",
2154 .probe = ioc3uart_probe, 2154 .probe = ioc3uart_probe,
2155 .remove = ioc3uart_remove, 2155 .remove = ioc3uart_remove,
@@ -2173,7 +2173,7 @@ static int __devinit ioc3uart_init(void)
2173 __func__); 2173 __func__);
2174 return ret; 2174 return ret;
2175 } 2175 }
2176 ret = ioc3_register_submodule(&ioc3uart_submodule); 2176 ret = ioc3_register_submodule(&ioc3uart_ops);
2177 if (ret) 2177 if (ret)
2178 uart_unregister_driver(&ioc3_uart); 2178 uart_unregister_driver(&ioc3_uart);
2179 return ret; 2179 return ret;
@@ -2181,7 +2181,7 @@ static int __devinit ioc3uart_init(void)
2181 2181
2182static void __devexit ioc3uart_exit(void) 2182static void __devexit ioc3uart_exit(void)
2183{ 2183{
2184 ioc3_unregister_submodule(&ioc3uart_submodule); 2184 ioc3_unregister_submodule(&ioc3uart_ops);
2185 uart_unregister_driver(&ioc3_uart); 2185 uart_unregister_driver(&ioc3_uart);
2186} 2186}
2187 2187
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 6117d3db0b66..28c00c3d58f5 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -591,8 +591,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
591 /* Update the per-port timeout */ 591 /* Update the per-port timeout */
592 uart_update_timeout(port, new->c_cflag, baud); 592 uart_update_timeout(port, new->c_cflag, baud);
593 593
594 /* Do our best to flush TX & RX, so we don't loose anything */ 594 /* Do our best to flush TX & RX, so we don't lose anything */
595 /* But we don't wait indefinitly ! */ 595 /* But we don't wait indefinitely ! */
596 j = 5000000; /* Maximum wait */ 596 j = 5000000; /* Maximum wait */
597 /* FIXME Can't receive chars since set_termios might be called at early 597 /* FIXME Can't receive chars since set_termios might be called at early
598 * boot for the console, all stuff is not yet ready to receive at that 598 * boot for the console, all stuff is not yet ready to receive at that
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 317d239ab740..29cbb0afef8e 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -177,5 +177,5 @@ module_exit(s3c2440_serial_exit);
177 177
178MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver"); 178MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
179MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 179MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
180MODULE_LICENSE("GPLi v2"); 180MODULE_LICENSE("GPL v2");
181MODULE_ALIAS("platform:s3c2440-uart"); 181MODULE_ALIAS("platform:s3c2440-uart");
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 91697bdb399f..4bbddb73abd9 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -18,6 +18,7 @@
18#include <linux/jiffies.h> 18#include <linux/jiffies.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/gpio.h>
21 22
22#include <mach/hardware.h> 23#include <mach/hardware.h>
23#include <asm/io.h> 24#include <asm/io.h>
@@ -25,7 +26,6 @@
25 26
26#include <mach/mux.h> 27#include <mach/mux.h>
27#include <mach/irqs.h> 28#include <mach/irqs.h>
28#include <mach/gpio.h>
29#include <mach/fpga.h> 29#include <mach/fpga.h>
30#include <mach/usb.h> 30#include <mach/usb.h>
31 31
@@ -254,8 +254,8 @@ static int ohci_omap_init(struct usb_hcd *hcd)
254 254
255 /* gpio9 for overcurrent detction */ 255 /* gpio9 for overcurrent detction */
256 omap_cfg_reg(W8_1610_GPIO9); 256 omap_cfg_reg(W8_1610_GPIO9);
257 omap_request_gpio(9); 257 gpio_request(9, "OHCI overcurrent");
258 omap_set_gpio_direction(9, 1 /* IN */); 258 gpio_direction_input(9);
259 259
260 /* for paranoia's sake: disable USB.PUEN */ 260 /* for paranoia's sake: disable USB.PUEN */
261 omap_cfg_reg(W4_USB_HIGHZ); 261 omap_cfg_reg(W4_USB_HIGHZ);
@@ -407,7 +407,7 @@ usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
407 put_device(ohci->transceiver->dev); 407 put_device(ohci->transceiver->dev);
408 } 408 }
409 if (machine_is_omap_osk()) 409 if (machine_is_omap_osk())
410 omap_free_gpio(9); 410 gpio_free(9);
411 iounmap(hcd->regs); 411 iounmap(hcd->regs);
412 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 412 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
413 usb_put_hcd(hcd); 413 usb_put_hcd(hcd);
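The ohci-omap.c hunks above move from the OMAP-private GPIO calls to the generic gpiolib API. A short sketch of the request/direction/free sequence with error checking added (the patch itself ignores the return values); the pin number and label are taken from the OSK overcurrent hunk:

#include <linux/gpio.h>

static int overcurrent_gpio_init(void)
{
	int err;

	err = gpio_request(9, "OHCI overcurrent");
	if (err)
		return err;

	err = gpio_direction_input(9);
	if (err)
		gpio_free(9);		/* undo the request on failure */
	return err;
}

static void overcurrent_gpio_exit(void)
{
	gpio_free(9);
}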
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 51d7bdea2869..aad1359a3eb1 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1498,7 +1498,7 @@ static int ftdi_open(struct tty_struct *tty,
1498 priv->interface, buf, 0, WDR_TIMEOUT); 1498 priv->interface, buf, 0, WDR_TIMEOUT);
1499 1499
1500 /* Termios defaults are set by usb_serial_init. We don't change 1500 /* Termios defaults are set by usb_serial_init. We don't change
1501 port->tty->termios - this would loose speed settings, etc. 1501 port->tty->termios - this would lose speed settings, etc.
1502 This is same behaviour as serial.c/rs_open() - Kuba */ 1502 This is same behaviour as serial.c/rs_open() - Kuba */
1503 1503
1504 /* ftdi_set_termios will send usb control messages */ 1504 /* ftdi_set_termios will send usb control messages */
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
index a547e5d4c8bf..8da5e5ab8547 100644
--- a/drivers/video/aty/radeon_accel.c
+++ b/drivers/video/aty/radeon_accel.c
@@ -256,7 +256,8 @@ void radeonfb_imageblit(struct fb_info *info, const struct fb_image *image)
256 return; 256 return;
257 257
258 /* We only do 1 bpp color expansion for now */ 258 /* We only do 1 bpp color expansion for now */
259 if (info->flags & FBINFO_HWACCEL_DISABLED || image->depth != 1) 259 if (!accel_cexp ||
260 (info->flags & FBINFO_HWACCEL_DISABLED) || image->depth != 1)
260 goto fallback; 261 goto fallback;
261 262
262 /* Fallback if running out of the screen. We may do clipping 263 /* Fallback if running out of the screen. We may do clipping
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index b3ffe8205d2b..d5b27f9d374d 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -282,6 +282,8 @@ static int backlight = 1;
282static int backlight = 0; 282static int backlight = 0;
283#endif 283#endif
284 284
285int accel_cexp = 0;
286
285/* 287/*
286 * prototypes 288 * prototypes
287 */ 289 */
@@ -2520,6 +2522,8 @@ static int __init radeonfb_setup (char *options)
2520 } else if (!strncmp(this_opt, "ignore_devlist", 14)) { 2522 } else if (!strncmp(this_opt, "ignore_devlist", 14)) {
2521 ignore_devlist = 1; 2523 ignore_devlist = 1;
2522#endif 2524#endif
2525 } else if (!strncmp(this_opt, "accel_cexp", 12)) {
2526 accel_cexp = 1;
2523 } else 2527 } else
2524 mode_option = this_opt; 2528 mode_option = this_opt;
2525 } 2529 }
@@ -2567,6 +2571,8 @@ module_param(monitor_layout, charp, 0);
2567MODULE_PARM_DESC(monitor_layout, "Specify monitor mapping (like XFree86)"); 2571MODULE_PARM_DESC(monitor_layout, "Specify monitor mapping (like XFree86)");
2568module_param(force_measure_pll, bool, 0); 2572module_param(force_measure_pll, bool, 0);
2569MODULE_PARM_DESC(force_measure_pll, "Force measurement of PLL (debug)"); 2573MODULE_PARM_DESC(force_measure_pll, "Force measurement of PLL (debug)");
2574module_param(accel_cexp, bool, 0);
2575MODULE_PARM_DESC(accel_cexp, "Use acceleration engine for color expansion");
2570#ifdef CONFIG_MTRR 2576#ifdef CONFIG_MTRR
2571module_param(nomtrr, bool, 0); 2577module_param(nomtrr, bool, 0);
2572MODULE_PARM_DESC(nomtrr, "bool: disable use of MTRR registers"); 2578MODULE_PARM_DESC(nomtrr, "bool: disable use of MTRR registers");
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index ea0b5b47acaf..974ca6d86540 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -638,4 +638,6 @@ static inline void radeonfb_bl_init(struct radeonfb_info *rinfo) {}
638static inline void radeonfb_bl_exit(struct radeonfb_info *rinfo) {} 638static inline void radeonfb_bl_exit(struct radeonfb_info *rinfo) {}
639#endif 639#endif
640 640
641extern int accel_cexp;
642
641#endif /* __RADEONFB_H__ */ 643#endif /* __RADEONFB_H__ */
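The radeonfb changes above add accel_cexp as an integer exposed both as a module parameter and a boot option, with an extern declaration in radeonfb.h for the accel code. A compressed sketch of that wiring with placeholder names (my_param, my_header.h, my_driver.c); passing an int variable to module_param(..., bool, ...) follows the 2.6.28-era convention the patch itself uses:

/* my_header.h */
extern int my_param;

/* my_driver.c */
#include <linux/module.h>
#include <linux/moduleparam.h>

int my_param;				/* 0 = feature off by default */
module_param(my_param, bool, 0);
MODULE_PARM_DESC(my_param, "Enable the optional acceleration path");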
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 67ff370d80af..0b2adefe9e3d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3531,12 +3531,18 @@ static void fbcon_exit(void)
3531 softback_buf = 0UL; 3531 softback_buf = 0UL;
3532 3532
3533 for (i = 0; i < FB_MAX; i++) { 3533 for (i = 0; i < FB_MAX; i++) {
3534 int pending;
3535
3534 mapped = 0; 3536 mapped = 0;
3535 info = registered_fb[i]; 3537 info = registered_fb[i];
3536 3538
3537 if (info == NULL) 3539 if (info == NULL)
3538 continue; 3540 continue;
3539 3541
3542 pending = cancel_work_sync(&info->queue);
3543 DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
3544 "no"));
3545
3540 for (j = first_fb_vc; j <= last_fb_vc; j++) { 3546 for (j = first_fb_vc; j <= last_fb_vc; j++) {
3541 if (con2fb_map[j] == i) 3547 if (con2fb_map[j] == i)
3542 mapped = 1; 3548 mapped = 1;
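fbcon_exit() above now cancels any work still queued on each fb_info before the console state is torn down. A minimal sketch of that teardown rule with hypothetical names (my_dev, my_dev_teardown):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct queue;	/* deferred work scheduled elsewhere */
};

static void my_dev_teardown(struct my_dev *dev)
{
	/* waits for a running handler and returns nonzero if a pending
	   item was cancelled before it could run */
	if (cancel_work_sync(&dev->queue))
		pr_debug("cancelled pending work\n");

	/* only now is it safe to free or reuse dev */
}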
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index b790ddff76f9..ee380d5f3410 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -164,7 +164,6 @@ static struct fb_var_screeninfo macfb_defined = {
164}; 164};
165 165
166static struct fb_fix_screeninfo macfb_fix = { 166static struct fb_fix_screeninfo macfb_fix = {
167 .id = "Macintosh ",
168 .type = FB_TYPE_PACKED_PIXELS, 167 .type = FB_TYPE_PACKED_PIXELS,
169 .accel = FB_ACCEL_NONE, 168 .accel = FB_ACCEL_NONE,
170}; 169};
@@ -760,22 +759,22 @@ static int __init macfb_init(void)
760 759
761 switch(ndev->dr_hw) { 760 switch(ndev->dr_hw) {
762 case NUBUS_DRHW_APPLE_MDC: 761 case NUBUS_DRHW_APPLE_MDC:
763 strcat( macfb_fix.id, "Display Card" ); 762 strcpy(macfb_fix.id, "Mac Disp. Card");
764 macfb_setpalette = mdc_setpalette; 763 macfb_setpalette = mdc_setpalette;
765 macfb_defined.activate = FB_ACTIVATE_NOW; 764 macfb_defined.activate = FB_ACTIVATE_NOW;
766 break; 765 break;
767 case NUBUS_DRHW_APPLE_TFB: 766 case NUBUS_DRHW_APPLE_TFB:
768 strcat( macfb_fix.id, "Toby" ); 767 strcpy(macfb_fix.id, "Toby");
769 macfb_setpalette = toby_setpalette; 768 macfb_setpalette = toby_setpalette;
770 macfb_defined.activate = FB_ACTIVATE_NOW; 769 macfb_defined.activate = FB_ACTIVATE_NOW;
771 break; 770 break;
772 case NUBUS_DRHW_APPLE_JET: 771 case NUBUS_DRHW_APPLE_JET:
773 strcat( macfb_fix.id, "Jet"); 772 strcpy(macfb_fix.id, "Jet");
774 macfb_setpalette = jet_setpalette; 773 macfb_setpalette = jet_setpalette;
775 macfb_defined.activate = FB_ACTIVATE_NOW; 774 macfb_defined.activate = FB_ACTIVATE_NOW;
776 break; 775 break;
777 default: 776 default:
778 strcat( macfb_fix.id, "Generic NuBus" ); 777 strcpy(macfb_fix.id, "Generic NuBus");
779 break; 778 break;
780 } 779 }
781 } 780 }
@@ -786,21 +785,11 @@ static int __init macfb_init(void)
786 if (!video_is_nubus) 785 if (!video_is_nubus)
787 switch( mac_bi_data.id ) 786 switch( mac_bi_data.id )
788 { 787 {
789 /* These don't have onboard video. Eventually, we may
790 be able to write separate framebuffer drivers for
791 them (tobyfb.c, hiresfb.c, etc, etc) */
792 case MAC_MODEL_II:
793 case MAC_MODEL_IIX:
794 case MAC_MODEL_IICX:
795 case MAC_MODEL_IIFX:
796 strcat( macfb_fix.id, "Generic NuBus" );
797 break;
798
799 /* Valkyrie Quadras */ 788 /* Valkyrie Quadras */
800 case MAC_MODEL_Q630: 789 case MAC_MODEL_Q630:
801 /* I'm not sure about this one */ 790 /* I'm not sure about this one */
802 case MAC_MODEL_P588: 791 case MAC_MODEL_P588:
803 strcat( macfb_fix.id, "Valkyrie built-in" ); 792 strcpy(macfb_fix.id, "Valkyrie");
804 macfb_setpalette = valkyrie_setpalette; 793 macfb_setpalette = valkyrie_setpalette;
805 macfb_defined.activate = FB_ACTIVATE_NOW; 794 macfb_defined.activate = FB_ACTIVATE_NOW;
806 valkyrie_cmap_regs = ioremap(DAC_BASE, 0x1000); 795 valkyrie_cmap_regs = ioremap(DAC_BASE, 0x1000);
@@ -823,7 +812,7 @@ static int __init macfb_init(void)
823 case MAC_MODEL_Q700: 812 case MAC_MODEL_Q700:
824 case MAC_MODEL_Q900: 813 case MAC_MODEL_Q900:
825 case MAC_MODEL_Q950: 814 case MAC_MODEL_Q950:
826 strcat( macfb_fix.id, "DAFB built-in" ); 815 strcpy(macfb_fix.id, "DAFB");
827 macfb_setpalette = dafb_setpalette; 816 macfb_setpalette = dafb_setpalette;
828 macfb_defined.activate = FB_ACTIVATE_NOW; 817 macfb_defined.activate = FB_ACTIVATE_NOW;
829 dafb_cmap_regs = ioremap(DAFB_BASE, 0x1000); 818 dafb_cmap_regs = ioremap(DAFB_BASE, 0x1000);
@@ -831,7 +820,7 @@ static int __init macfb_init(void)
831 820
832 /* LC II uses the V8 framebuffer */ 821 /* LC II uses the V8 framebuffer */
833 case MAC_MODEL_LCII: 822 case MAC_MODEL_LCII:
834 strcat( macfb_fix.id, "V8 built-in" ); 823 strcpy(macfb_fix.id, "V8");
835 macfb_setpalette = v8_brazil_setpalette; 824 macfb_setpalette = v8_brazil_setpalette;
836 macfb_defined.activate = FB_ACTIVATE_NOW; 825 macfb_defined.activate = FB_ACTIVATE_NOW;
837 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); 826 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
@@ -843,7 +832,7 @@ static int __init macfb_init(void)
843 case MAC_MODEL_IIVI: 832 case MAC_MODEL_IIVI:
844 case MAC_MODEL_IIVX: 833 case MAC_MODEL_IIVX:
845 case MAC_MODEL_P600: 834 case MAC_MODEL_P600:
846 strcat( macfb_fix.id, "Brazil built-in" ); 835 strcpy(macfb_fix.id, "Brazil");
847 macfb_setpalette = v8_brazil_setpalette; 836 macfb_setpalette = v8_brazil_setpalette;
848 macfb_defined.activate = FB_ACTIVATE_NOW; 837 macfb_defined.activate = FB_ACTIVATE_NOW;
849 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); 838 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
@@ -860,7 +849,7 @@ static int __init macfb_init(void)
860 case MAC_MODEL_P460: 849 case MAC_MODEL_P460:
861 macfb_setpalette = v8_brazil_setpalette; 850 macfb_setpalette = v8_brazil_setpalette;
862 macfb_defined.activate = FB_ACTIVATE_NOW; 851 macfb_defined.activate = FB_ACTIVATE_NOW;
863 strcat( macfb_fix.id, "Sonora built-in" ); 852 strcpy(macfb_fix.id, "Sonora");
864 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); 853 v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
865 break; 854 break;
866 855
@@ -871,7 +860,7 @@ static int __init macfb_init(void)
871 case MAC_MODEL_IISI: 860 case MAC_MODEL_IISI:
872 macfb_setpalette = rbv_setpalette; 861 macfb_setpalette = rbv_setpalette;
873 macfb_defined.activate = FB_ACTIVATE_NOW; 862 macfb_defined.activate = FB_ACTIVATE_NOW;
874 strcat( macfb_fix.id, "RBV built-in" ); 863 strcpy(macfb_fix.id, "RBV");
875 rbv_cmap_regs = ioremap(DAC_BASE, 0x1000); 864 rbv_cmap_regs = ioremap(DAC_BASE, 0x1000);
876 break; 865 break;
877 866
@@ -880,7 +869,7 @@ static int __init macfb_init(void)
880 case MAC_MODEL_C660: 869 case MAC_MODEL_C660:
881 macfb_setpalette = civic_setpalette; 870 macfb_setpalette = civic_setpalette;
882 macfb_defined.activate = FB_ACTIVATE_NOW; 871 macfb_defined.activate = FB_ACTIVATE_NOW;
883 strcat( macfb_fix.id, "Civic built-in" ); 872 strcpy(macfb_fix.id, "Civic");
884 civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000); 873 civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000);
885 break; 874 break;
886 875
@@ -901,7 +890,7 @@ static int __init macfb_init(void)
901 v8_brazil_cmap_regs = 890 v8_brazil_cmap_regs =
902 ioremap(DAC_BASE, 0x1000); 891 ioremap(DAC_BASE, 0x1000);
903 } 892 }
904 strcat( macfb_fix.id, "LC built-in" ); 893 strcpy(macfb_fix.id, "LC");
905 break; 894 break;
906 /* We think this may be like the LC II */ 895 /* We think this may be like the LC II */
907 case MAC_MODEL_CCL: 896 case MAC_MODEL_CCL:
@@ -911,18 +900,18 @@ static int __init macfb_init(void)
911 v8_brazil_cmap_regs = 900 v8_brazil_cmap_regs =
912 ioremap(DAC_BASE, 0x1000); 901 ioremap(DAC_BASE, 0x1000);
913 } 902 }
914 strcat( macfb_fix.id, "Color Classic built-in" ); 903 strcpy(macfb_fix.id, "Color Classic");
915 break; 904 break;
916 905
917 /* And we *do* mean "weirdos" */ 906 /* And we *do* mean "weirdos" */
918 case MAC_MODEL_TV: 907 case MAC_MODEL_TV:
919 strcat( macfb_fix.id, "Mac TV built-in" ); 908 strcpy(macfb_fix.id, "Mac TV");
920 break; 909 break;
921 910
922 /* These don't have colour, so no need to worry */ 911 /* These don't have colour, so no need to worry */
923 case MAC_MODEL_SE30: 912 case MAC_MODEL_SE30:
924 case MAC_MODEL_CLII: 913 case MAC_MODEL_CLII:
925 strcat( macfb_fix.id, "Monochrome built-in" ); 914 strcpy(macfb_fix.id, "Monochrome");
926 break; 915 break;
927 916
928 /* Powerbooks are particularly difficult. Many of 917 /* Powerbooks are particularly difficult. Many of
@@ -935,7 +924,7 @@ static int __init macfb_init(void)
935 case MAC_MODEL_PB140: 924 case MAC_MODEL_PB140:
936 case MAC_MODEL_PB145: 925 case MAC_MODEL_PB145:
937 case MAC_MODEL_PB170: 926 case MAC_MODEL_PB170:
938 strcat( macfb_fix.id, "DDC built-in" ); 927 strcpy(macfb_fix.id, "DDC");
939 break; 928 break;
940 929
941 /* Internal is GSC, External (if present) is ViSC */ 930 /* Internal is GSC, External (if present) is ViSC */
@@ -945,13 +934,13 @@ static int __init macfb_init(void)
945 case MAC_MODEL_PB180: 934 case MAC_MODEL_PB180:
946 case MAC_MODEL_PB210: 935 case MAC_MODEL_PB210:
947 case MAC_MODEL_PB230: 936 case MAC_MODEL_PB230:
948 strcat( macfb_fix.id, "GSC built-in" ); 937 strcpy(macfb_fix.id, "GSC");
949 break; 938 break;
950 939
951 /* Internal is TIM, External is ViSC */ 940 /* Internal is TIM, External is ViSC */
952 case MAC_MODEL_PB165C: 941 case MAC_MODEL_PB165C:
953 case MAC_MODEL_PB180C: 942 case MAC_MODEL_PB180C:
954 strcat( macfb_fix.id, "TIM built-in" ); 943 strcpy(macfb_fix.id, "TIM");
955 break; 944 break;
956 945
957 /* Internal is CSC, External is Keystone+Ariel. */ 946 /* Internal is CSC, External is Keystone+Ariel. */
@@ -963,12 +952,12 @@ static int __init macfb_init(void)
963 case MAC_MODEL_PB280C: 952 case MAC_MODEL_PB280C:
964 macfb_setpalette = csc_setpalette; 953 macfb_setpalette = csc_setpalette;
965 macfb_defined.activate = FB_ACTIVATE_NOW; 954 macfb_defined.activate = FB_ACTIVATE_NOW;
966 strcat( macfb_fix.id, "CSC built-in" ); 955 strcpy(macfb_fix.id, "CSC");
967 csc_cmap_regs = ioremap(CSC_BASE, 0x1000); 956 csc_cmap_regs = ioremap(CSC_BASE, 0x1000);
968 break; 957 break;
969 958
970 default: 959 default:
971 strcat( macfb_fix.id, "Unknown/Unsupported built-in" ); 960 strcpy(macfb_fix.id, "Unknown");
972 break; 961 break;
973 } 962 }
974 963
@@ -978,16 +967,23 @@ static int __init macfb_init(void)
978 fb_info.pseudo_palette = pseudo_palette; 967 fb_info.pseudo_palette = pseudo_palette;
979 fb_info.flags = FBINFO_DEFAULT; 968 fb_info.flags = FBINFO_DEFAULT;
980 969
981 fb_alloc_cmap(&fb_info.cmap, video_cmap_len, 0); 970 err = fb_alloc_cmap(&fb_info.cmap, video_cmap_len, 0);
971 if (err)
972 goto fail_unmap;
982 973
983 err = register_framebuffer(&fb_info); 974 err = register_framebuffer(&fb_info);
984 if (!err) 975 if (err)
985 printk("fb%d: %s frame buffer device\n", 976 goto fail_dealloc;
986 fb_info.node, fb_info.fix.id); 977
987 else { 978 printk("fb%d: %s frame buffer device\n",
988 iounmap(fb_info.screen_base); 979 fb_info.node, fb_info.fix.id);
989 iounmap_macfb(); 980 return 0;
990 } 981
982fail_dealloc:
983 fb_dealloc_cmap(&fb_info.cmap);
984fail_unmap:
985 iounmap(fb_info.screen_base);
986 iounmap_macfb();
991 return err; 987 return err;
992} 988}
993 989
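The macfb hunks above replace strcat() on a pre-seeded id with strcpy() of strings short enough for the 16-byte fb_fix_screeninfo.id field, and rework macfb_init() into a goto-based unwind. When a name is not known to fit, strlcpy() makes the truncation explicit; a one-function sketch (set_fb_id is a made-up helper, not part of the patch):

#include <linux/fb.h>
#include <linux/string.h>

static void set_fb_id(struct fb_fix_screeninfo *fix, const char *name)
{
	/* id is a fixed char[16]; strlcpy() always NUL-terminates */
	strlcpy(fix->id, name, sizeof(fix->id));
}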
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index 38718d95fbb9..fb64234a3825 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -927,9 +927,9 @@ static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
927 } 927 }
928 928
929 dev_dbg(dev, "fb phys 0x%llx 0x%lx\n", 929 dev_dbg(dev, "fb phys 0x%llx 0x%lx\n",
930 (u64)par->fb_base_phys, (ulong)par->mapped_vram); 930 (unsigned long long)par->fb_base_phys, (ulong)par->mapped_vram);
931 dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n", 931 dev_dbg(dev, "mmio phys 0x%llx 0x%lx\n",
932 (u64)par->mmio_base_phys, (ulong)par->mmio_len); 932 (unsigned long long)par->mmio_base_phys, (ulong)par->mmio_len);
933 933
934 if (mb862xx_pci_gdc_init(par)) 934 if (mb862xx_pci_gdc_init(par))
935 goto io_unmap; 935 goto io_unmap;
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 5a5e407dc45f..1a49519dafa4 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -392,7 +392,7 @@ static void set_fb_fix(struct fb_info *fbi)
392 int bpp; 392 int bpp;
393 393
394 rg = &plane->fbdev->mem_desc.region[plane->idx]; 394 rg = &plane->fbdev->mem_desc.region[plane->idx];
395 fbi->screen_base = (char __iomem *)rg->vaddr; 395 fbi->screen_base = rg->vaddr;
396 fix->smem_start = rg->paddr; 396 fix->smem_start = rg->paddr;
397 fix->smem_len = rg->size; 397 fix->smem_len = rg->size;
398 398
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index a3765e0be4a8..763c1ea5dce5 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -40,6 +40,7 @@
40#include <linux/bootmem.h> 40#include <linux/bootmem.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <asm/desc.h> 42#include <asm/desc.h>
43#include <asm/cacheflush.h>
43 44
44#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */ 45#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
45#define CRU_BIOS_SIGNATURE_VALUE 0x55524324 46#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
@@ -394,6 +395,8 @@ static void __devinit dmi_find_cru(const struct dmi_header *dm)
394 smbios_cru64_ptr->double_offset; 395 smbios_cru64_ptr->double_offset;
395 cru_rom_addr = ioremap(cru_physical_address, 396 cru_rom_addr = ioremap(cru_physical_address,
396 smbios_cru64_ptr->double_length); 397 smbios_cru64_ptr->double_length);
398 set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
399 smbios_cru64_ptr->double_length >> PAGE_SHIFT);
397 } 400 }
398 } 401 }
399} 402}
@@ -482,7 +485,7 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
482 "Management Log for details.\n"); 485 "Management Log for details.\n");
483 } 486 }
484 487
485 return NOTIFY_STOP; 488 return NOTIFY_OK;
486} 489}
487 490
488/* 491/*
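The hpwdt change above marks the ioremap()ed BIOS CRU code executable so it can still be called when NX protection is in force. A minimal sketch of that map-then-set_memory_x() sequence; addr and len are placeholders for the values the real driver reads from SMBIOS, and the page count is computed as len >> PAGE_SHIFT just as in the patch:

#include <linux/io.h>
#include <asm/cacheflush.h>

static void __iomem *map_exec_region(unsigned long addr, unsigned long len)
{
	void __iomem *p = ioremap(addr, len);

	if (p)
		set_memory_x((unsigned long)p & PAGE_MASK,
			     len >> PAGE_SHIFT);
	return p;
}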
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index ca344a85eb95..2474ebca88f6 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * intel TCO vendor specific watchdog driver support 2 * intel TCO vendor specific watchdog driver support
3 * 3 *
4 * (c) Copyright 2006 Wim Van Sebroeck <wim@iguana.be>. 4 * (c) Copyright 2006-2008 Wim Van Sebroeck <wim@iguana.be>.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -19,8 +19,7 @@
19 19
20/* Module and version information */ 20/* Module and version information */
21#define DRV_NAME "iTCO_vendor_support" 21#define DRV_NAME "iTCO_vendor_support"
22#define DRV_VERSION "1.01" 22#define DRV_VERSION "1.02"
23#define DRV_RELDATE "11-Nov-2006"
24#define PFX DRV_NAME ": " 23#define PFX DRV_NAME ": "
25 24
26/* Includes */ 25/* Includes */
@@ -78,24 +77,6 @@ MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=0 (n
78 * 20.6 seconds. 77 * 20.6 seconds.
79 */ 78 */
80 79
81static void supermicro_old_pre_start(unsigned long acpibase)
82{
83 unsigned long val32;
84
85 val32 = inl(SMI_EN);
86 val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
87 outl(val32, SMI_EN); /* Needed to activate watchdog */
88}
89
90static void supermicro_old_pre_stop(unsigned long acpibase)
91{
92 unsigned long val32;
93
94 val32 = inl(SMI_EN);
95 val32 &= 0x00002000; /* Turn on SMI clearing watchdog */
96 outl(val32, SMI_EN); /* Needed to deactivate watchdog */
97}
98
99static void supermicro_old_pre_keepalive(unsigned long acpibase) 80static void supermicro_old_pre_keepalive(unsigned long acpibase)
100{ 81{
101 /* Reload TCO Timer (done in iTCO_wdt_keepalive) + */ 82 /* Reload TCO Timer (done in iTCO_wdt_keepalive) + */
@@ -247,18 +228,14 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
247void iTCO_vendor_pre_start(unsigned long acpibase, 228void iTCO_vendor_pre_start(unsigned long acpibase,
248 unsigned int heartbeat) 229 unsigned int heartbeat)
249{ 230{
250 if (vendorsupport == SUPERMICRO_OLD_BOARD) 231 if (vendorsupport == SUPERMICRO_NEW_BOARD)
251 supermicro_old_pre_start(acpibase);
252 else if (vendorsupport == SUPERMICRO_NEW_BOARD)
253 supermicro_new_pre_start(heartbeat); 232 supermicro_new_pre_start(heartbeat);
254} 233}
255EXPORT_SYMBOL(iTCO_vendor_pre_start); 234EXPORT_SYMBOL(iTCO_vendor_pre_start);
256 235
257void iTCO_vendor_pre_stop(unsigned long acpibase) 236void iTCO_vendor_pre_stop(unsigned long acpibase)
258{ 237{
259 if (vendorsupport == SUPERMICRO_OLD_BOARD) 238 if (vendorsupport == SUPERMICRO_NEW_BOARD)
260 supermicro_old_pre_stop(acpibase);
261 else if (vendorsupport == SUPERMICRO_NEW_BOARD)
262 supermicro_new_pre_stop(); 239 supermicro_new_pre_stop();
263} 240}
264EXPORT_SYMBOL(iTCO_vendor_pre_stop); 241EXPORT_SYMBOL(iTCO_vendor_pre_stop);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index bfb93bc2ca9f..5b395a4ddfdf 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * intel TCO Watchdog Driver (Used in i82801 and i6300ESB chipsets) 2 * intel TCO Watchdog Driver (Used in i82801 and i6300ESB chipsets)
3 * 3 *
4 * (c) Copyright 2006-2007 Wim Van Sebroeck <wim@iguana.be>. 4 * (c) Copyright 2006-2008 Wim Van Sebroeck <wim@iguana.be>.
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
@@ -20,34 +20,41 @@
20 * 82801BAM (ICH2-M) : document number 290687-002, 298242-027, 20 * 82801BAM (ICH2-M) : document number 290687-002, 298242-027,
21 * 82801CA (ICH3-S) : document number 290733-003, 290739-013, 21 * 82801CA (ICH3-S) : document number 290733-003, 290739-013,
22 * 82801CAM (ICH3-M) : document number 290716-001, 290718-007, 22 * 82801CAM (ICH3-M) : document number 290716-001, 290718-007,
23 * 82801DB (ICH4) : document number 290744-001, 290745-020, 23 * 82801DB (ICH4) : document number 290744-001, 290745-025,
24 * 82801DBM (ICH4-M) : document number 252337-001, 252663-005, 24 * 82801DBM (ICH4-M) : document number 252337-001, 252663-008,
25 * 82801E (C-ICH) : document number 273599-001, 273645-002, 25 * 82801E (C-ICH) : document number 273599-001, 273645-002,
26 * 82801EB (ICH5) : document number 252516-001, 252517-003, 26 * 82801EB (ICH5) : document number 252516-001, 252517-028,
27 * 82801ER (ICH5R) : document number 252516-001, 252517-003, 27 * 82801ER (ICH5R) : document number 252516-001, 252517-028,
28 * 82801FB (ICH6) : document number 301473-002, 301474-007, 28 * 6300ESB (6300ESB) : document number 300641-004, 300884-013,
29 * 82801FR (ICH6R) : document number 301473-002, 301474-007, 29 * 82801FB (ICH6) : document number 301473-002, 301474-026,
30 * 82801FBM (ICH6-M) : document number 301473-002, 301474-007, 30 * 82801FR (ICH6R) : document number 301473-002, 301474-026,
31 * 82801FW (ICH6W) : document number 301473-001, 301474-007, 31 * 82801FBM (ICH6-M) : document number 301473-002, 301474-026,
32 * 82801FRW (ICH6RW) : document number 301473-001, 301474-007, 32 * 82801FW (ICH6W) : document number 301473-001, 301474-026,
33 * 82801GB (ICH7) : document number 307013-002, 307014-009, 33 * 82801FRW (ICH6RW) : document number 301473-001, 301474-026,
34 * 82801GR (ICH7R) : document number 307013-002, 307014-009, 34 * 631xESB (631xESB) : document number 313082-001, 313075-006,
35 * 82801GDH (ICH7DH) : document number 307013-002, 307014-009, 35 * 632xESB (632xESB) : document number 313082-001, 313075-006,
36 * 82801GBM (ICH7-M) : document number 307013-002, 307014-009, 36 * 82801GB (ICH7) : document number 307013-003, 307014-024,
37 * 82801GHM (ICH7-M DH) : document number 307013-002, 307014-009, 37 * 82801GR (ICH7R) : document number 307013-003, 307014-024,
38 * 82801HB (ICH8) : document number 313056-003, 313057-009, 38 * 82801GDH (ICH7DH) : document number 307013-003, 307014-024,
39 * 82801HR (ICH8R) : document number 313056-003, 313057-009, 39 * 82801GBM (ICH7-M) : document number 307013-003, 307014-024,
40 * 82801HBM (ICH8M) : document number 313056-003, 313057-009, 40 * 82801GHM (ICH7-M DH) : document number 307013-003, 307014-024,
41 * 82801HH (ICH8DH) : document number 313056-003, 313057-009, 41 * 82801GU (ICH7-U) : document number 307013-003, 307014-024,
42 * 82801HO (ICH8DO) : document number 313056-003, 313057-009, 42 * 82801HB (ICH8) : document number 313056-003, 313057-017,
43 * 82801HEM (ICH8M-E) : document number 313056-003, 313057-009, 43 * 82801HR (ICH8R) : document number 313056-003, 313057-017,
44 * 82801IB (ICH9) : document number 316972-001, 316973-006, 44 * 82801HBM (ICH8M) : document number 313056-003, 313057-017,
45 * 82801IR (ICH9R) : document number 316972-001, 316973-006, 45 * 82801HH (ICH8DH) : document number 313056-003, 313057-017,
46 * 82801IH (ICH9DH) : document number 316972-001, 316973-006, 46 * 82801HO (ICH8DO) : document number 313056-003, 313057-017,
47 * 82801IO (ICH9DO) : document number 316972-001, 316973-006, 47 * 82801HEM (ICH8M-E) : document number 313056-003, 313057-017,
48 * 6300ESB (6300ESB) : document number 300641-003, 300884-010, 48 * 82801IB (ICH9) : document number 316972-004, 316973-012,
49 * 631xESB (631xESB) : document number 313082-001, 313075-005, 49 * 82801IR (ICH9R) : document number 316972-004, 316973-012,
50 * 632xESB (632xESB) : document number 313082-001, 313075-005 50 * 82801IH (ICH9DH) : document number 316972-004, 316973-012,
51 * 82801IO (ICH9DO) : document number 316972-004, 316973-012,
52 * 82801IBM (ICH9M) : document number 316972-004, 316973-012,
53 * 82801IEM (ICH9M-E) : document number 316972-004, 316973-012,
54 * 82801JIB (ICH10) : document number 319973-002, 319974-002,
55 * 82801JIR (ICH10R) : document number 319973-002, 319974-002,
56 * 82801JD (ICH10D) : document number 319973-002, 319974-002,
57 * 82801JDO (ICH10DO) : document number 319973-002, 319974-002
51 */ 58 */
52 59
53/* 60/*
@@ -56,8 +63,7 @@
56 63
57/* Module and version information */ 64/* Module and version information */
58#define DRV_NAME "iTCO_wdt" 65#define DRV_NAME "iTCO_wdt"
59#define DRV_VERSION "1.03" 66#define DRV_VERSION "1.04"
60#define DRV_RELDATE "30-Apr-2008"
61#define PFX DRV_NAME ": " 67#define PFX DRV_NAME ": "
62 68
63/* Includes */ 69/* Includes */
@@ -96,19 +102,26 @@ enum iTCO_chipsets {
96 TCO_ICH6, /* ICH6 & ICH6R */ 102 TCO_ICH6, /* ICH6 & ICH6R */
97 TCO_ICH6M, /* ICH6-M */ 103 TCO_ICH6M, /* ICH6-M */
98 TCO_ICH6W, /* ICH6W & ICH6RW */ 104 TCO_ICH6W, /* ICH6W & ICH6RW */
105 TCO_631XESB, /* 631xESB/632xESB */
99 TCO_ICH7, /* ICH7 & ICH7R */ 106 TCO_ICH7, /* ICH7 & ICH7R */
100 TCO_ICH7M, /* ICH7-M */ 107 TCO_ICH7DH, /* ICH7DH */
108 TCO_ICH7M, /* ICH7-M & ICH7-U */
101 TCO_ICH7MDH, /* ICH7-M DH */ 109 TCO_ICH7MDH, /* ICH7-M DH */
102 TCO_ICH8, /* ICH8 & ICH8R */ 110 TCO_ICH8, /* ICH8 & ICH8R */
103 TCO_ICH8ME, /* ICH8M-E */
104 TCO_ICH8DH, /* ICH8DH */ 111 TCO_ICH8DH, /* ICH8DH */
105 TCO_ICH8DO, /* ICH8DO */ 112 TCO_ICH8DO, /* ICH8DO */
106 TCO_ICH8M, /* ICH8M */ 113 TCO_ICH8M, /* ICH8M */
114 TCO_ICH8ME, /* ICH8M-E */
107 TCO_ICH9, /* ICH9 */ 115 TCO_ICH9, /* ICH9 */
108 TCO_ICH9R, /* ICH9R */ 116 TCO_ICH9R, /* ICH9R */
109 TCO_ICH9DH, /* ICH9DH */ 117 TCO_ICH9DH, /* ICH9DH */
110 TCO_ICH9DO, /* ICH9DO */ 118 TCO_ICH9DO, /* ICH9DO */
111 TCO_631XESB, /* 631xESB/632xESB */ 119 TCO_ICH9M, /* ICH9M */
120 TCO_ICH9ME, /* ICH9M-E */
121 TCO_ICH10, /* ICH10 */
122 TCO_ICH10R, /* ICH10R */
123 TCO_ICH10D, /* ICH10D */
124 TCO_ICH10DO, /* ICH10DO */
112}; 125};
113 126
114static struct { 127static struct {
@@ -129,19 +142,26 @@ static struct {
129 {"ICH6 or ICH6R", 2}, 142 {"ICH6 or ICH6R", 2},
130 {"ICH6-M", 2}, 143 {"ICH6-M", 2},
131 {"ICH6W or ICH6RW", 2}, 144 {"ICH6W or ICH6RW", 2},
145 {"631xESB/632xESB", 2},
132 {"ICH7 or ICH7R", 2}, 146 {"ICH7 or ICH7R", 2},
133 {"ICH7-M", 2}, 147 {"ICH7DH", 2},
148 {"ICH7-M or ICH7-U", 2},
134 {"ICH7-M DH", 2}, 149 {"ICH7-M DH", 2},
135 {"ICH8 or ICH8R", 2}, 150 {"ICH8 or ICH8R", 2},
136 {"ICH8M-E", 2},
137 {"ICH8DH", 2}, 151 {"ICH8DH", 2},
138 {"ICH8DO", 2}, 152 {"ICH8DO", 2},
139 {"ICH8M", 2}, 153 {"ICH8M", 2},
154 {"ICH8M-E", 2},
140 {"ICH9", 2}, 155 {"ICH9", 2},
141 {"ICH9R", 2}, 156 {"ICH9R", 2},
142 {"ICH9DH", 2}, 157 {"ICH9DH", 2},
143 {"ICH9DO", 2}, 158 {"ICH9DO", 2},
144 {"631xESB/632xESB", 2}, 159 {"ICH9M", 2},
160 {"ICH9M-E", 2},
161 {"ICH10", 2},
162 {"ICH10R", 2},
163 {"ICH10D", 2},
164 {"ICH10DO", 2},
145 {NULL, 0} 165 {NULL, 0}
146}; 166};
147 167
@@ -175,18 +195,6 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
175 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_0, TCO_ICH6)}, 195 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_0, TCO_ICH6)},
176 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_1, TCO_ICH6M)}, 196 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_1, TCO_ICH6M)},
177 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_2, TCO_ICH6W)}, 197 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_2, TCO_ICH6W)},
178 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_0, TCO_ICH7)},
179 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M)},
180 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)},
181 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8)},
182 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_1, TCO_ICH8ME)},
183 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH)},
184 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO)},
185 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_4, TCO_ICH8M)},
186 { ITCO_PCI_DEVICE(0x2918, TCO_ICH9)},
187 { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R)},
188 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH)},
189 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_4, TCO_ICH9DO)},
190 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)}, 198 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)},
191 { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)}, 199 { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)},
192 { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)}, 200 { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)},
@@ -203,6 +211,25 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
203 { ITCO_PCI_DEVICE(0x267d, TCO_631XESB)}, 211 { ITCO_PCI_DEVICE(0x267d, TCO_631XESB)},
204 { ITCO_PCI_DEVICE(0x267e, TCO_631XESB)}, 212 { ITCO_PCI_DEVICE(0x267e, TCO_631XESB)},
205 { ITCO_PCI_DEVICE(0x267f, TCO_631XESB)}, 213 { ITCO_PCI_DEVICE(0x267f, TCO_631XESB)},
214 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_0, TCO_ICH7)},
215 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_30, TCO_ICH7DH)},
216 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M)},
217 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)},
218 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8)},
219 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH)},
220 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO)},
221 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_4, TCO_ICH8M)},
222 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_1, TCO_ICH8ME)},
223 { ITCO_PCI_DEVICE(0x2918, TCO_ICH9)},
224 { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R)},
225 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH)},
226 { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_4, TCO_ICH9DO)},
227 { ITCO_PCI_DEVICE(0x2919, TCO_ICH9M)},
228 { ITCO_PCI_DEVICE(0x2917, TCO_ICH9ME)},
229 { ITCO_PCI_DEVICE(0x3a18, TCO_ICH10)},
230 { ITCO_PCI_DEVICE(0x3a16, TCO_ICH10R)},
231 { ITCO_PCI_DEVICE(0x3a1a, TCO_ICH10D)},
232 { ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)},
206 { 0, }, /* End of list */ 233 { 0, }, /* End of list */
207}; 234};
208MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); 235MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
@@ -311,6 +338,7 @@ static int iTCO_wdt_unset_NO_REBOOT_bit(void)
311static int iTCO_wdt_start(void) 338static int iTCO_wdt_start(void)
312{ 339{
313 unsigned int val; 340 unsigned int val;
341 unsigned long val32;
314 342
315 spin_lock(&iTCO_wdt_private.io_lock); 343 spin_lock(&iTCO_wdt_private.io_lock);
316 344
@@ -323,6 +351,18 @@ static int iTCO_wdt_start(void)
323 return -EIO; 351 return -EIO;
324 } 352 }
325 353
354 /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
355 val32 = inl(SMI_EN);
356 val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
357 outl(val32, SMI_EN);
358
359 /* Force the timer to its reload value by writing to the TCO_RLD
360 register */
361 if (iTCO_wdt_private.iTCO_version == 2)
362 outw(0x01, TCO_RLD);
363 else if (iTCO_wdt_private.iTCO_version == 1)
364 outb(0x01, TCO_RLD);
365
326 /* Bit 11: TCO Timer Halt -> 0 = The TCO timer is enabled to count */ 366 /* Bit 11: TCO Timer Halt -> 0 = The TCO timer is enabled to count */
327 val = inw(TCO1_CNT); 367 val = inw(TCO1_CNT);
328 val &= 0xf7ff; 368 val &= 0xf7ff;
@@ -338,6 +378,7 @@ static int iTCO_wdt_start(void)
338static int iTCO_wdt_stop(void) 378static int iTCO_wdt_stop(void)
339{ 379{
340 unsigned int val; 380 unsigned int val;
381 unsigned long val32;
341 382
342 spin_lock(&iTCO_wdt_private.io_lock); 383 spin_lock(&iTCO_wdt_private.io_lock);
343 384
@@ -349,6 +390,11 @@ static int iTCO_wdt_stop(void)
349 outw(val, TCO1_CNT); 390 outw(val, TCO1_CNT);
350 val = inw(TCO1_CNT); 391 val = inw(TCO1_CNT);
351 392
393 /* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
394 val32 = inl(SMI_EN);
395 val32 |= 0x00002000;
396 outl(val32, SMI_EN);
397
352 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ 398 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
353 iTCO_wdt_set_NO_REBOOT_bit(); 399 iTCO_wdt_set_NO_REBOOT_bit();
354 400
@@ -459,7 +505,6 @@ static int iTCO_wdt_open(struct inode *inode, struct file *file)
459 /* 505 /*
460 * Reload and activate timer 506 * Reload and activate timer
461 */ 507 */
462 iTCO_wdt_keepalive();
463 iTCO_wdt_start(); 508 iTCO_wdt_start();
464 return nonseekable_open(inode, file); 509 return nonseekable_open(inode, file);
465} 510}
@@ -604,7 +649,6 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
604 int ret; 649 int ret;
605 u32 base_address; 650 u32 base_address;
606 unsigned long RCBA; 651 unsigned long RCBA;
607 unsigned long val32;
608 652
609 /* 653 /*
610 * Find the ACPI/PM base I/O address which is the base 654 * Find the ACPI/PM base I/O address which is the base
@@ -644,17 +688,13 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
644 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ 688 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
645 iTCO_wdt_set_NO_REBOOT_bit(); 689 iTCO_wdt_set_NO_REBOOT_bit();
646 690
647 /* Set the TCO_EN bit in SMI_EN register */ 691 /* The TCO logic uses the TCO_EN bit in the SMI_EN register */
648 if (!request_region(SMI_EN, 4, "iTCO_wdt")) { 692 if (!request_region(SMI_EN, 4, "iTCO_wdt")) {
649 printk(KERN_ERR PFX 693 printk(KERN_ERR PFX
650 "I/O address 0x%04lx already in use\n", SMI_EN); 694 "I/O address 0x%04lx already in use\n", SMI_EN);
651 ret = -EIO; 695 ret = -EIO;
652 goto out; 696 goto out;
653 } 697 }
654 val32 = inl(SMI_EN);
655 val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
656 outl(val32, SMI_EN);
657 release_region(SMI_EN, 4);
658 698
659 /* The TCO I/O registers reside in a 32-byte range pointed to 699 /* The TCO I/O registers reside in a 32-byte range pointed to
660 by the TCOBASE value */ 700 by the TCOBASE value */
@@ -662,7 +702,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
662 printk(KERN_ERR PFX "I/O address 0x%04lx already in use\n", 702 printk(KERN_ERR PFX "I/O address 0x%04lx already in use\n",
663 TCOBASE); 703 TCOBASE);
664 ret = -EIO; 704 ret = -EIO;
665 goto out; 705 goto unreg_smi_en;
666 } 706 }
667 707
668 printk(KERN_INFO PFX 708 printk(KERN_INFO PFX
@@ -672,8 +712,9 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
672 TCOBASE); 712 TCOBASE);
673 713
674 /* Clear out the (probably old) status */ 714 /* Clear out the (probably old) status */
675 outb(0, TCO1_STS); 715 outb(8, TCO1_STS); /* Clear the Time Out Status bit */
676 outb(3, TCO2_STS); 716 outb(2, TCO2_STS); /* Clear SECOND_TO_STS bit */
717 outb(4, TCO2_STS); /* Clear BOOT_STS bit */
677 718
678 /* Make sure the watchdog is not running */ 719 /* Make sure the watchdog is not running */
679 iTCO_wdt_stop(); 720 iTCO_wdt_stop();
@@ -701,6 +742,8 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
701 742
702unreg_region: 743unreg_region:
703 release_region(TCOBASE, 0x20); 744 release_region(TCOBASE, 0x20);
745unreg_smi_en:
746 release_region(SMI_EN, 4);
704out: 747out:
705 if (iTCO_wdt_private.iTCO_version == 2) 748 if (iTCO_wdt_private.iTCO_version == 2)
706 iounmap(iTCO_wdt_private.gcs); 749 iounmap(iTCO_wdt_private.gcs);
@@ -718,6 +761,7 @@ static void __devexit iTCO_wdt_cleanup(void)
718 /* Deregister */ 761 /* Deregister */
719 misc_deregister(&iTCO_wdt_miscdev); 762 misc_deregister(&iTCO_wdt_miscdev);
720 release_region(TCOBASE, 0x20); 763 release_region(TCOBASE, 0x20);
764 release_region(SMI_EN, 4);
721 if (iTCO_wdt_private.iTCO_version == 2) 765 if (iTCO_wdt_private.iTCO_version == 2)
722 iounmap(iTCO_wdt_private.gcs); 766 iounmap(iTCO_wdt_private.gcs);
723 pci_dev_put(iTCO_wdt_private.pdev); 767 pci_dev_put(iTCO_wdt_private.pdev);
@@ -782,8 +826,8 @@ static int __init iTCO_wdt_init_module(void)
782{ 826{
783 int err; 827 int err;
784 828
785 printk(KERN_INFO PFX "Intel TCO WatchDog Timer Driver v%s (%s)\n", 829 printk(KERN_INFO PFX "Intel TCO WatchDog Timer Driver v%s\n",
786 DRV_VERSION, DRV_RELDATE); 830 DRV_VERSION);
787 831
788 err = platform_driver_register(&iTCO_wdt_driver); 832 err = platform_driver_register(&iTCO_wdt_driver);
789 if (err) 833 if (err)
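The iTCO_wdt probe path above now keeps the SMI_EN I/O range claimed for the driver's lifetime, so a later failure to claim the TCO range has to release it via the new unreg_smi_en label. A generic sketch of that nested claim/unwind pattern; the port numbers and name are placeholders, not the driver's real SMI_EN/TCOBASE values:

#include <linux/errno.h>
#include <linux/ioport.h>

#define PORT_A	0x0430			/* placeholder I/O ports */
#define PORT_B	0x0460

static int claim_ports(void)
{
	if (!request_region(PORT_A, 4, "mydrv"))
		return -EIO;

	if (!request_region(PORT_B, 0x20, "mydrv")) {
		release_region(PORT_A, 4);	/* unwind the first claim */
		return -EIO;
	}
	return 0;
}

static void release_ports(void)
{
	release_region(PORT_B, 0x20);
	release_region(PORT_A, 4);
}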
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index b4b7b0a4c119..3acce623f209 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -98,6 +98,8 @@ static void mtx1_wdt_reset(void)
98 98
99static void mtx1_wdt_start(void) 99static void mtx1_wdt_start(void)
100{ 100{
101 unsigned long flags;
102
101 spin_lock_irqsave(&mtx1_wdt_device.lock, flags); 103 spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
102 if (!mtx1_wdt_device.queue) { 104 if (!mtx1_wdt_device.queue) {
103 mtx1_wdt_device.queue = 1; 105 mtx1_wdt_device.queue = 1;
@@ -110,6 +112,8 @@ static void mtx1_wdt_start(void)
110 112
111static int mtx1_wdt_stop(void) 113static int mtx1_wdt_stop(void)
112{ 114{
115 unsigned long flags;
116
113 spin_lock_irqsave(&mtx1_wdt_device.lock, flags); 117 spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
114 if (mtx1_wdt_device.queue) { 118 if (mtx1_wdt_device.queue) {
115 mtx1_wdt_device.queue = 0; 119 mtx1_wdt_device.queue = 0;